1 /* bnx2x_main.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2009 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h> /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
55 #include "bnx2x_init.h"
56 #include "bnx2x_init_ops.h"
57 #include "bnx2x_dump.h"
59 #define DRV_MODULE_VERSION "1.52.1"
60 #define DRV_MODULE_RELDATE "2009/08/12"
61 #define BNX2X_BC_VER 0x040200
63 #include <linux/firmware.h>
64 #include "bnx2x_fw_file_hdr.h"
66 #define FW_FILE_PREFIX_E1 "bnx2x-e1-"
67 #define FW_FILE_PREFIX_E1H "bnx2x-e1h-"
69 /* Time in jiffies before concluding the transmitter is hung */
70 #define TX_TIMEOUT (5*HZ)
72 static char version[] __devinitdata =
73 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
74 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
76 MODULE_AUTHOR("Eliezer Tamir");
77 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
78 MODULE_LICENSE("GPL");
79 MODULE_VERSION(DRV_MODULE_VERSION);
81 static int multi_mode = 1;
82 module_param(multi_mode, int, 0);
83 MODULE_PARM_DESC(multi_mode, " Multi queue mode "
84 "(0 Disable; 1 Enable (default))");
86 static int num_rx_queues;
87 module_param(num_rx_queues, int, 0);
88 MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
89 " (default is half number of CPUs)");
91 static int num_tx_queues;
92 module_param(num_tx_queues, int, 0);
93 MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
94 " (default is half number of CPUs)");
96 static int disable_tpa;
97 module_param(disable_tpa, int, 0);
98 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
101 module_param(int_mode, int, 0);
102 MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
104 static int dropless_fc;
105 module_param(dropless_fc, int, 0);
106 MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
109 module_param(poll, int, 0);
110 MODULE_PARM_DESC(poll, " Use polling (for debug)");
112 static int mrrs = -1;
113 module_param(mrrs, int, 0);
114 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
117 module_param(debug, int, 0);
118 MODULE_PARM_DESC(debug, " Default debug msglevel");
120 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
122 static struct workqueue_struct *bnx2x_wq;
124 enum bnx2x_board_type {
130 /* indexed by board_type, above */
133 } board_info[] __devinitdata = {
134 { "Broadcom NetXtreme II BCM57710 XGb" },
135 { "Broadcom NetXtreme II BCM57711 XGb" },
136 { "Broadcom NetXtreme II BCM57711E XGb" }
140 static const struct pci_device_id bnx2x_pci_tbl[] = {
141 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
142 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
143 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
147 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
149 /****************************************************************************
150 * General service functions
151 ****************************************************************************/
/* locking is done by mcp */
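/* Indirect register access: the target GRC address is programmed through the
* PCICFG_GRC_ADDRESS config-space window, data moves through PCICFG_GRC_DATA,
* and the window is restored to PCICFG_VENDOR_ID_OFFSET when done.
*/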
156 void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
158 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
159 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
160 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
161 PCICFG_VENDOR_ID_OFFSET);
164 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
168 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
169 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
170 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
171 PCICFG_VENDOR_ID_OFFSET);
176 static const u32 dmae_reg_go_c[] = {
177 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
178 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
179 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
180 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
183 /* copy command into DMAE command memory and set DMAE command go */
184 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
190 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
191 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
192 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
194 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
195 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
197 REG_WR(bp, dmae_reg_go_c[idx], 1);
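/* Write len32 dwords of host memory at dma_addr into GRC space at dst_addr
* using a DMAE command (SRC_PCI -> DST_GRC).  Falls back to indirect writes
* while DMAE is not ready, and polls the slowpath wb_comp word for completion
* under dmae_mutex; see bnx2x_wb_wr() below for a typical caller.
*/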
200 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
203 struct dmae_command dmae;
204 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
207 if (!bp->dmae_ready) {
208 u32 *data = bnx2x_sp(bp, wb_data[0]);
210 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
211 " using indirect\n", dst_addr, len32);
212 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
216 memset(&dmae, 0, sizeof(struct dmae_command));
218 dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
219 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
220 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
222 DMAE_CMD_ENDIANITY_B_DW_SWAP |
224 DMAE_CMD_ENDIANITY_DW_SWAP |
226 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
227 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
228 dmae.src_addr_lo = U64_LO(dma_addr);
229 dmae.src_addr_hi = U64_HI(dma_addr);
230 dmae.dst_addr_lo = dst_addr >> 2;
231 dmae.dst_addr_hi = 0;
233 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
234 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
235 dmae.comp_val = DMAE_COMP_VAL;
237 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
238 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
239 "dst_addr [%x:%08x (%08x)]\n"
240 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
241 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
242 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
243 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
244 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
245 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
246 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
248 mutex_lock(&bp->dmae_mutex);
252 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
256 while (*wb_comp != DMAE_COMP_VAL) {
257 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
260 BNX2X_ERR("DMAE timeout!\n");
264 /* adjust delay for emulation/FPGA */
265 if (CHIP_REV_IS_SLOW(bp))
271 mutex_unlock(&bp->dmae_mutex);
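/* Read len32 dwords from GRC space at src_addr into the slowpath wb_data
* buffer using a DMAE command (SRC_GRC -> DST_PCI).  Falls back to per-dword
* indirect reads while DMAE is not ready; completion is polled via wb_comp
* under dmae_mutex.
*/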
274 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
276 struct dmae_command dmae;
277 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
280 if (!bp->dmae_ready) {
281 u32 *data = bnx2x_sp(bp, wb_data[0]);
284 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
285 " using indirect\n", src_addr, len32);
286 for (i = 0; i < len32; i++)
287 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
291 memset(&dmae, 0, sizeof(struct dmae_command));
293 dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
294 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
295 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
297 DMAE_CMD_ENDIANITY_B_DW_SWAP |
299 DMAE_CMD_ENDIANITY_DW_SWAP |
301 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
302 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
303 dmae.src_addr_lo = src_addr >> 2;
304 dmae.src_addr_hi = 0;
305 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
306 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
308 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
309 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
310 dmae.comp_val = DMAE_COMP_VAL;
312 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
313 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
314 "dst_addr [%x:%08x (%08x)]\n"
315 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
316 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
317 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
318 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
320 mutex_lock(&bp->dmae_mutex);
322 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
325 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
329 while (*wb_comp != DMAE_COMP_VAL) {
332 BNX2X_ERR("DMAE timeout!\n");
336 /* adjust delay for emulation/FPGA */
337 if (CHIP_REV_IS_SLOW(bp))
342 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
343 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
344 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
346 mutex_unlock(&bp->dmae_mutex);
349 void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
354 while (len > DMAE_LEN32_WR_MAX) {
355 bnx2x_write_dmae(bp, phys_addr + offset,
356 addr + offset, DMAE_LEN32_WR_MAX);
357 offset += DMAE_LEN32_WR_MAX * 4;
358 len -= DMAE_LEN32_WR_MAX;
361 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
364 /* used only for slowpath so not inlined */
365 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
369 wb_write[0] = val_hi;
370 wb_write[1] = val_lo;
371 REG_WR_DMAE(bp, reg, wb_write, 2);
375 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
379 REG_RD_DMAE(bp, reg, wb_data, 2);
381 return HILO_U64(wb_data[0], wb_data[1]);
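/* Dump the assert lists of the X/T/C/U storm processors: read each
* STORM_ASSERT_LIST entry from internal memory and report every row whose
* opcode differs from COMMON_ASM_INVALID_ASSERT_OPCODE.
*/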
385 static int bnx2x_mc_assert(struct bnx2x *bp)
389 u32 row0, row1, row2, row3;
392 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
393 XSTORM_ASSERT_LIST_INDEX_OFFSET);
395 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
397 /* print the asserts */
398 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
400 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
401 XSTORM_ASSERT_LIST_OFFSET(i));
402 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
403 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
404 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
405 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
406 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
407 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
409 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
410 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
411 " 0x%08x 0x%08x 0x%08x\n",
412 i, row3, row2, row1, row0);
420 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
421 TSTORM_ASSERT_LIST_INDEX_OFFSET);
423 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
425 /* print the asserts */
426 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
428 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
429 TSTORM_ASSERT_LIST_OFFSET(i));
430 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
431 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
432 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
433 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
434 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
435 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
437 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
438 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
439 " 0x%08x 0x%08x 0x%08x\n",
440 i, row3, row2, row1, row0);
448 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
449 CSTORM_ASSERT_LIST_INDEX_OFFSET);
451 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
453 /* print the asserts */
454 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
456 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
457 CSTORM_ASSERT_LIST_OFFSET(i));
458 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
459 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
460 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
461 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
462 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
463 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
465 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
466 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
467 " 0x%08x 0x%08x 0x%08x\n",
468 i, row3, row2, row1, row0);
476 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
477 USTORM_ASSERT_LIST_INDEX_OFFSET);
479 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
481 /* print the asserts */
482 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
484 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
485 USTORM_ASSERT_LIST_OFFSET(i));
486 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
487 USTORM_ASSERT_LIST_OFFSET(i) + 4);
488 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
489 USTORM_ASSERT_LIST_OFFSET(i) + 8);
490 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
491 USTORM_ASSERT_LIST_OFFSET(i) + 12);
493 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
494 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
495 " 0x%08x 0x%08x 0x%08x\n",
496 i, row3, row2, row1, row0);
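/* Print the MCP firmware trace from the scratchpad: the 'mark' word at
* offset 0xf104 points into a circular buffer, which is dumped in two passes
* around that position.
*/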
506 static void bnx2x_fw_dump(struct bnx2x *bp)
512 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
513 mark = ((mark + 0x3) & ~0x3);
514 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);
516 printk(KERN_ERR PFX);
517 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
518 for (word = 0; word < 8; word++)
519 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
522 printk(KERN_CONT "%s", (char *)data);
524 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
525 for (word = 0; word < 8; word++)
526 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
529 printk(KERN_CONT "%s", (char *)data);
531 printk(KERN_ERR PFX "end of fw dump\n");
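/* Emit a crash dump to the log: default status block indices, per-queue Rx
* and Tx ring state, and the ring entries around the current consumer
* positions.  Statistics collection is disabled first.
*/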
534 static void bnx2x_panic_dump(struct bnx2x *bp)
539 bp->stats_state = STATS_STATE_DISABLED;
540 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
542 BNX2X_ERR("begin crash dump -----------------\n");
546 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
547 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
548 " spq_prod_idx(%u)\n",
549 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
550 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
553 for_each_rx_queue(bp, i) {
554 struct bnx2x_fastpath *fp = &bp->fp[i];
556 BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
557 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
558 " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
559 i, fp->rx_bd_prod, fp->rx_bd_cons,
560 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
561 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
562 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
563 " fp_u_idx(%x) *sb_u_idx(%x)\n",
564 fp->rx_sge_prod, fp->last_max_sge,
565 le16_to_cpu(fp->fp_u_idx),
566 fp->status_blk->u_status_block.status_block_index);
570 for_each_tx_queue(bp, i) {
571 struct bnx2x_fastpath *fp = &bp->fp[i];
573 BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
574 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
575 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
576 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
577 BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
578 " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
579 fp->status_blk->c_status_block.status_block_index,
580 fp->tx_db.data.prod);
585 for_each_rx_queue(bp, i) {
586 struct bnx2x_fastpath *fp = &bp->fp[i];
588 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
589 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
590 for (j = start; j != end; j = RX_BD(j + 1)) {
591 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
592 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
594 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
595 i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
598 start = RX_SGE(fp->rx_sge_prod);
599 end = RX_SGE(fp->last_max_sge);
600 for (j = start; j != end; j = RX_SGE(j + 1)) {
601 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
602 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
604 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
605 i, j, rx_sge[1], rx_sge[0], sw_page->page);
608 start = RCQ_BD(fp->rx_comp_cons - 10);
609 end = RCQ_BD(fp->rx_comp_cons + 503);
610 for (j = start; j != end; j = RCQ_BD(j + 1)) {
611 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
613 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
614 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
619 for_each_tx_queue(bp, i) {
620 struct bnx2x_fastpath *fp = &bp->fp[i];
622 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
623 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
624 for (j = start; j != end; j = TX_BD(j + 1)) {
625 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
627 BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
628 i, j, sw_bd->skb, sw_bd->first_bd);
631 start = TX_BD(fp->tx_bd_cons - 10);
632 end = TX_BD(fp->tx_bd_cons + 254);
633 for (j = start; j != end; j = TX_BD(j + 1)) {
634 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
636 BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
637 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
643 BNX2X_ERR("end crash dump -----------------\n");
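/* Program the HC configuration for the active interrupt mode (MSI-X, MSI or
* INTx) and, on E1H, set up the leading/trailing edge attention masks,
* including the NIG and GPIO3 attention bits.
*/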
646 static void bnx2x_int_enable(struct bnx2x *bp)
648 int port = BP_PORT(bp);
649 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
650 u32 val = REG_RD(bp, addr);
651 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
652 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
655 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
656 HC_CONFIG_0_REG_INT_LINE_EN_0);
657 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
658 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
660 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
661 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
662 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
663 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
665 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
666 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
667 HC_CONFIG_0_REG_INT_LINE_EN_0 |
668 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
670 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
673 REG_WR(bp, addr, val);
675 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
678 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
679 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
681 REG_WR(bp, addr, val);
/*
* Ensure that HC_CONFIG is written before leading/trailing edge config
*/
688 if (CHIP_IS_E1H(bp)) {
689 /* init leading/trailing edge */
691 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
693 /* enable nig and gpio3 attention */
698 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
699 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
702 /* Make sure that interrupts are indeed enabled from here on */
706 static void bnx2x_int_disable(struct bnx2x *bp)
708 int port = BP_PORT(bp);
709 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
710 u32 val = REG_RD(bp, addr);
712 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
713 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
714 HC_CONFIG_0_REG_INT_LINE_EN_0 |
715 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
717 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
720 /* flush all outstanding writes */
723 REG_WR(bp, addr, val);
724 if (REG_RD(bp, addr) != val)
725 BNX2X_ERR("BUG! proper val not read from IGU!\n");
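/* Disable interrupts and wait for all in-flight handlers to finish: raise
* intr_sem so handlers bail out early, mask the HC when disable_hw is set,
* synchronize every IRQ vector in use and make sure the slowpath task is not
* running.
*/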
728 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
730 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
733 /* disable interrupt handling */
734 atomic_inc(&bp->intr_sem);
735 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
738 /* prevent the HW from sending interrupts */
739 bnx2x_int_disable(bp);
741 /* make sure all ISRs are done */
743 synchronize_irq(bp->msix_table[0].vector);
748 for_each_queue(bp, i)
749 synchronize_irq(bp->msix_table[i + offset].vector);
751 synchronize_irq(bp->pdev->irq);
753 /* make sure sp_task is not running */
754 cancel_delayed_work(&bp->sp_task);
755 flush_workqueue(bnx2x_wq);
/*
* General service functions
*/
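/* Acknowledge a status block to the IGU: build an igu_ack_register with the
* sb/storm id, index and interrupt mode, and write it to the per-port HC
* command register.
*/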
764 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
765 u8 storm, u16 index, u8 op, u8 update)
767 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
768 COMMAND_REG_INT_ACK);
769 struct igu_ack_register igu_ack;
771 igu_ack.status_block_index = index;
772 igu_ack.sb_id_and_flags =
773 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
774 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
775 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
776 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
778 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
779 (*(u32 *)&igu_ack), hc_addr);
780 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
782 /* Make sure that ACK is written */
787 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
789 struct host_status_block *fpsb = fp->status_blk;
792 barrier(); /* status block is written to by the chip */
793 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
794 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
797 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
798 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
804 static u16 bnx2x_ack_int(struct bnx2x *bp)
806 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
807 COMMAND_REG_SIMD_MASK);
808 u32 result = REG_RD(bp, hc_addr);
810 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
/*
* fast path service functions
*/
821 static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
823 /* Tell compiler that consumer and producer can change */
825 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
828 /* free skb in the packet ring at pos idx
* return idx of last bd freed
*/
831 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
834 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
835 struct eth_tx_start_bd *tx_start_bd;
836 struct eth_tx_bd *tx_data_bd;
837 struct sk_buff *skb = tx_buf->skb;
838 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
841 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
845 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
846 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
847 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
848 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
850 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
851 #ifdef BNX2X_STOP_ON_ERROR
852 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
853 BNX2X_ERR("BAD nbd!\n");
857 new_cons = nbd + tx_buf->first_bd;
859 /* Get the next bd */
860 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
862 /* Skip a parse bd... */
864 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
866 /* ...and the TSO split header bd since they have no mapping */
867 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
869 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
875 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
876 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
877 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
878 BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
880 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
885 dev_kfree_skb_any(skb);
886 tx_buf->first_bd = 0;
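/* Number of Tx BDs still available to start_xmit().  The NUM_TX_RINGS
* "next page" BDs are counted as used so they are never handed out; e.g.
* with prod == cons an empty ring reports tx_ring_size - NUM_TX_RINGS
* available BDs.
*/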
892 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
898 barrier(); /* Tell compiler that prod and cons can change */
899 prod = fp->tx_bd_prod;
900 cons = fp->tx_bd_cons;
902 /* NUM_TX_RINGS = number of "next-page" entries
903 It will be used as a threshold */
904 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
906 #ifdef BNX2X_STOP_ON_ERROR
908 WARN_ON(used > fp->bp->tx_ring_size);
909 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
912 return (s16)(fp->bp->tx_ring_size) - used;
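/* Tx completion: walk the packet ring from the software consumer up to the
* hw consumer reported in the status block, free the completed skbs, and
* wake the netdev queue again once enough BDs are available.
*/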
915 static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
917 struct bnx2x *bp = fp->bp;
918 struct netdev_queue *txq;
919 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
922 #ifdef BNX2X_STOP_ON_ERROR
923 if (unlikely(bp->panic))
927 txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
928 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
929 sw_cons = fp->tx_pkt_cons;
931 while (sw_cons != hw_cons) {
934 pkt_cons = TX_BD(sw_cons);
936 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
938 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
939 hw_cons, sw_cons, pkt_cons);
941 /* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
943 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
946 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
951 fp->tx_pkt_cons = sw_cons;
952 fp->tx_bd_cons = bd_cons;
954 /* TBD need a thresh? */
955 if (unlikely(netif_tx_queue_stopped(txq))) {
957 /* Need to make the tx_bd_cons update visible to start_xmit()
958 * before checking for netif_tx_queue_stopped(). Without the
959 * memory barrier, there is a small possibility that
* start_xmit() will miss it and cause the queue to be stopped
* forever.
*/
965 if ((netif_tx_queue_stopped(txq)) &&
966 (bp->state == BNX2X_STATE_OPEN) &&
967 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
968 netif_tx_wake_queue(txq);
973 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
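/* Handle a slowpath (ramrod) completion CQE: advance either the per-queue
* fastpath state machine or the global bp->state according to the completed
* command, and let bnx2x_wait_ramrod() observe the change.
*/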
976 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
977 union eth_rx_cqe *rr_cqe)
979 struct bnx2x *bp = fp->bp;
980 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
981 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
984 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
985 fp->index, cid, command, bp->state,
986 rr_cqe->ramrod_cqe.ramrod_type);
991 switch (command | fp->state) {
992 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
993 BNX2X_FP_STATE_OPENING):
994 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
996 fp->state = BNX2X_FP_STATE_OPEN;
999 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
1000 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
1002 fp->state = BNX2X_FP_STATE_HALTED;
1006 BNX2X_ERR("unexpected MC reply (%d) "
1007 "fp->state is %x\n", command, fp->state);
1010 mb(); /* force bnx2x_wait_ramrod() to see the change */
1014 switch (command | bp->state) {
1015 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
1016 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
1017 bp->state = BNX2X_STATE_OPEN;
1020 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
1021 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
1022 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
1023 fp->state = BNX2X_FP_STATE_HALTED;
1026 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
1027 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
1028 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
1032 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
1033 DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
1034 bnx2x_cnic_cfc_comp(bp, cid);
1038 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
1039 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
1040 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
1041 bp->set_mac_pending--;
1045 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
1046 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED):
1047 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
1048 bp->set_mac_pending--;
1053 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
1054 command, bp->state);
1057 mb(); /* force bnx2x_wait_ramrod() to see the change */
1060 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1061 struct bnx2x_fastpath *fp, u16 index)
1063 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1064 struct page *page = sw_buf->page;
1065 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1067 /* Skip "next page" elements */
1071 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
1072 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1073 __free_pages(page, PAGES_PER_SGE_SHIFT);
1075 sw_buf->page = NULL;
1080 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1081 struct bnx2x_fastpath *fp, int last)
1085 for (i = 0; i < last; i++)
1086 bnx2x_free_rx_sge(bp, fp, i);
1089 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1090 struct bnx2x_fastpath *fp, u16 index)
1092 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1093 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1094 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1097 if (unlikely(page == NULL))
1100 mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
1101 PCI_DMA_FROMDEVICE);
1102 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1103 __free_pages(page, PAGES_PER_SGE_SHIFT);
1107 sw_buf->page = page;
1108 pci_unmap_addr_set(sw_buf, mapping, mapping);
1110 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1111 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1116 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1117 struct bnx2x_fastpath *fp, u16 index)
1119 struct sk_buff *skb;
1120 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1121 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1124 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1125 if (unlikely(skb == NULL))
1128 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
1129 PCI_DMA_FROMDEVICE);
1130 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1136 pci_unmap_addr_set(rx_buf, mapping, mapping);
1138 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1139 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1144 /* note that we are not allocating a new skb,
1145 * we are just moving one from cons to prod
1146 * we are not creating a new mapping,
* so there is no need to check for dma_mapping_error().
*/
1149 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1150 struct sk_buff *skb, u16 cons, u16 prod)
1152 struct bnx2x *bp = fp->bp;
1153 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1154 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1155 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1156 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1158 pci_dma_sync_single_for_device(bp->pdev,
1159 pci_unmap_addr(cons_rx_buf, mapping),
1160 RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1162 prod_rx_buf->skb = cons_rx_buf->skb;
1163 pci_unmap_addr_set(prod_rx_buf, mapping,
1164 pci_unmap_addr(cons_rx_buf, mapping));
1165 *prod_bd = *cons_bd;
1168 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1171 u16 last_max = fp->last_max_sge;
1173 if (SUB_S16(idx, last_max) > 0)
1174 fp->last_max_sge = idx;
1177 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1181 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1182 int idx = RX_SGE_CNT * i - 1;
1184 for (j = 0; j < 2; j++) {
1185 SGE_MASK_CLEAR_BIT(fp, idx);
1191 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1192 struct eth_fast_path_rx_cqe *fp_cqe)
1194 struct bnx2x *bp = fp->bp;
1195 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1196 le16_to_cpu(fp_cqe->len_on_bd)) >>
1198 u16 last_max, last_elem, first_elem;
1205 /* First mark all used pages */
1206 for (i = 0; i < sge_len; i++)
1207 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1209 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1210 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1212 /* Here we assume that the last SGE index is the biggest */
1213 prefetch((void *)(fp->sge_mask));
1214 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1216 last_max = RX_SGE(fp->last_max_sge);
1217 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1218 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1220 /* If ring is not full */
1221 if (last_elem + 1 != first_elem)
1224 /* Now update the prod */
1225 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1226 if (likely(fp->sge_mask[i]))
1229 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1230 delta += RX_SGE_MASK_ELEM_SZ;
1234 fp->rx_sge_prod += delta;
1235 /* clear page-end entries */
1236 bnx2x_clear_sge_mask_next_elems(fp);
1239 DP(NETIF_MSG_RX_STATUS,
1240 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1241 fp->last_max_sge, fp->rx_sge_prod);
1244 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1246 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1247 memset(fp->sge_mask, 0xff,
1248 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1250 /* Clear the two last indices in the page to 1:
1251 these are the indices that correspond to the "next" element,
1252 hence will never be indicated and should be removed from
1253 the calculations. */
1254 bnx2x_clear_sge_mask_next_elems(fp);
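/* TPA aggregation start: move the spare skb kept in tpa_pool[queue] onto the
* producer BD, park the skb that just arrived in the pool (without unmapping
* it), and mark the bin BNX2X_TPA_START.
*/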
1257 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1258 struct sk_buff *skb, u16 cons, u16 prod)
1260 struct bnx2x *bp = fp->bp;
1261 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1262 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1263 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1266 /* move empty skb from pool to prod and map it */
1267 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1268 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1269 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1270 pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1272 /* move partial skb from cons to pool (don't unmap yet) */
1273 fp->tpa_pool[queue] = *cons_rx_buf;
1275 /* mark bin state as start - print error if current state != stop */
1276 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1277 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1279 fp->tpa_state[queue] = BNX2X_TPA_START;
1281 /* point prod_bd to new skb */
1282 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1283 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1285 #ifdef BNX2X_STOP_ON_ERROR
1286 fp->tpa_queue_used |= (1 << queue);
1287 #ifdef __powerpc64__
1288 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1290 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1292 fp->tpa_queue_used);
1296 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1297 struct sk_buff *skb,
1298 struct eth_fast_path_rx_cqe *fp_cqe,
1301 struct sw_rx_page *rx_pg, old_rx_pg;
1302 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1303 u32 i, frag_len, frag_size, pages;
1307 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1308 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1310 /* This is needed in order to enable forwarding support */
1312 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1313 max(frag_size, (u32)len_on_bd));
1315 #ifdef BNX2X_STOP_ON_ERROR
1317 min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
1318 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1320 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1321 fp_cqe->pkt_len, len_on_bd);
1327 /* Run through the SGL and compose the fragmented skb */
1328 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1329 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1331 /* FW gives the indices of the SGE as if the ring is an array
1332 (meaning that "next" element will consume 2 indices) */
1333 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1334 rx_pg = &fp->rx_page_ring[sge_idx];
1337 /* If we fail to allocate a substitute page, we simply stop
1338 where we are and drop the whole packet */
1339 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1340 if (unlikely(err)) {
1341 fp->eth_q_stats.rx_skb_alloc_failed++;
/* Unmap the page as we are going to pass it to the stack */
1346 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1347 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1349 /* Add one frag and update the appropriate fields in the skb */
1350 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1352 skb->data_len += frag_len;
1353 skb->truesize += frag_len;
1354 skb->len += frag_len;
1356 frag_size -= frag_len;
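/* TPA aggregation stop: unmap the aggregated skb, fix up the IP checksum,
* attach the SGL pages via bnx2x_fill_frag_skb() and hand the packet to the
* stack; if a replacement skb cannot be allocated the packet is dropped and
* the old buffer stays in the bin.
*/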
1362 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1363 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1366 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1367 struct sk_buff *skb = rx_buf->skb;
1369 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
/* Unmap skb in the pool anyway, as we are going to change
pool entry status to BNX2X_TPA_STOP even if new skb allocation
fails. */
1374 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1375 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1377 if (likely(new_skb)) {
1378 /* fix ip xsum and give it to the stack */
1379 /* (no need to map the new skb) */
1382 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1383 PARSING_FLAGS_VLAN);
1384 int is_not_hwaccel_vlan_cqe =
1385 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1389 prefetch(((char *)(skb)) + 128);
1391 #ifdef BNX2X_STOP_ON_ERROR
1392 if (pad + len > bp->rx_buf_size) {
1393 BNX2X_ERR("skb_put is about to fail... "
1394 "pad %d len %d rx_buf_size %d\n",
1395 pad, len, bp->rx_buf_size);
1401 skb_reserve(skb, pad);
1404 skb->protocol = eth_type_trans(skb, bp->dev);
1405 skb->ip_summed = CHECKSUM_UNNECESSARY;
1410 iph = (struct iphdr *)skb->data;
/* If there is no Rx VLAN offloading -
take the VLAN tag into account */
1414 if (unlikely(is_not_hwaccel_vlan_cqe))
1415 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1418 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1421 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1422 &cqe->fast_path_cqe, cqe_idx)) {
1424 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1425 (!is_not_hwaccel_vlan_cqe))
1426 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1427 le16_to_cpu(cqe->fast_path_cqe.
1431 netif_receive_skb(skb);
1433 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1434 " - dropping packet!\n");
1439 /* put new skb in bin */
1440 fp->tpa_pool[queue].skb = new_skb;
1443 /* else drop the packet and keep the buffer in the bin */
1444 DP(NETIF_MSG_RX_STATUS,
1445 "Failed to allocate new skb - dropping packet!\n");
1446 fp->eth_q_stats.rx_skb_alloc_failed++;
1449 fp->tpa_state[queue] = BNX2X_TPA_STOP;
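/* Publish the new Rx BD, CQE and SGE producers to the firmware: after a
* write barrier, copy the ustorm_eth_rx_producers structure into USTORM
* internal memory for this port/client.
*/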
1452 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1453 struct bnx2x_fastpath *fp,
1454 u16 bd_prod, u16 rx_comp_prod,
1457 struct ustorm_eth_rx_producers rx_prods = {0};
1460 /* Update producers */
1461 rx_prods.bd_prod = bd_prod;
1462 rx_prods.cqe_prod = rx_comp_prod;
1463 rx_prods.sge_prod = rx_sge_prod;
/*
* Make sure that the BD and SGE data is updated before updating the
* producers since FW might read the BD/SGE right after the producer
* is updated.
* This is only applicable for weak-ordered memory model archs such
* as IA-64. The following barrier is also mandatory since the FW
* assumes BDs must have buffers.
*/
1475 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1476 REG_WR(bp, BAR_USTRORM_INTMEM +
1477 USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1478 ((u32 *)&rx_prods)[i]);
1480 mmiowb(); /* keep prod updates ordered */
1482 DP(NETIF_MSG_RX_STATUS,
1483 "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
1484 fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
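/* Budget-limited Rx completion processing: consume CQEs up to 'budget',
* dispatching slowpath CQEs to bnx2x_sp_event(), handling TPA start/stop
* aggregation and passing regular packets to the stack, then update the ring
* producers for the firmware.
*/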
1487 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1489 struct bnx2x *bp = fp->bp;
1490 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1491 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1494 #ifdef BNX2X_STOP_ON_ERROR
1495 if (unlikely(bp->panic))
1499 /* CQ "next element" is of the size of the regular element,
1500 that's why it's ok here */
1501 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1502 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1505 bd_cons = fp->rx_bd_cons;
1506 bd_prod = fp->rx_bd_prod;
1507 bd_prod_fw = bd_prod;
1508 sw_comp_cons = fp->rx_comp_cons;
1509 sw_comp_prod = fp->rx_comp_prod;
1511 /* Memory barrier necessary as speculative reads of the rx
* buffer can be ahead of the index in the status block
*/
1516 DP(NETIF_MSG_RX_STATUS,
1517 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
1518 fp->index, hw_comp_cons, sw_comp_cons);
1520 while (sw_comp_cons != hw_comp_cons) {
1521 struct sw_rx_bd *rx_buf = NULL;
1522 struct sk_buff *skb;
1523 union eth_rx_cqe *cqe;
1527 comp_ring_cons = RCQ_BD(sw_comp_cons);
1528 bd_prod = RX_BD(bd_prod);
1529 bd_cons = RX_BD(bd_cons);
/* Prefetch the page containing the BD descriptor
at producer's index. It will be needed when a new skb is
allocated */
1534 prefetch((void *)(PAGE_ALIGN((unsigned long)
1535 (&fp->rx_desc_ring[bd_prod])) -
1538 cqe = &fp->rx_comp_ring[comp_ring_cons];
1539 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1541 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
1542 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1543 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1544 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1545 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1546 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1548 /* is this a slowpath msg? */
1549 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1550 bnx2x_sp_event(fp, cqe);
1553 /* this is an rx packet */
1555 rx_buf = &fp->rx_buf_ring[bd_cons];
1557 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1558 pad = cqe->fast_path_cqe.placement_offset;
1560 /* If CQE is marked both TPA_START and TPA_END
1561 it is a non-TPA CQE */
1562 if ((!fp->disable_tpa) &&
1563 (TPA_TYPE(cqe_fp_flags) !=
1564 (TPA_TYPE_START | TPA_TYPE_END))) {
1565 u16 queue = cqe->fast_path_cqe.queue_index;
1567 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1568 DP(NETIF_MSG_RX_STATUS,
1569 "calling tpa_start on queue %d\n",
1572 bnx2x_tpa_start(fp, queue, skb,
1577 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1578 DP(NETIF_MSG_RX_STATUS,
1579 "calling tpa_stop on queue %d\n",
1582 if (!BNX2X_RX_SUM_FIX(cqe))
BNX2X_ERR("STOP on non-TCP "
1586 /* This is a size of the linear data
1588 len = le16_to_cpu(cqe->fast_path_cqe.
1590 bnx2x_tpa_stop(bp, fp, queue, pad,
1591 len, cqe, comp_ring_cons);
1592 #ifdef BNX2X_STOP_ON_ERROR
1597 bnx2x_update_sge_prod(fp,
1598 &cqe->fast_path_cqe);
1603 pci_dma_sync_single_for_device(bp->pdev,
1604 pci_unmap_addr(rx_buf, mapping),
1605 pad + RX_COPY_THRESH,
1606 PCI_DMA_FROMDEVICE);
1608 prefetch(((char *)(skb)) + 128);
1610 /* is this an error packet? */
1611 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1612 DP(NETIF_MSG_RX_ERR,
1613 "ERROR flags %x rx packet %u\n",
1614 cqe_fp_flags, sw_comp_cons);
1615 fp->eth_q_stats.rx_err_discard_pkt++;
1619 /* Since we don't have a jumbo ring
* copy small packets if mtu > 1500
*/
1622 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1623 (len <= RX_COPY_THRESH)) {
1624 struct sk_buff *new_skb;
1626 new_skb = netdev_alloc_skb(bp->dev,
1628 if (new_skb == NULL) {
1629 DP(NETIF_MSG_RX_ERR,
1630 "ERROR packet dropped "
1631 "because of alloc failure\n");
1632 fp->eth_q_stats.rx_skb_alloc_failed++;
1637 skb_copy_from_linear_data_offset(skb, pad,
1638 new_skb->data + pad, len);
1639 skb_reserve(new_skb, pad);
1640 skb_put(new_skb, len);
1642 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1647 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
1648 pci_unmap_single(bp->pdev,
1649 pci_unmap_addr(rx_buf, mapping),
1651 PCI_DMA_FROMDEVICE);
1652 skb_reserve(skb, pad);
1656 DP(NETIF_MSG_RX_ERR,
1657 "ERROR packet dropped because "
1658 "of alloc failure\n");
1659 fp->eth_q_stats.rx_skb_alloc_failed++;
1661 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1665 skb->protocol = eth_type_trans(skb, bp->dev);
1667 skb->ip_summed = CHECKSUM_NONE;
1669 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1670 skb->ip_summed = CHECKSUM_UNNECESSARY;
1672 fp->eth_q_stats.hw_csum_err++;
1676 skb_record_rx_queue(skb, fp->index);
1679 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1680 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1681 PARSING_FLAGS_VLAN))
1682 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1683 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1686 netif_receive_skb(skb);
1692 bd_cons = NEXT_RX_IDX(bd_cons);
1693 bd_prod = NEXT_RX_IDX(bd_prod);
1694 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1697 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1698 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1700 if (rx_pkt == budget)
1704 fp->rx_bd_cons = bd_cons;
1705 fp->rx_bd_prod = bd_prod_fw;
1706 fp->rx_comp_cons = sw_comp_cons;
1707 fp->rx_comp_prod = sw_comp_prod;
1709 /* Update producers */
1710 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1713 fp->rx_pkt += rx_pkt;
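/* MSI-X fastpath interrupt handler: ack the status block with interrupts
* disabled, then either schedule NAPI (Rx queues) or handle the Tx queue
* inline and re-enable the interrupt.
*/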
1719 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1721 struct bnx2x_fastpath *fp = fp_cookie;
1722 struct bnx2x *bp = fp->bp;
1724 /* Return here if interrupt is disabled */
1725 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1726 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1730 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1731 fp->index, fp->sb_id);
1732 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1734 #ifdef BNX2X_STOP_ON_ERROR
1735 if (unlikely(bp->panic))
1738 /* Handle Rx or Tx according to MSI-X vector */
1739 if (fp->is_rx_queue) {
1740 prefetch(fp->rx_cons_sb);
1741 prefetch(&fp->status_blk->u_status_block.status_block_index);
1743 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1746 prefetch(fp->tx_cons_sb);
1747 prefetch(&fp->status_blk->c_status_block.status_block_index);
1749 bnx2x_update_fpsb_idx(fp);
1753 /* Re-enable interrupts */
1754 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1755 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
1756 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1757 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
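/* INTx/MSI interrupt handler: read the interrupt status word via
* bnx2x_ack_int(), dispatch each set bit to the matching fastpath (Rx via
* NAPI, Tx inline), pass CNIC bits to the registered cnic_ops handler, and
* queue the slowpath task for the default status block bit.
*/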
1763 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1765 struct bnx2x *bp = netdev_priv(dev_instance);
1766 u16 status = bnx2x_ack_int(bp);
1770 /* Return here if interrupt is shared and it's not for us */
1771 if (unlikely(status == 0)) {
1772 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1775 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
1777 /* Return here if interrupt is disabled */
1778 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1779 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1783 #ifdef BNX2X_STOP_ON_ERROR
1784 if (unlikely(bp->panic))
1788 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1789 struct bnx2x_fastpath *fp = &bp->fp[i];
1791 mask = 0x2 << fp->sb_id;
1792 if (status & mask) {
1793 /* Handle Rx or Tx according to SB id */
1794 if (fp->is_rx_queue) {
1795 prefetch(fp->rx_cons_sb);
1796 prefetch(&fp->status_blk->u_status_block.
1797 status_block_index);
1799 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1802 prefetch(fp->tx_cons_sb);
1803 prefetch(&fp->status_blk->c_status_block.
1804 status_block_index);
1806 bnx2x_update_fpsb_idx(fp);
1810 /* Re-enable interrupts */
1811 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1812 le16_to_cpu(fp->fp_u_idx),
1814 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1815 le16_to_cpu(fp->fp_c_idx),
1823 mask = 0x2 << CNIC_SB_ID(bp);
1824 if (status & (mask | 0x1)) {
1825 struct cnic_ops *c_ops = NULL;
1828 c_ops = rcu_dereference(bp->cnic_ops);
1830 c_ops->cnic_handler(bp->cnic_data, NULL);
1837 if (unlikely(status & 0x1)) {
1838 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1846 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1852 /* end of fast path */
1854 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
/*
* General service functions
*/
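/* Acquire a hardware resource lock: validate the resource number, select the
* per-function DRIVER_CONTROL register, then try to set the resource bit
* through the "set" (+4) address, polling every 5ms for up to 5 seconds.
*/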
1862 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1865 u32 resource_bit = (1 << resource);
1866 int func = BP_FUNC(bp);
1867 u32 hw_lock_control_reg;
1870 /* Validating that the resource is within range */
1871 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1873 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1874 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1879 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1881 hw_lock_control_reg =
1882 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1885 /* Validating that the resource is not already taken */
1886 lock_status = REG_RD(bp, hw_lock_control_reg);
1887 if (lock_status & resource_bit) {
1888 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1889 lock_status, resource_bit);
/* Try for 5 seconds, polling every 5ms */
1894 for (cnt = 0; cnt < 1000; cnt++) {
1895 /* Try to acquire the lock */
1896 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1897 lock_status = REG_RD(bp, hw_lock_control_reg);
1898 if (lock_status & resource_bit)
1903 DP(NETIF_MSG_HW, "Timeout\n");
1907 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1910 u32 resource_bit = (1 << resource);
1911 int func = BP_FUNC(bp);
1912 u32 hw_lock_control_reg;
1914 /* Validating that the resource is within range */
1915 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1917 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1918 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1923 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1925 hw_lock_control_reg =
1926 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1929 /* Validating that the resource is currently taken */
1930 lock_status = REG_RD(bp, hw_lock_control_reg);
1931 if (!(lock_status & resource_bit)) {
1932 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1933 lock_status, resource_bit);
1937 REG_WR(bp, hw_lock_control_reg, resource_bit);
1941 /* HW Lock for shared dual port PHYs */
1942 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1944 mutex_lock(&bp->port.phy_mutex);
1946 if (bp->port.need_hw_lock)
1947 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1950 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1952 if (bp->port.need_hw_lock)
1953 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1955 mutex_unlock(&bp->port.phy_mutex);
1958 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1960 /* The GPIO should be swapped if swap register is set and active */
1961 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1962 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1963 int gpio_shift = gpio_num +
1964 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1965 u32 gpio_mask = (1 << gpio_shift);
1969 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1970 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1974 /* read GPIO value */
1975 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1977 /* get the requested pin value */
1978 if ((gpio_reg & gpio_mask) == gpio_mask)
1983 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1988 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1990 /* The GPIO should be swapped if swap register is set and active */
1991 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1992 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1993 int gpio_shift = gpio_num +
1994 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1995 u32 gpio_mask = (1 << gpio_shift);
1998 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1999 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2003 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2004 /* read GPIO and mask except the float bits */
2005 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
2008 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2009 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
2010 gpio_num, gpio_shift);
2011 /* clear FLOAT and set CLR */
2012 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2013 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2016 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2017 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
2018 gpio_num, gpio_shift);
2019 /* clear FLOAT and set SET */
2020 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2021 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2024 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2025 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2026 gpio_num, gpio_shift);
2028 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2035 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2036 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2041 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2043 /* The GPIO should be swapped if swap register is set and active */
2044 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2045 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2046 int gpio_shift = gpio_num +
2047 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2048 u32 gpio_mask = (1 << gpio_shift);
2051 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2052 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2056 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2058 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2061 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2062 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2063 "output low\n", gpio_num, gpio_shift);
2064 /* clear SET and set CLR */
2065 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2066 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2069 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2070 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2071 "output high\n", gpio_num, gpio_shift);
2072 /* clear CLR and set SET */
2073 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2074 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2081 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2082 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2087 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2089 u32 spio_mask = (1 << spio_num);
2092 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2093 (spio_num > MISC_REGISTERS_SPIO_7)) {
2094 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2098 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2099 /* read SPIO and mask except the float bits */
2100 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2103 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2104 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2105 /* clear FLOAT and set CLR */
2106 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2107 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2110 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2111 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2112 /* clear FLOAT and set SET */
2113 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2114 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2117 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2118 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2120 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2127 REG_WR(bp, MISC_REG_SPIO, spio_reg);
2128 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2133 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2135 switch (bp->link_vars.ieee_fc &
2136 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2137 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2138 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2142 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2143 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2147 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2148 bp->port.advertising |= ADVERTISED_Asym_Pause;
2152 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2158 static void bnx2x_link_report(struct bnx2x *bp)
2160 if (bp->state == BNX2X_STATE_DISABLED) {
2161 netif_carrier_off(bp->dev);
2162 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2166 if (bp->link_vars.link_up) {
2167 if (bp->state == BNX2X_STATE_OPEN)
2168 netif_carrier_on(bp->dev);
2169 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2171 printk("%d Mbps ", bp->link_vars.line_speed);
2173 if (bp->link_vars.duplex == DUPLEX_FULL)
2174 printk("full duplex");
2176 printk("half duplex");
2178 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2179 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2180 printk(", receive ");
2181 if (bp->link_vars.flow_ctrl &
2183 printk("& transmit ");
2185 printk(", transmit ");
2187 printk("flow control ON");
2191 } else { /* link_down */
2192 netif_carrier_off(bp->dev);
2193 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2197 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2199 if (!BP_NOMCP(bp)) {
2202 /* Initialize link parameters structure variables */
2203 /* It is recommended to turn off RX FC for jumbo frames
2204 for better performance */
2205 if (bp->dev->mtu > 5000)
2206 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2208 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2210 bnx2x_acquire_phy_lock(bp);
2212 if (load_mode == LOAD_DIAG)
2213 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2215 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2217 bnx2x_release_phy_lock(bp);
2219 bnx2x_calc_fc_adv(bp);
2221 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2222 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2223 bnx2x_link_report(bp);
2228 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2232 static void bnx2x_link_set(struct bnx2x *bp)
2234 if (!BP_NOMCP(bp)) {
2235 bnx2x_acquire_phy_lock(bp);
2236 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2237 bnx2x_release_phy_lock(bp);
2239 bnx2x_calc_fc_adv(bp);
2241 BNX2X_ERR("Bootcode is missing - can not set link\n");
2244 static void bnx2x__link_reset(struct bnx2x *bp)
2246 if (!BP_NOMCP(bp)) {
2247 bnx2x_acquire_phy_lock(bp);
2248 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2249 bnx2x_release_phy_lock(bp);
2251 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2254 static u8 bnx2x_link_test(struct bnx2x *bp)
2258 bnx2x_acquire_phy_lock(bp);
2259 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2260 bnx2x_release_phy_lock(bp);
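/* Derive the per-port rate-shaping and fairness parameters from the current
* line speed: periodic timeouts (in 4-usec SDM ticks), arming thresholds and
* the fairness credit upper bound.
*/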
2265 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2267 u32 r_param = bp->link_vars.line_speed / 8;
2268 u32 fair_periodic_timeout_usec;
2271 memset(&(bp->cmng.rs_vars), 0,
2272 sizeof(struct rate_shaping_vars_per_port));
2273 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2275 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2276 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2278 /* this is the threshold below which no timer arming will occur;
2279 the 1.25 coefficient makes the threshold a little bigger than
2280 the real time, to compensate for timer inaccuracy */
2281 bp->cmng.rs_vars.rs_threshold =
2282 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2284 /* resolution of fairness timer */
2285 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2286 /* for 10G it is 1000 usec; for 1G it is 10000 usec */
2287 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2289 /* this is the threshold below which we won't arm the timer anymore */
2290 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2292 /* we multiply by 1e3/8 to get bytes/msec.
2293 We don't want the credits to exceed the credit
2294 corresponding to t_fair*FAIR_MEM (the algorithm resolution) */
2295 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2296 /* since each tick is 4 usec */
2297 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
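/* Example (illustrative numbers, not from the original code, assuming the
 * line speed is reported in Mbps as elsewhere in this driver): at 10G,
 * r_param = 10000/8 = 1250 bytes/usec, so rs_threshold =
 * 100 * 1250 * 5/4 = 156250 bytes (100 usec worth of traffic plus the
 * 25% margin), and t_fair = T_FAIR_COEF / 10000 = 1000 usec, matching
 * the comment above. */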
2300 /* Calculates the sum of vn_min_rates.
2301 It's needed for further normalizing of the min_rates.
2303 Returns: sum of vn_min_rates,
2305 or 0 if all the min_rates are 0.
2306 In the latter case the fairness algorithm should be deactivated.
2307 If not all min_rates are zero, then those that are zero will be set to 1.
2309 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2312 int port = BP_PORT(bp);
2315 bp->vn_weight_sum = 0;
2316 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2317 int func = 2*vn + port;
2318 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2319 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2320 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2322 /* Skip hidden vns */
2323 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2326 /* If min rate is zero - set it to 1 */
2328 vn_min_rate = DEF_MIN_RATE;
2332 bp->vn_weight_sum += vn_min_rate;
2335 /* ... only if all min rates are zeros - disable fairness */
2337 bp->cmng.flags.cmng_enables &=
2338 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2339 DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
2340 " fairness will be disabled\n");
2342 bp->cmng.flags.cmng_enables |=
2343 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
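/* Illustrative example (hypothetical configuration): if the four VNs on
 * the port have MIN_BW fields of 10, 20, 30 and 40, the loop above yields
 * min rates of 1000, 2000, 3000 and 4000 (the *100 scaling) and
 * vn_weight_sum = 10000, so each VN's fair share is simply
 * vn_min_rate / vn_weight_sum of the port rate; hidden VNs are skipped,
 * and an all-zero configuration disables fairness entirely. */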
2346 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2348 struct rate_shaping_vars_per_vn m_rs_vn;
2349 struct fairness_vars_per_vn m_fair_vn;
2350 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2351 u16 vn_min_rate, vn_max_rate;
2354 /* If function is hidden - set min and max to zeroes */
2355 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2360 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2361 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2362 /* If min rate is zero - set it to 1 */
2364 vn_min_rate = DEF_MIN_RATE;
2365 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2366 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2369 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
2370 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2372 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2373 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2375 /* global vn counter - maximal Mbps for this vn */
2376 m_rs_vn.vn_counter.rate = vn_max_rate;
2378 /* quota - number of bytes transmitted in this period */
2379 m_rs_vn.vn_counter.quota =
2380 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2382 if (bp->vn_weight_sum) {
2383 /* credit for each period of the fairness algorithm:
2384 number of bytes in T_FAIR (the VNs share the port rate).
2385 vn_weight_sum should not be larger than 10000, thus
2386 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater than zero */
2388 m_fair_vn.vn_credit_delta =
2389 max((u32)(vn_min_rate * (T_FAIR_COEF /
2390 (8 * bp->vn_weight_sum))),
2391 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2392 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2393 m_fair_vn.vn_credit_delta);
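/* Worked example (hypothetical values): with vn_min_rate = 2500,
 * vn_weight_sum = 10000 and T_FAIR_COEF sized so that t_fair is 1000 usec
 * at 10G (i.e. ~1e7, see bnx2x_init_port_minmax()), the first max()
 * argument is 2500 * (1e7 / 80000) = 312500 bytes; the
 * fair_threshold * 2 term merely acts as a floor for very small
 * minimum rates. */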
2396 /* Store it to internal memory */
2397 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2398 REG_WR(bp, BAR_XSTRORM_INTMEM +
2399 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2400 ((u32 *)(&m_rs_vn))[i]);
2402 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2403 REG_WR(bp, BAR_XSTRORM_INTMEM +
2404 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2405 ((u32 *)(&m_fair_vn))[i]);
2409 /* This function is called upon link interrupt */
2410 static void bnx2x_link_attn(struct bnx2x *bp)
2412 /* Make sure that we are synced with the current statistics */
2413 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2415 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2417 if (bp->link_vars.link_up) {
2419 /* dropless flow control */
2420 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2421 int port = BP_PORT(bp);
2422 u32 pause_enabled = 0;
2424 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2427 REG_WR(bp, BAR_USTRORM_INTMEM +
2428 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2432 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2433 struct host_port_stats *pstats;
2435 pstats = bnx2x_sp(bp, port_stats);
2436 /* reset old bmac stats */
2437 memset(&(pstats->mac_stx[0]), 0,
2438 sizeof(struct mac_stx));
2440 if ((bp->state == BNX2X_STATE_OPEN) ||
2441 (bp->state == BNX2X_STATE_DISABLED))
2442 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2445 /* indicate link status */
2446 bnx2x_link_report(bp);
2449 int port = BP_PORT(bp);
2453 /* Set the attention towards other drivers on the same port */
2454 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2455 if (vn == BP_E1HVN(bp))
2458 func = ((vn << 1) | port);
2459 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2460 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2463 if (bp->link_vars.link_up) {
2466 /* Init rate shaping and fairness contexts */
2467 bnx2x_init_port_minmax(bp);
2469 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2470 bnx2x_init_vn_minmax(bp, 2*vn + port);
2472 /* Store it to internal memory */
2474 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2475 REG_WR(bp, BAR_XSTRORM_INTMEM +
2476 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2477 ((u32 *)(&bp->cmng))[i]);
2482 static void bnx2x__link_status_update(struct bnx2x *bp)
2484 int func = BP_FUNC(bp);
2486 if (bp->state != BNX2X_STATE_OPEN)
2489 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2491 if (bp->link_vars.link_up)
2492 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2494 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2496 bnx2x_calc_vn_weight_sum(bp);
2498 /* indicate link status */
2499 bnx2x_link_report(bp);
2502 static void bnx2x_pmf_update(struct bnx2x *bp)
2504 int port = BP_PORT(bp);
2508 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2510 /* enable nig attention */
2511 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2512 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2513 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2515 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2523 * General service functions
2526 /* send the MCP a request, block until there is a reply */
2527 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2529 int func = BP_FUNC(bp);
2530 u32 seq = ++bp->fw_seq;
2533 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
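/* CHIP_REV_IS_SLOW() presumably identifies emulation/FPGA platforms; there
 * the mailbox is polled every 100 ms instead of every 10 ms, which
 * stretches the 200-iteration loop below from 2 s to 20 s. */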
2535 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2536 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2539 /* let the FW do its magic ... */
2542 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2544 /* Give the FW up to 2 seconds (200*10ms) */
2545 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
2547 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2548 cnt*delay, rc, seq);
2550 /* is this a reply to our command? */
2551 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2552 rc &= FW_MSG_CODE_MASK;
2555 BNX2X_ERR("FW failed to respond!\n");
2563 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2564 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2565 static void bnx2x_set_rx_mode(struct net_device *dev);
2567 static void bnx2x_e1h_disable(struct bnx2x *bp)
2569 int port = BP_PORT(bp);
2572 bp->rx_mode = BNX2X_RX_MODE_NONE;
2573 bnx2x_set_storm_rx_mode(bp);
2575 netif_tx_disable(bp->dev);
2576 bp->dev->trans_start = jiffies; /* prevent tx timeout */
2578 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2580 bnx2x_set_eth_mac_addr_e1h(bp, 0);
2582 for (i = 0; i < MC_HASH_SIZE; i++)
2583 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
2585 netif_carrier_off(bp->dev);
2588 static void bnx2x_e1h_enable(struct bnx2x *bp)
2590 int port = BP_PORT(bp);
2592 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2594 bnx2x_set_eth_mac_addr_e1h(bp, 1);
2596 /* Tx queues should only be re-enabled */
2597 netif_tx_wake_all_queues(bp->dev);
2599 /* Initialize the receive filter. */
2600 bnx2x_set_rx_mode(bp->dev);
2603 static void bnx2x_update_min_max(struct bnx2x *bp)
2605 int port = BP_PORT(bp);
2608 /* Init rate shaping and fairness contexts */
2609 bnx2x_init_port_minmax(bp);
2611 bnx2x_calc_vn_weight_sum(bp);
2613 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2614 bnx2x_init_vn_minmax(bp, 2*vn + port);
2619 /* Set the attention towards other drivers on the same port */
2620 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2621 if (vn == BP_E1HVN(bp))
2624 func = ((vn << 1) | port);
2625 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2626 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2629 /* Store it to internal memory */
2630 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2631 REG_WR(bp, BAR_XSTRORM_INTMEM +
2632 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2633 ((u32 *)(&bp->cmng))[i]);
2637 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2639 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2641 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2643 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2644 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2645 bp->state = BNX2X_STATE_DISABLED;
2647 bnx2x_e1h_disable(bp);
2649 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2650 bp->state = BNX2X_STATE_OPEN;
2652 bnx2x_e1h_enable(bp);
2654 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2656 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2658 bnx2x_update_min_max(bp);
2659 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2662 /* Report results to MCP */
2664 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2666 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2669 /* must be called under the spq lock */
2670 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2672 struct eth_spe *next_spe = bp->spq_prod_bd;
2674 if (bp->spq_prod_bd == bp->spq_last_bd) {
2675 bp->spq_prod_bd = bp->spq;
2676 bp->spq_prod_idx = 0;
2677 DP(NETIF_MSG_TIMER, "end of spq\n");
2685 /* must be called under the spq lock */
2686 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2688 int func = BP_FUNC(bp);
2690 /* Make sure that BD data is updated before writing the producer */
2693 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2698 /* the slow path queue is odd since completions arrive on the fastpath ring */
2699 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2700 u32 data_hi, u32 data_lo, int common)
2702 struct eth_spe *spe;
2704 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2705 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2706 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2707 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2708 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2710 #ifdef BNX2X_STOP_ON_ERROR
2711 if (unlikely(bp->panic))
2715 spin_lock_bh(&bp->spq_lock);
2717 if (!bp->spq_left) {
2718 BNX2X_ERR("BUG! SPQ ring full!\n");
2719 spin_unlock_bh(&bp->spq_lock);
2724 spe = bnx2x_sp_get_next(bp);
2726 /* CID needs the port number to be encoded in it */
2727 spe->hdr.conn_and_cmd_data =
2728 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2730 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2733 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2735 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2736 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2740 bnx2x_sp_prod_update(bp);
2741 spin_unlock_bh(&bp->spq_lock);
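/* A typical caller is bnx2x_storm_stats_post() further down:
 *	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
 *		      ((u32 *)&ramrod_data)[1], ((u32 *)&ramrod_data)[0], 0);
 * data_hi/data_lo carry an opaque 64-bit payload for the ramrod, and a
 * non-zero 'common' sets the SPE_HDR_COMMON_RAMROD bit in the header. */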
2745 /* acquire split MCP access lock register */
2746 static int bnx2x_acquire_alr(struct bnx2x *bp)
2753 for (j = 0; j < i*10; j++) {
2755 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2756 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2757 if (val & (1L << 31))
2762 if (!(val & (1L << 31))) {
2763 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2770 /* release split MCP access lock register */
2771 static void bnx2x_release_alr(struct bnx2x *bp)
2775 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
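/* The ALR is a hardware-arbitrated lock in the MCP scratchpad at
 * GRCBASE_MCP + 0x9c: the acquire loop keeps writing its request and
 * re-reading the register until ownership (bit 31) reads back as set,
 * while the release is a plain write (presumably clearing bit 31) so the
 * MCP or the other port's driver can take the lock next. */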
2778 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2780 struct host_def_status_block *def_sb = bp->def_status_blk;
2783 barrier(); /* status block is written to by the chip */
2784 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2785 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2788 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2789 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2792 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2793 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2796 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2797 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2800 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2801 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
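/* Each storm (attention, C, U, X and T) keeps its own index in the default
 * status block; an index that differs from the driver's cached copy means
 * that storm posted new slow-path work since the last pass.  The caller,
 * bnx2x_sp_task(), uses the returned value as a bitmask of what was
 * updated. */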
2808 * slow path service functions
2811 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2813 int port = BP_PORT(bp);
2814 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2815 COMMAND_REG_ATTN_BITS_SET);
2816 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2817 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2818 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2819 NIG_REG_MASK_INTERRUPT_PORT0;
2823 if (bp->attn_state & asserted)
2824 BNX2X_ERR("IGU ERROR\n");
2826 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2827 aeu_mask = REG_RD(bp, aeu_addr);
2829 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2830 aeu_mask, asserted);
2831 aeu_mask &= ~(asserted & 0xff);
2832 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2834 REG_WR(bp, aeu_addr, aeu_mask);
2835 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2837 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2838 bp->attn_state |= asserted;
2839 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2841 if (asserted & ATTN_HARD_WIRED_MASK) {
2842 if (asserted & ATTN_NIG_FOR_FUNC) {
2844 bnx2x_acquire_phy_lock(bp);
2846 /* save nig interrupt mask */
2847 nig_mask = REG_RD(bp, nig_int_mask_addr);
2848 REG_WR(bp, nig_int_mask_addr, 0);
2850 bnx2x_link_attn(bp);
2852 /* handle unicore attn? */
2854 if (asserted & ATTN_SW_TIMER_4_FUNC)
2855 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2857 if (asserted & GPIO_2_FUNC)
2858 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2860 if (asserted & GPIO_3_FUNC)
2861 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2863 if (asserted & GPIO_4_FUNC)
2864 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2867 if (asserted & ATTN_GENERAL_ATTN_1) {
2868 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2869 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2871 if (asserted & ATTN_GENERAL_ATTN_2) {
2872 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2873 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2875 if (asserted & ATTN_GENERAL_ATTN_3) {
2876 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2877 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2880 if (asserted & ATTN_GENERAL_ATTN_4) {
2881 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2882 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2884 if (asserted & ATTN_GENERAL_ATTN_5) {
2885 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2886 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2888 if (asserted & ATTN_GENERAL_ATTN_6) {
2889 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2890 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2894 } /* if hardwired */
2896 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2898 REG_WR(bp, hc_addr, asserted);
2900 /* now set back the mask */
2901 if (asserted & ATTN_NIG_FOR_FUNC) {
2902 REG_WR(bp, nig_int_mask_addr, nig_mask);
2903 bnx2x_release_phy_lock(bp);
2907 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2909 int port = BP_PORT(bp);
2911 /* mark the failure */
2912 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2913 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2914 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2915 bp->link_params.ext_phy_config);
2917 /* log the failure */
2918 printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2919 " the driver to shut down the card to prevent permanent"
2920 " damage. Please contact Dell Support for assistance\n",
2924 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2926 int port = BP_PORT(bp);
2928 u32 val, swap_val, swap_override;
2930 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2931 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2933 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2935 val = REG_RD(bp, reg_offset);
2936 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2937 REG_WR(bp, reg_offset, val);
2939 BNX2X_ERR("SPIO5 hw attention\n");
2941 /* Fan failure attention */
2942 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2943 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2944 /* Low power mode is controlled by GPIO 2 */
2945 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2946 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2947 /* The PHY reset is controlled by GPIO 1 */
2948 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2949 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2952 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2953 /* The PHY reset is controlled by GPIO 1 */
2954 /* fake the port number to cancel the swap done in set_gpio() */
2956 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2957 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2958 port = (swap_val && swap_override) ^ 1;
2959 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2960 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2966 bnx2x_fan_failure(bp);
2969 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2970 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2971 bnx2x_acquire_phy_lock(bp);
2972 bnx2x_handle_module_detect_int(&bp->link_params);
2973 bnx2x_release_phy_lock(bp);
2976 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2978 val = REG_RD(bp, reg_offset);
2979 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2980 REG_WR(bp, reg_offset, val);
2982 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2983 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2988 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2992 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2994 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2995 BNX2X_ERR("DB hw attention 0x%x\n", val);
2996 /* DORQ discard attention */
2998 BNX2X_ERR("FATAL error from DORQ\n");
3001 if (attn & HW_INTERRUT_ASSERT_SET_1) {
3003 int port = BP_PORT(bp);
3006 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3007 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3009 val = REG_RD(bp, reg_offset);
3010 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3011 REG_WR(bp, reg_offset, val);
3013 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
3014 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
3019 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3023 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3025 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3026 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3027 /* CFC error attention */
3029 BNX2X_ERR("FATAL error from CFC\n");
3032 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3034 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3035 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3036 /* RQ_USDMDP_FIFO_OVERFLOW */
3038 BNX2X_ERR("FATAL error from PXP\n");
3041 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3043 int port = BP_PORT(bp);
3046 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3047 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3049 val = REG_RD(bp, reg_offset);
3050 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3051 REG_WR(bp, reg_offset, val);
3053 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3054 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3059 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3063 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3065 if (attn & BNX2X_PMF_LINK_ASSERT) {
3066 int func = BP_FUNC(bp);
3068 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3069 bp->mf_config = SHMEM_RD(bp,
3070 mf_cfg.func_mf_config[func].config);
3071 val = SHMEM_RD(bp, func_mb[func].drv_status);
3072 if (val & DRV_STATUS_DCC_EVENT_MASK)
3074 (val & DRV_STATUS_DCC_EVENT_MASK));
3075 bnx2x__link_status_update(bp);
3076 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3077 bnx2x_pmf_update(bp);
3079 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3081 BNX2X_ERR("MC assert!\n");
3082 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3083 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3084 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3085 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3088 } else if (attn & BNX2X_MCP_ASSERT) {
3090 BNX2X_ERR("MCP assert!\n");
3091 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3095 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3098 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3099 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3100 if (attn & BNX2X_GRC_TIMEOUT) {
3101 val = CHIP_IS_E1H(bp) ?
3102 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3103 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3105 if (attn & BNX2X_GRC_RSV) {
3106 val = CHIP_IS_E1H(bp) ?
3107 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3108 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3110 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3114 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3116 struct attn_route attn;
3117 struct attn_route group_mask;
3118 int port = BP_PORT(bp);
3124 /* need to take HW lock because MCP or other port might also
3125 try to handle this event */
3126 bnx2x_acquire_alr(bp);
3128 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3129 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3130 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3131 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3132 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3133 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3135 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3136 if (deasserted & (1 << index)) {
3137 group_mask = bp->attn_group[index];
3139 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3140 index, group_mask.sig[0], group_mask.sig[1],
3141 group_mask.sig[2], group_mask.sig[3]);
3143 bnx2x_attn_int_deasserted3(bp,
3144 attn.sig[3] & group_mask.sig[3]);
3145 bnx2x_attn_int_deasserted1(bp,
3146 attn.sig[1] & group_mask.sig[1]);
3147 bnx2x_attn_int_deasserted2(bp,
3148 attn.sig[2] & group_mask.sig[2]);
3149 bnx2x_attn_int_deasserted0(bp,
3150 attn.sig[0] & group_mask.sig[0]);
3152 if ((attn.sig[0] & group_mask.sig[0] &
3153 HW_PRTY_ASSERT_SET_0) ||
3154 (attn.sig[1] & group_mask.sig[1] &
3155 HW_PRTY_ASSERT_SET_1) ||
3156 (attn.sig[2] & group_mask.sig[2] &
3157 HW_PRTY_ASSERT_SET_2))
3158 BNX2X_ERR("FATAL HW block parity attention\n");
3162 bnx2x_release_alr(bp);
3164 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3167 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3169 REG_WR(bp, reg_addr, val);
3171 if (~bp->attn_state & deasserted)
3172 BNX2X_ERR("IGU ERROR\n");
3174 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3175 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3177 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3178 aeu_mask = REG_RD(bp, reg_addr);
3180 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3181 aeu_mask, deasserted);
3182 aeu_mask |= (deasserted & 0xff);
3183 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3185 REG_WR(bp, reg_addr, aeu_mask);
3186 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3188 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3189 bp->attn_state &= ~deasserted;
3190 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3193 static void bnx2x_attn_int(struct bnx2x *bp)
3195 /* read local copy of bits */
3196 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3198 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3200 u32 attn_state = bp->attn_state;
3202 /* look for changed bits */
3203 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3204 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3207 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3208 attn_bits, attn_ack, asserted, deasserted);
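/* Illustrative example (hypothetical values): with attn_bits = 0x3,
 * attn_ack = 0x1 and attn_state = 0x1, bit 1 is newly asserted (raised but
 * not yet acked/recorded) while bit 0 is unchanged; conversely,
 * attn_bits = 0x0 with attn_ack = attn_state = 0x1 makes bit 0 show up in
 * 'deasserted'. */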
3210 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3211 BNX2X_ERR("BAD attention state\n");
3213 /* handle bits that were raised */
3215 bnx2x_attn_int_asserted(bp, asserted);
3218 bnx2x_attn_int_deasserted(bp, deasserted);
3221 static void bnx2x_sp_task(struct work_struct *work)
3223 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3227 /* Return here if interrupt is disabled */
3228 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3229 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3233 status = bnx2x_update_dsb_idx(bp);
3234 /* if (status == 0) */
3235 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
3237 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
3243 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3245 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3247 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3249 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3251 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3256 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3258 struct net_device *dev = dev_instance;
3259 struct bnx2x *bp = netdev_priv(dev);
3261 /* Return here if interrupt is disabled */
3262 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3263 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3267 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3269 #ifdef BNX2X_STOP_ON_ERROR
3270 if (unlikely(bp->panic))
3276 struct cnic_ops *c_ops;
3279 c_ops = rcu_dereference(bp->cnic_ops);
3281 c_ops->cnic_handler(bp->cnic_data, NULL);
3285 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3290 /* end of slow path */
3294 /****************************************************************************
3296 ****************************************************************************/
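/* The MAC blocks and the firmware export 64-bit counters as {hi, lo} pairs
 * of u32, so the helpers below do the wide arithmetic by hand: ADD_64()
 * propagates a carry into the high word when the low-word sum wraps around
 * (the (s_lo < a_lo) test), and DIFF_64() handles the corresponding borrow
 * when subtracting. */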
3298 /* sum[hi:lo] += add[hi:lo] */
3299 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3302 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3305 /* difference = minuend - subtrahend */
3306 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3308 if (m_lo < s_lo) { \
3310 d_hi = m_hi - s_hi; \
3312 /* we can 'loan' 1 */ \
3314 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3316 /* m_hi <= s_hi */ \
3321 /* m_lo >= s_lo */ \
3322 if (m_hi < s_hi) { \
3326 /* m_hi >= s_hi */ \
3327 d_hi = m_hi - s_hi; \
3328 d_lo = m_lo - s_lo; \
3333 #define UPDATE_STAT64(s, t) \
3335 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3336 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3337 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3338 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3339 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3340 pstats->mac_stx[1].t##_lo, diff.lo); \
3343 #define UPDATE_STAT64_NIG(s, t) \
3345 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3346 diff.lo, new->s##_lo, old->s##_lo); \
3347 ADD_64(estats->t##_hi, diff.hi, \
3348 estats->t##_lo, diff.lo); \
3351 /* sum[hi:lo] += add */
3352 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3355 s_hi += (s_lo < a) ? 1 : 0; \
3358 #define UPDATE_EXTEND_STAT(s) \
3360 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3361 pstats->mac_stx[1].s##_lo, \
3365 #define UPDATE_EXTEND_TSTAT(s, t) \
3367 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3368 old_tclient->s = tclient->s; \
3369 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3372 #define UPDATE_EXTEND_USTAT(s, t) \
3374 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3375 old_uclient->s = uclient->s; \
3376 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3379 #define UPDATE_EXTEND_XSTAT(s, t) \
3381 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3382 old_xclient->s = xclient->s; \
3383 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3386 /* minuend -= subtrahend */
3387 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3389 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3392 /* minuend[hi:lo] -= subtrahend */
3393 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3395 SUB_64(m_hi, 0, m_lo, s); \
3398 #define SUB_EXTEND_USTAT(s, t) \
3400 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3401 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3405 * General service functions
3408 static inline long bnx2x_hilo(u32 *hiref)
3410 u32 lo = *(hiref + 1);
3411 #if (BITS_PER_LONG == 64)
3414 return HILO_U64(hi, lo);
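/* bnx2x_hilo() folds one of the <stat>_hi/<stat>_lo pairs (hi word first in
 * memory, lo word right after it, hence *(hiref + 1)) into a single long
 * for the net_device_stats fields below; on 32-bit kernels a long cannot
 * hold both words, so effectively only the low 32 bits are used. */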
3421 * Init service functions
3424 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3426 if (!bp->stats_pending) {
3427 struct eth_query_ramrod_data ramrod_data = {0};
3430 ramrod_data.drv_counter = bp->stats_counter++;
3431 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3432 for_each_queue(bp, i)
3433 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3435 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3436 ((u32 *)&ramrod_data)[1],
3437 ((u32 *)&ramrod_data)[0], 0);
3439 /* stats ramrod has its own slot on the spq */
3441 bp->stats_pending = 1;
3446 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3448 struct dmae_command *dmae = &bp->stats_dmae;
3449 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3451 *stats_comp = DMAE_COMP_VAL;
3452 if (CHIP_REV_IS_SLOW(bp))
3456 if (bp->executer_idx) {
3457 int loader_idx = PMF_DMAE_C(bp);
3459 memset(dmae, 0, sizeof(struct dmae_command));
3461 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3462 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3463 DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
3465 DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
3467 DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
3469 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3471 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3472 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3473 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3474 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3475 sizeof(struct dmae_command) *
3476 (loader_idx + 1)) >> 2;
3477 dmae->dst_addr_hi = 0;
3478 dmae->len = sizeof(struct dmae_command) >> 2;
3481 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3482 dmae->comp_addr_hi = 0;
3486 bnx2x_post_dmae(bp, dmae, loader_idx);
3488 } else if (bp->func_stx) {
3490 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
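/* In the multi-command (PMF) branch above, the posted command acts as a
 * "loader": it copies the prepared command array from host memory into the
 * engine's command memory at DMAE_REG_CMD_MEM and completes into the next
 * GO register (dmae_reg_go_c[loader_idx + 1]), so the whole chain executes
 * back to back; otherwise only the single function-statistics command is
 * posted directly. */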
3494 static int bnx2x_stats_comp(struct bnx2x *bp)
3496 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3500 while (*stats_comp != DMAE_COMP_VAL) {
3502 BNX2X_ERR("timeout waiting for stats finished\n");
3512 * Statistics service functions
3515 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3517 struct dmae_command *dmae;
3519 int loader_idx = PMF_DMAE_C(bp);
3520 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3523 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3524 BNX2X_ERR("BUG!\n");
3528 bp->executer_idx = 0;
3530 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3532 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
3534 DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
3536 DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
3538 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3539 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3541 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3542 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3543 dmae->src_addr_lo = bp->port.port_stx >> 2;
3544 dmae->src_addr_hi = 0;
3545 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3546 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3547 dmae->len = DMAE_LEN32_RD_MAX;
3548 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3549 dmae->comp_addr_hi = 0;
3552 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3553 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3554 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3555 dmae->src_addr_hi = 0;
3556 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3557 DMAE_LEN32_RD_MAX * 4);
3558 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3559 DMAE_LEN32_RD_MAX * 4);
3560 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3561 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3562 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3563 dmae->comp_val = DMAE_COMP_VAL;
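/* The port statistics block is wider than one DMAE read allows, so it is
 * fetched in two chained commands: the first reads DMAE_LEN32_RD_MAX
 * dwords, the second reads the remainder and carries the completion value
 * that bnx2x_stats_comp() polls for. */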
3566 bnx2x_hw_stats_post(bp);
3567 bnx2x_stats_comp(bp);
3570 static void bnx2x_port_stats_init(struct bnx2x *bp)
3572 struct dmae_command *dmae;
3573 int port = BP_PORT(bp);
3574 int vn = BP_E1HVN(bp);
3576 int loader_idx = PMF_DMAE_C(bp);
3578 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3581 if (!bp->link_vars.link_up || !bp->port.pmf) {
3582 BNX2X_ERR("BUG!\n");
3586 bp->executer_idx = 0;
3589 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3590 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3591 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
3593 DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
3595 DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
3597 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3598 (vn << DMAE_CMD_E1HVN_SHIFT));
3600 if (bp->port.port_stx) {
3602 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3603 dmae->opcode = opcode;
3604 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3605 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3606 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3607 dmae->dst_addr_hi = 0;
3608 dmae->len = sizeof(struct host_port_stats) >> 2;
3609 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3610 dmae->comp_addr_hi = 0;
3616 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3617 dmae->opcode = opcode;
3618 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3619 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3620 dmae->dst_addr_lo = bp->func_stx >> 2;
3621 dmae->dst_addr_hi = 0;
3622 dmae->len = sizeof(struct host_func_stats) >> 2;
3623 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3624 dmae->comp_addr_hi = 0;
3629 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3630 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3631 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
3633 DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
3635 DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
3637 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3638 (vn << DMAE_CMD_E1HVN_SHIFT));
3640 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3642 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3643 NIG_REG_INGRESS_BMAC0_MEM);
3645 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3646 BIGMAC_REGISTER_TX_STAT_GTBYT */
3647 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3648 dmae->opcode = opcode;
3649 dmae->src_addr_lo = (mac_addr +
3650 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3651 dmae->src_addr_hi = 0;
3652 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3653 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3654 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3655 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3656 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3657 dmae->comp_addr_hi = 0;
3660 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3661 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3662 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3663 dmae->opcode = opcode;
3664 dmae->src_addr_lo = (mac_addr +
3665 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3666 dmae->src_addr_hi = 0;
3667 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3668 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3669 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3670 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3671 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3672 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3673 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3674 dmae->comp_addr_hi = 0;
3677 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3679 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3681 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3682 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3683 dmae->opcode = opcode;
3684 dmae->src_addr_lo = (mac_addr +
3685 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3686 dmae->src_addr_hi = 0;
3687 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3688 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3689 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3690 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3691 dmae->comp_addr_hi = 0;
3694 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3695 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3696 dmae->opcode = opcode;
3697 dmae->src_addr_lo = (mac_addr +
3698 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3699 dmae->src_addr_hi = 0;
3700 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3701 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3702 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3703 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3705 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3706 dmae->comp_addr_hi = 0;
3709 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3710 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3711 dmae->opcode = opcode;
3712 dmae->src_addr_lo = (mac_addr +
3713 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3714 dmae->src_addr_hi = 0;
3715 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3716 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3717 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3718 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3719 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3720 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3721 dmae->comp_addr_hi = 0;
3726 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3727 dmae->opcode = opcode;
3728 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3729 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3730 dmae->src_addr_hi = 0;
3731 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3732 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3733 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3734 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3735 dmae->comp_addr_hi = 0;
3738 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3739 dmae->opcode = opcode;
3740 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3741 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3742 dmae->src_addr_hi = 0;
3743 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3744 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3745 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3746 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3747 dmae->len = (2*sizeof(u32)) >> 2;
3748 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3749 dmae->comp_addr_hi = 0;
3752 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3753 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3754 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3755 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
3757 DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
3759 DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
3761 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3762 (vn << DMAE_CMD_E1HVN_SHIFT));
3763 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3764 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3765 dmae->src_addr_hi = 0;
3766 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3767 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3768 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3769 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3770 dmae->len = (2*sizeof(u32)) >> 2;
3771 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3772 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3773 dmae->comp_val = DMAE_COMP_VAL;
3778 static void bnx2x_func_stats_init(struct bnx2x *bp)
3780 struct dmae_command *dmae = &bp->stats_dmae;
3781 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3784 if (!bp->func_stx) {
3785 BNX2X_ERR("BUG!\n");
3789 bp->executer_idx = 0;
3790 memset(dmae, 0, sizeof(struct dmae_command));
3792 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3793 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3794 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
3796 DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
3798 DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
3800 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3801 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3802 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3803 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3804 dmae->dst_addr_lo = bp->func_stx >> 2;
3805 dmae->dst_addr_hi = 0;
3806 dmae->len = sizeof(struct host_func_stats) >> 2;
3807 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3808 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3809 dmae->comp_val = DMAE_COMP_VAL;
3814 static void bnx2x_stats_start(struct bnx2x *bp)
3817 bnx2x_port_stats_init(bp);
3819 else if (bp->func_stx)
3820 bnx2x_func_stats_init(bp);
3822 bnx2x_hw_stats_post(bp);
3823 bnx2x_storm_stats_post(bp);
3826 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3828 bnx2x_stats_comp(bp);
3829 bnx2x_stats_pmf_update(bp);
3830 bnx2x_stats_start(bp);
3833 static void bnx2x_stats_restart(struct bnx2x *bp)
3835 bnx2x_stats_comp(bp);
3836 bnx2x_stats_start(bp);
3839 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3841 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3842 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3843 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3849 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3850 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3851 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3852 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3853 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3854 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3855 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3856 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3857 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3858 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3859 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3860 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3861 UPDATE_STAT64(tx_stat_gt127,
3862 tx_stat_etherstatspkts65octetsto127octets);
3863 UPDATE_STAT64(tx_stat_gt255,
3864 tx_stat_etherstatspkts128octetsto255octets);
3865 UPDATE_STAT64(tx_stat_gt511,
3866 tx_stat_etherstatspkts256octetsto511octets);
3867 UPDATE_STAT64(tx_stat_gt1023,
3868 tx_stat_etherstatspkts512octetsto1023octets);
3869 UPDATE_STAT64(tx_stat_gt1518,
3870 tx_stat_etherstatspkts1024octetsto1522octets);
3871 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3872 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3873 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3874 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3875 UPDATE_STAT64(tx_stat_gterr,
3876 tx_stat_dot3statsinternalmactransmiterrors);
3877 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3879 estats->pause_frames_received_hi =
3880 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3881 estats->pause_frames_received_lo =
3882 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3884 estats->pause_frames_sent_hi =
3885 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3886 estats->pause_frames_sent_lo =
3887 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3890 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3892 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3893 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3894 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3896 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3897 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3898 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3899 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3900 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3901 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3902 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3903 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3904 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3905 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3906 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3907 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3908 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3909 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3910 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3911 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3912 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3913 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3914 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3915 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3916 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3917 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3918 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3919 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3920 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3921 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3922 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3923 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3924 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3925 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3926 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3928 estats->pause_frames_received_hi =
3929 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3930 estats->pause_frames_received_lo =
3931 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3932 ADD_64(estats->pause_frames_received_hi,
3933 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3934 estats->pause_frames_received_lo,
3935 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3937 estats->pause_frames_sent_hi =
3938 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3939 estats->pause_frames_sent_lo =
3940 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3941 ADD_64(estats->pause_frames_sent_hi,
3942 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3943 estats->pause_frames_sent_lo,
3944 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3947 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3949 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3950 struct nig_stats *old = &(bp->port.old_nig_stats);
3951 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3952 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3959 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3960 bnx2x_bmac_stats_update(bp);
3962 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3963 bnx2x_emac_stats_update(bp);
3965 else { /* unreached */
3966 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3970 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3971 new->brb_discard - old->brb_discard);
3972 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3973 new->brb_truncate - old->brb_truncate);
3975 UPDATE_STAT64_NIG(egress_mac_pkt0,
3976 etherstatspkts1024octetsto1522octets);
3977 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3979 memcpy(old, new, sizeof(struct nig_stats));
3981 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3982 sizeof(struct mac_stx));
3983 estats->brb_drop_hi = pstats->brb_drop_hi;
3984 estats->brb_drop_lo = pstats->brb_drop_lo;
3986 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3988 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3989 if (nig_timer_max != estats->nig_timer_max) {
3990 estats->nig_timer_max = nig_timer_max;
3991 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3997 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3999 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
4000 struct tstorm_per_port_stats *tport =
4001 &stats->tstorm_common.port_statistics;
4002 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
4003 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4006 memcpy(&(fstats->total_bytes_received_hi),
4007 &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
4008 sizeof(struct host_func_stats) - 2*sizeof(u32));
4009 estats->error_bytes_received_hi = 0;
4010 estats->error_bytes_received_lo = 0;
4011 estats->etherstatsoverrsizepkts_hi = 0;
4012 estats->etherstatsoverrsizepkts_lo = 0;
4013 estats->no_buff_discard_hi = 0;
4014 estats->no_buff_discard_lo = 0;
4016 for_each_rx_queue(bp, i) {
4017 struct bnx2x_fastpath *fp = &bp->fp[i];
4018 int cl_id = fp->cl_id;
4019 struct tstorm_per_client_stats *tclient =
4020 &stats->tstorm_common.client_statistics[cl_id];
4021 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4022 struct ustorm_per_client_stats *uclient =
4023 &stats->ustorm_common.client_statistics[cl_id];
4024 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4025 struct xstorm_per_client_stats *xclient =
4026 &stats->xstorm_common.client_statistics[cl_id];
4027 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4028 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4031 /* are storm stats valid? */
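/* The query ramrod posted in bnx2x_storm_stats_post() carried
 * bp->stats_counter and then incremented it, so a client's numbers are
 * fresh only if the counter echoed back by each storm is exactly one
 * behind the driver's current value; a stale counter aborts the update,
 * and bnx2x_stats_update() counts the miss via bp->stats_pending. */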
4032 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
4033 bp->stats_counter) {
4034 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4035 " xstorm counter (%d) != stats_counter (%d)\n",
4036 i, xclient->stats_counter, bp->stats_counter);
4039 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
4040 bp->stats_counter) {
4041 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4042 " tstorm counter (%d) != stats_counter (%d)\n",
4043 i, tclient->stats_counter, bp->stats_counter);
4046 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4047 bp->stats_counter) {
4048 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4049 " ustorm counter (%d) != stats_counter (%d)\n",
4050 i, uclient->stats_counter, bp->stats_counter);
4054 qstats->total_bytes_received_hi =
4055 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4056 qstats->total_bytes_received_lo =
4057 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4059 ADD_64(qstats->total_bytes_received_hi,
4060 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4061 qstats->total_bytes_received_lo,
4062 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4064 ADD_64(qstats->total_bytes_received_hi,
4065 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4066 qstats->total_bytes_received_lo,
4067 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4069 qstats->valid_bytes_received_hi =
4070 qstats->total_bytes_received_hi;
4071 qstats->valid_bytes_received_lo =
4072 qstats->total_bytes_received_lo;
4074 qstats->error_bytes_received_hi =
4075 le32_to_cpu(tclient->rcv_error_bytes.hi);
4076 qstats->error_bytes_received_lo =
4077 le32_to_cpu(tclient->rcv_error_bytes.lo);
4079 ADD_64(qstats->total_bytes_received_hi,
4080 qstats->error_bytes_received_hi,
4081 qstats->total_bytes_received_lo,
4082 qstats->error_bytes_received_lo);
4084 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4085 total_unicast_packets_received);
4086 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4087 total_multicast_packets_received);
4088 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4089 total_broadcast_packets_received);
4090 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4091 etherstatsoverrsizepkts);
4092 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4094 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4095 total_unicast_packets_received);
4096 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4097 total_multicast_packets_received);
4098 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4099 total_broadcast_packets_received);
4100 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4101 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4102 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4104 qstats->total_bytes_transmitted_hi =
4105 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4106 qstats->total_bytes_transmitted_lo =
4107 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4109 ADD_64(qstats->total_bytes_transmitted_hi,
4110 le32_to_cpu(xclient->multicast_bytes_sent.hi),
4111 qstats->total_bytes_transmitted_lo,
4112 le32_to_cpu(xclient->multicast_bytes_sent.lo));
4114 ADD_64(qstats->total_bytes_transmitted_hi,
4115 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4116 qstats->total_bytes_transmitted_lo,
4117 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4119 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4120 total_unicast_packets_transmitted);
4121 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4122 total_multicast_packets_transmitted);
4123 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4124 total_broadcast_packets_transmitted);
4126 old_tclient->checksum_discard = tclient->checksum_discard;
4127 old_tclient->ttl0_discard = tclient->ttl0_discard;
4129 ADD_64(fstats->total_bytes_received_hi,
4130 qstats->total_bytes_received_hi,
4131 fstats->total_bytes_received_lo,
4132 qstats->total_bytes_received_lo);
4133 ADD_64(fstats->total_bytes_transmitted_hi,
4134 qstats->total_bytes_transmitted_hi,
4135 fstats->total_bytes_transmitted_lo,
4136 qstats->total_bytes_transmitted_lo);
4137 ADD_64(fstats->total_unicast_packets_received_hi,
4138 qstats->total_unicast_packets_received_hi,
4139 fstats->total_unicast_packets_received_lo,
4140 qstats->total_unicast_packets_received_lo);
4141 ADD_64(fstats->total_multicast_packets_received_hi,
4142 qstats->total_multicast_packets_received_hi,
4143 fstats->total_multicast_packets_received_lo,
4144 qstats->total_multicast_packets_received_lo);
4145 ADD_64(fstats->total_broadcast_packets_received_hi,
4146 qstats->total_broadcast_packets_received_hi,
4147 fstats->total_broadcast_packets_received_lo,
4148 qstats->total_broadcast_packets_received_lo);
4149 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4150 qstats->total_unicast_packets_transmitted_hi,
4151 fstats->total_unicast_packets_transmitted_lo,
4152 qstats->total_unicast_packets_transmitted_lo);
4153 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4154 qstats->total_multicast_packets_transmitted_hi,
4155 fstats->total_multicast_packets_transmitted_lo,
4156 qstats->total_multicast_packets_transmitted_lo);
4157 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4158 qstats->total_broadcast_packets_transmitted_hi,
4159 fstats->total_broadcast_packets_transmitted_lo,
4160 qstats->total_broadcast_packets_transmitted_lo);
4161 ADD_64(fstats->valid_bytes_received_hi,
4162 qstats->valid_bytes_received_hi,
4163 fstats->valid_bytes_received_lo,
4164 qstats->valid_bytes_received_lo);
4166 ADD_64(estats->error_bytes_received_hi,
4167 qstats->error_bytes_received_hi,
4168 estats->error_bytes_received_lo,
4169 qstats->error_bytes_received_lo);
4170 ADD_64(estats->etherstatsoverrsizepkts_hi,
4171 qstats->etherstatsoverrsizepkts_hi,
4172 estats->etherstatsoverrsizepkts_lo,
4173 qstats->etherstatsoverrsizepkts_lo);
4174 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4175 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4178 ADD_64(fstats->total_bytes_received_hi,
4179 estats->rx_stat_ifhcinbadoctets_hi,
4180 fstats->total_bytes_received_lo,
4181 estats->rx_stat_ifhcinbadoctets_lo);
4183 memcpy(estats, &(fstats->total_bytes_received_hi),
4184 sizeof(struct host_func_stats) - 2*sizeof(u32));
4186 ADD_64(estats->etherstatsoverrsizepkts_hi,
4187 estats->rx_stat_dot3statsframestoolong_hi,
4188 estats->etherstatsoverrsizepkts_lo,
4189 estats->rx_stat_dot3statsframestoolong_lo);
4190 ADD_64(estats->error_bytes_received_hi,
4191 estats->rx_stat_ifhcinbadoctets_hi,
4192 estats->error_bytes_received_lo,
4193 estats->rx_stat_ifhcinbadoctets_lo);
4196 estats->mac_filter_discard =
4197 le32_to_cpu(tport->mac_filter_discard);
4198 estats->xxoverflow_discard =
4199 le32_to_cpu(tport->xxoverflow_discard);
4200 estats->brb_truncate_discard =
4201 le32_to_cpu(tport->brb_truncate_discard);
4202 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4205 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4207 bp->stats_pending = 0;
4212 static void bnx2x_net_stats_update(struct bnx2x *bp)
4214 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4215 struct net_device_stats *nstats = &bp->dev->stats;
4218 nstats->rx_packets =
4219 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4220 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4221 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4223 nstats->tx_packets =
4224 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4225 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4226 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4228 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4230 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4232 nstats->rx_dropped = estats->mac_discard;
4233 for_each_rx_queue(bp, i)
4234 nstats->rx_dropped +=
4235 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4237 nstats->tx_dropped = 0;
4239 nstats->multicast =
4240 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4242 nstats->collisions =
4243 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4245 nstats->rx_length_errors =
4246 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4247 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4248 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4249 bnx2x_hilo(&estats->brb_truncate_hi);
4250 nstats->rx_crc_errors =
4251 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4252 nstats->rx_frame_errors =
4253 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4254 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4255 nstats->rx_missed_errors = estats->xxoverflow_discard;
4257 nstats->rx_errors = nstats->rx_length_errors +
4258 nstats->rx_over_errors +
4259 nstats->rx_crc_errors +
4260 nstats->rx_frame_errors +
4261 nstats->rx_fifo_errors +
4262 nstats->rx_missed_errors;
4264 nstats->tx_aborted_errors =
4265 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4266 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4267 nstats->tx_carrier_errors =
4268 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4269 nstats->tx_fifo_errors = 0;
4270 nstats->tx_heartbeat_errors = 0;
4271 nstats->tx_window_errors = 0;
4273 nstats->tx_errors = nstats->tx_aborted_errors +
4274 nstats->tx_carrier_errors +
4275 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
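/* Sum the driver-maintained per-queue counters (Tx Xoff events, Rx error
 * discards, failed skb allocations, HW checksum errors) into bp->eth_stats.
 */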
4278 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4280 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4283 estats->driver_xoff = 0;
4284 estats->rx_err_discard_pkt = 0;
4285 estats->rx_skb_alloc_failed = 0;
4286 estats->hw_csum_err = 0;
4287 for_each_rx_queue(bp, i) {
4288 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4290 estats->driver_xoff += qstats->driver_xoff;
4291 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4292 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4293 estats->hw_csum_err += qstats->hw_csum_err;
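/* Periodic statistics refresh: bail out unless the previous DMAE pass has
 * completed, pull the MAC/NIG hardware counters and the per-queue storm
 * counters, derive the netdev and driver statistics from them, optionally
 * dump a debug snapshot, and finally re-post the requests for the next pass.
 */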
4297 static void bnx2x_stats_update(struct bnx2x *bp)
4299 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4301 if (*stats_comp != DMAE_COMP_VAL)
4302 return;
4305 bnx2x_hw_stats_update(bp);
4307 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4308 BNX2X_ERR("storm stats were not updated for 3 times\n");
4313 bnx2x_net_stats_update(bp);
4314 bnx2x_drv_stats_update(bp);
4316 if (bp->msglevel & NETIF_MSG_TIMER) {
4317 struct bnx2x_fastpath *fp0_rx = bp->fp;
4318 struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
4319 struct tstorm_per_client_stats *old_tclient =
4320 &bp->fp->old_tclient;
4321 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4322 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4323 struct net_device_stats *nstats = &bp->dev->stats;
4326 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4327 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4329 bnx2x_tx_avail(fp0_tx),
4330 le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4331 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4333 (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4334 fp0_rx->rx_comp_cons),
4335 le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4336 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4337 "brb truncate %u\n",
4338 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4339 qstats->driver_xoff,
4340 estats->brb_drop_lo, estats->brb_truncate_lo);
4341 printk(KERN_DEBUG "tstats: checksum_discard %u "
4342 "packets_too_big_discard %lu no_buff_discard %lu "
4343 "mac_discard %u mac_filter_discard %u "
4344 "xxovrflow_discard %u brb_truncate_discard %u "
4345 "ttl0_discard %u\n",
4346 le32_to_cpu(old_tclient->checksum_discard),
4347 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4348 bnx2x_hilo(&qstats->no_buff_discard_hi),
4349 estats->mac_discard, estats->mac_filter_discard,
4350 estats->xxoverflow_discard, estats->brb_truncate_discard,
4351 le32_to_cpu(old_tclient->ttl0_discard));
4353 for_each_queue(bp, i) {
4354 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4355 bnx2x_fp(bp, i, tx_pkt),
4356 bnx2x_fp(bp, i, rx_pkt),
4357 bnx2x_fp(bp, i, rx_calls));
4361 bnx2x_hw_stats_post(bp);
4362 bnx2x_storm_stats_post(bp);
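/* Queue the DMAE command(s) that write the final port and function
 * statistics snapshots back to their shared memory areas before
 * statistics collection is turned off.
 */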
4365 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4367 struct dmae_command *dmae;
4369 int loader_idx = PMF_DMAE_C(bp);
4370 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4372 bp->executer_idx = 0;
4374 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4376 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4377 #ifdef __BIG_ENDIAN
4378 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4379 #else
4380 DMAE_CMD_ENDIANITY_DW_SWAP |
4381 #endif
4382 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4383 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4385 if (bp->port.port_stx) {
4387 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4388 if (bp->func_stx)
4389 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4390 else
4391 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4392 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4393 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4394 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4395 dmae->dst_addr_hi = 0;
4396 dmae->len = sizeof(struct host_port_stats) >> 2;
4398 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4399 dmae->comp_addr_hi = 0;
4402 dmae->comp_addr_lo =
4403 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4404 dmae->comp_addr_hi =
4405 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4406 dmae->comp_val = DMAE_COMP_VAL;
4414 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4415 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4416 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4417 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4418 dmae->dst_addr_lo = bp->func_stx >> 2;
4419 dmae->dst_addr_hi = 0;
4420 dmae->len = sizeof(struct host_func_stats) >> 2;
4421 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4422 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4423 dmae->comp_val = DMAE_COMP_VAL;
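/* Stop path: wait for any outstanding DMAE completion, take one last
 * hardware/storm counter update so the netdev statistics stay current,
 * then push the final port statistics out and wait for that DMAE too.
 */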
4429 static void bnx2x_stats_stop(struct bnx2x *bp)
4433 bnx2x_stats_comp(bp);
4436 update = (bnx2x_hw_stats_update(bp) == 0);
4438 update |= (bnx2x_storm_stats_update(bp) == 0);
4441 bnx2x_net_stats_update(bp);
4444 bnx2x_port_stats_stop(bp);
4446 bnx2x_hw_stats_post(bp);
4447 bnx2x_stats_comp(bp);
4451 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
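/* Statistics state machine, indexed as [current state][event]: each cell
 * names the handler to run and the state to enter next. For example, an
 * UPDATE event while ENABLED runs bnx2x_stats_update() and stays ENABLED,
 * while a STOP event moves the machine back to DISABLED.
 */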
4455 static const struct {
4456 void (*action)(struct bnx2x *bp);
4457 enum bnx2x_stats_state next_state;
4458 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4461 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4462 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4463 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4464 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4467 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4468 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4469 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4470 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
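/* Single entry point for statistics events: look up the (state, event)
 * cell, run its action and adopt the next state. Callers use only this
 * wrapper, e.g. bnx2x_stats_handle(bp, STATS_EVENT_UPDATE) from the timer.
 */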
4474 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4476 enum bnx2x_stats_state state = bp->stats_state;
4478 bnx2x_stats_stm[state][event].action(bp);
4479 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4481 /* Make sure the state has been "changed" */
4484 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4485 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4486 state, event, bp->stats_state);
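/* PMF only: DMAE the host port-statistics buffer out to the port_stx area
 * in shared memory to establish the baseline for later updates.
 */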
4489 static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4491 struct dmae_command *dmae;
4492 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4495 if (!bp->port.pmf || !bp->port.port_stx) {
4496 BNX2X_ERR("BUG!\n");
4500 bp->executer_idx = 0;
4502 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4503 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4504 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4505 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4506 #ifdef __BIG_ENDIAN
4507 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4508 #else
4509 DMAE_CMD_ENDIANITY_DW_SWAP |
4510 #endif
4511 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4512 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4513 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4514 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4515 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4516 dmae->dst_addr_hi = 0;
4517 dmae->len = sizeof(struct host_port_stats) >> 2;
4518 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4519 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4520 dmae->comp_val = DMAE_COMP_VAL;
4523 bnx2x_hw_stats_post(bp);
4524 bnx2x_stats_comp(bp);
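/* PMF only: walk every vnic of this port, temporarily point bp->func_stx
 * at that function's statistics area read from shared memory, post the
 * init DMAE and wait for it, then restore our own func_stx.
 */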
4527 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4529 int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4530 int port = BP_PORT(bp);
4535 if (!bp->port.pmf || !bp->func_stx) {
4536 BNX2X_ERR("BUG!\n");
4540 /* save our func_stx */
4541 func_stx = bp->func_stx;
4543 for (vn = VN_0; vn < vn_max; vn++) {
4546 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4547 bnx2x_func_stats_init(bp);
4548 bnx2x_hw_stats_post(bp);
4549 bnx2x_stats_comp(bp);
4552 /* restore our func_stx */
4553 bp->func_stx = func_stx;
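/* Non-PMF path: DMAE the current function statistics block from shared
 * memory (func_stx) into the local func_stats_base copy, presumably so
 * later accounting continues from the values already recorded there.
 */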
4556 static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4558 struct dmae_command *dmae = &bp->stats_dmae;
4559 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4562 if (!bp->func_stx) {
4563 BNX2X_ERR("BUG!\n");
4567 bp->executer_idx = 0;
4568 memset(dmae, 0, sizeof(struct dmae_command));
4570 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4571 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4572 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4573 #ifdef __BIG_ENDIAN
4574 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4575 #else
4576 DMAE_CMD_ENDIANITY_DW_SWAP |
4577 #endif
4578 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4579 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4580 dmae->src_addr_lo = bp->func_stx >> 2;
4581 dmae->src_addr_hi = 0;
4582 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4583 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4584 dmae->len = sizeof(struct host_func_stats) >> 2;
4585 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4586 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4587 dmae->comp_val = DMAE_COMP_VAL;
4590 bnx2x_hw_stats_post(bp);
4591 bnx2x_stats_comp(bp);
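/* One-time statistics setup at load time: fetch the port/function
 * statistics addresses from MCP shared memory (when an MCP is present),
 * snapshot the current NIG counters as the "old" baseline, clear all
 * per-queue and global software counters, and prime the statistics bases
 * according to whether this function is the PMF.
 */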
4594 static void bnx2x_stats_init(struct bnx2x *bp)
4596 int port = BP_PORT(bp);
4597 int func = BP_FUNC(bp);
4600 bp->stats_pending = 0;
4601 bp->executer_idx = 0;
4602 bp->stats_counter = 0;
4604 /* port and func stats for management */
4605 if (!BP_NOMCP(bp)) {
4606 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4607 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4610 bp->port.port_stx = 0;
4613 DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
4614 bp->port.port_stx, bp->func_stx);
4617 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4618 bp->port.old_nig_stats.brb_discard =
4619 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4620 bp->port.old_nig_stats.brb_truncate =
4621 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4622 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4623 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4624 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4625 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4627 /* function stats */
4628 for_each_queue(bp, i) {
4629 struct bnx2x_fastpath *fp = &bp->fp[i];
4631 memset(&fp->old_tclient, 0,
4632 sizeof(struct tstorm_per_client_stats));
4633 memset(&fp->old_uclient, 0,
4634 sizeof(struct ustorm_per_client_stats));
4635 memset(&fp->old_xclient, 0,
4636 sizeof(struct xstorm_per_client_stats));
4637 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
4640 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4641 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
4643 bp->stats_state = STATS_STATE_DISABLED;
4646 if (bp->port.port_stx)
4647 bnx2x_port_stats_base_init(bp);
4650 bnx2x_func_stats_base_init(bp);
4652 } else if (bp->func_stx)
4653 bnx2x_func_stats_base_update(bp);
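/* Periodic driver timer: in the 'poll' debug mode it services fastpath 0
 * directly, exchanges the driver/MCP heartbeat pulse sequence numbers and
 * complains if they drift apart, triggers a statistics update when the
 * device is up, and re-arms itself with bp->current_interval.
 */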
4656 static void bnx2x_timer(unsigned long data)
4658 struct bnx2x *bp = (struct bnx2x *) data;
4660 if (!netif_running(bp->dev))
4663 if (atomic_read(&bp->intr_sem) != 0)
4667 struct bnx2x_fastpath *fp = &bp->fp[0];
4671 rc = bnx2x_rx_int(fp, 1000);
4674 if (!BP_NOMCP(bp)) {
4675 int func = BP_FUNC(bp);
4679 ++bp->fw_drv_pulse_wr_seq;
4680 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4681 /* TBD - add SYSTEM_TIME */
4682 drv_pulse = bp->fw_drv_pulse_wr_seq;
4683 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4685 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4686 MCP_PULSE_SEQ_MASK);
4687 /* The delta between driver pulse and mcp response
4688 * should be 1 (before mcp response) or 0 (after mcp response)
4689 */
4690 if ((drv_pulse != mcp_pulse) &&
4691 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4692 /* someone lost a heartbeat... */
4693 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4694 drv_pulse, mcp_pulse);
4698 if ((bp->state == BNX2X_STATE_OPEN) ||
4699 (bp->state == BNX2X_STATE_DISABLED))
4700 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4703 mod_timer(&bp->timer, jiffies + bp->current_interval);
4706 /* end of Statistics */
4710 /*
4711 * nic init service functions
4712 */
4714 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4716 int port = BP_PORT(bp);
4719 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4720 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4721 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4722 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4723 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4724 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
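/* Program a (non-default) per-queue status block: hand the chip the DMA
 * addresses of the USTORM and CSTORM halves, tag them with this function,
 * start with host coalescing disabled on every index, and finish by
 * enabling IGU interrupts for the block.
 */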
4727 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4728 dma_addr_t mapping, int sb_id)
4730 int port = BP_PORT(bp);
4731 int func = BP_FUNC(bp);
4736 section = ((u64)mapping) + offsetof(struct host_status_block,
4738 sb->u_status_block.status_block_id = sb_id;
4740 REG_WR(bp, BAR_CSTRORM_INTMEM +
4741 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4742 REG_WR(bp, BAR_CSTRORM_INTMEM +
4743 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
4745 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4746 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
4748 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4749 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4750 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
4753 section = ((u64)mapping) + offsetof(struct host_status_block,
4755 sb->c_status_block.status_block_id = sb_id;
4757 REG_WR(bp, BAR_CSTRORM_INTMEM +
4758 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
4759 REG_WR(bp, BAR_CSTRORM_INTMEM +
4760 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
4762 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4763 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
4765 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4766 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4767 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
4769 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4772 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4774 int func = BP_FUNC(bp);
4776 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
4777 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4778 sizeof(struct tstorm_def_status_block)/4);
4779 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4780 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4781 sizeof(struct cstorm_def_status_block_u)/4);
4782 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4783 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4784 sizeof(struct cstorm_def_status_block_c)/4);
4785 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
4786 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4787 sizeof(struct xstorm_def_status_block)/4);
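/* Set up the default status block: capture the attention group masks from
 * the AEU enable registers, point the HC attention message address at the
 * attention part of the block, then register the U/C/T/X default
 * sub-blocks with their respective storms and re-enable IGU interrupts.
 */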
4790 static void bnx2x_init_def_sb(struct bnx2x *bp,
4791 struct host_def_status_block *def_sb,
4792 dma_addr_t mapping, int sb_id)
4794 int port = BP_PORT(bp);
4795 int func = BP_FUNC(bp);
4796 int index, val, reg_offset;
4800 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4801 atten_status_block);
4802 def_sb->atten_status_block.status_block_id = sb_id;
4806 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4807 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4809 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4810 bp->attn_group[index].sig[0] = REG_RD(bp,
4811 reg_offset + 0x10*index);
4812 bp->attn_group[index].sig[1] = REG_RD(bp,
4813 reg_offset + 0x4 + 0x10*index);
4814 bp->attn_group[index].sig[2] = REG_RD(bp,
4815 reg_offset + 0x8 + 0x10*index);
4816 bp->attn_group[index].sig[3] = REG_RD(bp,
4817 reg_offset + 0xc + 0x10*index);
4820 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4821 HC_REG_ATTN_MSG0_ADDR_L);
4823 REG_WR(bp, reg_offset, U64_LO(section));
4824 REG_WR(bp, reg_offset + 4, U64_HI(section));
4826 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4828 val = REG_RD(bp, reg_offset);
4830 REG_WR(bp, reg_offset, val);
4833 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4834 u_def_status_block);
4835 def_sb->u_def_status_block.status_block_id = sb_id;
4837 REG_WR(bp, BAR_CSTRORM_INTMEM +
4838 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4839 REG_WR(bp, BAR_CSTRORM_INTMEM +
4840 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
4842 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4843 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
4845 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4846 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4847 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
4850 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4851 c_def_status_block);
4852 def_sb->c_def_status_block.status_block_id = sb_id;
4854 REG_WR(bp, BAR_CSTRORM_INTMEM +
4855 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
4856 REG_WR(bp, BAR_CSTRORM_INTMEM +
4857 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
4859 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4860 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
4862 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4863 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4864 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
4867 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4868 t_def_status_block);
4869 def_sb->t_def_status_block.status_block_id = sb_id;
4871 REG_WR(bp, BAR_TSTRORM_INTMEM +
4872 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4873 REG_WR(bp, BAR_TSTRORM_INTMEM +
4874 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4876 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4877 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4879 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4880 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4881 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4884 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4885 x_def_status_block);
4886 def_sb->x_def_status_block.status_block_id = sb_id;
4888 REG_WR(bp, BAR_XSTRORM_INTMEM +
4889 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4890 REG_WR(bp, BAR_XSTRORM_INTMEM +
4891 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4893 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4894 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4896 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4897 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4898 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4900 bp->stats_pending = 0;
4901 bp->set_mac_pending = 0;
4903 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
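/* Apply the interrupt coalescing settings: program the HC timeout for the
 * Rx and Tx CQ indices of every queue (the value is written in what appear
 * to be 12-usec units) and disable host coalescing for an index whenever
 * the configured ticks fall below that resolution.
 */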
4906 static void bnx2x_update_coalesce(struct bnx2x *bp)
4908 int port = BP_PORT(bp);
4911 for_each_queue(bp, i) {
4912 int sb_id = bp->fp[i].sb_id;
4914 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4915 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4916 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4917 U_SB_ETH_RX_CQ_INDEX),
4918 bp->rx_ticks/12);
4919 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4920 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4921 U_SB_ETH_RX_CQ_INDEX),
4922 (bp->rx_ticks/12) ? 0 : 1);
4924 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4925 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4926 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4927 C_SB_ETH_TX_CQ_INDEX),
4928 bp->tx_ticks/12);
4929 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4930 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4931 C_SB_ETH_TX_CQ_INDEX),
4932 (bp->tx_ticks/12) ? 0 : 1);
4936 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4937 struct bnx2x_fastpath *fp, int last)
4941 for (i = 0; i < last; i++) {
4942 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4943 struct sk_buff *skb = rx_buf->skb;
4946 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4950 if (fp->tpa_state[i] == BNX2X_TPA_START)
4951 pci_unmap_single(bp->pdev,
4952 pci_unmap_addr(rx_buf, mapping),
4953 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
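/* Build all Rx rings: pre-allocate the per-queue TPA (LRO) skb pool when
 * TPA is enabled, chain the "next page" elements of the SGE, BD and CQE
 * rings, fill the rings with buffers, publish the initial producers to the
 * chip and program the USTORM memory workaround address.
 */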
4960 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4962 int func = BP_FUNC(bp);
4963 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4964 ETH_MAX_AGGREGATION_QUEUES_E1H;
4965 u16 ring_prod, cqe_ring_prod;
4968 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4970 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4972 if (bp->flags & TPA_ENABLE_FLAG) {
4974 for_each_rx_queue(bp, j) {
4975 struct bnx2x_fastpath *fp = &bp->fp[j];
4977 for (i = 0; i < max_agg_queues; i++) {
4978 fp->tpa_pool[i].skb =
4979 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4980 if (!fp->tpa_pool[i].skb) {
4981 BNX2X_ERR("Failed to allocate TPA "
4982 "skb pool for queue[%d] - "
4983 "disabling TPA on this "
4985 bnx2x_free_tpa_pool(bp, fp, i);
4986 fp->disable_tpa = 1;
4989 pci_unmap_addr_set((struct sw_rx_bd *)
4990 &bp->fp->tpa_pool[i],
4992 fp->tpa_state[i] = BNX2X_TPA_STOP;
4997 for_each_rx_queue(bp, j) {
4998 struct bnx2x_fastpath *fp = &bp->fp[j];
5001 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
5002 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
5004 /* Mark queue as Rx */
5005 fp->is_rx_queue = 1;
5007 /* "next page" elements initialization */
5009 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
5010 struct eth_rx_sge *sge;
5012 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
5014 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
5015 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5017 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
5018 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5021 bnx2x_init_sge_ring_bit_mask(fp);
5024 for (i = 1; i <= NUM_RX_RINGS; i++) {
5025 struct eth_rx_bd *rx_bd;
5027 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5029 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
5030 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5032 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
5033 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5037 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5038 struct eth_rx_cqe_next_page *nextpg;
5040 nextpg = (struct eth_rx_cqe_next_page *)
5041 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5043 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5044 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5046 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5047 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5050 /* Allocate SGEs and initialize the ring elements */
5051 for (i = 0, ring_prod = 0;
5052 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
5054 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5055 BNX2X_ERR("was only able to allocate "
5057 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5058 /* Cleanup already allocated elements */
5059 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
5060 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
5061 fp->disable_tpa = 1;
5065 ring_prod = NEXT_SGE_IDX(ring_prod);
5067 fp->rx_sge_prod = ring_prod;
5069 /* Allocate BDs and initialize BD ring */
5070 fp->rx_comp_cons = 0;
5071 cqe_ring_prod = ring_prod = 0;
5072 for (i = 0; i < bp->rx_ring_size; i++) {
5073 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5074 BNX2X_ERR("was only able to allocate "
5075 "%d rx skbs on queue[%d]\n", i, j);
5076 fp->eth_q_stats.rx_skb_alloc_failed++;
5079 ring_prod = NEXT_RX_IDX(ring_prod);
5080 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5081 WARN_ON(ring_prod <= i);
5084 fp->rx_bd_prod = ring_prod;
5085 /* must not have more available CQEs than BDs */
5086 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5087 cqe_ring_prod);
5088 fp->rx_pkt = fp->rx_calls = 0;
5091 /* this will generate an interrupt (to the TSTORM)
5092 * must only be done after chip is initialized */
5094 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5095 fp->rx_sge_prod);
5099 REG_WR(bp, BAR_USTRORM_INTMEM +
5100 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5101 U64_LO(fp->rx_comp_mapping));
5102 REG_WR(bp, BAR_USTRORM_INTMEM +
5103 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5104 U64_HI(fp->rx_comp_mapping));
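/* Build the Tx rings: chain the "next BD" element of every page, reset the
 * doorbell data and the producer/consumer indices, and clear the per-queue
 * Tx packet counters.
 */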
5108 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5112 for_each_tx_queue(bp, j) {
5113 struct bnx2x_fastpath *fp = &bp->fp[j];
5115 for (i = 1; i <= NUM_TX_RINGS; i++) {
5116 struct eth_tx_next_bd *tx_next_bd =
5117 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5119 tx_next_bd->addr_hi =
5120 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5121 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5122 tx_next_bd->addr_lo =
5123 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5124 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5127 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5128 fp->tx_db.data.zero_fill1 = 0;
5129 fp->tx_db.data.prod = 0;
5131 fp->tx_pkt_prod = 0;
5132 fp->tx_pkt_cons = 0;
5135 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5139 /* clean tx statistics */
5140 for_each_rx_queue(bp, i)
5141 bnx2x_fp(bp, i, tx_pkt) = 0;
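/* Initialize the slow-path (SPQ) ring: reset the producer state, set the
 * number of pending slow-path commands allowed, and tell XSTORM where the
 * ring lives and what its current producer index is.
 */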
5144 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5146 int func = BP_FUNC(bp);
5148 spin_lock_init(&bp->spq_lock);
5150 bp->spq_left = MAX_SPQ_PENDING;
5151 bp->spq_prod_idx = 0;
5152 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5153 bp->spq_prod_bd = bp->spq;
5154 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5156 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5157 U64_LO(bp->spq_mapping));
5159 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5160 U64_HI(bp->spq_mapping));
5162 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5163 bp->spq_prod_idx);
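/* Fill in the per-connection ETH context: the USTORM (Rx) part gets the
 * client id, status block, buffer sizes and BD/SGE page addresses (plus
 * TPA parameters when enabled); the CSTORM/XSTORM (Tx) part gets the Tx
 * BD page address and the statistics configuration.
 */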
5166 static void bnx2x_init_context(struct bnx2x *bp)
5170 for_each_rx_queue(bp, i) {
5171 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5172 struct bnx2x_fastpath *fp = &bp->fp[i];
5173 u8 cl_id = fp->cl_id;
5175 context->ustorm_st_context.common.sb_index_numbers =
5176 BNX2X_RX_SB_INDEX_NUM;
5177 context->ustorm_st_context.common.clientId = cl_id;
5178 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5179 context->ustorm_st_context.common.flags =
5180 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5181 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5182 context->ustorm_st_context.common.statistics_counter_id =
5184 context->ustorm_st_context.common.mc_alignment_log_size =
5185 BNX2X_RX_ALIGN_SHIFT;
5186 context->ustorm_st_context.common.bd_buff_size =
5188 context->ustorm_st_context.common.bd_page_base_hi =
5189 U64_HI(fp->rx_desc_mapping);
5190 context->ustorm_st_context.common.bd_page_base_lo =
5191 U64_LO(fp->rx_desc_mapping);
5192 if (!fp->disable_tpa) {
5193 context->ustorm_st_context.common.flags |=
5194 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5195 context->ustorm_st_context.common.sge_buff_size =
5196 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5198 context->ustorm_st_context.common.sge_page_base_hi =
5199 U64_HI(fp->rx_sge_mapping);
5200 context->ustorm_st_context.common.sge_page_base_lo =
5201 U64_LO(fp->rx_sge_mapping);
5203 context->ustorm_st_context.common.max_sges_for_packet =
5204 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5205 context->ustorm_st_context.common.max_sges_for_packet =
5206 ((context->ustorm_st_context.common.
5207 max_sges_for_packet + PAGES_PER_SGE - 1) &
5208 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5211 context->ustorm_ag_context.cdu_usage =
5212 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5213 CDU_REGION_NUMBER_UCM_AG,
5214 ETH_CONNECTION_TYPE);
5216 context->xstorm_ag_context.cdu_reserved =
5217 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5218 CDU_REGION_NUMBER_XCM_AG,
5219 ETH_CONNECTION_TYPE);
5222 for_each_tx_queue(bp, i) {
5223 struct bnx2x_fastpath *fp = &bp->fp[i];
5224 struct eth_context *context =
5225 bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);
5227 context->cstorm_st_context.sb_index_number =
5228 C_SB_ETH_TX_CQ_INDEX;
5229 context->cstorm_st_context.status_block_id = fp->sb_id;
5231 context->xstorm_st_context.tx_bd_page_base_hi =
5232 U64_HI(fp->tx_desc_mapping);
5233 context->xstorm_st_context.tx_bd_page_base_lo =
5234 U64_LO(fp->tx_desc_mapping);
5235 context->xstorm_st_context.statistics_data = (fp->cl_id |
5236 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
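/* Populate the RSS indirection table in TSTORM internal memory, spreading
 * the entries round-robin across the Rx queue client ids; nothing to do
 * when multi_mode is disabled.
 */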
5240 static void bnx2x_init_ind_table(struct bnx2x *bp)
5242 int func = BP_FUNC(bp);
5245 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5249 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
5250 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5251 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5252 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5253 bp->fp->cl_id + (i % bp->num_rx_queues));
5256 static void bnx2x_set_client_config(struct bnx2x *bp)
5258 struct tstorm_eth_client_config tstorm_client = {0};
5259 int port = BP_PORT(bp);
5262 tstorm_client.mtu = bp->dev->mtu;
5263 tstorm_client.config_flags =
5264 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5265 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5267 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5268 tstorm_client.config_flags |=
5269 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5270 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5274 for_each_queue(bp, i) {
5275 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5277 REG_WR(bp, BAR_TSTRORM_INTMEM +
5278 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5279 ((u32 *)&tstorm_client)[0]);
5280 REG_WR(bp, BAR_TSTRORM_INTMEM +
5281 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5282 ((u32 *)&tstorm_client)[1]);
5285 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5286 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
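/* Translate bp->rx_mode (none/normal/allmulti/promisc) into the TSTORM MAC
 * filter configuration and the NIG LLH/BRB drv mask, then refresh the
 * per-client configuration unless Rx is completely disabled.
 */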
5289 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5291 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5292 int mode = bp->rx_mode;
5293 int mask = bp->rx_mode_cl_mask;
5294 int func = BP_FUNC(bp);
5295 int port = BP_PORT(bp);
5297 /* All but management unicast packets should pass to the host as well */
5299 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5300 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5301 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5302 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5304 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
5307 case BNX2X_RX_MODE_NONE: /* no Rx */
5308 tstorm_mac_filter.ucast_drop_all = mask;
5309 tstorm_mac_filter.mcast_drop_all = mask;
5310 tstorm_mac_filter.bcast_drop_all = mask;
5313 case BNX2X_RX_MODE_NORMAL:
5314 tstorm_mac_filter.bcast_accept_all = mask;
5317 case BNX2X_RX_MODE_ALLMULTI:
5318 tstorm_mac_filter.mcast_accept_all = mask;
5319 tstorm_mac_filter.bcast_accept_all = mask;
5322 case BNX2X_RX_MODE_PROMISC:
5323 tstorm_mac_filter.ucast_accept_all = mask;
5324 tstorm_mac_filter.mcast_accept_all = mask;
5325 tstorm_mac_filter.bcast_accept_all = mask;
5326 /* pass management unicast packets as well */
5327 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5331 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5336 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5339 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5340 REG_WR(bp, BAR_TSTRORM_INTMEM +
5341 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5342 ((u32 *)&tstorm_mac_filter)[i]);
5344 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5345 ((u32 *)&tstorm_mac_filter)[i]); */
5348 if (mode != BNX2X_RX_MODE_NONE)
5349 bnx2x_set_client_config(bp);
5352 static void bnx2x_init_internal_common(struct bnx2x *bp)
5356 /* Zero this manually as its initialization is
5357 currently missing in the initTool */
5358 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5359 REG_WR(bp, BAR_USTRORM_INTMEM +
5360 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5363 static void bnx2x_init_internal_port(struct bnx2x *bp)
5365 int port = BP_PORT(bp);
5368 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5370 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5371 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5372 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
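/* Per-function internal memory setup: RSS and TPA configuration, the
 * initial (closed) Rx mode, zeroing of the per-client storm statistics,
 * the addresses used for statistics queries, E1H function mode and outer
 * VLAN, CQE page mappings and aggregation sizes, dropless flow control
 * thresholds, and the rate shaping/fairness (cmng) context.
 */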
5375 static void bnx2x_init_internal_func(struct bnx2x *bp)
5377 struct tstorm_eth_function_common_config tstorm_config = {0};
5378 struct stats_indication_flags stats_flags = {0};
5379 int port = BP_PORT(bp);
5380 int func = BP_FUNC(bp);
5386 tstorm_config.config_flags = MULTI_FLAGS(bp);
5387 tstorm_config.rss_result_mask = MULTI_MASK;
5390 /* Enable TPA if needed */
5391 if (bp->flags & TPA_ENABLE_FLAG)
5392 tstorm_config.config_flags |=
5393 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5396 tstorm_config.config_flags |=
5397 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5399 tstorm_config.leading_client_id = BP_L_ID(bp);
5401 REG_WR(bp, BAR_TSTRORM_INTMEM +
5402 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5403 (*(u32 *)&tstorm_config));
5405 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5406 bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
5407 bnx2x_set_storm_rx_mode(bp);
5409 for_each_queue(bp, i) {
5410 u8 cl_id = bp->fp[i].cl_id;
5412 /* reset xstorm per client statistics */
5413 offset = BAR_XSTRORM_INTMEM +
5414 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5416 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5417 REG_WR(bp, offset + j*4, 0);
5419 /* reset tstorm per client statistics */
5420 offset = BAR_TSTRORM_INTMEM +
5421 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5423 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5424 REG_WR(bp, offset + j*4, 0);
5426 /* reset ustorm per client statistics */
5427 offset = BAR_USTRORM_INTMEM +
5428 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5430 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5431 REG_WR(bp, offset + j*4, 0);
5434 /* Init statistics related context */
5435 stats_flags.collect_eth = 1;
5437 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5438 ((u32 *)&stats_flags)[0]);
5439 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5440 ((u32 *)&stats_flags)[1]);
5442 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5443 ((u32 *)&stats_flags)[0]);
5444 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5445 ((u32 *)&stats_flags)[1]);
5447 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5448 ((u32 *)&stats_flags)[0]);
5449 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5450 ((u32 *)&stats_flags)[1]);
5452 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5453 ((u32 *)&stats_flags)[0]);
5454 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5455 ((u32 *)&stats_flags)[1]);
5457 REG_WR(bp, BAR_XSTRORM_INTMEM +
5458 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5459 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5460 REG_WR(bp, BAR_XSTRORM_INTMEM +
5461 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5462 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5464 REG_WR(bp, BAR_TSTRORM_INTMEM +
5465 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5466 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5467 REG_WR(bp, BAR_TSTRORM_INTMEM +
5468 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5469 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5471 REG_WR(bp, BAR_USTRORM_INTMEM +
5472 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5473 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5474 REG_WR(bp, BAR_USTRORM_INTMEM +
5475 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5476 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5478 if (CHIP_IS_E1H(bp)) {
5479 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5481 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5483 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5485 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5488 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5492 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5494 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5495 SGE_PAGE_SIZE * PAGES_PER_SGE),
5497 for_each_rx_queue(bp, i) {
5498 struct bnx2x_fastpath *fp = &bp->fp[i];
5500 REG_WR(bp, BAR_USTRORM_INTMEM +
5501 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5502 U64_LO(fp->rx_comp_mapping));
5503 REG_WR(bp, BAR_USTRORM_INTMEM +
5504 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5505 U64_HI(fp->rx_comp_mapping));
5508 REG_WR(bp, BAR_USTRORM_INTMEM +
5509 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5510 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5511 REG_WR(bp, BAR_USTRORM_INTMEM +
5512 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5513 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5515 REG_WR16(bp, BAR_USTRORM_INTMEM +
5516 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5520 /* dropless flow control */
5521 if (CHIP_IS_E1H(bp)) {
5522 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5524 rx_pause.bd_thr_low = 250;
5525 rx_pause.cqe_thr_low = 250;
5527 rx_pause.sge_thr_low = 0;
5528 rx_pause.bd_thr_high = 350;
5529 rx_pause.cqe_thr_high = 350;
5530 rx_pause.sge_thr_high = 0;
5532 for_each_rx_queue(bp, i) {
5533 struct bnx2x_fastpath *fp = &bp->fp[i];
5535 if (!fp->disable_tpa) {
5536 rx_pause.sge_thr_low = 150;
5537 rx_pause.sge_thr_high = 250;
5541 offset = BAR_USTRORM_INTMEM +
5542 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5545 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5547 REG_WR(bp, offset + j*4,
5548 ((u32 *)&rx_pause)[j]);
5552 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5554 /* Init rate shaping and fairness contexts */
5558 /* During init there is no active link
5559 Until link is up, set link rate to 10Gbps */
5560 bp->link_vars.line_speed = SPEED_10000;
5561 bnx2x_init_port_minmax(bp);
5565 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
5566 bnx2x_calc_vn_weight_sum(bp);
5568 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5569 bnx2x_init_vn_minmax(bp, 2*vn + port);
5571 /* Enable rate shaping and fairness */
5572 bp->cmng.flags.cmng_enables |=
5573 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5576 /* rate shaping and fairness are disabled */
5578 "single function mode minmax will be disabled\n");
5582 /* Store it to internal memory */
5584 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5585 REG_WR(bp, BAR_XSTRORM_INTMEM +
5586 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5587 ((u32 *)(&bp->cmng))[i]);
5590 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5592 switch (load_code) {
5593 case FW_MSG_CODE_DRV_LOAD_COMMON:
5594 bnx2x_init_internal_common(bp);
5597 case FW_MSG_CODE_DRV_LOAD_PORT:
5598 bnx2x_init_internal_port(bp);
5601 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5602 bnx2x_init_internal_func(bp);
5606 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5611 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5615 for_each_queue(bp, i) {
5616 struct bnx2x_fastpath *fp = &bp->fp[i];
5619 fp->state = BNX2X_FP_STATE_CLOSED;
5621 fp->cl_id = BP_L_ID(bp) + i;
5623 fp->sb_id = fp->cl_id + 1;
5625 fp->sb_id = fp->cl_id;
5627 /* Suitable Rx and Tx SBs are served by the same client */
5628 if (i >= bp->num_rx_queues)
5629 fp->cl_id -= bp->num_rx_queues;
5631 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5632 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5633 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5635 bnx2x_update_fpsb_idx(fp);
5638 /* ensure status block indices were read */
5642 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5644 bnx2x_update_dsb_idx(bp);
5645 bnx2x_update_coalesce(bp);
5646 bnx2x_init_rx_rings(bp);
5647 bnx2x_init_tx_ring(bp);
5648 bnx2x_init_sp_ring(bp);
5649 bnx2x_init_context(bp);
5650 bnx2x_init_internal(bp, load_code);
5651 bnx2x_init_ind_table(bp);
5652 bnx2x_stats_init(bp);
5654 /* At this point, we are ready for interrupts */
5655 atomic_set(&bp->intr_sem, 0);
5657 /* flush all before enabling interrupts */
5661 bnx2x_int_enable(bp);
5663 /* Check for SPIO5 */
5664 bnx2x_attn_int_deasserted0(bp,
5665 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5666 AEU_INPUTS_ATTN_BITS_SPIO5);
5669 /* end of nic init */
5671 /*
5672 * gzip service functions
5673 */
5675 static int bnx2x_gunzip_init(struct bnx2x *bp)
5677 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5678 &bp->gunzip_mapping);
5679 if (bp->gunzip_buf == NULL)
5682 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5683 if (bp->strm == NULL)
5686 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5688 if (bp->strm->workspace == NULL)
5698 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5699 bp->gunzip_mapping);
5700 bp->gunzip_buf = NULL;
5703 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5704 " un-compression\n", bp->dev->name);
5708 static void bnx2x_gunzip_end(struct bnx2x *bp)
5710 kfree(bp->strm->workspace);
5715 if (bp->gunzip_buf) {
5716 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5717 bp->gunzip_mapping);
5718 bp->gunzip_buf = NULL;
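/* Decompress a gzip'ed firmware image into gunzip_buf using zlib: check
 * the gzip magic, skip the optional embedded file name, then inflate as a
 * raw deflate stream (negative windowBits).
 */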
5722 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5726 /* check gzip header */
5727 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5728 BNX2X_ERR("Bad gzip header\n");
5736 if (zbuf[3] & FNAME)
5737 while ((zbuf[n++] != 0) && (n < len));
5739 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5740 bp->strm->avail_in = len - n;
5741 bp->strm->next_out = bp->gunzip_buf;
5742 bp->strm->avail_out = FW_BUF_SIZE;
5744 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);