1 /* bnx2x_main.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2009 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
 *
 */
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h> /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>

#include <net/ip.h>
#include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>

#include "bnx2x.h"
55 #include "bnx2x_init.h"
56 #include "bnx2x_init_ops.h"
57 #include "bnx2x_dump.h"
59 #define DRV_MODULE_VERSION "1.52.1"
60 #define DRV_MODULE_RELDATE "2009/08/12"
61 #define BNX2X_BC_VER 0x040200
63 #include <linux/firmware.h>
64 #include "bnx2x_fw_file_hdr.h"
66 #define FW_FILE_PREFIX_E1 "bnx2x-e1-"
67 #define FW_FILE_PREFIX_E1H "bnx2x-e1h-"
69 /* Time in jiffies before concluding the transmitter is hung */
70 #define TX_TIMEOUT (5*HZ)
72 static char version[] __devinitdata =
73 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
74 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
76 MODULE_AUTHOR("Eliezer Tamir");
77 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
78 MODULE_LICENSE("GPL");
79 MODULE_VERSION(DRV_MODULE_VERSION);
81 static int multi_mode = 1;
82 module_param(multi_mode, int, 0);
83 MODULE_PARM_DESC(multi_mode, " Multi queue mode "
84 "(0 Disable; 1 Enable (default))");
86 static int num_rx_queues;
87 module_param(num_rx_queues, int, 0);
88 MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
89 " (default is half number of CPUs)");
91 static int num_tx_queues;
92 module_param(num_tx_queues, int, 0);
93 MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
94 " (default is half number of CPUs)");
96 static int disable_tpa;
97 module_param(disable_tpa, int, 0);
98 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
static int int_mode;
101 module_param(int_mode, int, 0);
102 MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
104 static int dropless_fc;
105 module_param(dropless_fc, int, 0);
106 MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
static int poll;
109 module_param(poll, int, 0);
110 MODULE_PARM_DESC(poll, " Use polling (for debug)");
112 static int mrrs = -1;
113 module_param(mrrs, int, 0);
114 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
static int debug;
117 module_param(debug, int, 0);
118 MODULE_PARM_DESC(debug, " Default debug msglevel");
120 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
122 static struct workqueue_struct *bnx2x_wq;
124 enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

130 /* indexed by board_type, above */
static struct {
	char *name;
133 } board_info[] __devinitdata = {
134 { "Broadcom NetXtreme II BCM57710 XGb" },
135 { "Broadcom NetXtreme II BCM57711 XGb" },
136 { "Broadcom NetXtreme II BCM57711E XGb" }
140 static const struct pci_device_id bnx2x_pci_tbl[] = {
141 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
142 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
143 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};
147 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
149 /****************************************************************************
150 * General service functions
151 ****************************************************************************/
154 /* locking is done by mcp */
156 void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
158 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
159 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
160 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
161 PCICFG_VENDOR_ID_OFFSET);
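/* Both indirect accessors work through the GRC window in PCI config space:
 * PCICFG_GRC_ADDRESS selects the target register and PCICFG_GRC_DATA moves
 * the data; the window is then parked back at PCICFG_VENDOR_ID_OFFSET so it
 * does not point at a live register between calls.
 */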
164 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
168 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
169 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
170 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
171 PCICFG_VENDOR_ID_OFFSET);
176 static const u32 dmae_reg_go_c[] = {
177 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
178 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
179 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
180 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
183 /* copy command into DMAE command memory and set DMAE command go */
184 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
190 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
191 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
192 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
194 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
195 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
197 REG_WR(bp, dmae_reg_go_c[idx], 1);
200 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
203 struct dmae_command dmae;
204 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
207 if (!bp->dmae_ready) {
208 u32 *data = bnx2x_sp(bp, wb_data[0]);
210 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
211 " using indirect\n", dst_addr, len32);
212 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
216 memset(&dmae, 0, sizeof(struct dmae_command));
218 dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
219 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
220 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
222 DMAE_CMD_ENDIANITY_B_DW_SWAP |
224 DMAE_CMD_ENDIANITY_DW_SWAP |
226 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
227 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
228 dmae.src_addr_lo = U64_LO(dma_addr);
229 dmae.src_addr_hi = U64_HI(dma_addr);
230 dmae.dst_addr_lo = dst_addr >> 2;
231 dmae.dst_addr_hi = 0;
233 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
234 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
235 dmae.comp_val = DMAE_COMP_VAL;
237 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
238 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
239 "dst_addr [%x:%08x (%08x)]\n"
240 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
241 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
242 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
243 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
244 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
245 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
246 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
248 mutex_lock(&bp->dmae_mutex);
252 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
256 while (*wb_comp != DMAE_COMP_VAL) {
257 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
260 BNX2X_ERR("DMAE timeout!\n");
264 /* adjust delay for emulation/FPGA */
265 if (CHIP_REV_IS_SLOW(bp))
271 mutex_unlock(&bp->dmae_mutex);
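/* Illustrative use (dst_addr here is a hypothetical GRC byte address):
 *	bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), dst_addr, 2);
 * copies two dwords from the slowpath scratch area into GRC. Completion is
 * detected by polling wb_comp for DMAE_COMP_VAL under dmae_mutex, so only
 * one DMAE command is ever in flight on this channel.
 */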
274 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
276 struct dmae_command dmae;
277 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
280 if (!bp->dmae_ready) {
281 u32 *data = bnx2x_sp(bp, wb_data[0]);
284 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
285 " using indirect\n", src_addr, len32);
286 for (i = 0; i < len32; i++)
287 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
291 memset(&dmae, 0, sizeof(struct dmae_command));
293 dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
294 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
295 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
297 DMAE_CMD_ENDIANITY_B_DW_SWAP |
299 DMAE_CMD_ENDIANITY_DW_SWAP |
301 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
302 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
303 dmae.src_addr_lo = src_addr >> 2;
304 dmae.src_addr_hi = 0;
305 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
306 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
308 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
309 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
310 dmae.comp_val = DMAE_COMP_VAL;
312 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
313 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
314 "dst_addr [%x:%08x (%08x)]\n"
315 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
316 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
317 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
318 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
320 mutex_lock(&bp->dmae_mutex);
322 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
325 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
329 while (*wb_comp != DMAE_COMP_VAL) {
332 BNX2X_ERR("DMAE timeout!\n");
336 /* adjust delay for emulation/FPGA */
337 if (CHIP_REV_IS_SLOW(bp))
342 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
343 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
344 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
346 mutex_unlock(&bp->dmae_mutex);
349 void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
354 while (len > DMAE_LEN32_WR_MAX) {
355 bnx2x_write_dmae(bp, phys_addr + offset,
356 addr + offset, DMAE_LEN32_WR_MAX);
357 offset += DMAE_LEN32_WR_MAX * 4;
358 len -= DMAE_LEN32_WR_MAX;
361 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
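/* Writes longer than DMAE_LEN32_WR_MAX dwords are issued as back-to-back
 * DMAE commands; both the host physical address and the GRC address advance
 * by DMAE_LEN32_WR_MAX * 4 bytes per chunk, and the remainder goes out in a
 * final, shorter command.
 */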
364 /* used only for slowpath so not inlined */
365 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
369 wb_write[0] = val_hi;
370 wb_write[1] = val_lo;
371 REG_WR_DMAE(bp, reg, wb_write, 2);
375 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
379 REG_RD_DMAE(bp, reg, wb_data, 2);
381 return HILO_U64(wb_data[0], wb_data[1]);
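/* 64-bit ("wide bus") registers must be accessed as a hi/lo dword pair in a
 * single DMAE transaction; bnx2x_wb_wr()/bnx2x_wb_rd() wrap REG_WR_DMAE /
 * REG_RD_DMAE with the high dword in element 0, as the HILO_U64()
 * composition above shows.
 */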
385 static int bnx2x_mc_assert(struct bnx2x *bp)
389 u32 row0, row1, row2, row3;
392 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
393 XSTORM_ASSERT_LIST_INDEX_OFFSET);
395 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
397 /* print the asserts */
398 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
400 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
401 XSTORM_ASSERT_LIST_OFFSET(i));
402 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
403 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
404 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
405 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
406 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
407 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
409 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
410 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
411 " 0x%08x 0x%08x 0x%08x\n",
412 i, row3, row2, row1, row0);
420 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
421 TSTORM_ASSERT_LIST_INDEX_OFFSET);
423 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
425 /* print the asserts */
426 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
428 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
429 TSTORM_ASSERT_LIST_OFFSET(i));
430 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
431 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
432 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
433 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
434 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
435 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
437 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
438 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
439 " 0x%08x 0x%08x 0x%08x\n",
440 i, row3, row2, row1, row0);
448 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
449 CSTORM_ASSERT_LIST_INDEX_OFFSET);
451 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
453 /* print the asserts */
454 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
456 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
457 CSTORM_ASSERT_LIST_OFFSET(i));
458 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
459 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
460 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
461 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
462 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
463 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
465 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
466 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
467 " 0x%08x 0x%08x 0x%08x\n",
468 i, row3, row2, row1, row0);
476 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
477 USTORM_ASSERT_LIST_INDEX_OFFSET);
479 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
481 /* print the asserts */
482 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
484 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
485 USTORM_ASSERT_LIST_OFFSET(i));
486 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
487 USTORM_ASSERT_LIST_OFFSET(i) + 4);
488 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
489 USTORM_ASSERT_LIST_OFFSET(i) + 8);
490 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
491 USTORM_ASSERT_LIST_OFFSET(i) + 12);
493 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
494 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
495 " 0x%08x 0x%08x 0x%08x\n",
496 i, row3, row2, row1, row0);
506 static void bnx2x_fw_dump(struct bnx2x *bp)
512 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
513 mark = ((mark + 0x3) & ~0x3);
514 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);
516 printk(KERN_ERR PFX);
517 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
518 for (word = 0; word < 8; word++)
519 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
522 printk(KERN_CONT "%s", (char *)data);
524 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
525 for (word = 0; word < 8; word++)
526 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
529 printk(KERN_CONT "%s", (char *)data);
531 printk(KERN_ERR PFX "end of fw dump\n");
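/* Note on bnx2x_fw_dump(): the "mark" read from offset 0xf104 points into
 * the MCP trace buffer; subtracting 0x08000000 appears to translate it into
 * an offset relative to MCP_REG_MCPR_SCRATCH, and the two loops print the
 * circular buffer in order, split at the mark.
 */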
534 static void bnx2x_panic_dump(struct bnx2x *bp)
539 bp->stats_state = STATS_STATE_DISABLED;
540 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
542 BNX2X_ERR("begin crash dump -----------------\n");
546 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
547 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
548 " spq_prod_idx(%u)\n",
549 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
550 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
553 for_each_rx_queue(bp, i) {
554 struct bnx2x_fastpath *fp = &bp->fp[i];
556 BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
557 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
558 " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
559 i, fp->rx_bd_prod, fp->rx_bd_cons,
560 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
561 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
562 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
563 " fp_u_idx(%x) *sb_u_idx(%x)\n",
564 fp->rx_sge_prod, fp->last_max_sge,
565 le16_to_cpu(fp->fp_u_idx),
566 fp->status_blk->u_status_block.status_block_index);
570 for_each_tx_queue(bp, i) {
571 struct bnx2x_fastpath *fp = &bp->fp[i];
573 BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
574 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
575 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
576 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
577 BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
578 " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
579 fp->status_blk->c_status_block.status_block_index,
580 fp->tx_db.data.prod);
585 for_each_rx_queue(bp, i) {
586 struct bnx2x_fastpath *fp = &bp->fp[i];
588 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
589 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
590 for (j = start; j != end; j = RX_BD(j + 1)) {
591 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
592 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
594 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
595 i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
598 start = RX_SGE(fp->rx_sge_prod);
599 end = RX_SGE(fp->last_max_sge);
600 for (j = start; j != end; j = RX_SGE(j + 1)) {
601 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
602 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
604 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
605 i, j, rx_sge[1], rx_sge[0], sw_page->page);
608 start = RCQ_BD(fp->rx_comp_cons - 10);
609 end = RCQ_BD(fp->rx_comp_cons + 503);
610 for (j = start; j != end; j = RCQ_BD(j + 1)) {
611 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
613 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
614 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
619 for_each_tx_queue(bp, i) {
620 struct bnx2x_fastpath *fp = &bp->fp[i];
622 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
623 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
624 for (j = start; j != end; j = TX_BD(j + 1)) {
625 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
627 BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
628 i, j, sw_bd->skb, sw_bd->first_bd);
631 start = TX_BD(fp->tx_bd_cons - 10);
632 end = TX_BD(fp->tx_bd_cons + 254);
633 for (j = start; j != end; j = TX_BD(j + 1)) {
634 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
636 BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
637 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
643 BNX2X_ERR("end crash dump -----------------\n");
646 static void bnx2x_int_enable(struct bnx2x *bp)
648 int port = BP_PORT(bp);
649 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
650 u32 val = REG_RD(bp, addr);
651 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
652 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
655 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
656 HC_CONFIG_0_REG_INT_LINE_EN_0);
657 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
658 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
660 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
661 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
662 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
663 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
665 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
666 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
667 HC_CONFIG_0_REG_INT_LINE_EN_0 |
668 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
670 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
673 REG_WR(bp, addr, val);
675 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
678 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
679 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
681 REG_WR(bp, addr, val);
683 /* Ensure that HC_CONFIG is written before leading/trailing edge config */
688 if (CHIP_IS_E1H(bp)) {
689 /* init leading/trailing edge */
691 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
693 /* enable nig and gpio3 attention */
698 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
699 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
702 /* Make sure that interrupts are indeed enabled from here on */
706 static void bnx2x_int_disable(struct bnx2x *bp)
708 int port = BP_PORT(bp);
709 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
710 u32 val = REG_RD(bp, addr);
712 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
713 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
714 HC_CONFIG_0_REG_INT_LINE_EN_0 |
715 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
717 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
720 /* flush all outstanding writes */
723 REG_WR(bp, addr, val);
724 if (REG_RD(bp, addr) != val)
725 BNX2X_ERR("BUG! proper val not read from IGU!\n");
728 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
730 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
733 /* disable interrupt handling */
734 atomic_inc(&bp->intr_sem);
735 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
738 /* prevent the HW from sending interrupts */
739 bnx2x_int_disable(bp);
741 /* make sure all ISRs are done */
743 synchronize_irq(bp->msix_table[0].vector);
748 for_each_queue(bp, i)
749 synchronize_irq(bp->msix_table[i + offset].vector);
751 synchronize_irq(bp->pdev->irq);
753 /* make sure sp_task is not running */
754 cancel_delayed_work(&bp->sp_task);
755 flush_workqueue(bnx2x_wq);
761 /* General service functions */
764 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
765 u8 storm, u16 index, u8 op, u8 update)
767 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
768 COMMAND_REG_INT_ACK);
769 struct igu_ack_register igu_ack;
771 igu_ack.status_block_index = index;
772 igu_ack.sb_id_and_flags =
773 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
774 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
775 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
776 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
778 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
779 (*(u32 *)&igu_ack), hc_addr);
780 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
782 /* Make sure that ACK is written */
787 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
789 struct host_status_block *fpsb = fp->status_blk;
792 barrier(); /* status block is written to by the chip */
793 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
794 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
797 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
798 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
804 static u16 bnx2x_ack_int(struct bnx2x *bp)
806 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
807 COMMAND_REG_SIMD_MASK);
808 u32 result = REG_RD(bp, hc_addr);
810 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
818 /* fast path service functions */
821 static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
823 /* Tell compiler that consumer and producer can change */
825 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
828 /* free skb in the packet ring at pos idx
829 * return idx of last bd freed
 */
831 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
834 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
835 struct eth_tx_start_bd *tx_start_bd;
836 struct eth_tx_bd *tx_data_bd;
837 struct sk_buff *skb = tx_buf->skb;
838 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
841 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
845 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
846 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
847 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
848 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
850 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
851 #ifdef BNX2X_STOP_ON_ERROR
852 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
853 BNX2X_ERR("BAD nbd!\n");
857 new_cons = nbd + tx_buf->first_bd;
859 /* Get the next bd */
860 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
862 /* Skip a parse bd... */
864 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
866 /* ...and the TSO split header bd since they have no mapping */
867 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
869 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
875 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
876 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
877 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
878 BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
880 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
885 dev_kfree_skb_any(skb);
886 tx_buf->first_bd = 0;
892 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
898 barrier(); /* Tell compiler that prod and cons can change */
899 prod = fp->tx_bd_prod;
900 cons = fp->tx_bd_cons;
902 /* NUM_TX_RINGS = number of "next-page" entries
903 It will be used as a threshold */
904 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
906 #ifdef BNX2X_STOP_ON_ERROR
908 WARN_ON(used > fp->bp->tx_ring_size);
909 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
912 return (s16)(fp->bp->tx_ring_size) - used;
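/* Worked example (hypothetical numbers): with tx_ring_size = 4078,
 * tx_bd_prod = 110, tx_bd_cons = 100 and NUM_TX_RINGS = 16, "used" is
 * 10 + 16 = 26 and the function reports 4052 free BDs; counting the
 * NUM_TX_RINGS "next page" BDs as used keeps the estimate conservative.
 */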
915 static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
917 struct bnx2x *bp = fp->bp;
918 struct netdev_queue *txq;
919 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
922 #ifdef BNX2X_STOP_ON_ERROR
923 if (unlikely(bp->panic))
927 txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
928 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
929 sw_cons = fp->tx_pkt_cons;
931 while (sw_cons != hw_cons) {
934 pkt_cons = TX_BD(sw_cons);
936 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
938 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
939 hw_cons, sw_cons, pkt_cons);
941 /* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
943 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
946 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
951 fp->tx_pkt_cons = sw_cons;
952 fp->tx_bd_cons = bd_cons;
954 /* TBD need a thresh? */
955 if (unlikely(netif_tx_queue_stopped(txq))) {
957 /* Need to make the tx_bd_cons update visible to start_xmit()
958 * before checking for netif_tx_queue_stopped(). Without the
959 * memory barrier, there is a small possibility that
960 * start_xmit() will miss it and cause the queue to be stopped
 * forever.
 */
965 if ((netif_tx_queue_stopped(txq)) &&
966 (bp->state == BNX2X_STATE_OPEN) &&
967 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
968 netif_tx_wake_queue(txq);
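/* The wake-up above is deliberately double-checked: the memory barrier makes
 * the new tx_bd_cons visible before re-testing "stopped", and the queue is
 * only woken while the device is OPEN and at least MAX_SKB_FRAGS + 3 BDs are
 * free, i.e. enough room for a worst-case packet from the xmit path.
 */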
973 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
976 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
977 union eth_rx_cqe *rr_cqe)
979 struct bnx2x *bp = fp->bp;
980 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
981 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
984 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
985 fp->index, cid, command, bp->state,
986 rr_cqe->ramrod_cqe.ramrod_type);
991 switch (command | fp->state) {
992 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
993 BNX2X_FP_STATE_OPENING):
994 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
996 fp->state = BNX2X_FP_STATE_OPEN;
999 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
1000 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
1002 fp->state = BNX2X_FP_STATE_HALTED;
1006 BNX2X_ERR("unexpected MC reply (%d) "
1007 "fp->state is %x\n", command, fp->state);
1010 mb(); /* force bnx2x_wait_ramrod() to see the change */
1014 switch (command | bp->state) {
1015 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
1016 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
1017 bp->state = BNX2X_STATE_OPEN;
1020 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
1021 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
1022 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
1023 fp->state = BNX2X_FP_STATE_HALTED;
1026 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
1027 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
1028 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
1032 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
1033 DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
1034 bnx2x_cnic_cfc_comp(bp, cid);
1038 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
1039 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
1040 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
1041 bp->set_mac_pending--;
1045 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
1046 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED):
1047 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
1048 bp->set_mac_pending--;
1053 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
1054 command, bp->state);
1057 mb(); /* force bnx2x_wait_ramrod() to see the change */
1060 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1061 struct bnx2x_fastpath *fp, u16 index)
1063 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1064 struct page *page = sw_buf->page;
1065 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1067 /* Skip "next page" elements */
1071 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
1072 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1073 __free_pages(page, PAGES_PER_SGE_SHIFT);
1075 sw_buf->page = NULL;
1080 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1081 struct bnx2x_fastpath *fp, int last)
1085 for (i = 0; i < last; i++)
1086 bnx2x_free_rx_sge(bp, fp, i);
1089 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1090 struct bnx2x_fastpath *fp, u16 index)
1092 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1093 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1094 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1097 if (unlikely(page == NULL))
1100 mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
1101 PCI_DMA_FROMDEVICE);
1102 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1103 __free_pages(page, PAGES_PER_SGE_SHIFT);
1107 sw_buf->page = page;
1108 pci_unmap_addr_set(sw_buf, mapping, mapping);
1110 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1111 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1116 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1117 struct bnx2x_fastpath *fp, u16 index)
1119 struct sk_buff *skb;
1120 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1121 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1124 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1125 if (unlikely(skb == NULL))
1128 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
1129 PCI_DMA_FROMDEVICE);
1130 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1136 pci_unmap_addr_set(rx_buf, mapping, mapping);
1138 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1139 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1144 /* note that we are not allocating a new skb,
1145 * we are just moving one from cons to prod
1146 * we are not creating a new mapping,
1147 * so there is no need to check for dma_mapping_error().
 */
1149 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1150 struct sk_buff *skb, u16 cons, u16 prod)
1152 struct bnx2x *bp = fp->bp;
1153 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1154 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1155 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1156 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1158 pci_dma_sync_single_for_device(bp->pdev,
1159 pci_unmap_addr(cons_rx_buf, mapping),
1160 RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1162 prod_rx_buf->skb = cons_rx_buf->skb;
1163 pci_unmap_addr_set(prod_rx_buf, mapping,
1164 pci_unmap_addr(cons_rx_buf, mapping));
1165 *prod_bd = *cons_bd;
1168 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1171 u16 last_max = fp->last_max_sge;
1173 if (SUB_S16(idx, last_max) > 0)
1174 fp->last_max_sge = idx;
1177 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1181 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1182 int idx = RX_SGE_CNT * i - 1;
1184 for (j = 0; j < 2; j++) {
1185 SGE_MASK_CLEAR_BIT(fp, idx);
1191 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1192 struct eth_fast_path_rx_cqe *fp_cqe)
1194 struct bnx2x *bp = fp->bp;
1195 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1196 le16_to_cpu(fp_cqe->len_on_bd)) >>
1198 u16 last_max, last_elem, first_elem;
1205 /* First mark all used pages */
1206 for (i = 0; i < sge_len; i++)
1207 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1209 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1210 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1212 /* Here we assume that the last SGE index is the biggest */
1213 prefetch((void *)(fp->sge_mask));
1214 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1216 last_max = RX_SGE(fp->last_max_sge);
1217 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1218 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1220 /* If ring is not full */
1221 if (last_elem + 1 != first_elem)
1224 /* Now update the prod */
1225 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1226 if (likely(fp->sge_mask[i]))
1229 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1230 delta += RX_SGE_MASK_ELEM_SZ;
1234 fp->rx_sge_prod += delta;
1235 /* clear page-end entries */
1236 bnx2x_clear_sge_mask_next_elems(fp);
1239 DP(NETIF_MSG_RX_STATUS,
1240 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1241 fp->last_max_sge, fp->rx_sge_prod);
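/* Rough picture of the SGE accounting above: fp->sge_mask has one bit per
 * SGE BD; bits are cleared as the FW consumes pages, and a 64-bit mask
 * element is recycled to the producer only once all of its bits are clear,
 * which is why rx_sge_prod advances in RX_SGE_MASK_ELEM_SZ steps.
 */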
1244 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1246 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1247 memset(fp->sge_mask, 0xff,
1248 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1250 /* Clear the two last indices in the page to 1:
1251 these are the indices that correspond to the "next" element,
1252 hence will never be indicated and should be removed from
1253 the calculations. */
1254 bnx2x_clear_sge_mask_next_elems(fp);
1257 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1258 struct sk_buff *skb, u16 cons, u16 prod)
1260 struct bnx2x *bp = fp->bp;
1261 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1262 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1263 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1266 /* move empty skb from pool to prod and map it */
1267 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1268 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1269 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1270 pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1272 /* move partial skb from cons to pool (don't unmap yet) */
1273 fp->tpa_pool[queue] = *cons_rx_buf;
1275 /* mark bin state as start - print error if current state != stop */
1276 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1277 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1279 fp->tpa_state[queue] = BNX2X_TPA_START;
1281 /* point prod_bd to new skb */
1282 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1283 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1285 #ifdef BNX2X_STOP_ON_ERROR
1286 fp->tpa_queue_used |= (1 << queue);
1287 #ifdef __powerpc64__
1288 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1290 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1292 fp->tpa_queue_used);
1296 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1297 struct sk_buff *skb,
1298 struct eth_fast_path_rx_cqe *fp_cqe,
1301 struct sw_rx_page *rx_pg, old_rx_pg;
1302 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1303 u32 i, frag_len, frag_size, pages;
1307 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1308 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1310 /* This is needed in order to enable forwarding support */
1312 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1313 max(frag_size, (u32)len_on_bd));
1315 #ifdef BNX2X_STOP_ON_ERROR
1317 min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
1318 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1320 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1321 fp_cqe->pkt_len, len_on_bd);
1327 /* Run through the SGL and compose the fragmented skb */
1328 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1329 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1331 /* FW gives the indices of the SGE as if the ring is an array
1332 (meaning that "next" element will consume 2 indices) */
1333 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1334 rx_pg = &fp->rx_page_ring[sge_idx];
1337 /* If we fail to allocate a substitute page, we simply stop
1338 where we are and drop the whole packet */
1339 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1340 if (unlikely(err)) {
1341 fp->eth_q_stats.rx_skb_alloc_failed++;
1345 /* Unmap the page as we are going to pass it to the stack */
1346 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1347 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1349 /* Add one frag and update the appropriate fields in the skb */
1350 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1352 skb->data_len += frag_len;
1353 skb->truesize += frag_len;
1354 skb->len += frag_len;
1356 frag_size -= frag_len;
1362 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1363 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1366 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1367 struct sk_buff *skb = rx_buf->skb;
1369 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1371 /* Unmap skb in the pool anyway, as we are going to change
1372 pool entry status to BNX2X_TPA_STOP even if new skb allocation
   fails. */
1374 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1375 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1377 if (likely(new_skb)) {
1378 /* fix ip xsum and give it to the stack */
1379 /* (no need to map the new skb) */
1382 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1383 PARSING_FLAGS_VLAN);
1384 int is_not_hwaccel_vlan_cqe =
1385 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1389 prefetch(((char *)(skb)) + 128);
1391 #ifdef BNX2X_STOP_ON_ERROR
1392 if (pad + len > bp->rx_buf_size) {
1393 BNX2X_ERR("skb_put is about to fail... "
1394 "pad %d len %d rx_buf_size %d\n",
1395 pad, len, bp->rx_buf_size);
1401 skb_reserve(skb, pad);
1404 skb->protocol = eth_type_trans(skb, bp->dev);
1405 skb->ip_summed = CHECKSUM_UNNECESSARY;
1410 iph = (struct iphdr *)skb->data;
1412 /* If there is no Rx VLAN offloading -
1413 take the VLAN tag into account */
1414 if (unlikely(is_not_hwaccel_vlan_cqe))
1415 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1418 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1421 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1422 &cqe->fast_path_cqe, cqe_idx)) {
1424 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1425 (!is_not_hwaccel_vlan_cqe))
1426 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1427 le16_to_cpu(cqe->fast_path_cqe.
1431 netif_receive_skb(skb);
1433 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1434 " - dropping packet!\n");
1439 /* put new skb in bin */
1440 fp->tpa_pool[queue].skb = new_skb;
1443 /* else drop the packet and keep the buffer in the bin */
1444 DP(NETIF_MSG_RX_STATUS,
1445 "Failed to allocate new skb - dropping packet!\n");
1446 fp->eth_q_stats.rx_skb_alloc_failed++;
1449 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1452 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1453 struct bnx2x_fastpath *fp,
1454 u16 bd_prod, u16 rx_comp_prod,
1457 struct ustorm_eth_rx_producers rx_prods = {0};
1460 /* Update producers */
1461 rx_prods.bd_prod = bd_prod;
1462 rx_prods.cqe_prod = rx_comp_prod;
1463 rx_prods.sge_prod = rx_sge_prod;
1466 * Make sure that the BD and SGE data is updated before updating the
1467 * producers since FW might read the BD/SGE right after the producer
1469 * This is only applicable for weak-ordered memory model archs such
1470 * as IA-64. The following barrier is also mandatory since FW will
1471 * assume BDs must have buffers.
 */
1475 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1476 REG_WR(bp, BAR_USTRORM_INTMEM +
1477 USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1478 ((u32 *)&rx_prods)[i]);
1480 mmiowb(); /* keep prod updates ordered */
1482 DP(NETIF_MSG_RX_STATUS,
1483 "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
1484 fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
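/* Note: the {bd, cqe, sge} producers are copied dword by dword into the
 * per-client USTORM area (USTORM_RX_PRODS_OFFSET) rather than written one
 * field at a time, and mmiowb() keeps those writes ordered on architectures
 * that need it.
 */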
1487 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1489 struct bnx2x *bp = fp->bp;
1490 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1491 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1494 #ifdef BNX2X_STOP_ON_ERROR
1495 if (unlikely(bp->panic))
1499 /* CQ "next element" is of the size of the regular element,
1500 that's why it's ok here */
1501 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1502 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1505 bd_cons = fp->rx_bd_cons;
1506 bd_prod = fp->rx_bd_prod;
1507 bd_prod_fw = bd_prod;
1508 sw_comp_cons = fp->rx_comp_cons;
1509 sw_comp_prod = fp->rx_comp_prod;
1511 /* Memory barrier necessary as speculative reads of the rx
1512 * buffer can be ahead of the index in the status block
 */
1516 DP(NETIF_MSG_RX_STATUS,
1517 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
1518 fp->index, hw_comp_cons, sw_comp_cons);
1520 while (sw_comp_cons != hw_comp_cons) {
1521 struct sw_rx_bd *rx_buf = NULL;
1522 struct sk_buff *skb;
1523 union eth_rx_cqe *cqe;
1527 comp_ring_cons = RCQ_BD(sw_comp_cons);
1528 bd_prod = RX_BD(bd_prod);
1529 bd_cons = RX_BD(bd_cons);
1531 /* Prefetch the page containing the BD descriptor
1532 at producer's index. It will be needed when new skb is
   allocated */
1534 prefetch((void *)(PAGE_ALIGN((unsigned long)
1535 (&fp->rx_desc_ring[bd_prod])) -
1538 cqe = &fp->rx_comp_ring[comp_ring_cons];
1539 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1541 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
1542 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1543 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1544 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1545 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1546 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1548 /* is this a slowpath msg? */
1549 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1550 bnx2x_sp_event(fp, cqe);
1553 /* this is an rx packet */
1555 rx_buf = &fp->rx_buf_ring[bd_cons];
1557 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1558 pad = cqe->fast_path_cqe.placement_offset;
1560 /* If CQE is marked both TPA_START and TPA_END
1561 it is a non-TPA CQE */
1562 if ((!fp->disable_tpa) &&
1563 (TPA_TYPE(cqe_fp_flags) !=
1564 (TPA_TYPE_START | TPA_TYPE_END))) {
1565 u16 queue = cqe->fast_path_cqe.queue_index;
1567 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1568 DP(NETIF_MSG_RX_STATUS,
1569 "calling tpa_start on queue %d\n",
1572 bnx2x_tpa_start(fp, queue, skb,
1577 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1578 DP(NETIF_MSG_RX_STATUS,
1579 "calling tpa_stop on queue %d\n",
1582 if (!BNX2X_RX_SUM_FIX(cqe))
1583 BNX2X_ERR("STOP on none TCP "
1586 /* This is a size of the linear data
1588 len = le16_to_cpu(cqe->fast_path_cqe.
1590 bnx2x_tpa_stop(bp, fp, queue, pad,
1591 len, cqe, comp_ring_cons);
1592 #ifdef BNX2X_STOP_ON_ERROR
1597 bnx2x_update_sge_prod(fp,
1598 &cqe->fast_path_cqe);
1603 pci_dma_sync_single_for_device(bp->pdev,
1604 pci_unmap_addr(rx_buf, mapping),
1605 pad + RX_COPY_THRESH,
1606 PCI_DMA_FROMDEVICE);
1608 prefetch(((char *)(skb)) + 128);
1610 /* is this an error packet? */
1611 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1612 DP(NETIF_MSG_RX_ERR,
1613 "ERROR flags %x rx packet %u\n",
1614 cqe_fp_flags, sw_comp_cons);
1615 fp->eth_q_stats.rx_err_discard_pkt++;
1619 /* Since we don't have a jumbo ring
1620 * copy small packets if mtu > 1500
 */
1622 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1623 (len <= RX_COPY_THRESH)) {
1624 struct sk_buff *new_skb;
1626 new_skb = netdev_alloc_skb(bp->dev,
1628 if (new_skb == NULL) {
1629 DP(NETIF_MSG_RX_ERR,
1630 "ERROR packet dropped "
1631 "because of alloc failure\n");
1632 fp->eth_q_stats.rx_skb_alloc_failed++;
1637 skb_copy_from_linear_data_offset(skb, pad,
1638 new_skb->data + pad, len);
1639 skb_reserve(new_skb, pad);
1640 skb_put(new_skb, len);
1642 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1647 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
1648 pci_unmap_single(bp->pdev,
1649 pci_unmap_addr(rx_buf, mapping),
1651 PCI_DMA_FROMDEVICE);
1652 skb_reserve(skb, pad);
1656 DP(NETIF_MSG_RX_ERR,
1657 "ERROR packet dropped because "
1658 "of alloc failure\n");
1659 fp->eth_q_stats.rx_skb_alloc_failed++;
1661 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1665 skb->protocol = eth_type_trans(skb, bp->dev);
1667 skb->ip_summed = CHECKSUM_NONE;
1669 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1670 skb->ip_summed = CHECKSUM_UNNECESSARY;
1672 fp->eth_q_stats.hw_csum_err++;
1676 skb_record_rx_queue(skb, fp->index);
1679 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1680 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1681 PARSING_FLAGS_VLAN))
1682 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1683 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1686 netif_receive_skb(skb);
1692 bd_cons = NEXT_RX_IDX(bd_cons);
1693 bd_prod = NEXT_RX_IDX(bd_prod);
1694 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1697 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1698 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1700 if (rx_pkt == budget)
1704 fp->rx_bd_cons = bd_cons;
1705 fp->rx_bd_prod = bd_prod_fw;
1706 fp->rx_comp_cons = sw_comp_cons;
1707 fp->rx_comp_prod = sw_comp_prod;
1709 /* Update producers */
1710 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1713 fp->rx_pkt += rx_pkt;
1719 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1721 struct bnx2x_fastpath *fp = fp_cookie;
1722 struct bnx2x *bp = fp->bp;
1724 /* Return here if interrupt is disabled */
1725 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1726 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1730 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1731 fp->index, fp->sb_id);
1732 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1734 #ifdef BNX2X_STOP_ON_ERROR
1735 if (unlikely(bp->panic))
1738 /* Handle Rx or Tx according to MSI-X vector */
1739 if (fp->is_rx_queue) {
1740 prefetch(fp->rx_cons_sb);
1741 prefetch(&fp->status_blk->u_status_block.status_block_index);
1743 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1746 prefetch(fp->tx_cons_sb);
1747 prefetch(&fp->status_blk->c_status_block.status_block_index);
1749 bnx2x_update_fpsb_idx(fp);
1753 /* Re-enable interrupts */
1754 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1755 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
1756 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1757 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
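/* Fastpath interrupt flow, roughly: ACK the status block with
 * IGU_INT_DISABLE first, then either hand Rx work to NAPI or process Tx
 * completions inline, and finally re-enable the block with IGU_INT_ENABLE
 * using the last USTORM/CSTORM indices seen.
 */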
1763 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1765 struct bnx2x *bp = netdev_priv(dev_instance);
1766 u16 status = bnx2x_ack_int(bp);
1770 /* Return here if interrupt is shared and it's not for us */
1771 if (unlikely(status == 0)) {
1772 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1775 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
1777 /* Return here if interrupt is disabled */
1778 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1779 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1783 #ifdef BNX2X_STOP_ON_ERROR
1784 if (unlikely(bp->panic))
1788 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1789 struct bnx2x_fastpath *fp = &bp->fp[i];
1791 mask = 0x2 << fp->sb_id;
1792 if (status & mask) {
1793 /* Handle Rx or Tx according to SB id */
1794 if (fp->is_rx_queue) {
1795 prefetch(fp->rx_cons_sb);
1796 prefetch(&fp->status_blk->u_status_block.
1797 status_block_index);
1799 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1802 prefetch(fp->tx_cons_sb);
1803 prefetch(&fp->status_blk->c_status_block.
1804 status_block_index);
1806 bnx2x_update_fpsb_idx(fp);
1810 /* Re-enable interrupts */
1811 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1812 le16_to_cpu(fp->fp_u_idx),
1814 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1815 le16_to_cpu(fp->fp_c_idx),
1823 mask = 0x2 << CNIC_SB_ID(bp);
1824 if (status & (mask | 0x1)) {
1825 struct cnic_ops *c_ops = NULL;
1828 c_ops = rcu_dereference(bp->cnic_ops);
1830 c_ops->cnic_handler(bp->cnic_data, NULL);
1837 if (unlikely(status & 0x1)) {
1838 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1846 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1852 /* end of fast path */
1854 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1859 /* General service functions */
1862 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1865 u32 resource_bit = (1 << resource);
1866 int func = BP_FUNC(bp);
1867 u32 hw_lock_control_reg;
1870 /* Validating that the resource is within range */
1871 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1873 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1874 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1879 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1881 hw_lock_control_reg =
1882 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1885 /* Validating that the resource is not already taken */
1886 lock_status = REG_RD(bp, hw_lock_control_reg);
1887 if (lock_status & resource_bit) {
1888 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1889 lock_status, resource_bit);
1893 /* Try for 5 seconds, every 5 ms */
1894 for (cnt = 0; cnt < 1000; cnt++) {
1895 /* Try to acquire the lock */
1896 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1897 lock_status = REG_RD(bp, hw_lock_control_reg);
1898 if (lock_status & resource_bit)
1903 DP(NETIF_MSG_HW, "Timeout\n");
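/* HW lock protocol, as used above and in bnx2x_release_hw_lock(): writing
 * the resource bit to hw_lock_control_reg + 4 tries to take the lock,
 * reading the base register back reports the current owners, and writing
 * the bit to the base register releases it.
 */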
1907 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1910 u32 resource_bit = (1 << resource);
1911 int func = BP_FUNC(bp);
1912 u32 hw_lock_control_reg;
1914 /* Validating that the resource is within range */
1915 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1917 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1918 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1923 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1925 hw_lock_control_reg =
1926 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1929 /* Validating that the resource is currently taken */
1930 lock_status = REG_RD(bp, hw_lock_control_reg);
1931 if (!(lock_status & resource_bit)) {
1932 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1933 lock_status, resource_bit);
1937 REG_WR(bp, hw_lock_control_reg, resource_bit);
1941 /* HW Lock for shared dual port PHYs */
1942 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1944 mutex_lock(&bp->port.phy_mutex);
1946 if (bp->port.need_hw_lock)
1947 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1950 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1952 if (bp->port.need_hw_lock)
1953 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1955 mutex_unlock(&bp->port.phy_mutex);
1958 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1960 /* The GPIO should be swapped if swap register is set and active */
1961 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1962 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1963 int gpio_shift = gpio_num +
1964 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1965 u32 gpio_mask = (1 << gpio_shift);
1969 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1970 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1974 /* read GPIO value */
1975 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1977 /* get the requested pin value */
1978 if ((gpio_reg & gpio_mask) == gpio_mask)
1983 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1988 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1990 /* The GPIO should be swapped if swap register is set and active */
1991 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1992 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1993 int gpio_shift = gpio_num +
1994 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1995 u32 gpio_mask = (1 << gpio_shift);
1998 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1999 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2003 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2004 /* read GPIO and mask except the float bits */
2005 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
2008 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2009 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
2010 gpio_num, gpio_shift);
2011 /* clear FLOAT and set CLR */
2012 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2013 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2016 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2017 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
2018 gpio_num, gpio_shift);
2019 /* clear FLOAT and set SET */
2020 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2021 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2024 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2025 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2026 gpio_num, gpio_shift);
2028 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2035 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2036 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
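/* GPIO programming pattern: every pin has FLOAT/CLR/SET bit groups; driving
 * the pin low or high means clearing FLOAT and setting exactly one of
 * CLR/SET, while INPUT_HI_Z simply re-floats it. The same scheme is reused
 * below for GPIO interrupts and for the SPIO pins.
 */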
2041 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2043 /* The GPIO should be swapped if swap register is set and active */
2044 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2045 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2046 int gpio_shift = gpio_num +
2047 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2048 u32 gpio_mask = (1 << gpio_shift);
2051 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2052 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2056 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2058 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2061 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2062 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2063 "output low\n", gpio_num, gpio_shift);
2064 /* clear SET and set CLR */
2065 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2066 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2069 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2070 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2071 "output high\n", gpio_num, gpio_shift);
2072 /* clear CLR and set SET */
2073 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2074 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2081 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2082 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2087 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2089 u32 spio_mask = (1 << spio_num);
2092 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2093 (spio_num > MISC_REGISTERS_SPIO_7)) {
2094 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2098 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2099 /* read SPIO and mask except the float bits */
2100 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2103 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2104 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2105 /* clear FLOAT and set CLR */
2106 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2107 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2110 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2111 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2112 /* clear FLOAT and set SET */
2113 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2114 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2117 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2118 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2120 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2127 REG_WR(bp, MISC_REG_SPIO, spio_reg);
2128 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2133 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2135 switch (bp->link_vars.ieee_fc &
2136 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2137 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2138 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2142 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2143 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2147 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2148 bp->port.advertising |= ADVERTISED_Asym_Pause;
2152 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2158 static void bnx2x_link_report(struct bnx2x *bp)
2160 if (bp->state == BNX2X_STATE_DISABLED) {
2161 netif_carrier_off(bp->dev);
2162 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2166 if (bp->link_vars.link_up) {
2167 if (bp->state == BNX2X_STATE_OPEN)
2168 netif_carrier_on(bp->dev);
2169 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2171 printk("%d Mbps ", bp->link_vars.line_speed);
2173 if (bp->link_vars.duplex == DUPLEX_FULL)
2174 printk("full duplex");
2176 printk("half duplex");
2178 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2179 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2180 printk(", receive ");
2181 if (bp->link_vars.flow_ctrl &
2183 printk("& transmit ");
2185 printk(", transmit ");
2187 printk("flow control ON");
2191 } else { /* link_down */
2192 netif_carrier_off(bp->dev);
2193 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2197 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2199 if (!BP_NOMCP(bp)) {
2202 /* Initialize link parameters structure variables */
2203 /* It is recommended to turn off RX FC for jumbo frames
2204 for better performance */
2205 if (bp->dev->mtu > 5000)
2206 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2208 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2210 bnx2x_acquire_phy_lock(bp);
2212 if (load_mode == LOAD_DIAG)
2213 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2215 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2217 bnx2x_release_phy_lock(bp);
2219 bnx2x_calc_fc_adv(bp);
2221 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2222 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2223 bnx2x_link_report(bp);
2228 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2232 static void bnx2x_link_set(struct bnx2x *bp)
2234 if (!BP_NOMCP(bp)) {
2235 bnx2x_acquire_phy_lock(bp);
2236 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2237 bnx2x_release_phy_lock(bp);
2239 bnx2x_calc_fc_adv(bp);
2241 BNX2X_ERR("Bootcode is missing - can not set link\n");
2244 static void bnx2x__link_reset(struct bnx2x *bp)
2246 if (!BP_NOMCP(bp)) {
2247 bnx2x_acquire_phy_lock(bp);
2248 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2249 bnx2x_release_phy_lock(bp);
2251 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2254 static u8 bnx2x_link_test(struct bnx2x *bp)
2258 bnx2x_acquire_phy_lock(bp);
2259 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2260 bnx2x_release_phy_lock(bp);
2265 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2267 u32 r_param = bp->link_vars.line_speed / 8;
2268 u32 fair_periodic_timeout_usec;
2271 memset(&(bp->cmng.rs_vars), 0,
2272 sizeof(struct rate_shaping_vars_per_port));
2273 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2275 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2276 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2278 /* this is the threshold below which no timer arming will occur;
2279 the 1.25 coefficient makes the threshold a little bigger than
2280 the real time, to compensate for timer inaccuracy */
2281 bp->cmng.rs_vars.rs_threshold =
2282 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2284 /* resolution of fairness timer */
2285 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2286 /* for 10G it is 1000 usec; for 1G it is 10000 usec */
2287 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2289 /* this is the threshold below which we won't arm the timer anymore */
2290 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2292 /* we multiply by 1e3/8 to get bytes/msec.
2293 We don't want the credits to exceed the credit
2294 accumulated over t_fair*FAIR_MEM (the algorithm resolution) */
2295 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2296 /* since each tick is 4 usec */
2297 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
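/* Illustrative arithmetic (not in the original source), assuming a 10G link
 * (line_speed == 10000) and RS_PERIODIC_TIMEOUT_USEC == 100 as implied by the
 * "100 usec in SDM ticks = 25" comment above:
 *   r_param             = 10000 / 8            = 1250 bytes/usec
 *   rs_periodic_timeout = 100 / 4              = 25 SDM ticks
 *   rs_threshold        = (100 * 1250 * 5) / 4 = 156250 bytes, 1.25x the
 *                         125000 bytes that arrive in one 100 usec period
 *   t_fair              = T_FAIR_COEF / 10000  = 1000 usec (per the comment
 *                         above)
 */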
2300 /* Calculates the sum of vn_min_rates.
2301 It's needed for further normalizing of the min_rates.
2303 Returns: the sum of vn_min_rates, or
2305 0 - if all the min_rates are 0.
2306 In the latter case the fairness algorithm should be deactivated.
2307 If not all min_rates are zero then those that are zeroes will be set to 1.
2309 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2312 int port = BP_PORT(bp);
2315 bp->vn_weight_sum = 0;
2316 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2317 int func = 2*vn + port;
2318 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2319 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2320 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2322 /* Skip hidden vns */
2323 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2326 /* If min rate is zero - set it to 1 */
2328 vn_min_rate = DEF_MIN_RATE;
2332 bp->vn_weight_sum += vn_min_rate;
2335 /* ... only if all min rates are zeros - disable fairness */
2337 bp->vn_weight_sum = 0;
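/* Illustrative example (not in the original source): with four non-hidden vns
 * whose MIN_BW fields are 10, 20, 30 and 40, vn_min_rate evaluates to 1000,
 * 2000, 3000 and 4000 and vn_weight_sum ends up as 10000.  If every MIN_BW
 * field is 0, vn_weight_sum is forced back to 0 here and fairness stays
 * disabled.
 */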
2340 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2342 struct rate_shaping_vars_per_vn m_rs_vn;
2343 struct fairness_vars_per_vn m_fair_vn;
2344 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2345 u16 vn_min_rate, vn_max_rate;
2348 /* If function is hidden - set min and max to zeroes */
2349 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2354 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2355 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2356 /* If fairness is enabled (not all min rates are zeroes) and
2357 if current min rate is zero - set it to 1.
2358 This is a requirement of the algorithm. */
2359 if (bp->vn_weight_sum && (vn_min_rate == 0))
2360 vn_min_rate = DEF_MIN_RATE;
2361 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2362 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2366 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2367 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2369 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2370 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2372 /* global vn counter - maximal Mbps for this vn */
2373 m_rs_vn.vn_counter.rate = vn_max_rate;
2375 /* quota - number of bytes transmitted in this period */
2376 m_rs_vn.vn_counter.quota =
2377 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2379 if (bp->vn_weight_sum) {
2380 /* credit for each period of the fairness algorithm:
2381 number of bytes in T_FAIR (the vns share the port rate).
2382 vn_weight_sum should not be larger than 10000, thus
2383 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2385 m_fair_vn.vn_credit_delta =
2386 max((u32)(vn_min_rate * (T_FAIR_COEF /
2387 (8 * bp->vn_weight_sum))),
2388 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2389 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2390 m_fair_vn.vn_credit_delta);
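/* Illustrative arithmetic (not in the original source), assuming T_FAIR_COEF
 * is defined so that T_FAIR_COEF / 10000 == 1000 usec (see the comment in
 * bnx2x_init_port_minmax), i.e. T_FAIR_COEF == 10000000: with
 * vn_weight_sum == 10000 and vn_min_rate == 2500, the credit is
 * 2500 * (10000000 / 80000) = 312500 bytes per T_FAIR period (exactly what a
 * 2.5 Gbps share can send in 1000 usec), unless 2 * fair_threshold is larger.
 */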
2393 /* Store it to internal memory */
2394 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2395 REG_WR(bp, BAR_XSTRORM_INTMEM +
2396 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2397 ((u32 *)(&m_rs_vn))[i]);
2399 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2400 REG_WR(bp, BAR_XSTRORM_INTMEM +
2401 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2402 ((u32 *)(&m_fair_vn))[i]);
2406 /* This function is called upon link interrupt */
2407 static void bnx2x_link_attn(struct bnx2x *bp)
2409 /* Make sure that we are synced with the current statistics */
2410 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2412 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2414 if (bp->link_vars.link_up) {
2416 /* dropless flow control */
2417 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2418 int port = BP_PORT(bp);
2419 u32 pause_enabled = 0;
2421 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2424 REG_WR(bp, BAR_USTRORM_INTMEM +
2425 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2429 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2430 struct host_port_stats *pstats;
2432 pstats = bnx2x_sp(bp, port_stats);
2433 /* reset old bmac stats */
2434 memset(&(pstats->mac_stx[0]), 0,
2435 sizeof(struct mac_stx));
2437 if ((bp->state == BNX2X_STATE_OPEN) ||
2438 (bp->state == BNX2X_STATE_DISABLED))
2439 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2442 /* indicate link status */
2443 bnx2x_link_report(bp);
2446 int port = BP_PORT(bp);
2450 /* Set the attention towards other drivers on the same port */
2451 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2452 if (vn == BP_E1HVN(bp))
2455 func = ((vn << 1) | port);
2456 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2457 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2460 if (bp->link_vars.link_up) {
2463 /* Init rate shaping and fairness contexts */
2464 bnx2x_init_port_minmax(bp);
2466 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2467 bnx2x_init_vn_minmax(bp, 2*vn + port);
2469 /* Store it to internal memory */
2471 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2472 REG_WR(bp, BAR_XSTRORM_INTMEM +
2473 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2474 ((u32 *)(&bp->cmng))[i]);
2479 static void bnx2x__link_status_update(struct bnx2x *bp)
2481 int func = BP_FUNC(bp);
2483 if (bp->state != BNX2X_STATE_OPEN)
2486 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2488 if (bp->link_vars.link_up)
2489 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2491 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2493 bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2494 bnx2x_calc_vn_weight_sum(bp);
2496 /* indicate link status */
2497 bnx2x_link_report(bp);
2500 static void bnx2x_pmf_update(struct bnx2x *bp)
2502 int port = BP_PORT(bp);
2506 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2508 /* enable nig attention */
2509 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2510 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2511 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2513 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2521 * General service functions
2524 /* send the MCP a request, block until there is a reply */
2525 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2527 int func = BP_FUNC(bp);
2528 u32 seq = ++bp->fw_seq;
2531 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2533 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2534 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2537 /* let the FW do its magic ... */
2540 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2542 /* Give the FW up to 2 seconds (200 * 10ms) */
2543 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
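/* With the normal 10 ms delay the loop above polls for up to
 * 200 * 10 ms = 2 seconds; for slow chip revisions (CHIP_REV_IS_SLOW) the
 * 100 ms delay stretches this to roughly 20 seconds.
 */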
2545 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2546 cnt*delay, rc, seq);
2548 /* is this a reply to our command? */
2549 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2550 rc &= FW_MSG_CODE_MASK;
2553 BNX2X_ERR("FW failed to respond!\n");
2561 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2562 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2563 static void bnx2x_set_rx_mode(struct net_device *dev);
2565 static void bnx2x_e1h_disable(struct bnx2x *bp)
2567 int port = BP_PORT(bp);
2570 bp->rx_mode = BNX2X_RX_MODE_NONE;
2571 bnx2x_set_storm_rx_mode(bp);
2573 netif_tx_disable(bp->dev);
2574 bp->dev->trans_start = jiffies; /* prevent tx timeout */
2576 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2578 bnx2x_set_eth_mac_addr_e1h(bp, 0);
2580 for (i = 0; i < MC_HASH_SIZE; i++)
2581 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
2583 netif_carrier_off(bp->dev);
2586 static void bnx2x_e1h_enable(struct bnx2x *bp)
2588 int port = BP_PORT(bp);
2590 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2592 bnx2x_set_eth_mac_addr_e1h(bp, 1);
2594 /* Tx queues only need to be re-enabled */
2595 netif_tx_wake_all_queues(bp->dev);
2597 /* Initialize the receive filter. */
2598 bnx2x_set_rx_mode(bp->dev);
2601 static void bnx2x_update_min_max(struct bnx2x *bp)
2603 int port = BP_PORT(bp);
2606 /* Init rate shaping and fairness contexts */
2607 bnx2x_init_port_minmax(bp);
2609 bnx2x_calc_vn_weight_sum(bp);
2611 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2612 bnx2x_init_vn_minmax(bp, 2*vn + port);
2617 /* Set the attention towards other drivers on the same port */
2618 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2619 if (vn == BP_E1HVN(bp))
2622 func = ((vn << 1) | port);
2623 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2624 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2627 /* Store it to internal memory */
2628 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2629 REG_WR(bp, BAR_XSTRORM_INTMEM +
2630 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2631 ((u32 *)(&bp->cmng))[i]);
2635 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2637 int func = BP_FUNC(bp);
2639 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2640 bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2642 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2644 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2645 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2646 bp->state = BNX2X_STATE_DISABLED;
2648 bnx2x_e1h_disable(bp);
2650 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2651 bp->state = BNX2X_STATE_OPEN;
2653 bnx2x_e1h_enable(bp);
2655 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2657 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2659 bnx2x_update_min_max(bp);
2660 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2663 /* Report results to MCP */
2665 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2667 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2670 /* must be called under the spq lock */
2671 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2673 struct eth_spe *next_spe = bp->spq_prod_bd;
2675 if (bp->spq_prod_bd == bp->spq_last_bd) {
2676 bp->spq_prod_bd = bp->spq;
2677 bp->spq_prod_idx = 0;
2678 DP(NETIF_MSG_TIMER, "end of spq\n");
2686 /* must be called under the spq lock */
2687 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2689 int func = BP_FUNC(bp);
2691 /* Make sure that BD data is updated before writing the producer */
2694 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2699 /* the slow path queue is odd since completions arrive on the fastpath ring */
2700 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2701 u32 data_hi, u32 data_lo, int common)
2703 struct eth_spe *spe;
2705 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2706 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2707 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2708 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2709 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2711 #ifdef BNX2X_STOP_ON_ERROR
2712 if (unlikely(bp->panic))
2716 spin_lock_bh(&bp->spq_lock);
2718 if (!bp->spq_left) {
2719 BNX2X_ERR("BUG! SPQ ring full!\n");
2720 spin_unlock_bh(&bp->spq_lock);
2725 spe = bnx2x_sp_get_next(bp);
2727 /* CID needs the port number to be encoded in it */
2728 spe->hdr.conn_and_cmd_data =
2729 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2731 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2734 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2736 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2737 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2741 bnx2x_sp_prod_update(bp);
2742 spin_unlock_bh(&bp->spq_lock);
2746 /* acquire split MCP access lock register */
2747 static int bnx2x_acquire_alr(struct bnx2x *bp)
2754 for (j = 0; j < i*10; j++) {
2756 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2757 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2758 if (val & (1L << 31))
2763 if (!(val & (1L << 31))) {
2764 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2771 /* release split MCP access lock register */
2772 static void bnx2x_release_alr(struct bnx2x *bp)
2776 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2779 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2781 struct host_def_status_block *def_sb = bp->def_status_blk;
2784 barrier(); /* status block is written to by the chip */
2785 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2786 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2789 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2790 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2793 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2794 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2797 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2798 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2801 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2802 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2809 * slow path service functions
2812 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2814 int port = BP_PORT(bp);
2815 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2816 COMMAND_REG_ATTN_BITS_SET);
2817 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2818 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2819 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2820 NIG_REG_MASK_INTERRUPT_PORT0;
2824 if (bp->attn_state & asserted)
2825 BNX2X_ERR("IGU ERROR\n");
2827 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2828 aeu_mask = REG_RD(bp, aeu_addr);
2830 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2831 aeu_mask, asserted);
2832 aeu_mask &= ~(asserted & 0xff);
2833 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2835 REG_WR(bp, aeu_addr, aeu_mask);
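/* The bits masked out here are re-enabled by bnx2x_attn_int_deasserted()
 * (aeu_mask |= deasserted & 0xff) once the attention has been handled.
 */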
2836 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2838 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2839 bp->attn_state |= asserted;
2840 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2842 if (asserted & ATTN_HARD_WIRED_MASK) {
2843 if (asserted & ATTN_NIG_FOR_FUNC) {
2845 bnx2x_acquire_phy_lock(bp);
2847 /* save nig interrupt mask */
2848 nig_mask = REG_RD(bp, nig_int_mask_addr);
2849 REG_WR(bp, nig_int_mask_addr, 0);
2851 bnx2x_link_attn(bp);
2853 /* handle unicore attn? */
2855 if (asserted & ATTN_SW_TIMER_4_FUNC)
2856 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2858 if (asserted & GPIO_2_FUNC)
2859 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2861 if (asserted & GPIO_3_FUNC)
2862 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2864 if (asserted & GPIO_4_FUNC)
2865 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2868 if (asserted & ATTN_GENERAL_ATTN_1) {
2869 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2870 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2872 if (asserted & ATTN_GENERAL_ATTN_2) {
2873 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2874 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2876 if (asserted & ATTN_GENERAL_ATTN_3) {
2877 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2878 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2881 if (asserted & ATTN_GENERAL_ATTN_4) {
2882 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2883 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2885 if (asserted & ATTN_GENERAL_ATTN_5) {
2886 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2887 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2889 if (asserted & ATTN_GENERAL_ATTN_6) {
2890 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2891 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2895 } /* if hardwired */
2897 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2899 REG_WR(bp, hc_addr, asserted);
2901 /* now set back the mask */
2902 if (asserted & ATTN_NIG_FOR_FUNC) {
2903 REG_WR(bp, nig_int_mask_addr, nig_mask);
2904 bnx2x_release_phy_lock(bp);
2908 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2910 int port = BP_PORT(bp);
2912 /* mark the failure */
2913 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2914 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2915 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2916 bp->link_params.ext_phy_config);
2918 /* log the failure */
2919 printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2920 " the driver to shutdown the card to prevent permanent"
2921 " damage. Please contact Dell Support for assistance\n",
2925 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2927 int port = BP_PORT(bp);
2929 u32 val, swap_val, swap_override;
2931 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2932 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2934 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2936 val = REG_RD(bp, reg_offset);
2937 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2938 REG_WR(bp, reg_offset, val);
2940 BNX2X_ERR("SPIO5 hw attention\n");
2942 /* Fan failure attention */
2943 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2944 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2945 /* Low power mode is controlled by GPIO 2 */
2946 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2947 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2948 /* The PHY reset is controlled by GPIO 1 */
2949 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2950 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2953 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2954 /* The PHY reset is controlled by GPIO 1 */
2955 /* fake the port number to cancel the swap done in
2957 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2958 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2959 port = (swap_val && swap_override) ^ 1;
2960 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2961 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2967 bnx2x_fan_failure(bp);
2970 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2971 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2972 bnx2x_acquire_phy_lock(bp);
2973 bnx2x_handle_module_detect_int(&bp->link_params);
2974 bnx2x_release_phy_lock(bp);
2977 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2979 val = REG_RD(bp, reg_offset);
2980 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2981 REG_WR(bp, reg_offset, val);
2983 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2984 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2989 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2993 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2995 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2996 BNX2X_ERR("DB hw attention 0x%x\n", val);
2997 /* DORQ discard attention */
2999 BNX2X_ERR("FATAL error from DORQ\n");
3002 if (attn & HW_INTERRUT_ASSERT_SET_1) {
3004 int port = BP_PORT(bp);
3007 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3008 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3010 val = REG_RD(bp, reg_offset);
3011 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3012 REG_WR(bp, reg_offset, val);
3014 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
3015 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
3020 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3024 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3026 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3027 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3028 /* CFC error attention */
3030 BNX2X_ERR("FATAL error from CFC\n");
3033 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3035 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3036 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3037 /* RQ_USDMDP_FIFO_OVERFLOW */
3039 BNX2X_ERR("FATAL error from PXP\n");
3042 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3044 int port = BP_PORT(bp);
3047 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3048 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3050 val = REG_RD(bp, reg_offset);
3051 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3052 REG_WR(bp, reg_offset, val);
3054 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3055 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3060 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3064 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3066 if (attn & BNX2X_PMF_LINK_ASSERT) {
3067 int func = BP_FUNC(bp);
3069 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3070 val = SHMEM_RD(bp, func_mb[func].drv_status);
3071 if (val & DRV_STATUS_DCC_EVENT_MASK)
3073 (val & DRV_STATUS_DCC_EVENT_MASK));
3074 bnx2x__link_status_update(bp);
3075 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3076 bnx2x_pmf_update(bp);
3078 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3080 BNX2X_ERR("MC assert!\n");
3081 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3082 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3083 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3084 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3087 } else if (attn & BNX2X_MCP_ASSERT) {
3089 BNX2X_ERR("MCP assert!\n");
3090 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3094 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3097 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3098 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3099 if (attn & BNX2X_GRC_TIMEOUT) {
3100 val = CHIP_IS_E1H(bp) ?
3101 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3102 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3104 if (attn & BNX2X_GRC_RSV) {
3105 val = CHIP_IS_E1H(bp) ?
3106 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3107 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3109 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3113 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3115 struct attn_route attn;
3116 struct attn_route group_mask;
3117 int port = BP_PORT(bp);
3123 /* need to take HW lock because MCP or other port might also
3124 try to handle this event */
3125 bnx2x_acquire_alr(bp);
3127 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3128 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3129 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3130 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3131 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3132 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3134 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3135 if (deasserted & (1 << index)) {
3136 group_mask = bp->attn_group[index];
3138 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3139 index, group_mask.sig[0], group_mask.sig[1],
3140 group_mask.sig[2], group_mask.sig[3]);
3142 bnx2x_attn_int_deasserted3(bp,
3143 attn.sig[3] & group_mask.sig[3]);
3144 bnx2x_attn_int_deasserted1(bp,
3145 attn.sig[1] & group_mask.sig[1]);
3146 bnx2x_attn_int_deasserted2(bp,
3147 attn.sig[2] & group_mask.sig[2]);
3148 bnx2x_attn_int_deasserted0(bp,
3149 attn.sig[0] & group_mask.sig[0]);
3151 if ((attn.sig[0] & group_mask.sig[0] &
3152 HW_PRTY_ASSERT_SET_0) ||
3153 (attn.sig[1] & group_mask.sig[1] &
3154 HW_PRTY_ASSERT_SET_1) ||
3155 (attn.sig[2] & group_mask.sig[2] &
3156 HW_PRTY_ASSERT_SET_2))
3157 BNX2X_ERR("FATAL HW block parity attention\n");
3161 bnx2x_release_alr(bp);
3163 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3166 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3168 REG_WR(bp, reg_addr, val);
3170 if (~bp->attn_state & deasserted)
3171 BNX2X_ERR("IGU ERROR\n");
3173 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3174 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3176 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3177 aeu_mask = REG_RD(bp, reg_addr);
3179 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3180 aeu_mask, deasserted);
3181 aeu_mask |= (deasserted & 0xff);
3182 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3184 REG_WR(bp, reg_addr, aeu_mask);
3185 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3187 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3188 bp->attn_state &= ~deasserted;
3189 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3192 static void bnx2x_attn_int(struct bnx2x *bp)
3194 /* read local copy of bits */
3195 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3197 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3199 u32 attn_state = bp->attn_state;
3201 /* look for changed bits */
3202 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3203 u32 deasserted = ~attn_bits & attn_ack & attn_state;
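/* Illustrative example (not in the original source): a bit that is set in
 * attn_bits but clear in both attn_ack and attn_state has just been raised
 * and lands in 'asserted'; a bit that is clear in attn_bits but still set in
 * attn_ack and attn_state has been cleared by its source and lands in
 * 'deasserted'.
 */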
3206 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3207 attn_bits, attn_ack, asserted, deasserted);
3209 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3210 BNX2X_ERR("BAD attention state\n");
3212 /* handle bits that were raised */
3214 bnx2x_attn_int_asserted(bp, asserted);
3217 bnx2x_attn_int_deasserted(bp, deasserted);
3220 static void bnx2x_sp_task(struct work_struct *work)
3222 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3226 /* Return here if interrupt is disabled */
3227 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3228 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3232 status = bnx2x_update_dsb_idx(bp);
3233 /* if (status == 0) */
3234 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
3236 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
3242 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3244 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3246 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3248 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3250 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3255 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3257 struct net_device *dev = dev_instance;
3258 struct bnx2x *bp = netdev_priv(dev);
3260 /* Return here if interrupt is disabled */
3261 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3262 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3266 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3268 #ifdef BNX2X_STOP_ON_ERROR
3269 if (unlikely(bp->panic))
3275 struct cnic_ops *c_ops;
3278 c_ops = rcu_dereference(bp->cnic_ops);
3280 c_ops->cnic_handler(bp->cnic_data, NULL);
3284 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3289 /* end of slow path */
3293 /****************************************************************************
3295 ****************************************************************************/
3297 /* sum[hi:lo] += add[hi:lo] */
3298 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3301 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3304 /* difference = minuend - subtrahend */
3305 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3307 if (m_lo < s_lo) { \
3309 d_hi = m_hi - s_hi; \
3311 /* we can 'loan' 1 */ \
3313 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3315 /* m_hi <= s_hi */ \
3320 /* m_lo >= s_lo */ \
3321 if (m_hi < s_hi) { \
3325 /* m_hi >= s_hi */ \
3326 d_hi = m_hi - s_hi; \
3327 d_lo = m_lo - s_lo; \
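/* Worked example (not in the original source) of the carry/borrow handling:
 * ADD_64 with s = 0x00000001:0xFFFFFFFF and a = 0x00000000:0x00000002 wraps
 * s_lo to 0x00000001; the new s_lo is smaller than a_lo, so a carry is added
 * and the result is s = 0x00000002:0x00000001 (0x1FFFFFFFF + 2).
 * DIFF_64 borrows in the mirror case: m = 0x00000002:0x00000001 minus
 * s = 0x00000000:0x00000002 yields d = 0x00000001:0xFFFFFFFF.
 */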
3332 #define UPDATE_STAT64(s, t) \
3334 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3335 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3336 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3337 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3338 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3339 pstats->mac_stx[1].t##_lo, diff.lo); \
3342 #define UPDATE_STAT64_NIG(s, t) \
3344 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3345 diff.lo, new->s##_lo, old->s##_lo); \
3346 ADD_64(estats->t##_hi, diff.hi, \
3347 estats->t##_lo, diff.lo); \
3350 /* sum[hi:lo] += add */
3351 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3354 s_hi += (s_lo < a) ? 1 : 0; \
3357 #define UPDATE_EXTEND_STAT(s) \
3359 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3360 pstats->mac_stx[1].s##_lo, \
3364 #define UPDATE_EXTEND_TSTAT(s, t) \
3366 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3367 old_tclient->s = tclient->s; \
3368 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3371 #define UPDATE_EXTEND_USTAT(s, t) \
3373 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3374 old_uclient->s = uclient->s; \
3375 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3378 #define UPDATE_EXTEND_XSTAT(s, t) \
3380 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3381 old_xclient->s = xclient->s; \
3382 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3385 /* minuend -= subtrahend */
3386 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3388 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3391 /* minuend[hi:lo] -= subtrahend */
3392 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3394 SUB_64(m_hi, 0, m_lo, s); \
3397 #define SUB_EXTEND_USTAT(s, t) \
3399 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3400 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3404 * General service functions
3407 static inline long bnx2x_hilo(u32 *hiref)
3409 u32 lo = *(hiref + 1);
3410 #if (BITS_PER_LONG == 64)
3413 return HILO_U64(hi, lo);
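/* Illustrative note (not in the original source): callers pass the address of
 * the _hi member of a hi/lo statistics pair, so *hiref is the high 32 bits
 * and *(hiref + 1) the adjacent low 32 bits; on a 64-bit build hi = 0x1,
 * lo = 0x2 combine to 0x100000002.
 */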
3420 * Init service functions
3423 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3425 if (!bp->stats_pending) {
3426 struct eth_query_ramrod_data ramrod_data = {0};
3429 ramrod_data.drv_counter = bp->stats_counter++;
3430 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3431 for_each_queue(bp, i)
3432 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3434 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3435 ((u32 *)&ramrod_data)[1],
3436 ((u32 *)&ramrod_data)[0], 0);
3438 /* the stats ramrod has its own slot on the spq */
3440 bp->stats_pending = 1;
3445 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3447 struct dmae_command *dmae = &bp->stats_dmae;
3448 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3450 *stats_comp = DMAE_COMP_VAL;
3451 if (CHIP_REV_IS_SLOW(bp))
3455 if (bp->executer_idx) {
3456 int loader_idx = PMF_DMAE_C(bp);
3458 memset(dmae, 0, sizeof(struct dmae_command));
3460 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3461 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3462 DMAE_CMD_DST_RESET |
3464 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3466 DMAE_CMD_ENDIANITY_DW_SWAP |
3468 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3470 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3471 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3472 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3473 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3474 sizeof(struct dmae_command) *
3475 (loader_idx + 1)) >> 2;
3476 dmae->dst_addr_hi = 0;
3477 dmae->len = sizeof(struct dmae_command) >> 2;
3480 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3481 dmae->comp_addr_hi = 0;
3485 bnx2x_post_dmae(bp, dmae, loader_idx);
3487 } else if (bp->func_stx) {
3489 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3493 static int bnx2x_stats_comp(struct bnx2x *bp)
3495 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3499 while (*stats_comp != DMAE_COMP_VAL) {
3501 BNX2X_ERR("timeout waiting for stats finished\n");
3511 * Statistics service functions
3514 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3516 struct dmae_command *dmae;
3518 int loader_idx = PMF_DMAE_C(bp);
3519 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3522 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3523 BNX2X_ERR("BUG!\n");
3527 bp->executer_idx = 0;
3529 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3531 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3533 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3535 DMAE_CMD_ENDIANITY_DW_SWAP |
3537 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3538 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3540 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3541 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3542 dmae->src_addr_lo = bp->port.port_stx >> 2;
3543 dmae->src_addr_hi = 0;
3544 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3545 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3546 dmae->len = DMAE_LEN32_RD_MAX;
3547 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3548 dmae->comp_addr_hi = 0;
3551 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3552 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3553 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3554 dmae->src_addr_hi = 0;
3555 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3556 DMAE_LEN32_RD_MAX * 4);
3557 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3558 DMAE_LEN32_RD_MAX * 4);
3559 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3560 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3561 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3562 dmae->comp_val = DMAE_COMP_VAL;
3565 bnx2x_hw_stats_post(bp);
3566 bnx2x_stats_comp(bp);
3569 static void bnx2x_port_stats_init(struct bnx2x *bp)
3571 struct dmae_command *dmae;
3572 int port = BP_PORT(bp);
3573 int vn = BP_E1HVN(bp);
3575 int loader_idx = PMF_DMAE_C(bp);
3577 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3580 if (!bp->link_vars.link_up || !bp->port.pmf) {
3581 BNX2X_ERR("BUG!\n");
3585 bp->executer_idx = 0;
3588 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3589 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3590 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3592 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3594 DMAE_CMD_ENDIANITY_DW_SWAP |
3596 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3597 (vn << DMAE_CMD_E1HVN_SHIFT));
3599 if (bp->port.port_stx) {
3601 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3602 dmae->opcode = opcode;
3603 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3604 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3605 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3606 dmae->dst_addr_hi = 0;
3607 dmae->len = sizeof(struct host_port_stats) >> 2;
3608 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3609 dmae->comp_addr_hi = 0;
3615 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3616 dmae->opcode = opcode;
3617 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3618 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3619 dmae->dst_addr_lo = bp->func_stx >> 2;
3620 dmae->dst_addr_hi = 0;
3621 dmae->len = sizeof(struct host_func_stats) >> 2;
3622 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3623 dmae->comp_addr_hi = 0;
3628 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3629 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3630 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3632 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3634 DMAE_CMD_ENDIANITY_DW_SWAP |
3636 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3637 (vn << DMAE_CMD_E1HVN_SHIFT));
3639 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3641 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3642 NIG_REG_INGRESS_BMAC0_MEM);
3644 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3645 BIGMAC_REGISTER_TX_STAT_GTBYT */
3646 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3647 dmae->opcode = opcode;
3648 dmae->src_addr_lo = (mac_addr +
3649 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3650 dmae->src_addr_hi = 0;
3651 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3652 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3653 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3654 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3655 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3656 dmae->comp_addr_hi = 0;
3659 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3660 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3661 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3662 dmae->opcode = opcode;
3663 dmae->src_addr_lo = (mac_addr +
3664 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3665 dmae->src_addr_hi = 0;
3666 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3667 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3668 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3669 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3670 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3671 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3672 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3673 dmae->comp_addr_hi = 0;
3676 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3678 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3680 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3681 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3682 dmae->opcode = opcode;
3683 dmae->src_addr_lo = (mac_addr +
3684 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3685 dmae->src_addr_hi = 0;
3686 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3687 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3688 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3689 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3690 dmae->comp_addr_hi = 0;
3693 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3694 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3695 dmae->opcode = opcode;
3696 dmae->src_addr_lo = (mac_addr +
3697 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3698 dmae->src_addr_hi = 0;
3699 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3700 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3701 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3702 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3704 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3705 dmae->comp_addr_hi = 0;
3708 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3709 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3710 dmae->opcode = opcode;
3711 dmae->src_addr_lo = (mac_addr +
3712 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3713 dmae->src_addr_hi = 0;
3714 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3715 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3716 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3717 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3718 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3719 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3720 dmae->comp_addr_hi = 0;
3725 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3726 dmae->opcode = opcode;
3727 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3728 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3729 dmae->src_addr_hi = 0;
3730 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3731 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3732 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3733 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3734 dmae->comp_addr_hi = 0;
3737 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3738 dmae->opcode = opcode;
3739 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3740 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3741 dmae->src_addr_hi = 0;
3742 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3743 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3744 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3745 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3746 dmae->len = (2*sizeof(u32)) >> 2;
3747 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3748 dmae->comp_addr_hi = 0;
3751 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3752 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3753 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3754 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3756 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3758 DMAE_CMD_ENDIANITY_DW_SWAP |
3760 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3761 (vn << DMAE_CMD_E1HVN_SHIFT));
3762 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3763 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3764 dmae->src_addr_hi = 0;
3765 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3766 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3767 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3768 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3769 dmae->len = (2*sizeof(u32)) >> 2;
3770 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3771 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3772 dmae->comp_val = DMAE_COMP_VAL;
3777 static void bnx2x_func_stats_init(struct bnx2x *bp)
3779 struct dmae_command *dmae = &bp->stats_dmae;
3780 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3783 if (!bp->func_stx) {
3784 BNX2X_ERR("BUG!\n");
3788 bp->executer_idx = 0;
3789 memset(dmae, 0, sizeof(struct dmae_command));
3791 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3792 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3793 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3795 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3797 DMAE_CMD_ENDIANITY_DW_SWAP |
3799 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3800 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3801 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3802 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3803 dmae->dst_addr_lo = bp->func_stx >> 2;
3804 dmae->dst_addr_hi = 0;
3805 dmae->len = sizeof(struct host_func_stats) >> 2;
3806 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3807 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3808 dmae->comp_val = DMAE_COMP_VAL;
3813 static void bnx2x_stats_start(struct bnx2x *bp)
3816 bnx2x_port_stats_init(bp);
3818 else if (bp->func_stx)
3819 bnx2x_func_stats_init(bp);
3821 bnx2x_hw_stats_post(bp);
3822 bnx2x_storm_stats_post(bp);
3825 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3827 bnx2x_stats_comp(bp);
3828 bnx2x_stats_pmf_update(bp);
3829 bnx2x_stats_start(bp);
3832 static void bnx2x_stats_restart(struct bnx2x *bp)
3834 bnx2x_stats_comp(bp);
3835 bnx2x_stats_start(bp);
3838 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3840 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3841 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3842 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3848 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3849 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3850 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3851 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3852 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3853 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3854 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3855 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3856 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3857 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3858 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3859 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3860 UPDATE_STAT64(tx_stat_gt127,
3861 tx_stat_etherstatspkts65octetsto127octets);
3862 UPDATE_STAT64(tx_stat_gt255,
3863 tx_stat_etherstatspkts128octetsto255octets);
3864 UPDATE_STAT64(tx_stat_gt511,
3865 tx_stat_etherstatspkts256octetsto511octets);
3866 UPDATE_STAT64(tx_stat_gt1023,
3867 tx_stat_etherstatspkts512octetsto1023octets);
3868 UPDATE_STAT64(tx_stat_gt1518,
3869 tx_stat_etherstatspkts1024octetsto1522octets);
3870 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3871 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3872 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3873 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3874 UPDATE_STAT64(tx_stat_gterr,
3875 tx_stat_dot3statsinternalmactransmiterrors);
3876 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3878 estats->pause_frames_received_hi =
3879 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3880 estats->pause_frames_received_lo =
3881 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3883 estats->pause_frames_sent_hi =
3884 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3885 estats->pause_frames_sent_lo =
3886 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3889 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3891 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3892 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3893 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3895 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3896 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3897 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3898 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3899 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3900 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3901 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3902 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3903 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3904 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3905 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3906 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3907 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3908 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3909 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3910 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3911 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3912 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3913 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3914 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3915 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3916 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3917 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3918 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3919 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3920 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3921 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3922 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3923 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3924 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3925 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3927 estats->pause_frames_received_hi =
3928 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3929 estats->pause_frames_received_lo =
3930 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3931 ADD_64(estats->pause_frames_received_hi,
3932 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3933 estats->pause_frames_received_lo,
3934 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3936 estats->pause_frames_sent_hi =
3937 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3938 estats->pause_frames_sent_lo =
3939 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3940 ADD_64(estats->pause_frames_sent_hi,
3941 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3942 estats->pause_frames_sent_lo,
3943 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3946 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3948 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3949 struct nig_stats *old = &(bp->port.old_nig_stats);
3950 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3951 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3958 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3959 bnx2x_bmac_stats_update(bp);
3961 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3962 bnx2x_emac_stats_update(bp);
3964 else { /* unreached */
3965 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3969 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3970 new->brb_discard - old->brb_discard);
3971 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3972 new->brb_truncate - old->brb_truncate);
3974 UPDATE_STAT64_NIG(egress_mac_pkt0,
3975 etherstatspkts1024octetsto1522octets);
3976 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3978 memcpy(old, new, sizeof(struct nig_stats));
3980 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3981 sizeof(struct mac_stx));
3982 estats->brb_drop_hi = pstats->brb_drop_hi;
3983 estats->brb_drop_lo = pstats->brb_drop_lo;
3985 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3987 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3988 if (nig_timer_max != estats->nig_timer_max) {
3989 estats->nig_timer_max = nig_timer_max;
3990 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3996 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3998 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3999 struct tstorm_per_port_stats *tport =
4000 &stats->tstorm_common.port_statistics;
4001 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
4002 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4005 memcpy(&(fstats->total_bytes_received_hi),
4006 &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
4007 sizeof(struct host_func_stats) - 2*sizeof(u32));
4008 estats->error_bytes_received_hi = 0;
4009 estats->error_bytes_received_lo = 0;
4010 estats->etherstatsoverrsizepkts_hi = 0;
4011 estats->etherstatsoverrsizepkts_lo = 0;
4012 estats->no_buff_discard_hi = 0;
4013 estats->no_buff_discard_lo = 0;
4015 for_each_rx_queue(bp, i) {
4016 struct bnx2x_fastpath *fp = &bp->fp[i];
4017 int cl_id = fp->cl_id;
4018 struct tstorm_per_client_stats *tclient =
4019 &stats->tstorm_common.client_statistics[cl_id];
4020 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4021 struct ustorm_per_client_stats *uclient =
4022 &stats->ustorm_common.client_statistics[cl_id];
4023 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4024 struct xstorm_per_client_stats *xclient =
4025 &stats->xstorm_common.client_statistics[cl_id];
4026 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4027 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4030 /* are storm stats valid? */
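/* bp->stats_counter was post-incremented in bnx2x_storm_stats_post() when the
 * query ramrod was sent, so a client whose reported stats_counter is exactly
 * one behind bp->stats_counter has been updated for the latest query; any
 * other value means that storm has not yet processed it.
 */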
4031 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
4032 bp->stats_counter) {
4033 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4034 " xstorm counter (%d) != stats_counter (%d)\n",
4035 i, xclient->stats_counter, bp->stats_counter);
4038 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
4039 bp->stats_counter) {
4040 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4041 " tstorm counter (%d) != stats_counter (%d)\n",
4042 i, tclient->stats_counter, bp->stats_counter);
4045 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4046 bp->stats_counter) {
4047 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4048 " ustorm counter (%d) != stats_counter (%d)\n",
4049 i, uclient->stats_counter, bp->stats_counter);
4053 qstats->total_bytes_received_hi =
4054 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4055 qstats->total_bytes_received_lo =
4056 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4058 ADD_64(qstats->total_bytes_received_hi,
4059 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4060 qstats->total_bytes_received_lo,
4061 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4063 ADD_64(qstats->total_bytes_received_hi,
4064 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4065 qstats->total_bytes_received_lo,
4066 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4068 qstats->valid_bytes_received_hi =
4069 qstats->total_bytes_received_hi;
4070 qstats->valid_bytes_received_lo =
4071 qstats->total_bytes_received_lo;
4073 qstats->error_bytes_received_hi =
4074 le32_to_cpu(tclient->rcv_error_bytes.hi);
4075 qstats->error_bytes_received_lo =
4076 le32_to_cpu(tclient->rcv_error_bytes.lo);
4078 ADD_64(qstats->total_bytes_received_hi,
4079 qstats->error_bytes_received_hi,
4080 qstats->total_bytes_received_lo,
4081 qstats->error_bytes_received_lo);
4083 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4084 total_unicast_packets_received);
4085 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4086 total_multicast_packets_received);
4087 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4088 total_broadcast_packets_received);
4089 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4090 etherstatsoverrsizepkts);
4091 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4093 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4094 total_unicast_packets_received);
4095 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4096 total_multicast_packets_received);
4097 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4098 total_broadcast_packets_received);
4099 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4100 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4101 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4103 qstats->total_bytes_transmitted_hi =
4104 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4105 qstats->total_bytes_transmitted_lo =
4106 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4108 ADD_64(qstats->total_bytes_transmitted_hi,
4109 le32_to_cpu(xclient->multicast_bytes_sent.hi),
4110 qstats->total_bytes_transmitted_lo,
4111 le32_to_cpu(xclient->multicast_bytes_sent.lo));
4113 ADD_64(qstats->total_bytes_transmitted_hi,
4114 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4115 qstats->total_bytes_transmitted_lo,
4116 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4118 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4119 total_unicast_packets_transmitted);
4120 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4121 total_multicast_packets_transmitted);
4122 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4123 total_broadcast_packets_transmitted);
4125 old_tclient->checksum_discard = tclient->checksum_discard;
4126 old_tclient->ttl0_discard = tclient->ttl0_discard;
4128 ADD_64(fstats->total_bytes_received_hi,
4129 qstats->total_bytes_received_hi,
4130 fstats->total_bytes_received_lo,
4131 qstats->total_bytes_received_lo);
4132 ADD_64(fstats->total_bytes_transmitted_hi,
4133 qstats->total_bytes_transmitted_hi,
4134 fstats->total_bytes_transmitted_lo,
4135 qstats->total_bytes_transmitted_lo);
4136 ADD_64(fstats->total_unicast_packets_received_hi,
4137 qstats->total_unicast_packets_received_hi,
4138 fstats->total_unicast_packets_received_lo,
4139 qstats->total_unicast_packets_received_lo);
4140 ADD_64(fstats->total_multicast_packets_received_hi,
4141 qstats->total_multicast_packets_received_hi,
4142 fstats->total_multicast_packets_received_lo,
4143 qstats->total_multicast_packets_received_lo);
4144 ADD_64(fstats->total_broadcast_packets_received_hi,
4145 qstats->total_broadcast_packets_received_hi,
4146 fstats->total_broadcast_packets_received_lo,
4147 qstats->total_broadcast_packets_received_lo);
4148 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4149 qstats->total_unicast_packets_transmitted_hi,
4150 fstats->total_unicast_packets_transmitted_lo,
4151 qstats->total_unicast_packets_transmitted_lo);
4152 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4153 qstats->total_multicast_packets_transmitted_hi,
4154 fstats->total_multicast_packets_transmitted_lo,
4155 qstats->total_multicast_packets_transmitted_lo);
4156 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4157 qstats->total_broadcast_packets_transmitted_hi,
4158 fstats->total_broadcast_packets_transmitted_lo,
4159 qstats->total_broadcast_packets_transmitted_lo);
4160 ADD_64(fstats->valid_bytes_received_hi,
4161 qstats->valid_bytes_received_hi,
4162 fstats->valid_bytes_received_lo,
4163 qstats->valid_bytes_received_lo);
4165 ADD_64(estats->error_bytes_received_hi,
4166 qstats->error_bytes_received_hi,
4167 estats->error_bytes_received_lo,
4168 qstats->error_bytes_received_lo);
4169 ADD_64(estats->etherstatsoverrsizepkts_hi,
4170 qstats->etherstatsoverrsizepkts_hi,
4171 estats->etherstatsoverrsizepkts_lo,
4172 qstats->etherstatsoverrsizepkts_lo);
4173 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4174 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4177 ADD_64(fstats->total_bytes_received_hi,
4178 estats->rx_stat_ifhcinbadoctets_hi,
4179 fstats->total_bytes_received_lo,
4180 estats->rx_stat_ifhcinbadoctets_lo);
4182 memcpy(estats, &(fstats->total_bytes_received_hi),
4183 sizeof(struct host_func_stats) - 2*sizeof(u32));
4185 ADD_64(estats->etherstatsoverrsizepkts_hi,
4186 estats->rx_stat_dot3statsframestoolong_hi,
4187 estats->etherstatsoverrsizepkts_lo,
4188 estats->rx_stat_dot3statsframestoolong_lo);
4189 ADD_64(estats->error_bytes_received_hi,
4190 estats->rx_stat_ifhcinbadoctets_hi,
4191 estats->error_bytes_received_lo,
4192 estats->rx_stat_ifhcinbadoctets_lo);
4195 estats->mac_filter_discard =
4196 le32_to_cpu(tport->mac_filter_discard);
4197 estats->xxoverflow_discard =
4198 le32_to_cpu(tport->xxoverflow_discard);
4199 estats->brb_truncate_discard =
4200 le32_to_cpu(tport->brb_truncate_discard);
4201 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4204 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4206 bp->stats_pending = 0;
4211 static void bnx2x_net_stats_update(struct bnx2x *bp)
4213 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4214 struct net_device_stats *nstats = &bp->dev->stats;
4217 nstats->rx_packets =
4218 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4219 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4220 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4222 nstats->tx_packets =
4223 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4224 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4225 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4227 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4229 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4231 nstats->rx_dropped = estats->mac_discard;
4232 for_each_rx_queue(bp, i)
4233 nstats->rx_dropped +=
4234 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4236 nstats->tx_dropped = 0;
4238 nstats->multicast =
4239 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4241 nstats->collisions =
4242 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4244 nstats->rx_length_errors =
4245 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4246 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4247 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4248 bnx2x_hilo(&estats->brb_truncate_hi);
4249 nstats->rx_crc_errors =
4250 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4251 nstats->rx_frame_errors =
4252 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4253 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4254 nstats->rx_missed_errors = estats->xxoverflow_discard;
4256 nstats->rx_errors = nstats->rx_length_errors +
4257 nstats->rx_over_errors +
4258 nstats->rx_crc_errors +
4259 nstats->rx_frame_errors +
4260 nstats->rx_fifo_errors +
4261 nstats->rx_missed_errors;
4263 nstats->tx_aborted_errors =
4264 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4265 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4266 nstats->tx_carrier_errors =
4267 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4268 nstats->tx_fifo_errors = 0;
4269 nstats->tx_heartbeat_errors = 0;
4270 nstats->tx_window_errors = 0;
4272 nstats->tx_errors = nstats->tx_aborted_errors +
4273 nstats->tx_carrier_errors +
4274 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
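/*
 * Re-sum the driver-maintained per-queue counters (Xoff/queue-stop
 * events, Rx error discards, failed skb allocations, bad hardware
 * checksums) from every Rx queue into the global eth_stats.
 */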
4277 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4279 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4282 estats->driver_xoff = 0;
4283 estats->rx_err_discard_pkt = 0;
4284 estats->rx_skb_alloc_failed = 0;
4285 estats->hw_csum_err = 0;
4286 for_each_rx_queue(bp, i) {
4287 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4289 estats->driver_xoff += qstats->driver_xoff;
4290 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4291 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4292 estats->hw_csum_err += qstats->hw_csum_err;
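/*
 * Periodic statistics refresh, run from the state machine on a
 * STATS_EVENT_UPDATE: bail out if the previous DMAE completion has not
 * arrived yet, pull in the hardware and storm statistics (complaining
 * if the storm statistics fail to update several times in a row),
 * convert them to netdev/driver form, optionally dump a debug snapshot
 * when NETIF_MSG_TIMER is set, and re-post the requests for the next
 * cycle.
 */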
4296 static void bnx2x_stats_update(struct bnx2x *bp)
4298 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4300 if (*stats_comp != DMAE_COMP_VAL)
4304 bnx2x_hw_stats_update(bp);
4306 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4307 BNX2X_ERR("storm stats were not updated for 3 times\n");
4312 bnx2x_net_stats_update(bp);
4313 bnx2x_drv_stats_update(bp);
4315 if (bp->msglevel & NETIF_MSG_TIMER) {
4316 struct bnx2x_fastpath *fp0_rx = bp->fp;
4317 struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
4318 struct tstorm_per_client_stats *old_tclient =
4319 &bp->fp->old_tclient;
4320 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4321 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4322 struct net_device_stats *nstats = &bp->dev->stats;
4325 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4326 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4328 bnx2x_tx_avail(fp0_tx),
4329 le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4330 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4332 (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4333 fp0_rx->rx_comp_cons),
4334 le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4335 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4336 "brb truncate %u\n",
4337 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4338 qstats->driver_xoff,
4339 estats->brb_drop_lo, estats->brb_truncate_lo);
4340 printk(KERN_DEBUG "tstats: checksum_discard %u "
4341 "packets_too_big_discard %lu no_buff_discard %lu "
4342 "mac_discard %u mac_filter_discard %u "
4343 "xxovrflow_discard %u brb_truncate_discard %u "
4344 "ttl0_discard %u\n",
4345 le32_to_cpu(old_tclient->checksum_discard),
4346 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4347 bnx2x_hilo(&qstats->no_buff_discard_hi),
4348 estats->mac_discard, estats->mac_filter_discard,
4349 estats->xxoverflow_discard, estats->brb_truncate_discard,
4350 le32_to_cpu(old_tclient->ttl0_discard));
4352 for_each_queue(bp, i) {
4353 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4354 bnx2x_fp(bp, i, tx_pkt),
4355 bnx2x_fp(bp, i, rx_pkt),
4356 bnx2x_fp(bp, i, rx_calls));
4360 bnx2x_hw_stats_post(bp);
4361 bnx2x_storm_stats_post(bp);
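/*
 * Build the DMAE commands that flush the final statistics out before
 * stopping: the port statistics block is written to the port_stx area
 * in shared memory (when one is assigned) and the function statistics
 * to func_stx.  GRC addresses and lengths are apparently given in
 * dwords, hence the ">> 2" conversions.
 */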
4364 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4366 struct dmae_command *dmae;
4368 int loader_idx = PMF_DMAE_C(bp);
4369 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4371 bp->executer_idx = 0;
4373 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4375 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4377 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4379 DMAE_CMD_ENDIANITY_DW_SWAP |
4381 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4382 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4384 if (bp->port.port_stx) {
4386 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4388 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4390 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4391 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4392 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4393 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4394 dmae->dst_addr_hi = 0;
4395 dmae->len = sizeof(struct host_port_stats) >> 2;
4397 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4398 dmae->comp_addr_hi = 0;
4401 dmae->comp_addr_lo =
4402 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4403 dmae->comp_addr_hi =
4404 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4405 dmae->comp_val = DMAE_COMP_VAL;
4413 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4414 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4415 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4416 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4417 dmae->dst_addr_lo = bp->func_stx >> 2;
4418 dmae->dst_addr_hi = 0;
4419 dmae->len = sizeof(struct host_func_stats) >> 2;
4420 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4421 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4422 dmae->comp_val = DMAE_COMP_VAL;
4428 static void bnx2x_stats_stop(struct bnx2x *bp)
4432 bnx2x_stats_comp(bp);
4435 update = (bnx2x_hw_stats_update(bp) == 0);
4437 update |= (bnx2x_storm_stats_update(bp) == 0);
4440 bnx2x_net_stats_update(bp);
4443 bnx2x_port_stats_stop(bp);
4445 bnx2x_hw_stats_post(bp);
4446 bnx2x_stats_comp(bp);
4450 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
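/*
 * Statistics state machine: rows are the current state (DISABLED or
 * ENABLED), columns the incoming event (PMF change, link up, periodic
 * update, stop).  Each entry names the handler to run and the state to
 * move to next; bnx2x_stats_handle() below simply dispatches through
 * this table, e.g. the periodic timer feeds it STATS_EVENT_UPDATE.
 */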
4454 static const struct {
4455 void (*action)(struct bnx2x *bp);
4456 enum bnx2x_stats_state next_state;
4457 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4460 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4461 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4462 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4463 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4466 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4467 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4468 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4469 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4473 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4475 enum bnx2x_stats_state state = bp->stats_state;
4477 bnx2x_stats_stm[state][event].action(bp);
4478 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4480 /* Make sure the state has been "changed" */
4483 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4484 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4485 state, event, bp->stats_state);
4488 static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4490 struct dmae_command *dmae;
4491 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4494 if (!bp->port.pmf || !bp->port.port_stx) {
4495 BNX2X_ERR("BUG!\n");
4499 bp->executer_idx = 0;
4501 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4502 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4503 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4504 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4506 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4508 DMAE_CMD_ENDIANITY_DW_SWAP |
4510 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4511 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4512 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4513 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4514 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4515 dmae->dst_addr_hi = 0;
4516 dmae->len = sizeof(struct host_port_stats) >> 2;
4517 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4518 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4519 dmae->comp_val = DMAE_COMP_VAL;
4522 bnx2x_hw_stats_post(bp);
4523 bnx2x_stats_comp(bp);
4526 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4528 int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4529 int port = BP_PORT(bp);
4534 if (!bp->port.pmf || !bp->func_stx) {
4535 BNX2X_ERR("BUG!\n");
4539 /* save our func_stx */
4540 func_stx = bp->func_stx;
4542 for (vn = VN_0; vn < vn_max; vn++) {
4545 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4546 bnx2x_func_stats_init(bp);
4547 bnx2x_hw_stats_post(bp);
4548 bnx2x_stats_comp(bp);
4551 /* restore our func_stx */
4552 bp->func_stx = func_stx;
4555 static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4557 struct dmae_command *dmae = &bp->stats_dmae;
4558 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4561 if (!bp->func_stx) {
4562 BNX2X_ERR("BUG!\n");
4566 bp->executer_idx = 0;
4567 memset(dmae, 0, sizeof(struct dmae_command));
4569 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4570 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4571 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4573 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4575 DMAE_CMD_ENDIANITY_DW_SWAP |
4577 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4578 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4579 dmae->src_addr_lo = bp->func_stx >> 2;
4580 dmae->src_addr_hi = 0;
4581 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4582 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4583 dmae->len = sizeof(struct host_func_stats) >> 2;
4584 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4585 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4586 dmae->comp_val = DMAE_COMP_VAL;
4589 bnx2x_hw_stats_post(bp);
4590 bnx2x_stats_comp(bp);
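/*
 * Reset the statistics machinery at load time: fetch the port and
 * function statistics addresses from shared memory (unless running
 * without the MCP), snapshot the current NIG discard/truncate and
 * egress MAC counters as the "old" baseline, clear every queue's storm
 * client snapshots along with the netdev and eth_stats accumulators,
 * and, when acting as PMF, (re)initialize the statistics bases,
 * otherwise refresh the function base from shared memory.
 */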
4593 static void bnx2x_stats_init(struct bnx2x *bp)
4595 int port = BP_PORT(bp);
4596 int func = BP_FUNC(bp);
4599 bp->stats_pending = 0;
4600 bp->executer_idx = 0;
4601 bp->stats_counter = 0;
4603 /* port and func stats for management */
4604 if (!BP_NOMCP(bp)) {
4605 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4606 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4609 bp->port.port_stx = 0;
4612 DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
4613 bp->port.port_stx, bp->func_stx);
4616 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4617 bp->port.old_nig_stats.brb_discard =
4618 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4619 bp->port.old_nig_stats.brb_truncate =
4620 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4621 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4622 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4623 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4624 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4626 /* function stats */
4627 for_each_queue(bp, i) {
4628 struct bnx2x_fastpath *fp = &bp->fp[i];
4630 memset(&fp->old_tclient, 0,
4631 sizeof(struct tstorm_per_client_stats));
4632 memset(&fp->old_uclient, 0,
4633 sizeof(struct ustorm_per_client_stats));
4634 memset(&fp->old_xclient, 0,
4635 sizeof(struct xstorm_per_client_stats));
4636 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
4639 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4640 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
4642 bp->stats_state = STATS_STATE_DISABLED;
4645 if (bp->port.port_stx)
4646 bnx2x_port_stats_base_init(bp);
4649 bnx2x_func_stats_base_init(bp);
4651 } else if (bp->func_stx)
4652 bnx2x_func_stats_base_update(bp);
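/*
 * Periodic driver timer: does nothing while the interface is down or
 * interrupts are disabled, can drive bnx2x_rx_int() on fp[0] directly
 * in the (debug) polling path, advances the driver-pulse sequence
 * number in the function mailbox and compares it with the MCP pulse
 * (a delta other than 0 or 1 means a lost heartbeat), kicks a
 * STATS_EVENT_UPDATE while the device is up and finally re-arms itself
 * with current_interval.
 */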
4655 static void bnx2x_timer(unsigned long data)
4657 struct bnx2x *bp = (struct bnx2x *) data;
4659 if (!netif_running(bp->dev))
4662 if (atomic_read(&bp->intr_sem) != 0)
4666 struct bnx2x_fastpath *fp = &bp->fp[0];
4670 rc = bnx2x_rx_int(fp, 1000);
4673 if (!BP_NOMCP(bp)) {
4674 int func = BP_FUNC(bp);
4678 ++bp->fw_drv_pulse_wr_seq;
4679 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4680 /* TBD - add SYSTEM_TIME */
4681 drv_pulse = bp->fw_drv_pulse_wr_seq;
4682 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4684 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4685 MCP_PULSE_SEQ_MASK);
4686 /* The delta between driver pulse and mcp response
4687 * should be 1 (before mcp response) or 0 (after mcp response)
4688 */
4689 if ((drv_pulse != mcp_pulse) &&
4690 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4691 /* someone lost a heartbeat... */
4692 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4693 drv_pulse, mcp_pulse);
4697 if ((bp->state == BNX2X_STATE_OPEN) ||
4698 (bp->state == BNX2X_STATE_DISABLED))
4699 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4702 mod_timer(&bp->timer, jiffies + bp->current_interval);
4705 /* end of Statistics */
4710 * nic init service functions
4713 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4715 int port = BP_PORT(bp);
4718 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4719 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4720 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4721 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4722 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4723 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
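/*
 * Register a per-queue host status block with the chip: the USTORM and
 * CSTORM halves each get the DMA address of their section written into
 * CSTORM internal memory, are tagged with the owning function, start
 * with all host-coalescing indices disabled, and the status block is
 * then acknowledged/enabled towards the IGU.
 */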
4726 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4727 dma_addr_t mapping, int sb_id)
4729 int port = BP_PORT(bp);
4730 int func = BP_FUNC(bp);
4735 section = ((u64)mapping) + offsetof(struct host_status_block,
4737 sb->u_status_block.status_block_id = sb_id;
4739 REG_WR(bp, BAR_CSTRORM_INTMEM +
4740 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4741 REG_WR(bp, BAR_CSTRORM_INTMEM +
4742 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
4744 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4745 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
4747 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4748 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4749 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
4752 section = ((u64)mapping) + offsetof(struct host_status_block,
4754 sb->c_status_block.status_block_id = sb_id;
4756 REG_WR(bp, BAR_CSTRORM_INTMEM +
4757 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
4758 REG_WR(bp, BAR_CSTRORM_INTMEM +
4759 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
4761 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4762 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
4764 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4765 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4766 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
4768 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4771 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4773 int func = BP_FUNC(bp);
4775 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
4776 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4777 sizeof(struct tstorm_def_status_block)/4);
4778 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4779 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4780 sizeof(struct cstorm_def_status_block_u)/4);
4781 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4782 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4783 sizeof(struct cstorm_def_status_block_c)/4);
4784 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
4785 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4786 sizeof(struct xstorm_def_status_block)/4);
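/*
 * Default status block setup: the attention segment caches the four
 * 32-bit AEU signal masks of every dynamic attention group and has its
 * address programmed into the HC attention message registers, while the
 * U/C/T/X default segments are registered with their storms with all
 * host-coalescing indices initially disabled; the block is then ACKed
 * and enabled in the IGU.
 */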
4789 static void bnx2x_init_def_sb(struct bnx2x *bp,
4790 struct host_def_status_block *def_sb,
4791 dma_addr_t mapping, int sb_id)
4793 int port = BP_PORT(bp);
4794 int func = BP_FUNC(bp);
4795 int index, val, reg_offset;
4799 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4800 atten_status_block);
4801 def_sb->atten_status_block.status_block_id = sb_id;
4805 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4806 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4808 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4809 bp->attn_group[index].sig[0] = REG_RD(bp,
4810 reg_offset + 0x10*index);
4811 bp->attn_group[index].sig[1] = REG_RD(bp,
4812 reg_offset + 0x4 + 0x10*index);
4813 bp->attn_group[index].sig[2] = REG_RD(bp,
4814 reg_offset + 0x8 + 0x10*index);
4815 bp->attn_group[index].sig[3] = REG_RD(bp,
4816 reg_offset + 0xc + 0x10*index);
4819 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4820 HC_REG_ATTN_MSG0_ADDR_L);
4822 REG_WR(bp, reg_offset, U64_LO(section));
4823 REG_WR(bp, reg_offset + 4, U64_HI(section));
4825 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4827 val = REG_RD(bp, reg_offset);
4829 REG_WR(bp, reg_offset, val);
4832 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4833 u_def_status_block);
4834 def_sb->u_def_status_block.status_block_id = sb_id;
4836 REG_WR(bp, BAR_CSTRORM_INTMEM +
4837 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4838 REG_WR(bp, BAR_CSTRORM_INTMEM +
4839 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
4841 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4842 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
4844 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4845 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4846 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
4849 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4850 c_def_status_block);
4851 def_sb->c_def_status_block.status_block_id = sb_id;
4853 REG_WR(bp, BAR_CSTRORM_INTMEM +
4854 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
4855 REG_WR(bp, BAR_CSTRORM_INTMEM +
4856 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
4858 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4859 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
4861 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4862 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4863 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
4866 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4867 t_def_status_block);
4868 def_sb->t_def_status_block.status_block_id = sb_id;
4870 REG_WR(bp, BAR_TSTRORM_INTMEM +
4871 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4872 REG_WR(bp, BAR_TSTRORM_INTMEM +
4873 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4875 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4876 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4878 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4879 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4880 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4883 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4884 x_def_status_block);
4885 def_sb->x_def_status_block.status_block_id = sb_id;
4887 REG_WR(bp, BAR_XSTRORM_INTMEM +
4888 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4889 REG_WR(bp, BAR_XSTRORM_INTMEM +
4890 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4892 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4893 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4895 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4896 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4897 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4899 bp->stats_pending = 0;
4900 bp->set_mac_pending = 0;
4902 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
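/*
 * Program the interrupt coalescing timeouts for every queue: the Rx CQ
 * index (USTORM side) gets rx_ticks and the Tx CQ index (CSTORM side)
 * gets tx_ticks, apparently in units of 12 usec; a value that rounds to
 * zero disables coalescing for that index via the HC_DISABLE register.
 */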
4905 static void bnx2x_update_coalesce(struct bnx2x *bp)
4907 int port = BP_PORT(bp);
4910 for_each_queue(bp, i) {
4911 int sb_id = bp->fp[i].sb_id;
4913 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4914 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4915 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4916 U_SB_ETH_RX_CQ_INDEX),
4918 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4919 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4920 U_SB_ETH_RX_CQ_INDEX),
4921 (bp->rx_ticks/12) ? 0 : 1);
4923 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4924 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4925 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4926 C_SB_ETH_TX_CQ_INDEX),
4928 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4929 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4930 C_SB_ETH_TX_CQ_INDEX),
4931 (bp->tx_ticks/12) ? 0 : 1);
4935 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4936 struct bnx2x_fastpath *fp, int last)
4940 for (i = 0; i < last; i++) {
4941 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4942 struct sk_buff *skb = rx_buf->skb;
4945 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4949 if (fp->tpa_state[i] == BNX2X_TPA_START)
4950 pci_unmap_single(bp->pdev,
4951 pci_unmap_addr(rx_buf, mapping),
4952 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
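/*
 * Rx ring initialization: size the receive buffers from the MTU,
 * pre-allocate the per-queue TPA (LRO) skb pool when TPA is enabled
 * (falling back to TPA-disabled on allocation failure), chain the
 * "next page" elements of the SGE, Rx BD and RCQ rings, fill the rings
 * with SGEs and skbs, and publish the initial producers to the chip.
 */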
4959 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4961 int func = BP_FUNC(bp);
4962 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4963 ETH_MAX_AGGREGATION_QUEUES_E1H;
4964 u16 ring_prod, cqe_ring_prod;
4967 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4969 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4971 if (bp->flags & TPA_ENABLE_FLAG) {
4973 for_each_rx_queue(bp, j) {
4974 struct bnx2x_fastpath *fp = &bp->fp[j];
4976 for (i = 0; i < max_agg_queues; i++) {
4977 fp->tpa_pool[i].skb =
4978 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4979 if (!fp->tpa_pool[i].skb) {
4980 BNX2X_ERR("Failed to allocate TPA "
4981 "skb pool for queue[%d] - "
4982 "disabling TPA on this "
4984 bnx2x_free_tpa_pool(bp, fp, i);
4985 fp->disable_tpa = 1;
4988 pci_unmap_addr_set((struct sw_rx_bd *)
4989 &bp->fp->tpa_pool[i],
4991 fp->tpa_state[i] = BNX2X_TPA_STOP;
4996 for_each_rx_queue(bp, j) {
4997 struct bnx2x_fastpath *fp = &bp->fp[j];
5000 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
5001 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
5003 /* Mark queue as Rx */
5004 fp->is_rx_queue = 1;
5006 /* "next page" elements initialization */
5008 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
5009 struct eth_rx_sge *sge;
5011 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
5013 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
5014 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5016 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
5017 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5020 bnx2x_init_sge_ring_bit_mask(fp);
5023 for (i = 1; i <= NUM_RX_RINGS; i++) {
5024 struct eth_rx_bd *rx_bd;
5026 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5028 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
5029 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5031 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
5032 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5036 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5037 struct eth_rx_cqe_next_page *nextpg;
5039 nextpg = (struct eth_rx_cqe_next_page *)
5040 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5042 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5043 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5045 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5046 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5049 /* Allocate SGEs and initialize the ring elements */
5050 for (i = 0, ring_prod = 0;
5051 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
5053 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5054 BNX2X_ERR("was only able to allocate "
5056 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5057 /* Cleanup already allocated elements */
5058 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
5059 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
5060 fp->disable_tpa = 1;
5064 ring_prod = NEXT_SGE_IDX(ring_prod);
5066 fp->rx_sge_prod = ring_prod;
5068 /* Allocate BDs and initialize BD ring */
5069 fp->rx_comp_cons = 0;
5070 cqe_ring_prod = ring_prod = 0;
5071 for (i = 0; i < bp->rx_ring_size; i++) {
5072 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5073 BNX2X_ERR("was only able to allocate "
5074 "%d rx skbs on queue[%d]\n", i, j);
5075 fp->eth_q_stats.rx_skb_alloc_failed++;
5078 ring_prod = NEXT_RX_IDX(ring_prod);
5079 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5080 WARN_ON(ring_prod <= i);
5083 fp->rx_bd_prod = ring_prod;
5084 /* must not have more available CQEs than BDs */
5085 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5087 fp->rx_pkt = fp->rx_calls = 0;
5090 * this will generate an interrupt (to the TSTORM)
5091 * must only be done after chip is initialized
5093 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5098 REG_WR(bp, BAR_USTRORM_INTMEM +
5099 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5100 U64_LO(fp->rx_comp_mapping));
5101 REG_WR(bp, BAR_USTRORM_INTMEM +
5102 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5103 U64_HI(fp->rx_comp_mapping));
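/*
 * Tx ring initialization: chain the Tx BD pages through their next_bd
 * entries, reset the doorbell data and the producer/consumer indices,
 * and point tx_cons_sb at the Tx index of the status block.
 */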
5107 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5111 for_each_tx_queue(bp, j) {
5112 struct bnx2x_fastpath *fp = &bp->fp[j];
5114 for (i = 1; i <= NUM_TX_RINGS; i++) {
5115 struct eth_tx_next_bd *tx_next_bd =
5116 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5118 tx_next_bd->addr_hi =
5119 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5120 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5121 tx_next_bd->addr_lo =
5122 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5123 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5126 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5127 fp->tx_db.data.zero_fill1 = 0;
5128 fp->tx_db.data.prod = 0;
5130 fp->tx_pkt_prod = 0;
5131 fp->tx_pkt_cons = 0;
5134 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5138 /* clean tx statistics */
5139 for_each_rx_queue(bp, i)
5140 bnx2x_fp(bp, i, tx_pkt) = 0;
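/*
 * Slowpath (SPQ) ring initialization: reset the producer index and BD
 * pointers and hand the SPQ page base and producer offset to the
 * XSTORM.
 */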
5143 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5145 int func = BP_FUNC(bp);
5147 spin_lock_init(&bp->spq_lock);
5149 bp->spq_left = MAX_SPQ_PENDING;
5150 bp->spq_prod_idx = 0;
5151 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5152 bp->spq_prod_bd = bp->spq;
5153 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5155 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5156 U64_LO(bp->spq_mapping));
5158 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5159 U64_HI(bp->spq_mapping));
5161 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
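/*
 * Per-connection context setup: every Rx client's USTORM context gets
 * the status-block index numbers, client and status-block ids, MC
 * alignment, buffer size and Rx BD page base, plus the SGE page base,
 * SGE buffer size and max SGEs per packet when TPA is enabled; CDU
 * reservation values are set for the U and X aggregation contexts.
 * Tx clients get their CQ index, status block id and Tx BD page base
 * in the CSTORM/XSTORM contexts, with statistics enabled per client.
 */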
5165 static void bnx2x_init_context(struct bnx2x *bp)
5169 for_each_rx_queue(bp, i) {
5170 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5171 struct bnx2x_fastpath *fp = &bp->fp[i];
5172 u8 cl_id = fp->cl_id;
5174 context->ustorm_st_context.common.sb_index_numbers =
5175 BNX2X_RX_SB_INDEX_NUM;
5176 context->ustorm_st_context.common.clientId = cl_id;
5177 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5178 context->ustorm_st_context.common.flags =
5179 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5180 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5181 context->ustorm_st_context.common.statistics_counter_id =
5183 context->ustorm_st_context.common.mc_alignment_log_size =
5184 BNX2X_RX_ALIGN_SHIFT;
5185 context->ustorm_st_context.common.bd_buff_size =
5187 context->ustorm_st_context.common.bd_page_base_hi =
5188 U64_HI(fp->rx_desc_mapping);
5189 context->ustorm_st_context.common.bd_page_base_lo =
5190 U64_LO(fp->rx_desc_mapping);
5191 if (!fp->disable_tpa) {
5192 context->ustorm_st_context.common.flags |=
5193 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5194 context->ustorm_st_context.common.sge_buff_size =
5195 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5197 context->ustorm_st_context.common.sge_page_base_hi =
5198 U64_HI(fp->rx_sge_mapping);
5199 context->ustorm_st_context.common.sge_page_base_lo =
5200 U64_LO(fp->rx_sge_mapping);
5202 context->ustorm_st_context.common.max_sges_for_packet =
5203 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5204 context->ustorm_st_context.common.max_sges_for_packet =
5205 ((context->ustorm_st_context.common.
5206 max_sges_for_packet + PAGES_PER_SGE - 1) &
5207 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5210 context->ustorm_ag_context.cdu_usage =
5211 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5212 CDU_REGION_NUMBER_UCM_AG,
5213 ETH_CONNECTION_TYPE);
5215 context->xstorm_ag_context.cdu_reserved =
5216 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5217 CDU_REGION_NUMBER_XCM_AG,
5218 ETH_CONNECTION_TYPE);
5221 for_each_tx_queue(bp, i) {
5222 struct bnx2x_fastpath *fp = &bp->fp[i];
5223 struct eth_context *context =
5224 bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);
5226 context->cstorm_st_context.sb_index_number =
5227 C_SB_ETH_TX_CQ_INDEX;
5228 context->cstorm_st_context.status_block_id = fp->sb_id;
5230 context->xstorm_st_context.tx_bd_page_base_hi =
5231 U64_HI(fp->tx_desc_mapping);
5232 context->xstorm_st_context.tx_bd_page_base_lo =
5233 U64_LO(fp->tx_desc_mapping);
5234 context->xstorm_st_context.statistics_data = (fp->cl_id |
5235 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
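/*
 * RSS indirection table: skipped when multi-queue is disabled;
 * otherwise every TSTORM indirection entry is filled with a client id,
 * spreading the entries round-robin across the Rx queues.
 */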
5239 static void bnx2x_init_ind_table(struct bnx2x *bp)
5241 int func = BP_FUNC(bp);
5244 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5248 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
5249 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5250 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5251 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5252 bp->fp->cl_id + (i % bp->num_rx_queues));
5255 static void bnx2x_set_client_config(struct bnx2x *bp)
5257 struct tstorm_eth_client_config tstorm_client = {0};
5258 int port = BP_PORT(bp);
5261 tstorm_client.mtu = bp->dev->mtu;
5262 tstorm_client.config_flags =
5263 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5264 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5266 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5267 tstorm_client.config_flags |=
5268 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5269 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5273 for_each_queue(bp, i) {
5274 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5276 REG_WR(bp, BAR_TSTRORM_INTMEM +
5277 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5278 ((u32 *)&tstorm_client)[0]);
5279 REG_WR(bp, BAR_TSTRORM_INTMEM +
5280 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5281 ((u32 *)&tstorm_client)[1]);
5284 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5285 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
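/*
 * Translate bp->rx_mode into the TSTORM MAC filter configuration
 * (drop-all/accept-all masks for unicast, multicast and broadcast) and
 * into the NIG LLH mask that selects which classes are passed to the
 * host next to management traffic; promiscuous mode also lets
 * management unicast through.  The per-client config is refreshed
 * afterwards unless Rx is completely disabled.
 */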
5288 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5290 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5291 int mode = bp->rx_mode;
5292 int mask = bp->rx_mode_cl_mask;
5293 int func = BP_FUNC(bp);
5294 int port = BP_PORT(bp);
5296 /* All but management unicast packets should pass to the host as well */
5298 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5299 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5300 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5301 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5303 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
5306 case BNX2X_RX_MODE_NONE: /* no Rx */
5307 tstorm_mac_filter.ucast_drop_all = mask;
5308 tstorm_mac_filter.mcast_drop_all = mask;
5309 tstorm_mac_filter.bcast_drop_all = mask;
5312 case BNX2X_RX_MODE_NORMAL:
5313 tstorm_mac_filter.bcast_accept_all = mask;
5316 case BNX2X_RX_MODE_ALLMULTI:
5317 tstorm_mac_filter.mcast_accept_all = mask;
5318 tstorm_mac_filter.bcast_accept_all = mask;
5321 case BNX2X_RX_MODE_PROMISC:
5322 tstorm_mac_filter.ucast_accept_all = mask;
5323 tstorm_mac_filter.mcast_accept_all = mask;
5324 tstorm_mac_filter.bcast_accept_all = mask;
5325 /* pass management unicast packets as well */
5326 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5330 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5335 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5338 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5339 REG_WR(bp, BAR_TSTRORM_INTMEM +
5340 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5341 ((u32 *)&tstorm_mac_filter)[i]);
5343 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5344 ((u32 *)&tstorm_mac_filter)[i]); */
5347 if (mode != BNX2X_RX_MODE_NONE)
5348 bnx2x_set_client_config(bp);
5351 static void bnx2x_init_internal_common(struct bnx2x *bp)
5355 /* Zero this manually as its initialization is
5356 currently missing in the initTool */
5357 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5358 REG_WR(bp, BAR_USTRORM_INTMEM +
5359 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5362 static void bnx2x_init_internal_port(struct bnx2x *bp)
5364 int port = BP_PORT(bp);
5367 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5369 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5370 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5371 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
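/*
 * Per-function internal memory setup: RSS configuration and mask,
 * optional TPA enable, leading client id, an initial "no Rx" mode until
 * the link comes up, zeroing of the per-client X/T/U storm statistics,
 * the statistics flags and firmware statistics query address for each
 * storm, E1H function mode and outer VLAN, the maximum TPA aggregation
 * size (the FW limit is 8 frags), per-client CQE page addresses,
 * dropless flow control thresholds on E1H, and the per-port rate
 * shaping/fairness (CMNG) context.
 */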
5374 static void bnx2x_init_internal_func(struct bnx2x *bp)
5376 struct tstorm_eth_function_common_config tstorm_config = {0};
5377 struct stats_indication_flags stats_flags = {0};
5378 int port = BP_PORT(bp);
5379 int func = BP_FUNC(bp);
5385 tstorm_config.config_flags = MULTI_FLAGS(bp);
5386 tstorm_config.rss_result_mask = MULTI_MASK;
5389 /* Enable TPA if needed */
5390 if (bp->flags & TPA_ENABLE_FLAG)
5391 tstorm_config.config_flags |=
5392 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5395 tstorm_config.config_flags |=
5396 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5398 tstorm_config.leading_client_id = BP_L_ID(bp);
5400 REG_WR(bp, BAR_TSTRORM_INTMEM +
5401 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5402 (*(u32 *)&tstorm_config));
5404 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5405 bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
5406 bnx2x_set_storm_rx_mode(bp);
5408 for_each_queue(bp, i) {
5409 u8 cl_id = bp->fp[i].cl_id;
5411 /* reset xstorm per client statistics */
5412 offset = BAR_XSTRORM_INTMEM +
5413 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5415 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5416 REG_WR(bp, offset + j*4, 0);
5418 /* reset tstorm per client statistics */
5419 offset = BAR_TSTRORM_INTMEM +
5420 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5422 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5423 REG_WR(bp, offset + j*4, 0);
5425 /* reset ustorm per client statistics */
5426 offset = BAR_USTRORM_INTMEM +
5427 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5429 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5430 REG_WR(bp, offset + j*4, 0);
5433 /* Init statistics related context */
5434 stats_flags.collect_eth = 1;
5436 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5437 ((u32 *)&stats_flags)[0]);
5438 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5439 ((u32 *)&stats_flags)[1]);
5441 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5442 ((u32 *)&stats_flags)[0]);
5443 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5444 ((u32 *)&stats_flags)[1]);
5446 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5447 ((u32 *)&stats_flags)[0]);
5448 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5449 ((u32 *)&stats_flags)[1]);
5451 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5452 ((u32 *)&stats_flags)[0]);
5453 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5454 ((u32 *)&stats_flags)[1]);
5456 REG_WR(bp, BAR_XSTRORM_INTMEM +
5457 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5458 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5459 REG_WR(bp, BAR_XSTRORM_INTMEM +
5460 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5461 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5463 REG_WR(bp, BAR_TSTRORM_INTMEM +
5464 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5465 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5466 REG_WR(bp, BAR_TSTRORM_INTMEM +
5467 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5468 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5470 REG_WR(bp, BAR_USTRORM_INTMEM +
5471 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5472 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5473 REG_WR(bp, BAR_USTRORM_INTMEM +
5474 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5475 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5477 if (CHIP_IS_E1H(bp)) {
5478 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5480 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5482 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5484 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5487 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5491 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5493 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5494 SGE_PAGE_SIZE * PAGES_PER_SGE),
5496 for_each_rx_queue(bp, i) {
5497 struct bnx2x_fastpath *fp = &bp->fp[i];
5499 REG_WR(bp, BAR_USTRORM_INTMEM +
5500 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5501 U64_LO(fp->rx_comp_mapping));
5502 REG_WR(bp, BAR_USTRORM_INTMEM +
5503 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5504 U64_HI(fp->rx_comp_mapping));
5507 REG_WR(bp, BAR_USTRORM_INTMEM +
5508 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5509 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5510 REG_WR(bp, BAR_USTRORM_INTMEM +
5511 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5512 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5514 REG_WR16(bp, BAR_USTRORM_INTMEM +
5515 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5519 /* dropless flow control */
5520 if (CHIP_IS_E1H(bp)) {
5521 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5523 rx_pause.bd_thr_low = 250;
5524 rx_pause.cqe_thr_low = 250;
5526 rx_pause.sge_thr_low = 0;
5527 rx_pause.bd_thr_high = 350;
5528 rx_pause.cqe_thr_high = 350;
5529 rx_pause.sge_thr_high = 0;
5531 for_each_rx_queue(bp, i) {
5532 struct bnx2x_fastpath *fp = &bp->fp[i];
5534 if (!fp->disable_tpa) {
5535 rx_pause.sge_thr_low = 150;
5536 rx_pause.sge_thr_high = 250;
5540 offset = BAR_USTRORM_INTMEM +
5541 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5544 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5546 REG_WR(bp, offset + j*4,
5547 ((u32 *)&rx_pause)[j]);
5551 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5553 /* Init rate shaping and fairness contexts */
5557 /* During init there is no active link
5558 Until link is up, set link rate to 10Gbps */
5559 bp->link_vars.line_speed = SPEED_10000;
5560 bnx2x_init_port_minmax(bp);
5562 bnx2x_calc_vn_weight_sum(bp);
5564 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5565 bnx2x_init_vn_minmax(bp, 2*vn + port);
5567 /* Enable rate shaping and fairness */
5568 bp->cmng.flags.cmng_enables =
5569 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5570 if (bp->vn_weight_sum)
5571 bp->cmng.flags.cmng_enables |=
5572 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5574 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5575 " fairness will be disabled\n");
5577 /* rate shaping and fairness are disabled */
5579 "single function mode minmax will be disabled\n");
5583 /* Store it to internal memory */
5585 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5586 REG_WR(bp, BAR_XSTRORM_INTMEM +
5587 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5588 ((u32 *)(&bp->cmng))[i]);
5591 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5593 switch (load_code) {
5594 case FW_MSG_CODE_DRV_LOAD_COMMON:
5595 bnx2x_init_internal_common(bp);
5598 case FW_MSG_CODE_DRV_LOAD_PORT:
5599 bnx2x_init_internal_port(bp);
5602 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5603 bnx2x_init_internal_func(bp);
5607 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5612 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5616 for_each_queue(bp, i) {
5617 struct bnx2x_fastpath *fp = &bp->fp[i];
5620 fp->state = BNX2X_FP_STATE_CLOSED;
5622 fp->cl_id = BP_L_ID(bp) + i;
5624 fp->sb_id = fp->cl_id + 1;
5626 fp->sb_id = fp->cl_id;
5628 /* Suitable Rx and Tx SBs are served by the same client */
5629 if (i >= bp->num_rx_queues)
5630 fp->cl_id -= bp->num_rx_queues;
5632 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5633 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5634 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5636 bnx2x_update_fpsb_idx(fp);
5639 /* ensure status block indices were read */
5643 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5645 bnx2x_update_dsb_idx(bp);
5646 bnx2x_update_coalesce(bp);
5647 bnx2x_init_rx_rings(bp);
5648 bnx2x_init_tx_ring(bp);
5649 bnx2x_init_sp_ring(bp);
5650 bnx2x_init_context(bp);
5651 bnx2x_init_internal(bp, load_code);
5652 bnx2x_init_ind_table(bp);
5653 bnx2x_stats_init(bp);
5655 /* At this point, we are ready for interrupts */
5656 atomic_set(&bp->intr_sem, 0);
5658 /* flush all before enabling interrupts */
5662 bnx2x_int_enable(bp);
5664 /* Check for SPIO5 */
5665 bnx2x_attn_int_deasserted0(bp,
5666 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5667 AEU_INPUTS_ATTN_BITS_SPIO5);
5670 /* end of nic init */
5673 * gzip service functions
5676 static int bnx2x_gunzip_init(struct bnx2x *bp)
5678 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5679 &bp->gunzip_mapping);
5680 if (bp->gunzip_buf == NULL)
5683 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5684 if (bp->strm == NULL)
5687 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5689 if (bp->strm->workspace == NULL)
5699 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5700 bp->gunzip_mapping);
5701 bp->gunzip_buf = NULL;
5704 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5705 " un-compression\n", bp->dev->name);
5709 static void bnx2x_gunzip_end(struct bnx2x *bp)
5711 kfree(bp->strm->workspace);
5716 if (bp->gunzip_buf) {
5717 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5718 bp->gunzip_mapping);
5719 bp->gunzip_buf = NULL;
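/*
 * Decompress a gzip'ed firmware blob into the pre-allocated gunzip
 * buffer: verify the gzip magic and deflate method, skip the optional
 * original-file-name field when FNAME is set, then run a raw zlib
 * inflate (negative window bits, i.e. no zlib header) into
 * bp->gunzip_buf.
 */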
5723 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5727 /* check gzip header */
5728 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5729 BNX2X_ERR("Bad gzip header\n");
5737 if (zbuf[3] & FNAME)
5738 while ((zbuf[n++] != 0) && (n < len));
5740 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5741 bp->strm->avail_in = len - n;
5742 bp->strm->next_out = bp->gunzip_buf;
5743 bp->strm->avail_out = FW_BUF_SIZE;
5745 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);