drivers/net/bnx2x_main.c (linux-2.6.git) at commit "bnx2x: MDC/MDIO CL45 IOCTLs"
1 /* bnx2x_main.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2009 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath and fastpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h>  /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
51 #include <linux/io.h>
52
53
54 #include "bnx2x.h"
55 #include "bnx2x_init.h"
56 #include "bnx2x_init_ops.h"
57 #include "bnx2x_dump.h"
58
59 #define DRV_MODULE_VERSION      "1.48.114-1"
60 #define DRV_MODULE_RELDATE      "2009/07/29"
61 #define BNX2X_BC_VER            0x040200
62
63 #include <linux/firmware.h>
64 #include "bnx2x_fw_file_hdr.h"
65 /* FW files */
66 #define FW_FILE_PREFIX_E1               "bnx2x-e1-"
67 #define FW_FILE_PREFIX_E1H              "bnx2x-e1h-"
68
69 /* Time in jiffies before concluding the transmitter is hung */
70 #define TX_TIMEOUT              (5*HZ)
71
72 static char version[] __devinitdata =
73         "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
74         DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
75
76 MODULE_AUTHOR("Eliezer Tamir");
77 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
78 MODULE_LICENSE("GPL");
79 MODULE_VERSION(DRV_MODULE_VERSION);
80
81 static int multi_mode = 1;
82 module_param(multi_mode, int, 0);
83 MODULE_PARM_DESC(multi_mode, " Multi queue mode "
84                              "(0 Disable; 1 Enable (default))");
85
86 static int num_rx_queues;
87 module_param(num_rx_queues, int, 0);
88 MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
89                                 " (default is half number of CPUs)");
90
91 static int num_tx_queues;
92 module_param(num_tx_queues, int, 0);
93 MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
94                                 " (default is half number of CPUs)");
95
96 static int disable_tpa;
97 module_param(disable_tpa, int, 0);
98 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
99
100 static int int_mode;
101 module_param(int_mode, int, 0);
102 MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
103
104 static int poll;
105 module_param(poll, int, 0);
106 MODULE_PARM_DESC(poll, " Use polling (for debug)");
107
108 static int mrrs = -1;
109 module_param(mrrs, int, 0);
110 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
111
112 static int debug;
113 module_param(debug, int, 0);
114 MODULE_PARM_DESC(debug, " Default debug msglevel");
115
116 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
117
118 static struct workqueue_struct *bnx2x_wq;
119
120 enum bnx2x_board_type {
121         BCM57710 = 0,
122         BCM57711 = 1,
123         BCM57711E = 2,
124 };
125
126 /* indexed by board_type, above */
127 static struct {
128         char *name;
129 } board_info[] __devinitdata = {
130         { "Broadcom NetXtreme II BCM57710 XGb" },
131         { "Broadcom NetXtreme II BCM57711 XGb" },
132         { "Broadcom NetXtreme II BCM57711E XGb" }
133 };
134
135
136 static const struct pci_device_id bnx2x_pci_tbl[] = {
137         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
138                 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
139         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
140                 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
141         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
142                 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
143         { 0 }
144 };
145
146 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
147
148 /****************************************************************************
149 * General service functions
150 ****************************************************************************/
151
152 /* used only at init
153  * locking is done by mcp
154  */
155 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
156 {
157         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
158         pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
159         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
160                                PCICFG_VENDOR_ID_OFFSET);
161 }
162
163 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
164 {
165         u32 val;
166
167         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
168         pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
169         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
170                                PCICFG_VENDOR_ID_OFFSET);
171
172         return val;
173 }
174
175 static const u32 dmae_reg_go_c[] = {
176         DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
177         DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
178         DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
179         DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
180 };
181
182 /* copy command into DMAE command memory and set DMAE command go */
183 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
184                             int idx)
185 {
186         u32 cmd_offset;
187         int i;
188
189         cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
190         for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
191                 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
192
193                 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
194                    idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
195         }
196         REG_WR(bp, dmae_reg_go_c[idx], 1);
197 }
198
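/* DMA len32 dwords from host memory at dma_addr to GRC address dst_addr.
 * Falls back to indirect register writes while the DMAE block is not yet
 * ready, and busy-waits on the write-back completion word otherwise.
 */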
199 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
200                       u32 len32)
201 {
202         struct dmae_command *dmae = &bp->init_dmae;
203         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
204         int cnt = 200;
205
206         if (!bp->dmae_ready) {
207                 u32 *data = bnx2x_sp(bp, wb_data[0]);
208
209                 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
210                    "  using indirect\n", dst_addr, len32);
211                 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
212                 return;
213         }
214
215         mutex_lock(&bp->dmae_mutex);
216
217         memset(dmae, 0, sizeof(struct dmae_command));
218
219         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
220                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
221                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
222 #ifdef __BIG_ENDIAN
223                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
224 #else
225                         DMAE_CMD_ENDIANITY_DW_SWAP |
226 #endif
227                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
228                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
229         dmae->src_addr_lo = U64_LO(dma_addr);
230         dmae->src_addr_hi = U64_HI(dma_addr);
231         dmae->dst_addr_lo = dst_addr >> 2;
232         dmae->dst_addr_hi = 0;
233         dmae->len = len32;
234         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
235         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
236         dmae->comp_val = DMAE_COMP_VAL;
237
238         DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
239            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
240                     "dst_addr [%x:%08x (%08x)]\n"
241            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
242            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
243            dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
244            dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
245         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
246            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
247            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
248
249         *wb_comp = 0;
250
251         bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
252
253         udelay(5);
254
255         while (*wb_comp != DMAE_COMP_VAL) {
256                 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
257
258                 if (!cnt) {
259                         BNX2X_ERR("DMAE timeout!\n");
260                         break;
261                 }
262                 cnt--;
263                 /* adjust delay for emulation/FPGA */
264                 if (CHIP_REV_IS_SLOW(bp))
265                         msleep(100);
266                 else
267                         udelay(5);
268         }
269
270         mutex_unlock(&bp->dmae_mutex);
271 }
272
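/* DMA len32 dwords from GRC address src_addr into the slowpath wb_data
 * buffer. Falls back to indirect register reads while the DMAE block is
 * not yet ready.
 */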
273 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
274 {
275         struct dmae_command *dmae = &bp->init_dmae;
276         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
277         int cnt = 200;
278
279         if (!bp->dmae_ready) {
280                 u32 *data = bnx2x_sp(bp, wb_data[0]);
281                 int i;
282
283                 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
284                    "  using indirect\n", src_addr, len32);
285                 for (i = 0; i < len32; i++)
286                         data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
287                 return;
288         }
289
290         mutex_lock(&bp->dmae_mutex);
291
292         memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
293         memset(dmae, 0, sizeof(struct dmae_command));
294
295         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
296                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
297                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
298 #ifdef __BIG_ENDIAN
299                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
300 #else
301                         DMAE_CMD_ENDIANITY_DW_SWAP |
302 #endif
303                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
304                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
305         dmae->src_addr_lo = src_addr >> 2;
306         dmae->src_addr_hi = 0;
307         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
308         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
309         dmae->len = len32;
310         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
311         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
312         dmae->comp_val = DMAE_COMP_VAL;
313
314         DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
315            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
316                     "dst_addr [%x:%08x (%08x)]\n"
317            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
318            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
319            dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
320            dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
321
322         *wb_comp = 0;
323
324         bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
325
326         udelay(5);
327
328         while (*wb_comp != DMAE_COMP_VAL) {
329
330                 if (!cnt) {
331                         BNX2X_ERR("DMAE timeout!\n");
332                         break;
333                 }
334                 cnt--;
335                 /* adjust delay for emulation/FPGA */
336                 if (CHIP_REV_IS_SLOW(bp))
337                         msleep(100);
338                 else
339                         udelay(5);
340         }
341         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
342            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
343            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
344
345         mutex_unlock(&bp->dmae_mutex);
346 }
347
348 /* used only for slowpath so not inlined */
349 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
350 {
351         u32 wb_write[2];
352
353         wb_write[0] = val_hi;
354         wb_write[1] = val_lo;
355         REG_WR_DMAE(bp, reg, wb_write, 2);
356 }
357
358 #ifdef USE_WB_RD
359 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
360 {
361         u32 wb_data[2];
362
363         REG_RD_DMAE(bp, reg, wb_data, 2);
364
365         return HILO_U64(wb_data[0], wb_data[1]);
366 }
367 #endif
368
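/* Scan the XSTORM/TSTORM/CSTORM/USTORM assert lists and print every valid
 * entry; returns the number of asserts found.
 */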
369 static int bnx2x_mc_assert(struct bnx2x *bp)
370 {
371         char last_idx;
372         int i, rc = 0;
373         u32 row0, row1, row2, row3;
374
375         /* XSTORM */
376         last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
377                            XSTORM_ASSERT_LIST_INDEX_OFFSET);
378         if (last_idx)
379                 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
380
381         /* print the asserts */
382         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
383
384                 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
385                               XSTORM_ASSERT_LIST_OFFSET(i));
386                 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
387                               XSTORM_ASSERT_LIST_OFFSET(i) + 4);
388                 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
389                               XSTORM_ASSERT_LIST_OFFSET(i) + 8);
390                 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
391                               XSTORM_ASSERT_LIST_OFFSET(i) + 12);
392
393                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
394                         BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
395                                   " 0x%08x 0x%08x 0x%08x\n",
396                                   i, row3, row2, row1, row0);
397                         rc++;
398                 } else {
399                         break;
400                 }
401         }
402
403         /* TSTORM */
404         last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
405                            TSTORM_ASSERT_LIST_INDEX_OFFSET);
406         if (last_idx)
407                 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
408
409         /* print the asserts */
410         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
411
412                 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
413                               TSTORM_ASSERT_LIST_OFFSET(i));
414                 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
415                               TSTORM_ASSERT_LIST_OFFSET(i) + 4);
416                 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
417                               TSTORM_ASSERT_LIST_OFFSET(i) + 8);
418                 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
419                               TSTORM_ASSERT_LIST_OFFSET(i) + 12);
420
421                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
422                         BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
423                                   " 0x%08x 0x%08x 0x%08x\n",
424                                   i, row3, row2, row1, row0);
425                         rc++;
426                 } else {
427                         break;
428                 }
429         }
430
431         /* CSTORM */
432         last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
433                            CSTORM_ASSERT_LIST_INDEX_OFFSET);
434         if (last_idx)
435                 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
436
437         /* print the asserts */
438         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
439
440                 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
441                               CSTORM_ASSERT_LIST_OFFSET(i));
442                 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
443                               CSTORM_ASSERT_LIST_OFFSET(i) + 4);
444                 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
445                               CSTORM_ASSERT_LIST_OFFSET(i) + 8);
446                 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
447                               CSTORM_ASSERT_LIST_OFFSET(i) + 12);
448
449                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
450                         BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
451                                   " 0x%08x 0x%08x 0x%08x\n",
452                                   i, row3, row2, row1, row0);
453                         rc++;
454                 } else {
455                         break;
456                 }
457         }
458
459         /* USTORM */
460         last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
461                            USTORM_ASSERT_LIST_INDEX_OFFSET);
462         if (last_idx)
463                 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
464
465         /* print the asserts */
466         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
467
468                 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
469                               USTORM_ASSERT_LIST_OFFSET(i));
470                 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
471                               USTORM_ASSERT_LIST_OFFSET(i) + 4);
472                 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
473                               USTORM_ASSERT_LIST_OFFSET(i) + 8);
474                 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
475                               USTORM_ASSERT_LIST_OFFSET(i) + 12);
476
477                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
478                         BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
479                                   " 0x%08x 0x%08x 0x%08x\n",
480                                   i, row3, row2, row1, row0);
481                         rc++;
482                 } else {
483                         break;
484                 }
485         }
486
487         return rc;
488 }
489
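/* Print the MCP (firmware) trace buffer from the scratchpad to the kernel
 * log, wrapping around the last-written mark.
 */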
490 static void bnx2x_fw_dump(struct bnx2x *bp)
491 {
492         u32 mark, offset;
493         __be32 data[9];
494         int word;
495
496         mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
497         mark = ((mark + 0x3) & ~0x3);
498         printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);
499
500         printk(KERN_ERR PFX);
501         for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
502                 for (word = 0; word < 8; word++)
503                         data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
504                                                   offset + 4*word));
505                 data[8] = 0x0;
506                 printk(KERN_CONT "%s", (char *)data);
507         }
508         for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
509                 for (word = 0; word < 8; word++)
510                         data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
511                                                   offset + 4*word));
512                 data[8] = 0x0;
513                 printk(KERN_CONT "%s", (char *)data);
514         }
515         printk(KERN_ERR PFX "end of fw dump\n");
516 }
517
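/* Crash dump: print status block indices, per-queue Rx/Tx indices, ring
 * contents around the current consumers, the firmware trace and the storm
 * assert lists.
 */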
518 static void bnx2x_panic_dump(struct bnx2x *bp)
519 {
520         int i;
521         u16 j, start, end;
522
523         bp->stats_state = STATS_STATE_DISABLED;
524         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
525
526         BNX2X_ERR("begin crash dump -----------------\n");
527
528         /* Indices */
529         /* Common */
530         BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
531                   "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
532                   "  spq_prod_idx(%u)\n",
533                   bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
534                   bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
535
536         /* Rx */
537         for_each_rx_queue(bp, i) {
538                 struct bnx2x_fastpath *fp = &bp->fp[i];
539
540                 BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
541                           "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
542                           "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
543                           i, fp->rx_bd_prod, fp->rx_bd_cons,
544                           le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
545                           fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
546                 BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
547                           "  fp_u_idx(%x) *sb_u_idx(%x)\n",
548                           fp->rx_sge_prod, fp->last_max_sge,
549                           le16_to_cpu(fp->fp_u_idx),
550                           fp->status_blk->u_status_block.status_block_index);
551         }
552
553         /* Tx */
554         for_each_tx_queue(bp, i) {
555                 struct bnx2x_fastpath *fp = &bp->fp[i];
556
557                 BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
558                           "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
559                           i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
560                           fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
561                 BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
562                           "  tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
563                           fp->status_blk->c_status_block.status_block_index,
564                           fp->tx_db.data.prod);
565         }
566
567         /* Rings */
568         /* Rx */
569         for_each_rx_queue(bp, i) {
570                 struct bnx2x_fastpath *fp = &bp->fp[i];
571
572                 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
573                 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
574                 for (j = start; j != end; j = RX_BD(j + 1)) {
575                         u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
576                         struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
577
578                         BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
579                                   i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
580                 }
581
582                 start = RX_SGE(fp->rx_sge_prod);
583                 end = RX_SGE(fp->last_max_sge);
584                 for (j = start; j != end; j = RX_SGE(j + 1)) {
585                         u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
586                         struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
587
588                         BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
589                                   i, j, rx_sge[1], rx_sge[0], sw_page->page);
590                 }
591
592                 start = RCQ_BD(fp->rx_comp_cons - 10);
593                 end = RCQ_BD(fp->rx_comp_cons + 503);
594                 for (j = start; j != end; j = RCQ_BD(j + 1)) {
595                         u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
596
597                         BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
598                                   i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
599                 }
600         }
601
602         /* Tx */
603         for_each_tx_queue(bp, i) {
604                 struct bnx2x_fastpath *fp = &bp->fp[i];
605
606                 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
607                 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
608                 for (j = start; j != end; j = TX_BD(j + 1)) {
609                         struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
610
611                         BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
612                                   i, j, sw_bd->skb, sw_bd->first_bd);
613                 }
614
615                 start = TX_BD(fp->tx_bd_cons - 10);
616                 end = TX_BD(fp->tx_bd_cons + 254);
617                 for (j = start; j != end; j = TX_BD(j + 1)) {
618                         u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
619
620                         BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
621                                   i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
622                 }
623         }
624
625         bnx2x_fw_dump(bp);
626         bnx2x_mc_assert(bp);
627         BNX2X_ERR("end crash dump -----------------\n");
628 }
629
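/* Program the HC block for the active interrupt mode (MSI-X, MSI or INTx)
 * and, on E1H, set up the leading/trailing edge attention masks.
 */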
630 static void bnx2x_int_enable(struct bnx2x *bp)
631 {
632         int port = BP_PORT(bp);
633         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
634         u32 val = REG_RD(bp, addr);
635         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
636         int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
637
638         if (msix) {
639                 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
640                          HC_CONFIG_0_REG_INT_LINE_EN_0);
641                 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
642                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
643         } else if (msi) {
644                 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
645                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
646                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
647                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
648         } else {
649                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
650                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
651                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
652                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
653
654                 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
655                    val, port, addr);
656
657                 REG_WR(bp, addr, val);
658
659                 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
660         }
661
662         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
663            val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
664
665         REG_WR(bp, addr, val);
666         /*
667          * Ensure that HC_CONFIG is written before leading/trailing edge config
668          */
669         mmiowb();
670         barrier();
671
672         if (CHIP_IS_E1H(bp)) {
673                 /* init leading/trailing edge */
674                 if (IS_E1HMF(bp)) {
675                         val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
676                         if (bp->port.pmf)
677                                 /* enable nig and gpio3 attention */
678                                 val |= 0x1100;
679                 } else
680                         val = 0xffff;
681
682                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
683                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
684         }
685
686         /* Make sure that interrupts are indeed enabled from here on */
687         mmiowb();
688 }
689
690 static void bnx2x_int_disable(struct bnx2x *bp)
691 {
692         int port = BP_PORT(bp);
693         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
694         u32 val = REG_RD(bp, addr);
695
696         val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
697                  HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
698                  HC_CONFIG_0_REG_INT_LINE_EN_0 |
699                  HC_CONFIG_0_REG_ATTN_BIT_EN_0);
700
701         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
702            val, port, addr);
703
704         /* flush all outstanding writes */
705         mmiowb();
706
707         REG_WR(bp, addr, val);
708         if (REG_RD(bp, addr) != val)
709                 BNX2X_ERR("BUG! proper val not read from IGU!\n");
710
711 }
712
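/* Stop interrupt handling: optionally mask interrupts in the HC, wait for
 * all ISRs to complete and make sure the slowpath task is not running.
 */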
713 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
714 {
715         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
716         int i, offset;
717
718         /* disable interrupt handling */
719         atomic_inc(&bp->intr_sem);
720         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
721
722         if (disable_hw)
723                 /* prevent the HW from sending interrupts */
724                 bnx2x_int_disable(bp);
725
726         /* make sure all ISRs are done */
727         if (msix) {
728                 synchronize_irq(bp->msix_table[0].vector);
729                 offset = 1;
730                 for_each_queue(bp, i)
731                         synchronize_irq(bp->msix_table[i + offset].vector);
732         } else
733                 synchronize_irq(bp->pdev->irq);
734
735         /* make sure sp_task is not running */
736         cancel_delayed_work(&bp->sp_task);
737         flush_workqueue(bnx2x_wq);
738 }
739
740 /* fast path */
741
742 /*
743  * General service functions
744  */
745
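/* Acknowledge a status block to the IGU: write the new index for the given
 * storm together with the update flag and interrupt mode to the INT_ACK
 * command register.
 */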
746 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
747                                 u8 storm, u16 index, u8 op, u8 update)
748 {
749         u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
750                        COMMAND_REG_INT_ACK);
751         struct igu_ack_register igu_ack;
752
753         igu_ack.status_block_index = index;
754         igu_ack.sb_id_and_flags =
755                         ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
756                          (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
757                          (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
758                          (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
759
760         DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
761            (*(u32 *)&igu_ack), hc_addr);
762         REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
763
764         /* Make sure that ACK is written */
765         mmiowb();
766         barrier();
767 }
768
769 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
770 {
771         struct host_status_block *fpsb = fp->status_blk;
772         u16 rc = 0;
773
774         barrier(); /* status block is written to by the chip */
775         if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
776                 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
777                 rc |= 1;
778         }
779         if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
780                 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
781                 rc |= 2;
782         }
783         return rc;
784 }
785
786 static u16 bnx2x_ack_int(struct bnx2x *bp)
787 {
788         u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
789                        COMMAND_REG_SIMD_MASK);
790         u32 result = REG_RD(bp, hc_addr);
791
792         DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
793            result, hc_addr);
794
795         return result;
796 }
797
798
799 /*
800  * fast path service functions
801  */
802
803 static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
804 {
805         /* Tell compiler that consumer and producer can change */
806         barrier();
807         return (fp->tx_pkt_prod != fp->tx_pkt_cons);
808 }
809
810 /* free skb in the packet ring at pos idx
811  * return idx of last bd freed
812  */
813 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
814                              u16 idx)
815 {
816         struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
817         struct eth_tx_start_bd *tx_start_bd;
818         struct eth_tx_bd *tx_data_bd;
819         struct sk_buff *skb = tx_buf->skb;
820         u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
821         int nbd;
822
823         DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
824            idx, tx_buf, skb);
825
826         /* unmap first bd */
827         DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
828         tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
829         pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
830                          BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
831
832         nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
833 #ifdef BNX2X_STOP_ON_ERROR
834         if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
835                 BNX2X_ERR("BAD nbd!\n");
836                 bnx2x_panic();
837         }
838 #endif
839         new_cons = nbd + tx_buf->first_bd;
840
841         /* Get the next bd */
842         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
843
844         /* Skip a parse bd... */
845         --nbd;
846         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
847
848         /* ...and the TSO split header bd since they have no mapping */
849         if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
850                 --nbd;
851                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
852         }
853
854         /* now free frags */
855         while (nbd > 0) {
856
857                 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
858                 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
859                 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
860                                BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
861                 if (--nbd)
862                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
863         }
864
865         /* release skb */
866         WARN_ON(!skb);
867         dev_kfree_skb_any(skb);
868         tx_buf->first_bd = 0;
869         tx_buf->skb = NULL;
870
871         return new_cons;
872 }
873
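/* Number of Tx BDs still available; the "next page" BDs are counted as
 * used so they act as a reservation.
 */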
874 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
875 {
876         s16 used;
877         u16 prod;
878         u16 cons;
879
880         barrier(); /* Tell compiler that prod and cons can change */
881         prod = fp->tx_bd_prod;
882         cons = fp->tx_bd_cons;
883
884         /* NUM_TX_RINGS = number of "next-page" entries;
885            it will be used as a threshold */
886         used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
887
888 #ifdef BNX2X_STOP_ON_ERROR
889         WARN_ON(used < 0);
890         WARN_ON(used > fp->bp->tx_ring_size);
891         WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
892 #endif
893
894         return (s16)(fp->bp->tx_ring_size) - used;
895 }
896
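/* Tx completion: free the BDs and skbs of all packets reported done by the
 * status block and wake the Tx queue if it was stopped and enough room has
 * been freed.
 */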
897 static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
898 {
899         struct bnx2x *bp = fp->bp;
900         struct netdev_queue *txq;
901         u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
902         int done = 0;
903
904 #ifdef BNX2X_STOP_ON_ERROR
905         if (unlikely(bp->panic))
906                 return;
907 #endif
908
909         txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
910         hw_cons = le16_to_cpu(*fp->tx_cons_sb);
911         sw_cons = fp->tx_pkt_cons;
912
913         while (sw_cons != hw_cons) {
914                 u16 pkt_cons;
915
916                 pkt_cons = TX_BD(sw_cons);
917
918                 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
919
920                 DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
921                    hw_cons, sw_cons, pkt_cons);
922
923 /*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
924                         rmb();
925                         prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
926                 }
927 */
928                 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
929                 sw_cons++;
930                 done++;
931         }
932
933         fp->tx_pkt_cons = sw_cons;
934         fp->tx_bd_cons = bd_cons;
935
936         /* TBD need a thresh? */
937         if (unlikely(netif_tx_queue_stopped(txq))) {
938
939                 /* Need to make the tx_bd_cons update visible to start_xmit()
940                  * before checking for netif_tx_queue_stopped().  Without the
941                  * memory barrier, there is a small possibility that
942                  * start_xmit() will miss it and cause the queue to be stopped
943                  * forever.
944                  */
945                 smp_mb();
946
947                 if ((netif_tx_queue_stopped(txq)) &&
948                     (bp->state == BNX2X_STATE_OPEN) &&
949                     (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
950                         netif_tx_wake_queue(txq);
951         }
952 }
953
954
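/* Handle a slowpath (ramrod) completion reported on a fastpath CQ and
 * advance the queue/driver state machine accordingly.
 */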
955 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
956                            union eth_rx_cqe *rr_cqe)
957 {
958         struct bnx2x *bp = fp->bp;
959         int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
960         int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
961
962         DP(BNX2X_MSG_SP,
963            "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
964            fp->index, cid, command, bp->state,
965            rr_cqe->ramrod_cqe.ramrod_type);
966
967         bp->spq_left++;
968
969         if (fp->index) {
970                 switch (command | fp->state) {
971                 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
972                                                 BNX2X_FP_STATE_OPENING):
973                         DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
974                            cid);
975                         fp->state = BNX2X_FP_STATE_OPEN;
976                         break;
977
978                 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
979                         DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
980                            cid);
981                         fp->state = BNX2X_FP_STATE_HALTED;
982                         break;
983
984                 default:
985                         BNX2X_ERR("unexpected MC reply (%d)  "
986                                   "fp->state is %x\n", command, fp->state);
987                         break;
988                 }
989                 mb(); /* force bnx2x_wait_ramrod() to see the change */
990                 return;
991         }
992
993         switch (command | bp->state) {
994         case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
995                 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
996                 bp->state = BNX2X_STATE_OPEN;
997                 break;
998
999         case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
1000                 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
1001                 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
1002                 fp->state = BNX2X_FP_STATE_HALTED;
1003                 break;
1004
1005         case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
1006                 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
1007                 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
1008                 break;
1009
1010
1011         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
1012         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
1013                 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
1014                 bp->set_mac_pending = 0;
1015                 break;
1016
1017         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
1018         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED):
1019                 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
1020                 break;
1021
1022         default:
1023                 BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
1024                           command, bp->state);
1025                 break;
1026         }
1027         mb(); /* force bnx2x_wait_ramrod() to see the change */
1028 }
1029
1030 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1031                                      struct bnx2x_fastpath *fp, u16 index)
1032 {
1033         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1034         struct page *page = sw_buf->page;
1035         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1036
1037         /* Skip "next page" elements */
1038         if (!page)
1039                 return;
1040
1041         pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
1042                        SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1043         __free_pages(page, PAGES_PER_SGE_SHIFT);
1044
1045         sw_buf->page = NULL;
1046         sge->addr_hi = 0;
1047         sge->addr_lo = 0;
1048 }
1049
1050 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1051                                            struct bnx2x_fastpath *fp, int last)
1052 {
1053         int i;
1054
1055         for (i = 0; i < last; i++)
1056                 bnx2x_free_rx_sge(bp, fp, i);
1057 }
1058
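/* Allocate and map an SGE page (compound page of PAGES_PER_SGE pages) and
 * point the Rx SGE descriptor at it.
 */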
1059 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1060                                      struct bnx2x_fastpath *fp, u16 index)
1061 {
1062         struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1063         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1064         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1065         dma_addr_t mapping;
1066
1067         if (unlikely(page == NULL))
1068                 return -ENOMEM;
1069
1070         mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
1071                                PCI_DMA_FROMDEVICE);
1072         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1073                 __free_pages(page, PAGES_PER_SGE_SHIFT);
1074                 return -ENOMEM;
1075         }
1076
1077         sw_buf->page = page;
1078         pci_unmap_addr_set(sw_buf, mapping, mapping);
1079
1080         sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1081         sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1082
1083         return 0;
1084 }
1085
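/* Allocate and map a new Rx skb and point the Rx BD at it. */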
1086 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1087                                      struct bnx2x_fastpath *fp, u16 index)
1088 {
1089         struct sk_buff *skb;
1090         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1091         struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1092         dma_addr_t mapping;
1093
1094         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1095         if (unlikely(skb == NULL))
1096                 return -ENOMEM;
1097
1098         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
1099                                  PCI_DMA_FROMDEVICE);
1100         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1101                 dev_kfree_skb(skb);
1102                 return -ENOMEM;
1103         }
1104
1105         rx_buf->skb = skb;
1106         pci_unmap_addr_set(rx_buf, mapping, mapping);
1107
1108         rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1109         rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1110
1111         return 0;
1112 }
1113
1114 /* Note that we are not allocating a new skb:
1115  * we are just moving one from cons to prod.
1116  * We are not creating a new mapping,
1117  * so there is no need to check for dma_mapping_error().
1118  */
1119 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1120                                struct sk_buff *skb, u16 cons, u16 prod)
1121 {
1122         struct bnx2x *bp = fp->bp;
1123         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1124         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1125         struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1126         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1127
1128         pci_dma_sync_single_for_device(bp->pdev,
1129                                        pci_unmap_addr(cons_rx_buf, mapping),
1130                                        RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1131
1132         prod_rx_buf->skb = cons_rx_buf->skb;
1133         pci_unmap_addr_set(prod_rx_buf, mapping,
1134                            pci_unmap_addr(cons_rx_buf, mapping));
1135         *prod_bd = *cons_bd;
1136 }
1137
1138 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1139                                              u16 idx)
1140 {
1141         u16 last_max = fp->last_max_sge;
1142
1143         if (SUB_S16(idx, last_max) > 0)
1144                 fp->last_max_sge = idx;
1145 }
1146
1147 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1148 {
1149         int i, j;
1150
1151         for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1152                 int idx = RX_SGE_CNT * i - 1;
1153
1154                 for (j = 0; j < 2; j++) {
1155                         SGE_MASK_CLEAR_BIT(fp, idx);
1156                         idx--;
1157                 }
1158         }
1159 }
1160
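/* Mark the SGE entries consumed by an aggregated packet in the SGE mask and
 * advance the SGE producer over mask elements that are fully consumed.
 */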
1161 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1162                                   struct eth_fast_path_rx_cqe *fp_cqe)
1163 {
1164         struct bnx2x *bp = fp->bp;
1165         u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1166                                      le16_to_cpu(fp_cqe->len_on_bd)) >>
1167                       SGE_PAGE_SHIFT;
1168         u16 last_max, last_elem, first_elem;
1169         u16 delta = 0;
1170         u16 i;
1171
1172         if (!sge_len)
1173                 return;
1174
1175         /* First mark all used pages */
1176         for (i = 0; i < sge_len; i++)
1177                 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1178
1179         DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1180            sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1181
1182         /* Here we assume that the last SGE index is the biggest */
1183         prefetch((void *)(fp->sge_mask));
1184         bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1185
1186         last_max = RX_SGE(fp->last_max_sge);
1187         last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1188         first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1189
1190         /* If ring is not full */
1191         if (last_elem + 1 != first_elem)
1192                 last_elem++;
1193
1194         /* Now update the prod */
1195         for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1196                 if (likely(fp->sge_mask[i]))
1197                         break;
1198
1199                 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1200                 delta += RX_SGE_MASK_ELEM_SZ;
1201         }
1202
1203         if (delta > 0) {
1204                 fp->rx_sge_prod += delta;
1205                 /* clear page-end entries */
1206                 bnx2x_clear_sge_mask_next_elems(fp);
1207         }
1208
1209         DP(NETIF_MSG_RX_STATUS,
1210            "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
1211            fp->last_max_sge, fp->rx_sge_prod);
1212 }
1213
1214 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1215 {
1216         /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1217         memset(fp->sge_mask, 0xff,
1218                (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1219
1220         /* Clear the two last indices in the page to 1:
1221            these are the indices that correspond to the "next" element,
1222            hence will never be indicated and should be removed from
1223            the calculations. */
1224         bnx2x_clear_sge_mask_next_elems(fp);
1225 }
1226
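/* Start a TPA aggregation: move an empty skb from the per-queue pool onto
 * the producer BD and park the consumer's skb (holding the start of the
 * aggregated packet) in the pool until the aggregation stops.
 */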
1227 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1228                             struct sk_buff *skb, u16 cons, u16 prod)
1229 {
1230         struct bnx2x *bp = fp->bp;
1231         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1232         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1233         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1234         dma_addr_t mapping;
1235
1236         /* move empty skb from pool to prod and map it */
1237         prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1238         mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1239                                  bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1240         pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1241
1242         /* move partial skb from cons to pool (don't unmap yet) */
1243         fp->tpa_pool[queue] = *cons_rx_buf;
1244
1245         /* mark bin state as start - print error if current state != stop */
1246         if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1247                 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1248
1249         fp->tpa_state[queue] = BNX2X_TPA_START;
1250
1251         /* point prod_bd to new skb */
1252         prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1253         prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1254
1255 #ifdef BNX2X_STOP_ON_ERROR
1256         fp->tpa_queue_used |= (1 << queue);
1257 #ifdef __powerpc64__
1258         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1259 #else
1260         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1261 #endif
1262            fp->tpa_queue_used);
1263 #endif
1264 }
1265
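/* Attach the SGE pages listed in the CQE to the aggregated skb as page
 * fragments, allocating a replacement page for each SGE slot consumed.
 */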
1266 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1267                                struct sk_buff *skb,
1268                                struct eth_fast_path_rx_cqe *fp_cqe,
1269                                u16 cqe_idx)
1270 {
1271         struct sw_rx_page *rx_pg, old_rx_pg;
1272         u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1273         u32 i, frag_len, frag_size, pages;
1274         int err;
1275         int j;
1276
1277         frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1278         pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1279
1280         /* This is needed in order to enable forwarding support */
1281         if (frag_size)
1282                 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1283                                                max(frag_size, (u32)len_on_bd));
1284
1285 #ifdef BNX2X_STOP_ON_ERROR
1286         if (pages >
1287             min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
1288                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1289                           pages, cqe_idx);
1290                 BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
1291                           fp_cqe->pkt_len, len_on_bd);
1292                 bnx2x_panic();
1293                 return -EINVAL;
1294         }
1295 #endif
1296
1297         /* Run through the SGL and compose the fragmented skb */
1298         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1299                 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1300
1301                 /* FW gives the indices of the SGE as if the ring is an array
1302                    (meaning that "next" element will consume 2 indices) */
1303                 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1304                 rx_pg = &fp->rx_page_ring[sge_idx];
1305                 old_rx_pg = *rx_pg;
1306
1307                 /* If we fail to allocate a substitute page, we simply stop
1308                    where we are and drop the whole packet */
1309                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1310                 if (unlikely(err)) {
1311                         fp->eth_q_stats.rx_skb_alloc_failed++;
1312                         return err;
1313                 }
1314
1315                 /* Unmap the page as we're going to pass it to the stack */
1316                 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1317                               SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1318
1319                 /* Add one frag and update the appropriate fields in the skb */
1320                 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1321
1322                 skb->data_len += frag_len;
1323                 skb->truesize += frag_len;
1324                 skb->len += frag_len;
1325
1326                 frag_size -= frag_len;
1327         }
1328
1329         return 0;
1330 }
1331
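/* Complete a TPA aggregation: fix the IP checksum, attach the SGE fragments
 * and pass the skb to the stack, replacing the pool entry with a freshly
 * allocated skb. The packet is dropped if any allocation fails.
 */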
1332 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1333                            u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1334                            u16 cqe_idx)
1335 {
1336         struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1337         struct sk_buff *skb = rx_buf->skb;
1338         /* alloc new skb */
1339         struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1340
1341         /* Unmap skb in the pool anyway, as we are going to change
1342            pool entry status to BNX2X_TPA_STOP even if new skb allocation
1343            fails. */
1344         pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1345                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1346
1347         if (likely(new_skb)) {
1348                 /* fix ip xsum and give it to the stack */
1349                 /* (no need to map the new skb) */
1350 #ifdef BCM_VLAN
1351                 int is_vlan_cqe =
1352                         (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1353                          PARSING_FLAGS_VLAN);
1354                 int is_not_hwaccel_vlan_cqe =
1355                         (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1356 #endif
1357
1358                 prefetch(skb);
1359                 prefetch(((char *)(skb)) + 128);
1360
1361 #ifdef BNX2X_STOP_ON_ERROR
1362                 if (pad + len > bp->rx_buf_size) {
1363                         BNX2X_ERR("skb_put is about to fail...  "
1364                                   "pad %d  len %d  rx_buf_size %d\n",
1365                                   pad, len, bp->rx_buf_size);
1366                         bnx2x_panic();
1367                         return;
1368                 }
1369 #endif
1370
1371                 skb_reserve(skb, pad);
1372                 skb_put(skb, len);
1373
1374                 skb->protocol = eth_type_trans(skb, bp->dev);
1375                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1376
1377                 {
1378                         struct iphdr *iph;
1379
1380                         iph = (struct iphdr *)skb->data;
1381 #ifdef BCM_VLAN
1382                         /* If there is no Rx VLAN offloading -
1383                            take VLAN tag into an account */
1384                         if (unlikely(is_not_hwaccel_vlan_cqe))
1385                                 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1386 #endif
1387                         iph->check = 0;
1388                         iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1389                 }
1390
1391                 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1392                                          &cqe->fast_path_cqe, cqe_idx)) {
1393 #ifdef BCM_VLAN
1394                         if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1395                             (!is_not_hwaccel_vlan_cqe))
1396                                 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1397                                                 le16_to_cpu(cqe->fast_path_cqe.
1398                                                             vlan_tag));
1399                         else
1400 #endif
1401                                 netif_receive_skb(skb);
1402                 } else {
1403                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1404                            " - dropping packet!\n");
1405                         dev_kfree_skb(skb);
1406                 }
1407
1408
1409                 /* put new skb in bin */
1410                 fp->tpa_pool[queue].skb = new_skb;
1411
1412         } else {
1413                 /* else drop the packet and keep the buffer in the bin */
1414                 DP(NETIF_MSG_RX_STATUS,
1415                    "Failed to allocate new skb - dropping packet!\n");
1416                 fp->eth_q_stats.rx_skb_alloc_failed++;
1417         }
1418
1419         fp->tpa_state[queue] = BNX2X_TPA_STOP;
1420 }
1421
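/* Publish the Rx BD, CQE and SGE producers to USTORM internal memory so
 * the FW can start using the newly posted buffers.
 */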
1422 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1423                                         struct bnx2x_fastpath *fp,
1424                                         u16 bd_prod, u16 rx_comp_prod,
1425                                         u16 rx_sge_prod)
1426 {
1427         struct ustorm_eth_rx_producers rx_prods = {0};
1428         int i;
1429
1430         /* Update producers */
1431         rx_prods.bd_prod = bd_prod;
1432         rx_prods.cqe_prod = rx_comp_prod;
1433         rx_prods.sge_prod = rx_sge_prod;
1434
1435         /*
1436          * Make sure that the BD and SGE data is updated before updating the
1437          * producers since FW might read the BD/SGE right after the producer
1438          * is updated.
1439          * This is only applicable for weak-ordered memory model archs such
1440          * as IA-64. The following barrier is also mandatory since the FW
1441          * assumes that BDs have buffers.
1442          */
1443         wmb();
1444
1445         for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1446                 REG_WR(bp, BAR_USTRORM_INTMEM +
1447                        USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1448                        ((u32 *)&rx_prods)[i]);
1449
1450         mmiowb(); /* keep prod updates ordered */
1451
1452         DP(NETIF_MSG_RX_STATUS,
1453            "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
1454            fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1455 }
1456
1457 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1458 {
1459         struct bnx2x *bp = fp->bp;
1460         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1461         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1462         int rx_pkt = 0;
1463
1464 #ifdef BNX2X_STOP_ON_ERROR
1465         if (unlikely(bp->panic))
1466                 return 0;
1467 #endif
1468
1469         /* The CQ "next element" is the same size as a regular element,
1470            which is why it is OK here */
1471         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1472         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1473                 hw_comp_cons++;
1474
1475         bd_cons = fp->rx_bd_cons;
1476         bd_prod = fp->rx_bd_prod;
1477         bd_prod_fw = bd_prod;
1478         sw_comp_cons = fp->rx_comp_cons;
1479         sw_comp_prod = fp->rx_comp_prod;
1480
1481         /* Memory barrier necessary as speculative reads of the rx
1482          * buffer can be ahead of the index in the status block
1483          */
1484         rmb();
1485
1486         DP(NETIF_MSG_RX_STATUS,
1487            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1488            fp->index, hw_comp_cons, sw_comp_cons);
1489
1490         while (sw_comp_cons != hw_comp_cons) {
1491                 struct sw_rx_bd *rx_buf = NULL;
1492                 struct sk_buff *skb;
1493                 union eth_rx_cqe *cqe;
1494                 u8 cqe_fp_flags;
1495                 u16 len, pad;
1496
1497                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1498                 bd_prod = RX_BD(bd_prod);
1499                 bd_cons = RX_BD(bd_cons);
1500
1501                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1502                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1503
1504                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1505                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1506                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1507                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1508                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1509                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1510
1511                 /* is this a slowpath msg? */
1512                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1513                         bnx2x_sp_event(fp, cqe);
1514                         goto next_cqe;
1515
1516                 /* this is an rx packet */
1517                 } else {
1518                         rx_buf = &fp->rx_buf_ring[bd_cons];
1519                         skb = rx_buf->skb;
1520                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1521                         pad = cqe->fast_path_cqe.placement_offset;
1522
1523                         /* If CQE is marked both TPA_START and TPA_END
1524                            it is a non-TPA CQE */
1525                         if ((!fp->disable_tpa) &&
1526                             (TPA_TYPE(cqe_fp_flags) !=
1527                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1528                                 u16 queue = cqe->fast_path_cqe.queue_index;
1529
1530                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1531                                         DP(NETIF_MSG_RX_STATUS,
1532                                            "calling tpa_start on queue %d\n",
1533                                            queue);
1534
1535                                         bnx2x_tpa_start(fp, queue, skb,
1536                                                         bd_cons, bd_prod);
1537                                         goto next_rx;
1538                                 }
1539
1540                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1541                                         DP(NETIF_MSG_RX_STATUS,
1542                                            "calling tpa_stop on queue %d\n",
1543                                            queue);
1544
1545                                         if (!BNX2X_RX_SUM_FIX(cqe))
1546                                                 BNX2X_ERR("STOP on non-TCP "
1547                                                           "data\n");
1548
1549                                         /* This is the size of the linear data
1550                                            on this skb */
1551                                         len = le16_to_cpu(cqe->fast_path_cqe.
1552                                                                 len_on_bd);
1553                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1554                                                     len, cqe, comp_ring_cons);
1555 #ifdef BNX2X_STOP_ON_ERROR
1556                                         if (bp->panic)
1557                                                 return 0;
1558 #endif
1559
1560                                         bnx2x_update_sge_prod(fp,
1561                                                         &cqe->fast_path_cqe);
1562                                         goto next_cqe;
1563                                 }
1564                         }
1565
1566                         pci_dma_sync_single_for_device(bp->pdev,
1567                                         pci_unmap_addr(rx_buf, mapping),
1568                                                        pad + RX_COPY_THRESH,
1569                                                        PCI_DMA_FROMDEVICE);
1570                         prefetch(skb);
1571                         prefetch(((char *)(skb)) + 128);
1572
1573                         /* is this an error packet? */
1574                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1575                                 DP(NETIF_MSG_RX_ERR,
1576                                    "ERROR  flags %x  rx packet %u\n",
1577                                    cqe_fp_flags, sw_comp_cons);
1578                                 fp->eth_q_stats.rx_err_discard_pkt++;
1579                                 goto reuse_rx;
1580                         }
1581
1582                         /* Since we don't have a jumbo ring
1583                          * copy small packets if mtu > 1500
1584                          */
1585                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1586                             (len <= RX_COPY_THRESH)) {
1587                                 struct sk_buff *new_skb;
1588
1589                                 new_skb = netdev_alloc_skb(bp->dev,
1590                                                            len + pad);
1591                                 if (new_skb == NULL) {
1592                                         DP(NETIF_MSG_RX_ERR,
1593                                            "ERROR  packet dropped "
1594                                            "because of alloc failure\n");
1595                                         fp->eth_q_stats.rx_skb_alloc_failed++;
1596                                         goto reuse_rx;
1597                                 }
1598
1599                                 /* aligned copy */
1600                                 skb_copy_from_linear_data_offset(skb, pad,
1601                                                     new_skb->data + pad, len);
1602                                 skb_reserve(new_skb, pad);
1603                                 skb_put(new_skb, len);
1604
1605                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1606
1607                                 skb = new_skb;
1608
1609                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1610                                 pci_unmap_single(bp->pdev,
1611                                         pci_unmap_addr(rx_buf, mapping),
1612                                                  bp->rx_buf_size,
1613                                                  PCI_DMA_FROMDEVICE);
1614                                 skb_reserve(skb, pad);
1615                                 skb_put(skb, len);
1616
1617                         } else {
1618                                 DP(NETIF_MSG_RX_ERR,
1619                                    "ERROR  packet dropped because "
1620                                    "of alloc failure\n");
1621                                 fp->eth_q_stats.rx_skb_alloc_failed++;
1622 reuse_rx:
1623                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1624                                 goto next_rx;
1625                         }
1626
1627                         skb->protocol = eth_type_trans(skb, bp->dev);
1628
1629                         skb->ip_summed = CHECKSUM_NONE;
1630                         if (bp->rx_csum) {
1631                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1632                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1633                                 else
1634                                         fp->eth_q_stats.hw_csum_err++;
1635                         }
1636                 }
1637
1638                 skb_record_rx_queue(skb, fp->index);
1639 #ifdef BCM_VLAN
1640                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1641                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1642                      PARSING_FLAGS_VLAN))
1643                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1644                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1645                 else
1646 #endif
1647                         netif_receive_skb(skb);
1648
1649
1650 next_rx:
1651                 rx_buf->skb = NULL;
1652
1653                 bd_cons = NEXT_RX_IDX(bd_cons);
1654                 bd_prod = NEXT_RX_IDX(bd_prod);
1655                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1656                 rx_pkt++;
1657 next_cqe:
1658                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1659                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1660
1661                 if (rx_pkt == budget)
1662                         break;
1663         } /* while */
1664
1665         fp->rx_bd_cons = bd_cons;
1666         fp->rx_bd_prod = bd_prod_fw;
1667         fp->rx_comp_cons = sw_comp_cons;
1668         fp->rx_comp_prod = sw_comp_prod;
1669
1670         /* Update producers */
1671         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1672                              fp->rx_sge_prod);
1673
1674         fp->rx_pkt += rx_pkt;
1675         fp->rx_calls++;
1676
1677         return rx_pkt;
1678 }
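
/*
 * Hedged usage sketch (illustrative, not the driver's actual poll
 * routine): bnx2x_rx_int() is meant to be driven with a NAPI budget,
 * roughly as in
 *
 *	work_done = bnx2x_rx_int(fp, budget);
 *	if (work_done < budget) {
 *		napi_complete(napi);
 *		... re-enable this queue's status block interrupt ...
 *	}
 *
 * and it returns the number of packets processed so the poll loop can
 * decide whether to stay scheduled.
 */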
1679
1680 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1681 {
1682         struct bnx2x_fastpath *fp = fp_cookie;
1683         struct bnx2x *bp = fp->bp;
1684
1685         /* Return here if interrupt is disabled */
1686         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1687                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1688                 return IRQ_HANDLED;
1689         }
1690
1691         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1692            fp->index, fp->sb_id);
1693         bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1694
1695 #ifdef BNX2X_STOP_ON_ERROR
1696         if (unlikely(bp->panic))
1697                 return IRQ_HANDLED;
1698 #endif
1699         /* Handle Rx or Tx according to MSI-X vector */
1700         if (fp->is_rx_queue) {
1701                 prefetch(fp->rx_cons_sb);
1702                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1703
1704                 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1705
1706         } else {
1707                 prefetch(fp->tx_cons_sb);
1708                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1709
1710                 bnx2x_update_fpsb_idx(fp);
1711                 rmb();
1712                 bnx2x_tx_int(fp);
1713
1714                 /* Re-enable interrupts */
1715                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1716                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
1717                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1718                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
1719         }
1720
1721         return IRQ_HANDLED;
1722 }
1723
1724 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1725 {
1726         struct bnx2x *bp = netdev_priv(dev_instance);
1727         u16 status = bnx2x_ack_int(bp);
1728         u16 mask;
1729         int i;
1730
1731         /* Return here if interrupt is shared and it's not for us */
1732         if (unlikely(status == 0)) {
1733                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1734                 return IRQ_NONE;
1735         }
1736         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1737
1738         /* Return here if interrupt is disabled */
1739         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1740                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1741                 return IRQ_HANDLED;
1742         }
1743
1744 #ifdef BNX2X_STOP_ON_ERROR
1745         if (unlikely(bp->panic))
1746                 return IRQ_HANDLED;
1747 #endif
1748
1749         for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1750                 struct bnx2x_fastpath *fp = &bp->fp[i];
1751
1752                 mask = 0x2 << fp->sb_id;
1753                 if (status & mask) {
1754                         /* Handle Rx or Tx according to SB id */
1755                         if (fp->is_rx_queue) {
1756                                 prefetch(fp->rx_cons_sb);
1757                                 prefetch(&fp->status_blk->u_status_block.
1758                                                         status_block_index);
1759
1760                                 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1761
1762                         } else {
1763                                 prefetch(fp->tx_cons_sb);
1764                                 prefetch(&fp->status_blk->c_status_block.
1765                                                         status_block_index);
1766
1767                                 bnx2x_update_fpsb_idx(fp);
1768                                 rmb();
1769                                 bnx2x_tx_int(fp);
1770
1771                                 /* Re-enable interrupts */
1772                                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1773                                              le16_to_cpu(fp->fp_u_idx),
1774                                              IGU_INT_NOP, 1);
1775                                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1776                                              le16_to_cpu(fp->fp_c_idx),
1777                                              IGU_INT_ENABLE, 1);
1778                         }
1779                         status &= ~mask;
1780                 }
1781         }
1782
1783
1784         if (unlikely(status & 0x1)) {
1785                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1786
1787                 status &= ~0x1;
1788                 if (!status)
1789                         return IRQ_HANDLED;
1790         }
1791
1792         if (status)
1793                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1794                    status);
1795
1796         return IRQ_HANDLED;
1797 }
1798
1799 /* end of fast path */
1800
1801 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1802
1803 /* Link */
1804
1805 /*
1806  * General service functions
1807  */
1808
1809 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1810 {
1811         u32 lock_status;
1812         u32 resource_bit = (1 << resource);
1813         int func = BP_FUNC(bp);
1814         u32 hw_lock_control_reg;
1815         int cnt;
1816
1817         /* Validating that the resource is within range */
1818         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1819                 DP(NETIF_MSG_HW,
1820                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1821                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1822                 return -EINVAL;
1823         }
1824
1825         if (func <= 5) {
1826                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1827         } else {
1828                 hw_lock_control_reg =
1829                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1830         }
1831
1832         /* Validating that the resource is not already taken */
1833         lock_status = REG_RD(bp, hw_lock_control_reg);
1834         if (lock_status & resource_bit) {
1835                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1836                    lock_status, resource_bit);
1837                 return -EEXIST;
1838         }
1839
1840         /* Try for 5 seconds, polling every 5 ms */
1841         for (cnt = 0; cnt < 1000; cnt++) {
1842                 /* Try to acquire the lock */
1843                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1844                 lock_status = REG_RD(bp, hw_lock_control_reg);
1845                 if (lock_status & resource_bit)
1846                         return 0;
1847
1848                 msleep(5);
1849         }
1850         DP(NETIF_MSG_HW, "Timeout\n");
1851         return -EAGAIN;
1852 }
1853
1854 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1855 {
1856         u32 lock_status;
1857         u32 resource_bit = (1 << resource);
1858         int func = BP_FUNC(bp);
1859         u32 hw_lock_control_reg;
1860
1861         /* Validating that the resource is within range */
1862         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1863                 DP(NETIF_MSG_HW,
1864                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1865                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1866                 return -EINVAL;
1867         }
1868
1869         if (func <= 5) {
1870                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1871         } else {
1872                 hw_lock_control_reg =
1873                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1874         }
1875
1876         /* Validating that the resource is currently taken */
1877         lock_status = REG_RD(bp, hw_lock_control_reg);
1878         if (!(lock_status & resource_bit)) {
1879                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1880                    lock_status, resource_bit);
1881                 return -EFAULT;
1882         }
1883
1884         REG_WR(bp, hw_lock_control_reg, resource_bit);
1885         return 0;
1886 }
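
/*
 * Hedged note on the HW lock protocol above (derived from these two
 * helpers, not from a hardware manual): a resource is requested by
 * writing its bit to hw_lock_control_reg + 4 and confirmed by reading it
 * back from hw_lock_control_reg; it is released by writing the same bit
 * to hw_lock_control_reg itself.  The expected usage is the pattern the
 * PHY helpers below follow:
 *
 *	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
 *	... touch the shared resource ...
 *	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
 */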
1887
1888 /* HW Lock for shared dual port PHYs */
1889 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1890 {
1891         mutex_lock(&bp->port.phy_mutex);
1892
1893         if (bp->port.need_hw_lock)
1894                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1895 }
1896
1897 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1898 {
1899         if (bp->port.need_hw_lock)
1900                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1901
1902         mutex_unlock(&bp->port.phy_mutex);
1903 }
1904
1905 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1906 {
1907         /* The GPIO should be swapped if swap register is set and active */
1908         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1909                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1910         int gpio_shift = gpio_num +
1911                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1912         u32 gpio_mask = (1 << gpio_shift);
1913         u32 gpio_reg;
1914         int value;
1915
1916         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1917                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1918                 return -EINVAL;
1919         }
1920
1921         /* read GPIO value */
1922         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1923
1924         /* get the requested pin value */
1925         if ((gpio_reg & gpio_mask) == gpio_mask)
1926                 value = 1;
1927         else
1928                 value = 0;
1929
1930         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
1931
1932         return value;
1933 }
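
/*
 * Worked example of the port-swap logic above (illustrative values): with
 * port == 1 and both NIG_REG_PORT_SWAP and NIG_REG_STRAP_OVERRIDE set,
 * gpio_port = (1 && 1) ^ 1 = 0, so gpio_shift == gpio_num and the pin is
 * read from the port-0 half of MISC_REG_GPIO.  With the swap registers
 * clear, gpio_port = 0 ^ 1 = 1 and the index is shifted by
 * MISC_REGISTERS_GPIO_PORT_SHIFT instead.
 */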
1934
1935 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1936 {
1937         /* The GPIO should be swapped if swap register is set and active */
1938         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1939                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1940         int gpio_shift = gpio_num +
1941                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1942         u32 gpio_mask = (1 << gpio_shift);
1943         u32 gpio_reg;
1944
1945         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1946                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1947                 return -EINVAL;
1948         }
1949
1950         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1951         /* read GPIO and mask except the float bits */
1952         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1953
1954         switch (mode) {
1955         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1956                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1957                    gpio_num, gpio_shift);
1958                 /* clear FLOAT and set CLR */
1959                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1960                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1961                 break;
1962
1963         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1964                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1965                    gpio_num, gpio_shift);
1966                 /* clear FLOAT and set SET */
1967                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1968                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1969                 break;
1970
1971         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1972                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1973                    gpio_num, gpio_shift);
1974                 /* set FLOAT */
1975                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1976                 break;
1977
1978         default:
1979                 break;
1980         }
1981
1982         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1983         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1984
1985         return 0;
1986 }
1987
1988 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1989 {
1990         /* The GPIO should be swapped if swap register is set and active */
1991         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1992                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1993         int gpio_shift = gpio_num +
1994                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1995         u32 gpio_mask = (1 << gpio_shift);
1996         u32 gpio_reg;
1997
1998         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1999                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2000                 return -EINVAL;
2001         }
2002
2003         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2004         /* read GPIO int */
2005         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2006
2007         switch (mode) {
2008         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2009                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2010                                    "output low\n", gpio_num, gpio_shift);
2011                 /* clear SET and set CLR */
2012                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2013                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2014                 break;
2015
2016         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2017                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2018                                    "output high\n", gpio_num, gpio_shift);
2019                 /* clear CLR and set SET */
2020                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2021                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2022                 break;
2023
2024         default:
2025                 break;
2026         }
2027
2028         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2029         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2030
2031         return 0;
2032 }
2033
2034 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2035 {
2036         u32 spio_mask = (1 << spio_num);
2037         u32 spio_reg;
2038
2039         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2040             (spio_num > MISC_REGISTERS_SPIO_7)) {
2041                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2042                 return -EINVAL;
2043         }
2044
2045         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2046         /* read SPIO and mask except the float bits */
2047         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2048
2049         switch (mode) {
2050         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2051                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2052                 /* clear FLOAT and set CLR */
2053                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2054                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2055                 break;
2056
2057         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2058                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2059                 /* clear FLOAT and set SET */
2060                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2061                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2062                 break;
2063
2064         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2065                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2066                 /* set FLOAT */
2067                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2068                 break;
2069
2070         default:
2071                 break;
2072         }
2073
2074         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2075         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2076
2077         return 0;
2078 }
2079
2080 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2081 {
2082         switch (bp->link_vars.ieee_fc &
2083                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2084         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2085                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2086                                           ADVERTISED_Pause);
2087                 break;
2088
2089         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2090                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2091                                          ADVERTISED_Pause);
2092                 break;
2093
2094         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2095                 bp->port.advertising |= ADVERTISED_Asym_Pause;
2096                 break;
2097
2098         default:
2099                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2100                                           ADVERTISED_Pause);
2101                 break;
2102         }
2103 }
2104
2105 static void bnx2x_link_report(struct bnx2x *bp)
2106 {
2107         if (bp->state == BNX2X_STATE_DISABLED) {
2108                 netif_carrier_off(bp->dev);
2109                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2110                 return;
2111         }
2112
2113         if (bp->link_vars.link_up) {
2114                 if (bp->state == BNX2X_STATE_OPEN)
2115                         netif_carrier_on(bp->dev);
2116                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2117
2118                 printk("%d Mbps ", bp->link_vars.line_speed);
2119
2120                 if (bp->link_vars.duplex == DUPLEX_FULL)
2121                         printk("full duplex");
2122                 else
2123                         printk("half duplex");
2124
2125                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2126                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2127                                 printk(", receive ");
2128                                 if (bp->link_vars.flow_ctrl &
2129                                     BNX2X_FLOW_CTRL_TX)
2130                                         printk("& transmit ");
2131                         } else {
2132                                 printk(", transmit ");
2133                         }
2134                         printk("flow control ON");
2135                 }
2136                 printk("\n");
2137
2138         } else { /* link_down */
2139                 netif_carrier_off(bp->dev);
2140                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2141         }
2142 }
2143
2144 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2145 {
2146         if (!BP_NOMCP(bp)) {
2147                 u8 rc;
2148
2149                 /* Initialize link parameters structure variables */
2150                 /* It is recommended to turn off RX FC for jumbo frames
2151                    for better performance */
2152                 if (bp->dev->mtu > 5000)
2153                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2154                 else
2155                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2156
2157                 bnx2x_acquire_phy_lock(bp);
2158
2159                 if (load_mode == LOAD_DIAG)
2160                         bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2161
2162                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2163
2164                 bnx2x_release_phy_lock(bp);
2165
2166                 bnx2x_calc_fc_adv(bp);
2167
2168                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2169                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2170                         bnx2x_link_report(bp);
2171                 }
2172
2173                 return rc;
2174         }
2175         BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2176         return -EINVAL;
2177 }
2178
2179 static void bnx2x_link_set(struct bnx2x *bp)
2180 {
2181         if (!BP_NOMCP(bp)) {
2182                 bnx2x_acquire_phy_lock(bp);
2183                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2184                 bnx2x_release_phy_lock(bp);
2185
2186                 bnx2x_calc_fc_adv(bp);
2187         } else
2188                 BNX2X_ERR("Bootcode is missing - can not set link\n");
2189 }
2190
2191 static void bnx2x__link_reset(struct bnx2x *bp)
2192 {
2193         if (!BP_NOMCP(bp)) {
2194                 bnx2x_acquire_phy_lock(bp);
2195                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2196                 bnx2x_release_phy_lock(bp);
2197         } else
2198                 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2199 }
2200
2201 static u8 bnx2x_link_test(struct bnx2x *bp)
2202 {
2203         u8 rc;
2204
2205         bnx2x_acquire_phy_lock(bp);
2206         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2207         bnx2x_release_phy_lock(bp);
2208
2209         return rc;
2210 }
2211
2212 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2213 {
2214         u32 r_param = bp->link_vars.line_speed / 8;
2215         u32 fair_periodic_timeout_usec;
2216         u32 t_fair;
2217
2218         memset(&(bp->cmng.rs_vars), 0,
2219                sizeof(struct rate_shaping_vars_per_port));
2220         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2221
2222         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2223         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2224
2225         /* this is the threshold below which no timer arming will occur;
2226            the 1.25 coefficient makes the threshold a little bigger than
2227            the real time, to compensate for timer inaccuracy */
2228         bp->cmng.rs_vars.rs_threshold =
2229                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2230
2231         /* resolution of fairness timer */
2232         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2233         /* for 10G it is 1000usec. for 1G it is 10000usec. */
2234         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2235
2236         /* this is the threshold below which we won't arm the timer anymore */
2237         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2238
2239         /* we multiply by 1e3/8 to get bytes/msec.
2240            We don't want the credits to exceed
2241            t_fair*FAIR_MEM worth of credit (the algorithm resolution) */
2242         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2243         /* since each tick is 4 usec */
2244         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2245 }
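
/*
 * Worked example at a 10G line rate (illustrative; only the 100 usec
 * rate-shaping period and the 1000 usec t_fair stated in the comments
 * above are taken as given, QM_ARB_BYTES and FAIR_MEM stay symbolic):
 *
 *	r_param             = 10000 / 8            = 1250 bytes/usec
 *	rs_periodic_timeout = 100 / 4              = 25 SDM ticks
 *	rs_threshold        = (100 * 1250 * 5) / 4 = 156250 bytes
 *	t_fair              = T_FAIR_COEF / 10000  = 1000 usec
 *	fair_threshold      = QM_ARB_BYTES
 *	upper_bound         = 1250 * 1000 * FAIR_MEM bytes
 */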
2246
2247 /* Calculates the sum of vn_min_rates.
2248    It's needed for further normalizing of the min_rates.
2249    Returns:
2250      sum of vn_min_rates.
2251        or
2252      0 - if all the min_rates are 0.
2253      In the latter case the fairness algorithm should be deactivated.
2254      If not all min_rates are zero, then those that are zero will be set to 1.
2255  */
2256 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2257 {
2258         int all_zero = 1;
2259         int port = BP_PORT(bp);
2260         int vn;
2261
2262         bp->vn_weight_sum = 0;
2263         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2264                 int func = 2*vn + port;
2265                 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2266                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2267                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2268
2269                 /* Skip hidden vns */
2270                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2271                         continue;
2272
2273                 /* If min rate is zero - set it to 1 */
2274                 if (!vn_min_rate)
2275                         vn_min_rate = DEF_MIN_RATE;
2276                 else
2277                         all_zero = 0;
2278
2279                 bp->vn_weight_sum += vn_min_rate;
2280         }
2281
2282         /* ... only if all min rates are zero - disable fairness */
2283         if (all_zero)
2284                 bp->vn_weight_sum = 0;
2285 }
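
/*
 * Worked example (illustrative configuration, DEF_MIN_RATE left
 * symbolic): with four vns whose configured min BW fields are 0, 25, 0
 * and 75, the per-vn rates become DEF_MIN_RATE, 2500, DEF_MIN_RATE and
 * 7500 and vn_weight_sum is their sum.  Only when every (non-hidden) vn
 * has a zero min rate does vn_weight_sum stay 0, which disables the
 * fairness algorithm further down.
 */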
2286
2287 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2288 {
2289         struct rate_shaping_vars_per_vn m_rs_vn;
2290         struct fairness_vars_per_vn m_fair_vn;
2291         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2292         u16 vn_min_rate, vn_max_rate;
2293         int i;
2294
2295         /* If function is hidden - set min and max to zeroes */
2296         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2297                 vn_min_rate = 0;
2298                 vn_max_rate = 0;
2299
2300         } else {
2301                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2302                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2303                 /* If fairness is enabled (not all min rates are zero) and
2304                    the current min rate is zero - set it to 1.
2305                    This is a requirement of the algorithm. */
2306                 if (bp->vn_weight_sum && (vn_min_rate == 0))
2307                         vn_min_rate = DEF_MIN_RATE;
2308                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2309                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2310         }
2311
2312         DP(NETIF_MSG_IFUP,
2313            "func %d: vn_min_rate=%d  vn_max_rate=%d  vn_weight_sum=%d\n",
2314            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2315
2316         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2317         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2318
2319         /* global vn counter - maximal Mbps for this vn */
2320         m_rs_vn.vn_counter.rate = vn_max_rate;
2321
2322         /* quota - number of bytes transmitted in this period */
2323         m_rs_vn.vn_counter.quota =
2324                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2325
2326         if (bp->vn_weight_sum) {
2327                 /* credit for each period of the fairness algorithm:
2328                    number of bytes in T_FAIR (the vns share the port rate).
2329                    vn_weight_sum should not be larger than 10000, thus
2330                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2331                    than zero */
2332                 m_fair_vn.vn_credit_delta =
2333                         max((u32)(vn_min_rate * (T_FAIR_COEF /
2334                                                  (8 * bp->vn_weight_sum))),
2335                             (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2336                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2337                    m_fair_vn.vn_credit_delta);
2338         }
2339
2340         /* Store it to internal memory */
2341         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2342                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2343                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2344                        ((u32 *)(&m_rs_vn))[i]);
2345
2346         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2347                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2348                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2349                        ((u32 *)(&m_fair_vn))[i]);
2350 }
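
/*
 * Worked example (illustrative numbers; RS_PERIODIC_TIMEOUT_USEC = 100
 * and T_FAIR_COEF = 10^7 follow from the comments in
 * bnx2x_init_port_minmax() above): for vn_max_rate = 2500 Mbps the
 * rate-shaping quota is 2500 * 100 / 8 = 31250 bytes per period, and for
 * vn_min_rate = 2500 with vn_weight_sum = 10000 the fairness credit is
 *
 *	max(2500 * (10^7 / (8 * 10000)), 2 * fair_threshold)
 *	  = max(312500, 2 * fair_threshold) bytes per T_FAIR.
 */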
2351
2352
2353 /* This function is called upon link interrupt */
2354 static void bnx2x_link_attn(struct bnx2x *bp)
2355 {
2356         /* Make sure that we are synced with the current statistics */
2357         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2358
2359         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2360
2361         if (bp->link_vars.link_up) {
2362
2363                 /* dropless flow control */
2364                 if (CHIP_IS_E1H(bp)) {
2365                         int port = BP_PORT(bp);
2366                         u32 pause_enabled = 0;
2367
2368                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2369                                 pause_enabled = 1;
2370
2371                         REG_WR(bp, BAR_USTRORM_INTMEM +
2372                                USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2373                                pause_enabled);
2374                 }
2375
2376                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2377                         struct host_port_stats *pstats;
2378
2379                         pstats = bnx2x_sp(bp, port_stats);
2380                         /* reset old bmac stats */
2381                         memset(&(pstats->mac_stx[0]), 0,
2382                                sizeof(struct mac_stx));
2383                 }
2384                 if ((bp->state == BNX2X_STATE_OPEN) ||
2385                     (bp->state == BNX2X_STATE_DISABLED))
2386                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2387         }
2388
2389         /* indicate link status */
2390         bnx2x_link_report(bp);
2391
2392         if (IS_E1HMF(bp)) {
2393                 int port = BP_PORT(bp);
2394                 int func;
2395                 int vn;
2396
2397                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2398                         if (vn == BP_E1HVN(bp))
2399                                 continue;
2400
2401                         func = ((vn << 1) | port);
2402
2403                         /* Set the attention towards other drivers
2404                            on the same port */
2405                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2406                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2407                 }
2408
2409                 if (bp->link_vars.link_up) {
2410                         int i;
2411
2412                         /* Init rate shaping and fairness contexts */
2413                         bnx2x_init_port_minmax(bp);
2414
2415                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2416                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2417
2418                         /* Store it to internal memory */
2419                         for (i = 0;
2420                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
2421                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2422                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2423                                        ((u32 *)(&bp->cmng))[i]);
2424                 }
2425         }
2426 }
2427
2428 static void bnx2x__link_status_update(struct bnx2x *bp)
2429 {
2430         int func = BP_FUNC(bp);
2431
2432         if (bp->state != BNX2X_STATE_OPEN)
2433                 return;
2434
2435         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2436
2437         if (bp->link_vars.link_up)
2438                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2439         else
2440                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2441
2442         bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2443         bnx2x_calc_vn_weight_sum(bp);
2444
2445         /* indicate link status */
2446         bnx2x_link_report(bp);
2447 }
2448
2449 static void bnx2x_pmf_update(struct bnx2x *bp)
2450 {
2451         int port = BP_PORT(bp);
2452         u32 val;
2453
2454         bp->port.pmf = 1;
2455         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2456
2457         /* enable nig attention */
2458         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2459         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2460         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2461
2462         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2463 }
2464
2465 /* end of Link */
2466
2467 /* slow path */
2468
2469 /*
2470  * General service functions
2471  */
2472
2473 /* send the MCP a request, block until there is a reply */
2474 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2475 {
2476         int func = BP_FUNC(bp);
2477         u32 seq = ++bp->fw_seq;
2478         u32 rc = 0;
2479         u32 cnt = 1;
2480         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2481
2482         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2483         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2484
2485         do {
2486                 /* let the FW do its magic ... */
2487                 msleep(delay);
2488
2489                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2490
2491                 /* Give the FW up to 2 seconds (200 * 10ms) */
2492         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
2493
2494         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2495            cnt*delay, rc, seq);
2496
2497         /* is this a reply to our command? */
2498         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2499                 rc &= FW_MSG_CODE_MASK;
2500         else {
2501                 /* FW BUG! */
2502                 BNX2X_ERR("FW failed to respond!\n");
2503                 bnx2x_fw_dump(bp);
2504                 rc = 0;
2505         }
2506
2507         return rc;
2508 }
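
/*
 * Hedged usage sketch (the command value is one actually issued later in
 * this file; the error handling shown is illustrative): callers post a
 * single command and act on the masked response, e.g.
 *
 *	u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
 *	if (rc == 0)
 *		... the MCP did not reply within ~2s; bnx2x_fw_command()
 *		    already printed a FW dump ...
 */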
2509
2510 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2511 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set);
2512 static void bnx2x_set_rx_mode(struct net_device *dev);
2513
2514 static void bnx2x_e1h_disable(struct bnx2x *bp)
2515 {
2516         int port = BP_PORT(bp);
2517         int i;
2518
2519         bp->rx_mode = BNX2X_RX_MODE_NONE;
2520         bnx2x_set_storm_rx_mode(bp);
2521
2522         netif_tx_disable(bp->dev);
2523         bp->dev->trans_start = jiffies; /* prevent tx timeout */
2524
2525         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2526
2527         bnx2x_set_mac_addr_e1h(bp, 0);
2528
2529         for (i = 0; i < MC_HASH_SIZE; i++)
2530                 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
2531
2532         netif_carrier_off(bp->dev);
2533 }
2534
2535 static void bnx2x_e1h_enable(struct bnx2x *bp)
2536 {
2537         int port = BP_PORT(bp);
2538
2539         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2540
2541         bnx2x_set_mac_addr_e1h(bp, 1);
2542
2543         /* Only the Tx queues need to be re-enabled */
2544         netif_tx_wake_all_queues(bp->dev);
2545
2546         /* Initialize the receive filter. */
2547         bnx2x_set_rx_mode(bp->dev);
2548 }
2549
2550 static void bnx2x_update_min_max(struct bnx2x *bp)
2551 {
2552         int port = BP_PORT(bp);
2553         int vn, i;
2554
2555         /* Init rate shaping and fairness contexts */
2556         bnx2x_init_port_minmax(bp);
2557
2558         bnx2x_calc_vn_weight_sum(bp);
2559
2560         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2561                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2562
2563         if (bp->port.pmf) {
2564                 int func;
2565
2566                 /* Set the attention towards other drivers on the same port */
2567                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2568                         if (vn == BP_E1HVN(bp))
2569                                 continue;
2570
2571                         func = ((vn << 1) | port);
2572                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2573                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2574                 }
2575
2576                 /* Store it to internal memory */
2577                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2578                         REG_WR(bp, BAR_XSTRORM_INTMEM +
2579                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2580                                ((u32 *)(&bp->cmng))[i]);
2581         }
2582 }
2583
2584 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2585 {
2586         int func = BP_FUNC(bp);
2587
2588         DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2589         bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2590
2591         if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2592
2593                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2594                         DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2595                         bp->state = BNX2X_STATE_DISABLED;
2596
2597                         bnx2x_e1h_disable(bp);
2598                 } else {
2599                         DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2600                         bp->state = BNX2X_STATE_OPEN;
2601
2602                         bnx2x_e1h_enable(bp);
2603                 }
2604                 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2605         }
2606         if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2607
2608                 bnx2x_update_min_max(bp);
2609                 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2610         }
2611
2612         /* Report results to MCP */
2613         if (dcc_event)
2614                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2615         else
2616                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2617 }
2618
2619 /* the slow path queue is unusual in that completions arrive on the fastpath ring */
2620 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2621                          u32 data_hi, u32 data_lo, int common)
2622 {
2623         int func = BP_FUNC(bp);
2624
2625         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2626            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2627            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2628            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2629            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2630
2631 #ifdef BNX2X_STOP_ON_ERROR
2632         if (unlikely(bp->panic))
2633                 return -EIO;
2634 #endif
2635
2636         spin_lock_bh(&bp->spq_lock);
2637
2638         if (!bp->spq_left) {
2639                 BNX2X_ERR("BUG! SPQ ring full!\n");
2640                 spin_unlock_bh(&bp->spq_lock);
2641                 bnx2x_panic();
2642                 return -EBUSY;
2643         }
2644
2645         /* CID needs the port number to be encoded in it */
2646         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2647                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2648                                      HW_CID(bp, cid)));
2649         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2650         if (common)
2651                 bp->spq_prod_bd->hdr.type |=
2652                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2653
2654         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2655         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2656
2657         bp->spq_left--;
2658
2659         if (bp->spq_prod_bd == bp->spq_last_bd) {
2660                 bp->spq_prod_bd = bp->spq;
2661                 bp->spq_prod_idx = 0;
2662                 DP(NETIF_MSG_TIMER, "end of spq\n");
2663
2664         } else {
2665                 bp->spq_prod_bd++;
2666                 bp->spq_prod_idx++;
2667         }
2668
2669         /* Make sure that BD data is updated before writing the producer */
2670         wmb();
2671
2672         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2673                bp->spq_prod_idx);
2674
2675         mmiowb();
2676
2677         spin_unlock_bh(&bp->spq_lock);
2678         return 0;
2679 }
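
/*
 * Hedged note summarizing the helper above (not a firmware interface
 * description): a slow path entry carries a ramrod command, the HW CID it
 * applies to and a 64-bit data address split into data_hi/data_lo; the
 * 'common' flag marks it as a common ramrod via
 * SPE_HDR_COMMON_RAMROD_SHIFT.  The producer wraps back to bp->spq when
 * it reaches bp->spq_last_bd, and the XSTORM producer write at the end
 * acts as the doorbell.
 */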
2680
2681 /* acquire split MCP access lock register */
2682 static int bnx2x_acquire_alr(struct bnx2x *bp)
2683 {
2684         u32 i, j, val;
2685         int rc = 0;
2686
2687         might_sleep();
2688         i = 100;
2689         for (j = 0; j < i*10; j++) {
2690                 val = (1UL << 31);
2691                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2692                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2693                 if (val & (1L << 31))
2694                         break;
2695
2696                 msleep(5);
2697         }
2698         if (!(val & (1L << 31))) {
2699                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2700                 rc = -EBUSY;
2701         }
2702
2703         return rc;
2704 }
2705
2706 /* release split MCP access lock register */
2707 static void bnx2x_release_alr(struct bnx2x *bp)
2708 {
2709         u32 val = 0;
2710
2711         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2712 }
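
/*
 * Hedged note on the ALR protocol as implemented above: bit 31 of the
 * register at GRCBASE_MCP + 0x9c is written to request the split MCP
 * access lock and read back to confirm ownership (polled for up to ~5
 * seconds); writing 0 to the same register releases it.
 */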
2713
2714 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2715 {
2716         struct host_def_status_block *def_sb = bp->def_status_blk;
2717         u16 rc = 0;
2718
2719         barrier(); /* status block is written to by the chip */
2720         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2721                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2722                 rc |= 1;
2723         }
2724         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2725                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2726                 rc |= 2;
2727         }
2728         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2729                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2730                 rc |= 4;
2731         }
2732         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2733                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2734                 rc |= 8;
2735         }
2736         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2737                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2738                 rc |= 16;
2739         }
2740         return rc;
2741 }
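
/*
 * Hedged note: the value returned above is a bitmask of which default
 * status block indices changed - bit 0 for the attention bits index and
 * bits 1..4 for the CSTORM, USTORM, XSTORM and TSTORM indices
 * respectively (the rc |= 1/2/4/8/16 updates in the code).
 */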
2742
2743 /*
2744  * slow path service functions
2745  */
2746
2747 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2748 {
2749         int port = BP_PORT(bp);
2750         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2751                        COMMAND_REG_ATTN_BITS_SET);
2752         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2753                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2754         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2755                                        NIG_REG_MASK_INTERRUPT_PORT0;
2756         u32 aeu_mask;
2757         u32 nig_mask = 0;
2758
2759         if (bp->attn_state & asserted)
2760                 BNX2X_ERR("IGU ERROR\n");
2761
2762         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2763         aeu_mask = REG_RD(bp, aeu_addr);
2764
2765         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2766            aeu_mask, asserted);
2767         aeu_mask &= ~(asserted & 0xff);
2768         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2769
2770         REG_WR(bp, aeu_addr, aeu_mask);
2771         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2772
2773         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2774         bp->attn_state |= asserted;
2775         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2776
2777         if (asserted & ATTN_HARD_WIRED_MASK) {
2778                 if (asserted & ATTN_NIG_FOR_FUNC) {
2779
2780                         bnx2x_acquire_phy_lock(bp);
2781
2782                         /* save nig interrupt mask */
2783                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2784                         REG_WR(bp, nig_int_mask_addr, 0);
2785
2786                         bnx2x_link_attn(bp);
2787
2788                         /* handle unicore attn? */
2789                 }
2790                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2791                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2792
2793                 if (asserted & GPIO_2_FUNC)
2794                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2795
2796                 if (asserted & GPIO_3_FUNC)
2797                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2798
2799                 if (asserted & GPIO_4_FUNC)
2800                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2801
2802                 if (port == 0) {
2803                         if (asserted & ATTN_GENERAL_ATTN_1) {
2804                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2805                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2806                         }
2807                         if (asserted & ATTN_GENERAL_ATTN_2) {
2808                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2809                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2810                         }
2811                         if (asserted & ATTN_GENERAL_ATTN_3) {
2812                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2813                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2814                         }
2815                 } else {
2816                         if (asserted & ATTN_GENERAL_ATTN_4) {
2817                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2818                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2819                         }
2820                         if (asserted & ATTN_GENERAL_ATTN_5) {
2821                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2822                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2823                         }
2824                         if (asserted & ATTN_GENERAL_ATTN_6) {
2825                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2826                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2827                         }
2828                 }
2829
2830         } /* if hardwired */
2831
2832         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2833            asserted, hc_addr);
2834         REG_WR(bp, hc_addr, asserted);
2835
2836         /* now set back the mask */
2837         if (asserted & ATTN_NIG_FOR_FUNC) {
2838                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2839                 bnx2x_release_phy_lock(bp);
2840         }
2841 }
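/*
 * A worked example of the masking above (values hypothetical): if the AEU
 * mask reads back as 0x07 and asserted == 0x103, only the low byte of
 * asserted takes part in the AEU update, so the mask written back is
 * 0x07 & ~0x03 == 0x04; attn_state gains all of 0x103; and the full 0x103
 * is then written to the HC command register at COMMAND_REG_ATTN_BITS_SET.
 */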
2842
2843 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2844 {
2845         int port = BP_PORT(bp);
2846
2847         /* mark the failure */
2848         bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2849         bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2850         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2851                  bp->link_params.ext_phy_config);
2852
2853         /* log the failure */
2854         printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2855                " the driver to shut down the card to prevent permanent"
2856                " damage.  Please contact Dell Support for assistance\n",
2857                bp->dev->name);
2858 }
2859 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2860 {
2861         int port = BP_PORT(bp);
2862         int reg_offset;
2863         u32 val, swap_val, swap_override;
2864
2865         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2866                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2867
2868         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2869
2870                 val = REG_RD(bp, reg_offset);
2871                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2872                 REG_WR(bp, reg_offset, val);
2873
2874                 BNX2X_ERR("SPIO5 hw attention\n");
2875
2876                 /* Fan failure attention */
2877                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2878                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2879                         /* Low power mode is controlled by GPIO 2 */
2880                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2881                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2882                         /* The PHY reset is controlled by GPIO 1 */
2883                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2884                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2885                         break;
2886
2887                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2888                         /* The PHY reset is controlled by GPIO 1 */
2889                         /* fake the port number to cancel the swap done in
2890                            set_gpio() */
2891                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2892                         swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2893                         port = (swap_val && swap_override) ^ 1;
2894                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2895                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2896                         break;
2897
2898                 default:
2899                         break;
2900                 }
2901                 bnx2x_fan_failure(bp);
2902         }
2903
2904         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2905                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2906                 bnx2x_acquire_phy_lock(bp);
2907                 bnx2x_handle_module_detect_int(&bp->link_params);
2908                 bnx2x_release_phy_lock(bp);
2909         }
2910
2911         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2912
2913                 val = REG_RD(bp, reg_offset);
2914                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2915                 REG_WR(bp, reg_offset, val);
2916
2917                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2918                           (attn & HW_INTERRUT_ASSERT_SET_0));
2919                 bnx2x_panic();
2920         }
2921 }
2922
2923 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2924 {
2925         u32 val;
2926
2927         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2928
2929                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2930                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2931                 /* DORQ discard attention */
2932                 if (val & 0x2)
2933                         BNX2X_ERR("FATAL error from DORQ\n");
2934         }
2935
2936         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2937
2938                 int port = BP_PORT(bp);
2939                 int reg_offset;
2940
2941                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2942                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2943
2944                 val = REG_RD(bp, reg_offset);
2945                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2946                 REG_WR(bp, reg_offset, val);
2947
2948                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2949                           (attn & HW_INTERRUT_ASSERT_SET_1));
2950                 bnx2x_panic();
2951         }
2952 }
2953
2954 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2955 {
2956         u32 val;
2957
2958         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2959
2960                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2961                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2962                 /* CFC error attention */
2963                 if (val & 0x2)
2964                         BNX2X_ERR("FATAL error from CFC\n");
2965         }
2966
2967         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2968
2969                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2970                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2971                 /* RQ_USDMDP_FIFO_OVERFLOW */
2972                 if (val & 0x18000)
2973                         BNX2X_ERR("FATAL error from PXP\n");
2974         }
2975
2976         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2977
2978                 int port = BP_PORT(bp);
2979                 int reg_offset;
2980
2981                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2982                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2983
2984                 val = REG_RD(bp, reg_offset);
2985                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2986                 REG_WR(bp, reg_offset, val);
2987
2988                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2989                           (attn & HW_INTERRUT_ASSERT_SET_2));
2990                 bnx2x_panic();
2991         }
2992 }
2993
2994 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2995 {
2996         u32 val;
2997
2998         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2999
3000                 if (attn & BNX2X_PMF_LINK_ASSERT) {
3001                         int func = BP_FUNC(bp);
3002
3003                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3004                         val = SHMEM_RD(bp, func_mb[func].drv_status);
3005                         if (val & DRV_STATUS_DCC_EVENT_MASK)
3006                                 bnx2x_dcc_event(bp,
3007                                             (val & DRV_STATUS_DCC_EVENT_MASK));
3008                         bnx2x__link_status_update(bp);
3009                         if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3010                                 bnx2x_pmf_update(bp);
3011
3012                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3013
3014                         BNX2X_ERR("MC assert!\n");
3015                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3016                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3017                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3018                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3019                         bnx2x_panic();
3020
3021                 } else if (attn & BNX2X_MCP_ASSERT) {
3022
3023                         BNX2X_ERR("MCP assert!\n");
3024                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3025                         bnx2x_fw_dump(bp);
3026
3027                 } else
3028                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3029         }
3030
3031         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3032                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3033                 if (attn & BNX2X_GRC_TIMEOUT) {
3034                         val = CHIP_IS_E1H(bp) ?
3035                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3036                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
3037                 }
3038                 if (attn & BNX2X_GRC_RSV) {
3039                         val = CHIP_IS_E1H(bp) ?
3040                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3041                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
3042                 }
3043                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3044         }
3045 }
3046
3047 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3048 {
3049         struct attn_route attn;
3050         struct attn_route group_mask;
3051         int port = BP_PORT(bp);
3052         int index;
3053         u32 reg_addr;
3054         u32 val;
3055         u32 aeu_mask;
3056
3057         /* need to take HW lock because MCP or other port might also
3058            try to handle this event */
3059         bnx2x_acquire_alr(bp);
3060
3061         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3062         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3063         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3064         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3065         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3066            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3067
3068         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3069                 if (deasserted & (1 << index)) {
3070                         group_mask = bp->attn_group[index];
3071
3072                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3073                            index, group_mask.sig[0], group_mask.sig[1],
3074                            group_mask.sig[2], group_mask.sig[3]);
3075
3076                         bnx2x_attn_int_deasserted3(bp,
3077                                         attn.sig[3] & group_mask.sig[3]);
3078                         bnx2x_attn_int_deasserted1(bp,
3079                                         attn.sig[1] & group_mask.sig[1]);
3080                         bnx2x_attn_int_deasserted2(bp,
3081                                         attn.sig[2] & group_mask.sig[2]);
3082                         bnx2x_attn_int_deasserted0(bp,
3083                                         attn.sig[0] & group_mask.sig[0]);
3084
3085                         if ((attn.sig[0] & group_mask.sig[0] &
3086                                                 HW_PRTY_ASSERT_SET_0) ||
3087                             (attn.sig[1] & group_mask.sig[1] &
3088                                                 HW_PRTY_ASSERT_SET_1) ||
3089                             (attn.sig[2] & group_mask.sig[2] &
3090                                                 HW_PRTY_ASSERT_SET_2))
3091                                 BNX2X_ERR("FATAL HW block parity attention\n");
3092                 }
3093         }
3094
3095         bnx2x_release_alr(bp);
3096
3097         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3098
3099         val = ~deasserted;
3100         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3101            val, reg_addr);
3102         REG_WR(bp, reg_addr, val);
3103
3104         if (~bp->attn_state & deasserted)
3105                 BNX2X_ERR("IGU ERROR\n");
3106
3107         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3108                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
3109
3110         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3111         aeu_mask = REG_RD(bp, reg_addr);
3112
3113         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
3114            aeu_mask, deasserted);
3115         aeu_mask |= (deasserted & 0xff);
3116         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3117
3118         REG_WR(bp, reg_addr, aeu_mask);
3119         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3120
3121         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3122         bp->attn_state &= ~deasserted;
3123         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3124 }
3125
3126 static void bnx2x_attn_int(struct bnx2x *bp)
3127 {
3128         /* read local copy of bits */
3129         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3130                                                                 attn_bits);
3131         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3132                                                                 attn_bits_ack);
3133         u32 attn_state = bp->attn_state;
3134
3135         /* look for changed bits */
3136         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
3137         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
3138
3139         DP(NETIF_MSG_HW,
3140            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
3141            attn_bits, attn_ack, asserted, deasserted);
3142
3143         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3144                 BNX2X_ERR("BAD attention state\n");
3145
3146         /* handle bits that were raised */
3147         if (asserted)
3148                 bnx2x_attn_int_asserted(bp, asserted);
3149
3150         if (deasserted)
3151                 bnx2x_attn_int_deasserted(bp, deasserted);
3152 }
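/*
 * A worked example of the assert/de-assert arithmetic above (bit values
 * hypothetical): with attn_bits == 0x1, attn_ack == 0x2 and attn_state == 0x2,
 * bit 0 appears in attn_bits but is neither acknowledged nor tracked, so
 * asserted == 0x1, while bit 1 has dropped out of attn_bits although it is
 * still acknowledged and tracked, so deasserted == 0x2; both handlers then
 * run, each on its own bit.
 */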
3153
3154 static void bnx2x_sp_task(struct work_struct *work)
3155 {
3156         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3157         u16 status;
3158
3159
3160         /* Return here if interrupt is disabled */
3161         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3162                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3163                 return;
3164         }
3165
3166         status = bnx2x_update_dsb_idx(bp);
3167 /*      if (status == 0)                                     */
3168 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
3169
3170         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
3171
3172         /* HW attentions */
3173         if (status & 0x1)
3174                 bnx2x_attn_int(bp);
3175
3176         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3177                      IGU_INT_NOP, 1);
3178         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3179                      IGU_INT_NOP, 1);
3180         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3181                      IGU_INT_NOP, 1);
3182         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3183                      IGU_INT_NOP, 1);
3184         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3185                      IGU_INT_ENABLE, 1);
3186
3187 }
3188
3189 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3190 {
3191         struct net_device *dev = dev_instance;
3192         struct bnx2x *bp = netdev_priv(dev);
3193
3194         /* Return here if interrupt is disabled */
3195         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3197                 return IRQ_HANDLED;
3198         }
3199
3200         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3201
3202 #ifdef BNX2X_STOP_ON_ERROR
3203         if (unlikely(bp->panic))
3204                 return IRQ_HANDLED;
3205 #endif
3206
3207         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3208
3209         return IRQ_HANDLED;
3210 }
3211
3212 /* end of slow path */
3213
3214 /* Statistics */
3215
3216 /****************************************************************************
3217 * Macros
3218 ****************************************************************************/
3219
3220 /* sum[hi:lo] += add[hi:lo] */
3221 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3222         do { \
3223                 s_lo += a_lo; \
3224                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3225         } while (0)
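/*
 * ADD_64 propagates the carry out of the low word by hand; a minimal sketch
 * (values hypothetical):
 *
 *	u32 sum_hi = 0x00000001, sum_lo = 0xfffffffe;
 *	ADD_64(sum_hi, 0, sum_lo, 3);
 *	// sum_lo wraps to 0x00000001, the (s_lo < a_lo) test sees the carry,
 *	// and sum_hi becomes 0x00000002
 */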
3226
3227 /* difference = minuend - subtrahend */
3228 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3229         do { \
3230                 if (m_lo < s_lo) { \
3231                         /* underflow */ \
3232                         d_hi = m_hi - s_hi; \
3233                         if (d_hi > 0) { \
3234                                 /* we can 'loan' 1 */ \
3235                                 d_hi--; \
3236                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3237                         } else { \
3238                                 /* m_hi <= s_hi */ \
3239                                 d_hi = 0; \
3240                                 d_lo = 0; \
3241                         } \
3242                 } else { \
3243                         /* m_lo >= s_lo */ \
3244                         if (m_hi < s_hi) { \
3245                                 d_hi = 0; \
3246                                 d_lo = 0; \
3247                         } else { \
3248                                 /* m_hi >= s_hi */ \
3249                                 d_hi = m_hi - s_hi; \
3250                                 d_lo = m_lo - s_lo; \
3251                         } \
3252                 } \
3253         } while (0)
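/*
 * DIFF_64 borrows from the high word when the low word underflows and clamps
 * to zero when the subtrahend is the larger value; a minimal sketch (values
 * hypothetical): 0x00000002_00000001 minus 0x00000001_00000003 takes the
 * underflow path, "loans" one from d_hi (2 - 1 - 1 == 0) and computes
 * d_lo == 1 + (UINT_MAX - 3) + 1 == 0xfffffffe, i.e. the expected
 * 0x00000000_fffffffe; swapping minuend and subtrahend yields 0:0 instead
 * of wrapping.
 */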
3254
3255 #define UPDATE_STAT64(s, t) \
3256         do { \
3257                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3258                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3259                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3260                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3261                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3262                        pstats->mac_stx[1].t##_lo, diff.lo); \
3263         } while (0)
3264
3265 #define UPDATE_STAT64_NIG(s, t) \
3266         do { \
3267                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3268                         diff.lo, new->s##_lo, old->s##_lo); \
3269                 ADD_64(estats->t##_hi, diff.hi, \
3270                        estats->t##_lo, diff.lo); \
3271         } while (0)
3272
3273 /* sum[hi:lo] += add */
3274 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3275         do { \
3276                 s_lo += a; \
3277                 s_hi += (s_lo < a) ? 1 : 0; \
3278         } while (0)
3279
3280 #define UPDATE_EXTEND_STAT(s) \
3281         do { \
3282                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3283                               pstats->mac_stx[1].s##_lo, \
3284                               new->s); \
3285         } while (0)
3286
3287 #define UPDATE_EXTEND_TSTAT(s, t) \
3288         do { \
3289                 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3290                 old_tclient->s = tclient->s; \
3291                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3292         } while (0)
3293
3294 #define UPDATE_EXTEND_USTAT(s, t) \
3295         do { \
3296                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3297                 old_uclient->s = uclient->s; \
3298                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3299         } while (0)
3300
3301 #define UPDATE_EXTEND_XSTAT(s, t) \
3302         do { \
3303                 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3304                 old_xclient->s = xclient->s; \
3305                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3306         } while (0)
3307
3308 /* minuend -= subtrahend */
3309 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3310         do { \
3311                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3312         } while (0)
3313
3314 /* minuend[hi:lo] -= subtrahend */
3315 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3316         do { \
3317                 SUB_64(m_hi, 0, m_lo, s); \
3318         } while (0)
3319
3320 #define SUB_EXTEND_USTAT(s, t) \
3321         do { \
3322                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3323                 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3324         } while (0)
3325
3326 /*
3327  * General service functions
3328  */
3329
3330 static inline long bnx2x_hilo(u32 *hiref)
3331 {
3332         u32 lo = *(hiref + 1);
3333 #if (BITS_PER_LONG == 64)
3334         u32 hi = *hiref;
3335
3336         return HILO_U64(hi, lo);
3337 #else
3338         return lo;
3339 #endif
3340 }
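/*
 * The statistics blocks store 64-bit counters as two consecutive u32s with
 * the high word first, so hiref[0] is the high half and hiref[1] the low
 * half.  On a 64-bit kernel the halves are recombined via HILO_U64()
 * (presumably ((u64)hi << 32) + lo, so hi == 0x1, lo == 0x2 gives
 * 0x100000002); on a 32-bit kernel only the low word fits in a long and the
 * high half is simply dropped.
 */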
3341
3342 /*
3343  * Init service functions
3344  */
3345
3346 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3347 {
3348         if (!bp->stats_pending) {
3349                 struct eth_query_ramrod_data ramrod_data = {0};
3350                 int i, rc;
3351
3352                 ramrod_data.drv_counter = bp->stats_counter++;
3353                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3354                 for_each_queue(bp, i)
3355                         ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3356
3357                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3358                                    ((u32 *)&ramrod_data)[1],
3359                                    ((u32 *)&ramrod_data)[0], 0);
3360                 if (rc == 0) {
3361                         /* stats ramrod has its own slot on the spq */
3362                         bp->spq_left++;
3363                         bp->stats_pending = 1;
3364                 }
3365         }
3366 }
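/*
 * A sketch of the ramrod payload built above (client ids hypothetical): with
 * two queues whose cl_id values are 0 and 1, ctr_id_vector ends up as 0x3 and
 * drv_counter carries the just-incremented bp->stats_counter; the struct is
 * handed to bnx2x_sp_post() as its two u32 halves (word 1 then word 0, as in
 * the argument order above).  The spq_left++ presumably gives back the
 * generic slot accounted by bnx2x_sp_post(), since the stats ramrod has a
 * dedicated slot of its own.
 */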
3367
3368 static void bnx2x_stats_init(struct bnx2x *bp)
3369 {
3370         int port = BP_PORT(bp);
3371         int i;
3372
3373         bp->stats_pending = 0;
3374         bp->executer_idx = 0;
3375         bp->stats_counter = 0;
3376
3377         /* port stats */
3378         if (!BP_NOMCP(bp))
3379                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3380         else
3381                 bp->port.port_stx = 0;
3382         DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3383
3384         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3385         bp->port.old_nig_stats.brb_discard =
3386                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3387         bp->port.old_nig_stats.brb_truncate =
3388                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3389         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3390                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3391         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3392                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3393
3394         /* function stats */
3395         for_each_queue(bp, i) {
3396                 struct bnx2x_fastpath *fp = &bp->fp[i];
3397
3398                 memset(&fp->old_tclient, 0,
3399                        sizeof(struct tstorm_per_client_stats));
3400                 memset(&fp->old_uclient, 0,
3401                        sizeof(struct ustorm_per_client_stats));
3402                 memset(&fp->old_xclient, 0,
3403                        sizeof(struct xstorm_per_client_stats));
3404                 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3405         }
3406
3407         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3408         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3409
3410         bp->stats_state = STATS_STATE_DISABLED;
3411         if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3412                 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3413 }
3414
3415 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3416 {
3417         struct dmae_command *dmae = &bp->stats_dmae;
3418         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3419
3420         *stats_comp = DMAE_COMP_VAL;
3421         if (CHIP_REV_IS_SLOW(bp))
3422                 return;
3423
3424         /* loader */
3425         if (bp->executer_idx) {
3426                 int loader_idx = PMF_DMAE_C(bp);
3427
3428                 memset(dmae, 0, sizeof(struct dmae_command));
3429
3430                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3431                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3432                                 DMAE_CMD_DST_RESET |
3433 #ifdef __BIG_ENDIAN
3434                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3435 #else
3436                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3437 #endif
3438                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3439                                                DMAE_CMD_PORT_0) |
3440                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3441                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3442                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3443                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3444                                      sizeof(struct dmae_command) *
3445                                      (loader_idx + 1)) >> 2;
3446                 dmae->dst_addr_hi = 0;
3447                 dmae->len = sizeof(struct dmae_command) >> 2;
3448                 if (CHIP_IS_E1(bp))
3449                         dmae->len--;
3450                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3451                 dmae->comp_addr_hi = 0;
3452                 dmae->comp_val = 1;
3453
3454                 *stats_comp = 0;
3455                 bnx2x_post_dmae(bp, dmae, loader_idx);
3456
3457         } else if (bp->func_stx) {
3458                 *stats_comp = 0;
3459                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3460         }
3461 }
3462
3463 static int bnx2x_stats_comp(struct bnx2x *bp)
3464 {
3465         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3466         int cnt = 10;
3467
3468         might_sleep();
3469         while (*stats_comp != DMAE_COMP_VAL) {
3470                 if (!cnt) {
3471                         BNX2X_ERR("timeout waiting for stats to finish\n");
3472                         break;
3473                 }
3474                 cnt--;
3475                 msleep(1);
3476         }
3477         return 1;
3478 }
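/*
 * The loop above polls *stats_comp at most 10 times with a 1 ms sleep
 * between tries, so a DMAE completion is waited for roughly 10 ms before
 * the timeout is logged; note the function returns 1 whether or not the
 * completion actually arrived.
 */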
3479
3480 /*
3481  * Statistics service functions
3482  */
3483
3484 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3485 {
3486         struct dmae_command *dmae;
3487         u32 opcode;
3488         int loader_idx = PMF_DMAE_C(bp);
3489         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3490
3491         /* sanity */
3492         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3493                 BNX2X_ERR("BUG!\n");
3494                 return;
3495         }
3496
3497         bp->executer_idx = 0;
3498
3499         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3500                   DMAE_CMD_C_ENABLE |
3501                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3502 #ifdef __BIG_ENDIAN
3503                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3504 #else
3505                   DMAE_CMD_ENDIANITY_DW_SWAP |
3506 #endif
3507                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3508                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3509
3510         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3511         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3512         dmae->src_addr_lo = bp->port.port_stx >> 2;
3513         dmae->src_addr_hi = 0;
3514         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3515         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3516         dmae->len = DMAE_LEN32_RD_MAX;
3517         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3518         dmae->comp_addr_hi = 0;
3519         dmae->comp_val = 1;
3520
3521         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3522         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3523         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3524         dmae->src_addr_hi = 0;
3525         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3526                                    DMAE_LEN32_RD_MAX * 4);
3527         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3528                                    DMAE_LEN32_RD_MAX * 4);
3529         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3530         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3531         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3532         dmae->comp_val = DMAE_COMP_VAL;
3533
3534         *stats_comp = 0;
3535         bnx2x_hw_stats_post(bp);
3536         bnx2x_stats_comp(bp);
3537 }
3538
3539 static void bnx2x_port_stats_init(struct bnx2x *bp)
3540 {
3541         struct dmae_command *dmae;
3542         int port = BP_PORT(bp);
3543         int vn = BP_E1HVN(bp);
3544         u32 opcode;
3545         int loader_idx = PMF_DMAE_C(bp);
3546         u32 mac_addr;
3547         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3548
3549         /* sanity */
3550         if (!bp->link_vars.link_up || !bp->port.pmf) {
3551                 BNX2X_ERR("BUG!\n");
3552                 return;
3553         }
3554
3555         bp->executer_idx = 0;
3556
3557         /* MCP */
3558         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3559                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3560                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3561 #ifdef __BIG_ENDIAN
3562                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3563 #else
3564                   DMAE_CMD_ENDIANITY_DW_SWAP |
3565 #endif
3566                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3567                   (vn << DMAE_CMD_E1HVN_SHIFT));
3568
3569         if (bp->port.port_stx) {
3570
3571                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3572                 dmae->opcode = opcode;
3573                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3574                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3575                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3576                 dmae->dst_addr_hi = 0;
3577                 dmae->len = sizeof(struct host_port_stats) >> 2;
3578                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3579                 dmae->comp_addr_hi = 0;
3580                 dmae->comp_val = 1;
3581         }
3582
3583         if (bp->func_stx) {
3584
3585                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3586                 dmae->opcode = opcode;
3587                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3588                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3589                 dmae->dst_addr_lo = bp->func_stx >> 2;
3590                 dmae->dst_addr_hi = 0;
3591                 dmae->len = sizeof(struct host_func_stats) >> 2;
3592                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3593                 dmae->comp_addr_hi = 0;
3594                 dmae->comp_val = 1;
3595         }
3596
3597         /* MAC */
3598         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3599                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3600                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3601 #ifdef __BIG_ENDIAN
3602                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3603 #else
3604                   DMAE_CMD_ENDIANITY_DW_SWAP |
3605 #endif
3606                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3607                   (vn << DMAE_CMD_E1HVN_SHIFT));
3608
3609         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3610
3611                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3612                                    NIG_REG_INGRESS_BMAC0_MEM);
3613
3614                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3615                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3616                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3617                 dmae->opcode = opcode;
3618                 dmae->src_addr_lo = (mac_addr +
3619                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3620                 dmae->src_addr_hi = 0;
3621                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3622                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3623                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3624                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3625                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3626                 dmae->comp_addr_hi = 0;
3627                 dmae->comp_val = 1;
3628
3629                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3630                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3631                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3632                 dmae->opcode = opcode;
3633                 dmae->src_addr_lo = (mac_addr +
3634                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3635                 dmae->src_addr_hi = 0;
3636                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3637                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3638                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3639                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3640                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3641                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3642                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3643                 dmae->comp_addr_hi = 0;
3644                 dmae->comp_val = 1;
3645
3646         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3647
3648                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3649
3650                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3651                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3652                 dmae->opcode = opcode;
3653                 dmae->src_addr_lo = (mac_addr +
3654                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3655                 dmae->src_addr_hi = 0;
3656                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3657                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3658                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3659                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3660                 dmae->comp_addr_hi = 0;
3661                 dmae->comp_val = 1;
3662
3663                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3664                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3665                 dmae->opcode = opcode;
3666                 dmae->src_addr_lo = (mac_addr +
3667                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3668                 dmae->src_addr_hi = 0;
3669                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3670                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3671                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3672                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3673                 dmae->len = 1;
3674                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3675                 dmae->comp_addr_hi = 0;
3676                 dmae->comp_val = 1;
3677
3678                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3679                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3680                 dmae->opcode = opcode;
3681                 dmae->src_addr_lo = (mac_addr +
3682                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3683                 dmae->src_addr_hi = 0;
3684                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3685                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3686                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3687                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3688                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3689                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3690                 dmae->comp_addr_hi = 0;
3691                 dmae->comp_val = 1;
3692         }
3693
3694         /* NIG */
3695         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3696         dmae->opcode = opcode;
3697         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3698                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3699         dmae->src_addr_hi = 0;
3700         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3701         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3702         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3703         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3704         dmae->comp_addr_hi = 0;
3705         dmae->comp_val = 1;
3706
3707         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3708         dmae->opcode = opcode;
3709         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3710                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3711         dmae->src_addr_hi = 0;
3712         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3713                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3714         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3715                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3716         dmae->len = (2*sizeof(u32)) >> 2;
3717         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3718         dmae->comp_addr_hi = 0;
3719         dmae->comp_val = 1;
3720
3721         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3722         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3723                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3724                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3725 #ifdef __BIG_ENDIAN
3726                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3727 #else
3728                         DMAE_CMD_ENDIANITY_DW_SWAP |
3729 #endif
3730                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3731                         (vn << DMAE_CMD_E1HVN_SHIFT));
3732         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3733                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3734         dmae->src_addr_hi = 0;
3735         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3736                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3737         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3738                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3739         dmae->len = (2*sizeof(u32)) >> 2;
3740         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3741         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3742         dmae->comp_val = DMAE_COMP_VAL;
3743
3744         *stats_comp = 0;
3745 }
3746
3747 static void bnx2x_func_stats_init(struct bnx2x *bp)
3748 {
3749         struct dmae_command *dmae = &bp->stats_dmae;
3750         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3751
3752         /* sanity */
3753         if (!bp->func_stx) {
3754                 BNX2X_ERR("BUG!\n");
3755                 return;
3756         }
3757
3758         bp->executer_idx = 0;
3759         memset(dmae, 0, sizeof(struct dmae_command));
3760
3761         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3762                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3763                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3764 #ifdef __BIG_ENDIAN
3765                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3766 #else
3767                         DMAE_CMD_ENDIANITY_DW_SWAP |
3768 #endif
3769                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3770                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3771         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3772         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3773         dmae->dst_addr_lo = bp->func_stx >> 2;
3774         dmae->dst_addr_hi = 0;
3775         dmae->len = sizeof(struct host_func_stats) >> 2;
3776         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3777         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3778         dmae->comp_val = DMAE_COMP_VAL;
3779
3780         *stats_comp = 0;
3781 }
3782
3783 static void bnx2x_stats_start(struct bnx2x *bp)
3784 {
3785         if (bp->port.pmf)
3786                 bnx2x_port_stats_init(bp);
3787
3788         else if (bp->func_stx)
3789                 bnx2x_func_stats_init(bp);
3790
3791         bnx2x_hw_stats_post(bp);
3792         bnx2x_storm_stats_post(bp);
3793 }
3794
3795 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3796 {
3797         bnx2x_stats_comp(bp);
3798         bnx2x_stats_pmf_update(bp);
3799         bnx2x_stats_start(bp);
3800 }
3801
3802 static void bnx2x_stats_restart(struct bnx2x *bp)
3803 {
3804         bnx2x_stats_comp(bp);
3805         bnx2x_stats_start(bp);
3806 }
3807
3808 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3809 {
3810         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3811         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3812         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3813         struct {
3814                 u32 lo;
3815                 u32 hi;
3816         } diff;
3817
3818         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3819         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3820         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3821         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3822         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3823         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3824         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3825         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3826         UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3827         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3828         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3829         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3830         UPDATE_STAT64(tx_stat_gt127,
3831                                 tx_stat_etherstatspkts65octetsto127octets);
3832         UPDATE_STAT64(tx_stat_gt255,
3833                                 tx_stat_etherstatspkts128octetsto255octets);
3834         UPDATE_STAT64(tx_stat_gt511,
3835                                 tx_stat_etherstatspkts256octetsto511octets);
3836         UPDATE_STAT64(tx_stat_gt1023,
3837                                 tx_stat_etherstatspkts512octetsto1023octets);
3838         UPDATE_STAT64(tx_stat_gt1518,
3839                                 tx_stat_etherstatspkts1024octetsto1522octets);
3840         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3841         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3842         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3843         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3844         UPDATE_STAT64(tx_stat_gterr,
3845                                 tx_stat_dot3statsinternalmactransmiterrors);
3846         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3847
3848         estats->pause_frames_received_hi =
3849                                 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3850         estats->pause_frames_received_lo =
3851                                 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3852
3853         estats->pause_frames_sent_hi =
3854                                 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3855         estats->pause_frames_sent_lo =
3856                                 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3857 }
3858
3859 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3860 {
3861         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3862         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3863         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3864
3865         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3866         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3867         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3868         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3869         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3870         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3871         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3872         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3873         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3874         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3875         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3876         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3877         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3878         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3879         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3880         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3881         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3882         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3883         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3884         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3885         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3886         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3887         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3888         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3889         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3890         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3891         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3892         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3893         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3894         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3895         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3896
3897         estats->pause_frames_received_hi =
3898                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3899         estats->pause_frames_received_lo =
3900                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3901         ADD_64(estats->pause_frames_received_hi,
3902                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3903                estats->pause_frames_received_lo,
3904                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3905
3906         estats->pause_frames_sent_hi =
3907                         pstats->mac_stx[1].tx_stat_outxonsent_hi;
3908         estats->pause_frames_sent_lo =
3909                         pstats->mac_stx[1].tx_stat_outxonsent_lo;
3910         ADD_64(estats->pause_frames_sent_hi,
3911                pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3912                estats->pause_frames_sent_lo,
3913                pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3914 }
3915
3916 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3917 {
3918         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3919         struct nig_stats *old = &(bp->port.old_nig_stats);
3920         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3921         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3922         struct {
3923                 u32 lo;
3924                 u32 hi;
3925         } diff;
3926         u32 nig_timer_max;
3927
3928         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3929                 bnx2x_bmac_stats_update(bp);
3930
3931         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3932                 bnx2x_emac_stats_update(bp);
3933
3934         else { /* unreached */
3935                 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3936                 return -1;
3937         }
3938
3939         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3940                       new->brb_discard - old->brb_discard);
3941         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3942                       new->brb_truncate - old->brb_truncate);
3943
3944         UPDATE_STAT64_NIG(egress_mac_pkt0,
3945                                         etherstatspkts1024octetsto1522octets);
3946         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3947
3948         memcpy(old, new, sizeof(struct nig_stats));
3949
3950         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3951                sizeof(struct mac_stx));
3952         estats->brb_drop_hi = pstats->brb_drop_hi;
3953         estats->brb_drop_lo = pstats->brb_drop_lo;
3954
3955         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3956
3957         nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3958         if (nig_timer_max != estats->nig_timer_max) {
3959                 estats->nig_timer_max = nig_timer_max;
3960                 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3961         }
3962
3963         return 0;
3964 }
3965
3966 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3967 {
3968         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3969         struct tstorm_per_port_stats *tport =
3970                                         &stats->tstorm_common.port_statistics;
3971         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3972         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3973         int i;
3974
3975         memset(&(fstats->total_bytes_received_hi), 0,
3976                sizeof(struct host_func_stats) - 2*sizeof(u32));
3977         estats->error_bytes_received_hi = 0;
3978         estats->error_bytes_received_lo = 0;
3979         estats->etherstatsoverrsizepkts_hi = 0;
3980         estats->etherstatsoverrsizepkts_lo = 0;
3981         estats->no_buff_discard_hi = 0;
3982         estats->no_buff_discard_lo = 0;
3983
3984         for_each_rx_queue(bp, i) {
3985                 struct bnx2x_fastpath *fp = &bp->fp[i];
3986                 int cl_id = fp->cl_id;
3987                 struct tstorm_per_client_stats *tclient =
3988                                 &stats->tstorm_common.client_statistics[cl_id];
3989                 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3990                 struct ustorm_per_client_stats *uclient =
3991                                 &stats->ustorm_common.client_statistics[cl_id];
3992                 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3993                 struct xstorm_per_client_stats *xclient =
3994                                 &stats->xstorm_common.client_statistics[cl_id];
3995                 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3996                 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3997                 u32 diff;
3998
3999                 /* are storm stats valid? */
4000                 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
4001                                                         bp->stats_counter) {
4002                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4003                            "  xstorm counter (%d) != stats_counter (%d)\n",
4004                            i, xclient->stats_counter, bp->stats_counter);
4005                         return -1;
4006                 }
4007                 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
4008                                                         bp->stats_counter) {
4009                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4010                            "  tstorm counter (%d) != stats_counter (%d)\n",
4011                            i, tclient->stats_counter, bp->stats_counter);
4012                         return -2;
4013                 }
4014                 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4015                                                         bp->stats_counter) {
4016                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4017                            "  ustorm counter (%d) != stats_counter (%d)\n",
4018                            i, uclient->stats_counter, bp->stats_counter);
4019                         return -4;
4020                 }
4021
4022                 qstats->total_bytes_received_hi =
4023                         le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4024                 qstats->total_bytes_received_lo =
4025                         le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4026
4027                 ADD_64(qstats->total_bytes_received_hi,
4028                        le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4029                        qstats->total_bytes_received_lo,
4030                        le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4031
4032                 ADD_64(qstats->total_bytes_received_hi,
4033                        le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4034                        qstats->total_bytes_received_lo,
4035                        le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4036
4037                 qstats->valid_bytes_received_hi =
4038                                         qstats->total_bytes_received_hi;
4039                 qstats->valid_bytes_received_lo =
4040                                         qstats->total_bytes_received_lo;
4041
4042                 qstats->error_bytes_received_hi =
4043                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
4044                 qstats->error_bytes_received_lo =
4045                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
4046
4047                 ADD_64(qstats->total_bytes_received_hi,
4048                        qstats->error_bytes_received_hi,
4049                        qstats->total_bytes_received_lo,
4050                        qstats->error_bytes_received_lo);
4051
4052                 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4053                                         total_unicast_packets_received);
4054                 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4055                                         total_multicast_packets_received);
4056                 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4057                                         total_broadcast_packets_received);
4058                 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4059                                         etherstatsoverrsizepkts);
4060                 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4061
4062                 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4063                                         total_unicast_packets_received);
4064                 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4065                                         total_multicast_packets_received);
4066                 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4067                                         total_broadcast_packets_received);
4068                 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4069                 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4070                 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4071
4072                 qstats->total_bytes_transmitted_hi =
4073                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4074                 qstats->total_bytes_transmitted_lo =
4075                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4076
4077                 ADD_64(qstats->total_bytes_transmitted_hi,
4078                        le32_to_cpu(xclient->multicast_bytes_sent.hi),
4079                        qstats->total_bytes_transmitted_lo,
4080                        le32_to_cpu(xclient->multicast_bytes_sent.lo));
4081
4082                 ADD_64(qstats->total_bytes_transmitted_hi,
4083                        le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4084                        qstats->total_bytes_transmitted_lo,
4085                        le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4086
4087                 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4088                                         total_unicast_packets_transmitted);
4089                 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4090                                         total_multicast_packets_transmitted);
4091                 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4092                                         total_broadcast_packets_transmitted);
4093
4094                 old_tclient->checksum_discard = tclient->checksum_discard;
4095                 old_tclient->ttl0_discard = tclient->ttl0_discard;
4096
4097                 ADD_64(fstats->total_bytes_received_hi,
4098                        qstats->total_bytes_received_hi,
4099                        fstats->total_bytes_received_lo,
4100                        qstats->total_bytes_received_lo);
4101                 ADD_64(fstats->total_bytes_transmitted_hi,
4102                        qstats->total_bytes_transmitted_hi,
4103                        fstats->total_bytes_transmitted_lo,
4104                        qstats->total_bytes_transmitted_lo);
4105                 ADD_64(fstats->total_unicast_packets_received_hi,
4106                        qstats->total_unicast_packets_received_hi,
4107                        fstats->total_unicast_packets_received_lo,
4108                        qstats->total_unicast_packets_received_lo);
4109                 ADD_64(fstats->total_multicast_packets_received_hi,
4110                        qstats->total_multicast_packets_received_hi,
4111                        fstats->total_multicast_packets_received_lo,
4112                        qstats->total_multicast_packets_received_lo);
4113                 ADD_64(fstats->total_broadcast_packets_received_hi,
4114                        qstats->total_broadcast_packets_received_hi,
4115                        fstats->total_broadcast_packets_received_lo,
4116                        qstats->total_broadcast_packets_received_lo);
4117                 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4118                        qstats->total_unicast_packets_transmitted_hi,
4119                        fstats->total_unicast_packets_transmitted_lo,
4120                        qstats->total_unicast_packets_transmitted_lo);
4121                 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4122                        qstats->total_multicast_packets_transmitted_hi,
4123                        fstats->total_multicast_packets_transmitted_lo,
4124                        qstats->total_multicast_packets_transmitted_lo);
4125                 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4126                        qstats->total_broadcast_packets_transmitted_hi,
4127                        fstats->total_broadcast_packets_transmitted_lo,
4128                        qstats->total_broadcast_packets_transmitted_lo);
4129                 ADD_64(fstats->valid_bytes_received_hi,
4130                        qstats->valid_bytes_received_hi,
4131                        fstats->valid_bytes_received_lo,
4132                        qstats->valid_bytes_received_lo);
4133
4134                 ADD_64(estats->error_bytes_received_hi,
4135                        qstats->error_bytes_received_hi,
4136                        estats->error_bytes_received_lo,
4137                        qstats->error_bytes_received_lo);
4138                 ADD_64(estats->etherstatsoverrsizepkts_hi,
4139                        qstats->etherstatsoverrsizepkts_hi,
4140                        estats->etherstatsoverrsizepkts_lo,
4141                        qstats->etherstatsoverrsizepkts_lo);
4142                 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4143                        estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4144         }
4145
4146         ADD_64(fstats->total_bytes_received_hi,
4147                estats->rx_stat_ifhcinbadoctets_hi,
4148                fstats->total_bytes_received_lo,
4149                estats->rx_stat_ifhcinbadoctets_lo);
4150
4151         memcpy(estats, &(fstats->total_bytes_received_hi),
4152                sizeof(struct host_func_stats) - 2*sizeof(u32));
4153
4154         ADD_64(estats->etherstatsoverrsizepkts_hi,
4155                estats->rx_stat_dot3statsframestoolong_hi,
4156                estats->etherstatsoverrsizepkts_lo,
4157                estats->rx_stat_dot3statsframestoolong_lo);
4158         ADD_64(estats->error_bytes_received_hi,
4159                estats->rx_stat_ifhcinbadoctets_hi,
4160                estats->error_bytes_received_lo,
4161                estats->rx_stat_ifhcinbadoctets_lo);
4162
4163         if (bp->port.pmf) {
4164                 estats->mac_filter_discard =
4165                                 le32_to_cpu(tport->mac_filter_discard);
4166                 estats->xxoverflow_discard =
4167                                 le32_to_cpu(tport->xxoverflow_discard);
4168                 estats->brb_truncate_discard =
4169                                 le32_to_cpu(tport->brb_truncate_discard);
4170                 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4171         }
4172
4173         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4174
4175         bp->stats_pending = 0;
4176
4177         return 0;
4178 }
4179
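/* Fold the 64-bit (hi/lo) firmware counters in bp->eth_stats into the
 * generic struct net_device_stats that the networking core exposes.
 * The various firmware discard counters are summed into the aggregate
 * rx_errors and tx_errors fields.
 */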
4180 static void bnx2x_net_stats_update(struct bnx2x *bp)
4181 {
4182         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4183         struct net_device_stats *nstats = &bp->dev->stats;
4184         int i;
4185
4186         nstats->rx_packets =
4187                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4188                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4189                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4190
4191         nstats->tx_packets =
4192                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4193                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4194                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4195
4196         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4197
4198         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4199
4200         nstats->rx_dropped = estats->mac_discard;
4201         for_each_rx_queue(bp, i)
4202                 nstats->rx_dropped +=
4203                         le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4204
4205         nstats->tx_dropped = 0;
4206
4207         nstats->multicast =
4208                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4209
4210         nstats->collisions =
4211                 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4212
4213         nstats->rx_length_errors =
4214                 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4215                 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4216         nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4217                                  bnx2x_hilo(&estats->brb_truncate_hi);
4218         nstats->rx_crc_errors =
4219                 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4220         nstats->rx_frame_errors =
4221                 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4222         nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4223         nstats->rx_missed_errors = estats->xxoverflow_discard;
4224
4225         nstats->rx_errors = nstats->rx_length_errors +
4226                             nstats->rx_over_errors +
4227                             nstats->rx_crc_errors +
4228                             nstats->rx_frame_errors +
4229                             nstats->rx_fifo_errors +
4230                             nstats->rx_missed_errors;
4231
4232         nstats->tx_aborted_errors =
4233                 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4234                 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4235         nstats->tx_carrier_errors =
4236                 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4237         nstats->tx_fifo_errors = 0;
4238         nstats->tx_heartbeat_errors = 0;
4239         nstats->tx_window_errors = 0;
4240
4241         nstats->tx_errors = nstats->tx_aborted_errors +
4242                             nstats->tx_carrier_errors +
4243             bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4244 }
4245
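/* Sum the driver-maintained per-queue counters (Xoff events, discarded
 * packets, failed skb allocations, checksum errors) into bp->eth_stats.
 */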
4246 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4247 {
4248         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4249         int i;
4250
4251         estats->driver_xoff = 0;
4252         estats->rx_err_discard_pkt = 0;
4253         estats->rx_skb_alloc_failed = 0;
4254         estats->hw_csum_err = 0;
4255         for_each_rx_queue(bp, i) {
4256                 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4257
4258                 estats->driver_xoff += qstats->driver_xoff;
4259                 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4260                 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4261                 estats->hw_csum_err += qstats->hw_csum_err;
4262         }
4263 }
4264
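/* Periodic statistics update: runs only after the previous DMAE transfer
 * has completed (stats_comp reached DMAE_COMP_VAL).  Pulls the hardware
 * port statistics (PMF only) and the per-storm statistics, refreshes the
 * netdev and driver counters, optionally dumps a debug snapshot, and
 * posts the next hardware/storm statistics requests.
 */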
4265 static void bnx2x_stats_update(struct bnx2x *bp)
4266 {
4267         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4268
4269         if (*stats_comp != DMAE_COMP_VAL)
4270                 return;
4271
4272         if (bp->port.pmf)
4273                 bnx2x_hw_stats_update(bp);
4274
4275         if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4276                 BNX2X_ERR("storm stats were not updated 3 times in a row\n");
4277                 bnx2x_panic();
4278                 return;
4279         }
4280
4281         bnx2x_net_stats_update(bp);
4282         bnx2x_drv_stats_update(bp);
4283
4284         if (bp->msglevel & NETIF_MSG_TIMER) {
4285                 struct bnx2x_fastpath *fp0_rx = bp->fp;
4286                 struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
4287                 struct tstorm_per_client_stats *old_tclient =
4288                                                         &bp->fp->old_tclient;
4289                 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4290                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4291                 struct net_device_stats *nstats = &bp->dev->stats;
4292                 int i;
4293
4294                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4295                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
4296                                   "  tx pkt (%lx)\n",
4297                        bnx2x_tx_avail(fp0_tx),
4298                        le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4299                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
4300                                   "  rx pkt (%lx)\n",
4301                        (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4302                              fp0_rx->rx_comp_cons),
4303                        le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4304                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
4305                                   "brb truncate %u\n",
4306                        (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4307                        qstats->driver_xoff,
4308                        estats->brb_drop_lo, estats->brb_truncate_lo);
4309                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
4310                         "packets_too_big_discard %lu  no_buff_discard %lu  "
4311                         "mac_discard %u  mac_filter_discard %u  "
4312                         "xxoverflow_discard %u  brb_truncate_discard %u  "
4313                         "ttl0_discard %u\n",
4314                        le32_to_cpu(old_tclient->checksum_discard),
4315                        bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4316                        bnx2x_hilo(&qstats->no_buff_discard_hi),
4317                        estats->mac_discard, estats->mac_filter_discard,
4318                        estats->xxoverflow_discard, estats->brb_truncate_discard,
4319                        le32_to_cpu(old_tclient->ttl0_discard));
4320
4321                 for_each_queue(bp, i) {
4322                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4323                                bnx2x_fp(bp, i, tx_pkt),
4324                                bnx2x_fp(bp, i, rx_pkt),
4325                                bnx2x_fp(bp, i, rx_calls));
4326                 }
4327         }
4328
4329         bnx2x_hw_stats_post(bp);
4330         bnx2x_storm_stats_post(bp);
4331 }
4332
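/* Build the DMAE commands that write the final port and function
 * statistics from host memory back to their port_stx/func_stx locations
 * on the chip when statistics collection is being stopped.
 */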
4333 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4334 {
4335         struct dmae_command *dmae;
4336         u32 opcode;
4337         int loader_idx = PMF_DMAE_C(bp);
4338         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4339
4340         bp->executer_idx = 0;
4341
4342         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4343                   DMAE_CMD_C_ENABLE |
4344                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4345 #ifdef __BIG_ENDIAN
4346                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
4347 #else
4348                   DMAE_CMD_ENDIANITY_DW_SWAP |
4349 #endif
4350                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4351                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4352
4353         if (bp->port.port_stx) {
4354
4355                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4356                 if (bp->func_stx)
4357                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4358                 else
4359                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4360                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4361                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4362                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4363                 dmae->dst_addr_hi = 0;
4364                 dmae->len = sizeof(struct host_port_stats) >> 2;
4365                 if (bp->func_stx) {
4366                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4367                         dmae->comp_addr_hi = 0;
4368                         dmae->comp_val = 1;
4369                 } else {
4370                         dmae->comp_addr_lo =
4371                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4372                         dmae->comp_addr_hi =
4373                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4374                         dmae->comp_val = DMAE_COMP_VAL;
4375
4376                         *stats_comp = 0;
4377                 }
4378         }
4379
4380         if (bp->func_stx) {
4381
4382                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4383                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4384                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4385                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4386                 dmae->dst_addr_lo = bp->func_stx >> 2;
4387                 dmae->dst_addr_hi = 0;
4388                 dmae->len = sizeof(struct host_func_stats) >> 2;
4389                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4390                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4391                 dmae->comp_val = DMAE_COMP_VAL;
4392
4393                 *stats_comp = 0;
4394         }
4395 }
4396
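/* Stop statistics collection: wait for any pending DMAE to complete, do a
 * final hardware/storm update, and (on the PMF) post the DMAE commands
 * that write the last port/function stats back to the chip.
 */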
4397 static void bnx2x_stats_stop(struct bnx2x *bp)
4398 {
4399         int update = 0;
4400
4401         bnx2x_stats_comp(bp);
4402
4403         if (bp->port.pmf)
4404                 update = (bnx2x_hw_stats_update(bp) == 0);
4405
4406         update |= (bnx2x_storm_stats_update(bp) == 0);
4407
4408         if (update) {
4409                 bnx2x_net_stats_update(bp);
4410
4411                 if (bp->port.pmf)
4412                         bnx2x_port_stats_stop(bp);
4413
4414                 bnx2x_hw_stats_post(bp);
4415                 bnx2x_stats_comp(bp);
4416         }
4417 }
4418
4419 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4420 {
4421 }
4422
4423 static const struct {
4424         void (*action)(struct bnx2x *bp);
4425         enum bnx2x_stats_state next_state;
4426 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4427 /* state        event   */
4428 {
4429 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4430 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
4431 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4432 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4433 },
4434 {
4435 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
4436 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
4437 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
4438 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
4439 }
4440 };
4441
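/* Statistics state machine entry point: look up the handler for the
 * current state and event in bnx2x_stats_stm[][], run it and move to the
 * next state.
 */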
4442 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4443 {
4444         enum bnx2x_stats_state state = bp->stats_state;
4445
4446         bnx2x_stats_stm[state][event].action(bp);
4447         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4448
4449         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4450                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4451                    state, event, bp->stats_state);
4452 }
4453
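/* Per-device timer, re-armed every bp->current_interval jiffies.  In poll
 * mode it also services queue 0's TX/RX work; it maintains the driver/MCP
 * heartbeat (drv_pulse vs. mcp_pulse) and kicks a statistics update when
 * the device state allows it.
 */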
4454 static void bnx2x_timer(unsigned long data)
4455 {
4456         struct bnx2x *bp = (struct bnx2x *) data;
4457
4458         if (!netif_running(bp->dev))
4459                 return;
4460
4461         if (atomic_read(&bp->intr_sem) != 0)
4462                 goto timer_restart;
4463
4464         if (poll) {
4465                 struct bnx2x_fastpath *fp = &bp->fp[0];
4466                 int rc;
4467
4468                 bnx2x_tx_int(fp);
4469                 rc = bnx2x_rx_int(fp, 1000);
4470         }
4471
4472         if (!BP_NOMCP(bp)) {
4473                 int func = BP_FUNC(bp);
4474                 u32 drv_pulse;
4475                 u32 mcp_pulse;
4476
4477                 ++bp->fw_drv_pulse_wr_seq;
4478                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4479                 /* TBD - add SYSTEM_TIME */
4480                 drv_pulse = bp->fw_drv_pulse_wr_seq;
4481                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4482
4483                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4484                              MCP_PULSE_SEQ_MASK);
4485                 /* The delta between driver pulse and mcp response
4486                  * should be 1 (before mcp response) or 0 (after mcp response)
4487                  */
4488                 if ((drv_pulse != mcp_pulse) &&
4489                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4490                         /* someone lost a heartbeat... */
4491                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4492                                   drv_pulse, mcp_pulse);
4493                 }
4494         }
4495
4496         if ((bp->state == BNX2X_STATE_OPEN) ||
4497             (bp->state == BNX2X_STATE_DISABLED))
4498                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4499
4500 timer_restart:
4501         mod_timer(&bp->timer, jiffies + bp->current_interval);
4502 }
4503
4504 /* end of Statistics */
4505
4506 /* nic init */
4507
4508 /*
4509  * nic init service functions
4510  */
4511
4512 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4513 {
4514         int port = BP_PORT(bp);
4515
4516         /* "CSTORM" */
4517         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4518                         CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4519                         CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4520         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4521                         CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4522                         CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
4523 }
4524
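/* Initialize a fastpath status block: program the host addresses of its
 * USTORM and CSTORM sections, bind them to this function, disable host
 * coalescing on every index and enable the IGU interrupt.
 */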
4525 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4526                           dma_addr_t mapping, int sb_id)
4527 {
4528         int port = BP_PORT(bp);
4529         int func = BP_FUNC(bp);
4530         int index;
4531         u64 section;
4532
4533         /* USTORM */
4534         section = ((u64)mapping) + offsetof(struct host_status_block,
4535                                             u_status_block);
4536         sb->u_status_block.status_block_id = sb_id;
4537
4538         REG_WR(bp, BAR_CSTRORM_INTMEM +
4539                CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4540         REG_WR(bp, BAR_CSTRORM_INTMEM +
4541                ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
4542                U64_HI(section));
4543         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4544                 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
4545
4546         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4547                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4548                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
4549
4550         /* CSTORM */
4551         section = ((u64)mapping) + offsetof(struct host_status_block,
4552                                             c_status_block);
4553         sb->c_status_block.status_block_id = sb_id;
4554
4555         REG_WR(bp, BAR_CSTRORM_INTMEM +
4556                CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
4557         REG_WR(bp, BAR_CSTRORM_INTMEM +
4558                ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
4559                U64_HI(section));
4560         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4561                 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
4562
4563         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4564                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4565                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
4566
4567         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4568 }
4569
4570 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4571 {
4572         int func = BP_FUNC(bp);
4573
4574         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
4575                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4576                         sizeof(struct tstorm_def_status_block)/4);
4577         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4578                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4579                         sizeof(struct cstorm_def_status_block_u)/4);
4580         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4581                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4582                         sizeof(struct cstorm_def_status_block_c)/4);
4583         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
4584                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4585                         sizeof(struct xstorm_def_status_block)/4);
4586 }
4587
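/* Initialize the default (slowpath) status block: latch the four AEU
 * attention-group masks, program the attention message address, point
 * each storm (U/C/T/X) at its section of the block with host coalescing
 * disabled for every index, and enable the IGU interrupt.
 */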
4588 static void bnx2x_init_def_sb(struct bnx2x *bp,
4589                               struct host_def_status_block *def_sb,
4590                               dma_addr_t mapping, int sb_id)
4591 {
4592         int port = BP_PORT(bp);
4593         int func = BP_FUNC(bp);
4594         int index, val, reg_offset;
4595         u64 section;
4596
4597         /* ATTN */
4598         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4599                                             atten_status_block);
4600         def_sb->atten_status_block.status_block_id = sb_id;
4601
4602         bp->attn_state = 0;
4603
4604         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4605                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4606
4607         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4608                 bp->attn_group[index].sig[0] = REG_RD(bp,
4609                                                      reg_offset + 0x10*index);
4610                 bp->attn_group[index].sig[1] = REG_RD(bp,
4611                                                reg_offset + 0x4 + 0x10*index);
4612                 bp->attn_group[index].sig[2] = REG_RD(bp,
4613                                                reg_offset + 0x8 + 0x10*index);
4614                 bp->attn_group[index].sig[3] = REG_RD(bp,
4615                                                reg_offset + 0xc + 0x10*index);
4616         }
4617
4618         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4619                              HC_REG_ATTN_MSG0_ADDR_L);
4620
4621         REG_WR(bp, reg_offset, U64_LO(section));
4622         REG_WR(bp, reg_offset + 4, U64_HI(section));
4623
4624         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4625
4626         val = REG_RD(bp, reg_offset);
4627         val |= sb_id;
4628         REG_WR(bp, reg_offset, val);
4629
4630         /* USTORM */
4631         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4632                                             u_def_status_block);
4633         def_sb->u_def_status_block.status_block_id = sb_id;
4634
4635         REG_WR(bp, BAR_CSTRORM_INTMEM +
4636                CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4637         REG_WR(bp, BAR_CSTRORM_INTMEM +
4638                ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
4639                U64_HI(section));
4640         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4641                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
4642
4643         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4644                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4645                          CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
4646
4647         /* CSTORM */
4648         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4649                                             c_def_status_block);
4650         def_sb->c_def_status_block.status_block_id = sb_id;
4651
4652         REG_WR(bp, BAR_CSTRORM_INTMEM +
4653                CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
4654         REG_WR(bp, BAR_CSTRORM_INTMEM +
4655                ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
4656                U64_HI(section));
4657         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4658                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
4659
4660         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4661                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4662                          CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
4663
4664         /* TSTORM */
4665         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4666                                             t_def_status_block);
4667         def_sb->t_def_status_block.status_block_id = sb_id;
4668
4669         REG_WR(bp, BAR_TSTRORM_INTMEM +
4670                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4671         REG_WR(bp, BAR_TSTRORM_INTMEM +
4672                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4673                U64_HI(section));
4674         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4675                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4676
4677         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4678                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4679                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4680
4681         /* XSTORM */
4682         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4683                                             x_def_status_block);
4684         def_sb->x_def_status_block.status_block_id = sb_id;
4685
4686         REG_WR(bp, BAR_XSTRORM_INTMEM +
4687                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4688         REG_WR(bp, BAR_XSTRORM_INTMEM +
4689                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4690                U64_HI(section));
4691         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4692                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4693
4694         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4695                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4696                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4697
4698         bp->stats_pending = 0;
4699         bp->set_mac_pending = 0;
4700
4701         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4702 }
4703
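/* Program the interrupt-coalescing timeouts for every queue's RX and TX
 * completion indices, derived from bp->rx_ticks and bp->tx_ticks, and
 * disable host coalescing on an index when its value is zero.
 */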
4704 static void bnx2x_update_coalesce(struct bnx2x *bp)
4705 {
4706         int port = BP_PORT(bp);
4707         int i;
4708
4709         for_each_queue(bp, i) {
4710                 int sb_id = bp->fp[i].sb_id;
4711
4712                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4713                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4714                         CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4715                                                       U_SB_ETH_RX_CQ_INDEX),
4716                         bp->rx_ticks/12);
4717                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4718                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4719                                                        U_SB_ETH_RX_CQ_INDEX),
4720                          (bp->rx_ticks/12) ? 0 : 1);
4721
4722                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4723                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4724                         CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4725                                                       C_SB_ETH_TX_CQ_INDEX),
4726                         bp->tx_ticks/12);
4727                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4728                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4729                                                        C_SB_ETH_TX_CQ_INDEX),
4730                          (bp->tx_ticks/12) ? 0 : 1);
4731         }
4732 }
4733
4734 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4735                                        struct bnx2x_fastpath *fp, int last)
4736 {
4737         int i;
4738
4739         for (i = 0; i < last; i++) {
4740                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4741                 struct sk_buff *skb = rx_buf->skb;
4742
4743                 if (skb == NULL) {
4744                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4745                         continue;
4746                 }
4747
4748                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4749                         pci_unmap_single(bp->pdev,
4750                                          pci_unmap_addr(rx_buf, mapping),
4751                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4752
4753                 dev_kfree_skb(skb);
4754                 rx_buf->skb = NULL;
4755         }
4756 }
4757
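/* Set up all RX rings: size the receive buffers for the current MTU,
 * pre-allocate the TPA skb pool (when TPA is enabled), chain the
 * "next page" elements of the SGE, BD and CQE rings, fill the rings with
 * buffers and publish the initial producer values to the chip.
 */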
4758 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4759 {
4760         int func = BP_FUNC(bp);
4761         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4762                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4763         u16 ring_prod, cqe_ring_prod;
4764         int i, j;
4765
4766         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4767         DP(NETIF_MSG_IFUP,
4768            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4769
4770         if (bp->flags & TPA_ENABLE_FLAG) {
4771
4772                 for_each_rx_queue(bp, j) {
4773                         struct bnx2x_fastpath *fp = &bp->fp[j];
4774
4775                         for (i = 0; i < max_agg_queues; i++) {
4776                                 fp->tpa_pool[i].skb =
4777                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4778                                 if (!fp->tpa_pool[i].skb) {
4779                                         BNX2X_ERR("Failed to allocate TPA "
4780                                                   "skb pool for queue[%d] - "
4781                                                   "disabling TPA on this "
4782                                                   "queue!\n", j);
4783                                         bnx2x_free_tpa_pool(bp, fp, i);
4784                                         fp->disable_tpa = 1;
4785                                         break;
4786                                 }
4787                                 pci_unmap_addr_set(&fp->tpa_pool[i],
4788                                                    mapping, 0);
4790                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4791                         }
4792                 }
4793         }
4794
4795         for_each_rx_queue(bp, j) {
4796                 struct bnx2x_fastpath *fp = &bp->fp[j];
4797
4798                 fp->rx_bd_cons = 0;
4799                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4800                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4801
4802                 /* Mark queue as Rx */
4803                 fp->is_rx_queue = 1;
4804
4805                 /* "next page" elements initialization */
4806                 /* SGE ring */
4807                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4808                         struct eth_rx_sge *sge;
4809
4810                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4811                         sge->addr_hi =
4812                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4813                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4814                         sge->addr_lo =
4815                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4816                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4817                 }
4818
4819                 bnx2x_init_sge_ring_bit_mask(fp);
4820
4821                 /* RX BD ring */
4822                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4823                         struct eth_rx_bd *rx_bd;
4824
4825                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4826                         rx_bd->addr_hi =
4827                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4828                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4829                         rx_bd->addr_lo =
4830                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4831                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4832                 }
4833
4834                 /* CQ ring */
4835                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4836                         struct eth_rx_cqe_next_page *nextpg;
4837
4838                         nextpg = (struct eth_rx_cqe_next_page *)
4839                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4840                         nextpg->addr_hi =
4841                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4842                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4843                         nextpg->addr_lo =
4844                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4845                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4846                 }
4847
4848                 /* Allocate SGEs and initialize the ring elements */
4849                 for (i = 0, ring_prod = 0;
4850                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4851
4852                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4853                                 BNX2X_ERR("was only able to allocate "
4854                                           "%d rx sges\n", i);
4855                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4856                                 /* Cleanup already allocated elements */
4857                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4858                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4859                                 fp->disable_tpa = 1;
4860                                 ring_prod = 0;
4861                                 break;
4862                         }
4863                         ring_prod = NEXT_SGE_IDX(ring_prod);
4864                 }
4865                 fp->rx_sge_prod = ring_prod;
4866
4867                 /* Allocate BDs and initialize BD ring */
4868                 fp->rx_comp_cons = 0;
4869                 cqe_ring_prod = ring_prod = 0;
4870                 for (i = 0; i < bp->rx_ring_size; i++) {
4871                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4872                                 BNX2X_ERR("was only able to allocate "
4873                                           "%d rx skbs on queue[%d]\n", i, j);
4874                                 fp->eth_q_stats.rx_skb_alloc_failed++;
4875                                 break;
4876                         }
4877                         ring_prod = NEXT_RX_IDX(ring_prod);
4878                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4879                         WARN_ON(ring_prod <= i);
4880                 }
4881
4882                 fp->rx_bd_prod = ring_prod;
4883                 /* must not have more available CQEs than BDs */
4884                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4885                                        cqe_ring_prod);
4886                 fp->rx_pkt = fp->rx_calls = 0;
4887
4888                 /* Warning!
4889                  * this will generate an interrupt (to the TSTORM)
4890                  * must only be done after chip is initialized
4891                  */
4892                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4893                                      fp->rx_sge_prod);
4894                 if (j != 0)
4895                         continue;
4896
4897                 REG_WR(bp, BAR_USTRORM_INTMEM +
4898                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4899                        U64_LO(fp->rx_comp_mapping));
4900                 REG_WR(bp, BAR_USTRORM_INTMEM +
4901                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4902                        U64_HI(fp->rx_comp_mapping));
4903         }
4904 }
4905
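/* Set up all TX rings: chain the "next BD" page pointers and reset the
 * doorbell data and producer/consumer indices for every TX queue.
 */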
4906 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4907 {
4908         int i, j;
4909
4910         for_each_tx_queue(bp, j) {
4911                 struct bnx2x_fastpath *fp = &bp->fp[j];
4912
4913                 for (i = 1; i <= NUM_TX_RINGS; i++) {
4914                         struct eth_tx_next_bd *tx_next_bd =
4915                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
4916
4917                         tx_next_bd->addr_hi =
4918                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4919                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4920                         tx_next_bd->addr_lo =
4921                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4922                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4923                 }
4924
4925                 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
4926                 fp->tx_db.data.zero_fill1 = 0;
4927                 fp->tx_db.data.prod = 0;
4928
4929                 fp->tx_pkt_prod = 0;
4930                 fp->tx_pkt_cons = 0;
4931                 fp->tx_bd_prod = 0;
4932                 fp->tx_bd_cons = 0;
4933                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4934                 fp->tx_pkt = 0;
4935         }
4936 }
4937
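/* Initialize the slowpath (SPQ) ring and tell the XSTORM where the ring
 * lives and where its producer index starts.
 */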
4938 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4939 {
4940         int func = BP_FUNC(bp);
4941
4942         spin_lock_init(&bp->spq_lock);
4943
4944         bp->spq_left = MAX_SPQ_PENDING;
4945         bp->spq_prod_idx = 0;
4946         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4947         bp->spq_prod_bd = bp->spq;
4948         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4949
4950         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4951                U64_LO(bp->spq_mapping));
4952         REG_WR(bp,
4953                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4954                U64_HI(bp->spq_mapping));
4955
4956         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4957                bp->spq_prod_idx);
4958 }
4959
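/* Fill the per-connection ETH context for every queue: RX queues get the
 * USTORM client parameters (buffer sizes, BD/SGE page bases, optional TPA
 * settings) and the CDU reserved values, TX queues get the CSTORM status
 * block binding and the XSTORM BD page base and statistics settings.
 */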
4960 static void bnx2x_init_context(struct bnx2x *bp)
4961 {
4962         int i;
4963
4964         for_each_rx_queue(bp, i) {
4965                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4966                 struct bnx2x_fastpath *fp = &bp->fp[i];
4967                 u8 cl_id = fp->cl_id;
4968
4969                 context->ustorm_st_context.common.sb_index_numbers =
4970                                                 BNX2X_RX_SB_INDEX_NUM;
4971                 context->ustorm_st_context.common.clientId = cl_id;
4972                 context->ustorm_st_context.common.status_block_id = fp->sb_id;
4973                 context->ustorm_st_context.common.flags =
4974                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4975                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4976                 context->ustorm_st_context.common.statistics_counter_id =
4977                                                 cl_id;
4978                 context->ustorm_st_context.common.mc_alignment_log_size =
4979                                                 BNX2X_RX_ALIGN_SHIFT;
4980                 context->ustorm_st_context.common.bd_buff_size =
4981                                                 bp->rx_buf_size;
4982                 context->ustorm_st_context.common.bd_page_base_hi =
4983                                                 U64_HI(fp->rx_desc_mapping);
4984                 context->ustorm_st_context.common.bd_page_base_lo =
4985                                                 U64_LO(fp->rx_desc_mapping);
4986                 if (!fp->disable_tpa) {
4987                         context->ustorm_st_context.common.flags |=
4988                                 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
4989                         context->ustorm_st_context.common.sge_buff_size =
4990                                 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4991                                          (u32)0xffff);
4992                         context->ustorm_st_context.common.sge_page_base_hi =
4993                                                 U64_HI(fp->rx_sge_mapping);
4994                         context->ustorm_st_context.common.sge_page_base_lo =
4995                                                 U64_LO(fp->rx_sge_mapping);
4996
4997                         context->ustorm_st_context.common.max_sges_for_packet =
4998                                 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
4999                         context->ustorm_st_context.common.max_sges_for_packet =
5000                                 ((context->ustorm_st_context.common.
5001                                   max_sges_for_packet + PAGES_PER_SGE - 1) &
5002                                  (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5003                 }
5004
5005                 context->ustorm_ag_context.cdu_usage =
5006                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5007                                                CDU_REGION_NUMBER_UCM_AG,
5008                                                ETH_CONNECTION_TYPE);
5009
5010                 context->xstorm_ag_context.cdu_reserved =
5011                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5012                                                CDU_REGION_NUMBER_XCM_AG,
5013                                                ETH_CONNECTION_TYPE);
5014         }
5015
5016         for_each_tx_queue(bp, i) {
5017                 struct bnx2x_fastpath *fp = &bp->fp[i];
5018                 struct eth_context *context =
5019                         bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);
5020
5021                 context->cstorm_st_context.sb_index_number =
5022                                                 C_SB_ETH_TX_CQ_INDEX;
5023                 context->cstorm_st_context.status_block_id = fp->sb_id;
5024
5025                 context->xstorm_st_context.tx_bd_page_base_hi =
5026                                                 U64_HI(fp->tx_desc_mapping);
5027                 context->xstorm_st_context.tx_bd_page_base_lo =
5028                                                 U64_LO(fp->tx_desc_mapping);
5029                 context->xstorm_st_context.statistics_data = (fp->cl_id |
5030                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5031         }
5032 }
5033
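/* Populate the TSTORM RSS indirection table, spreading the entries
 * round-robin over the client IDs of the RX queues.  Skipped when RSS is
 * disabled.
 */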
5034 static void bnx2x_init_ind_table(struct bnx2x *bp)
5035 {
5036         int func = BP_FUNC(bp);
5037         int i;
5038
5039         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5040                 return;
5041
5042         DP(NETIF_MSG_IFUP,
5043            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
5044         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5045                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5046                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5047                         bp->fp->cl_id + (i % bp->num_rx_queues));
5048 }
5049
5050 static void bnx2x_set_client_config(struct bnx2x *bp)
5051 {
5052         struct tstorm_eth_client_config tstorm_client = {0};
5053         int port = BP_PORT(bp);
5054         int i;
5055
5056         tstorm_client.mtu = bp->dev->mtu;
5057         tstorm_client.config_flags =
5058                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5059                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5060 #ifdef BCM_VLAN
5061         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5062                 tstorm_client.config_flags |=
5063                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5064                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5065         }
5066 #endif
5067
5068         for_each_queue(bp, i) {
5069                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5070
5071                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5072                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5073                        ((u32 *)&tstorm_client)[0]);
5074                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5075                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5076                        ((u32 *)&tstorm_client)[1]);
5077         }
5078
5079         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5080            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5081 }
5082
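/* Translate the driver RX mode (none/normal/allmulti/promisc) into the
 * TSTORM MAC filter configuration and the NIG LLH BRB mask, then refresh
 * the per-client configuration unless RX is fully disabled.
 */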
5083 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5084 {
5085         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5086         int mode = bp->rx_mode;
5087         int mask = (1 << BP_L_ID(bp));
5088         int func = BP_FUNC(bp);
5089         int port = BP_PORT(bp);
5090         int i;
5091         /* All but management unicast packets should pass to the host as well */
5092         u32 llh_mask =
5093                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5094                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5095                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5096                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5097
5098         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
5099
5100         switch (mode) {
5101         case BNX2X_RX_MODE_NONE: /* no Rx */
5102                 tstorm_mac_filter.ucast_drop_all = mask;
5103                 tstorm_mac_filter.mcast_drop_all = mask;
5104                 tstorm_mac_filter.bcast_drop_all = mask;
5105                 break;
5106
5107         case BNX2X_RX_MODE_NORMAL:
5108                 tstorm_mac_filter.bcast_accept_all = mask;
5109                 break;
5110
5111         case BNX2X_RX_MODE_ALLMULTI:
5112                 tstorm_mac_filter.mcast_accept_all = mask;
5113                 tstorm_mac_filter.bcast_accept_all = mask;
5114                 break;
5115
5116         case BNX2X_RX_MODE_PROMISC:
5117                 tstorm_mac_filter.ucast_accept_all = mask;
5118                 tstorm_mac_filter.mcast_accept_all = mask;
5119                 tstorm_mac_filter.bcast_accept_all = mask;
5120                 /* pass management unicast packets as well */
5121                 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5122                 break;
5123
5124         default:
5125                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5126                 break;
5127         }
5128
5129         REG_WR(bp,
5130                (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5131                llh_mask);
5132
5133         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5134                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5135                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5136                        ((u32 *)&tstorm_mac_filter)[i]);
5137
5138 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5139                    ((u32 *)&tstorm_mac_filter)[i]); */
5140         }
5141
5142         if (mode != BNX2X_RX_MODE_NONE)
5143                 bnx2x_set_client_config(bp);
5144 }
5145
5146 static void bnx2x_init_internal_common(struct bnx2x *bp)
5147 {
5148         int i;
5149
5150         /* Zero this manually as its initialization is
5151            currently missing in the initTool */
5152         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5153                 REG_WR(bp, BAR_USTRORM_INTMEM +
5154                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
5155 }
5156
5157 static void bnx2x_init_internal_port(struct bnx2x *bp)
5158 {
5159         int port = BP_PORT(bp);
5160
5161         REG_WR(bp,
5162                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5163         REG_WR(bp,
5164                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5165         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5166         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5167 }
5168
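/* Per-function internal RAM initialization: RSS/TPA configuration, RX
 * mode, zeroing of the per-client statistics, the statistics query
 * addresses, E1H multi-function settings, per-client CQE page bases and
 * aggregation size, and (on E1H) the dropless flow-control thresholds.
 */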
5169 static void bnx2x_init_internal_func(struct bnx2x *bp)
5170 {
5171         struct tstorm_eth_function_common_config tstorm_config = {0};
5172         struct stats_indication_flags stats_flags = {0};
5173         int port = BP_PORT(bp);
5174         int func = BP_FUNC(bp);
5175         int i, j;
5176         u32 offset;
5177         u16 max_agg_size;
5178
5179         if (is_multi(bp)) {
5180                 tstorm_config.config_flags = MULTI_FLAGS(bp);
5181                 tstorm_config.rss_result_mask = MULTI_MASK;
5182         }
5183
5184         /* Enable TPA if needed */
5185         if (bp->flags & TPA_ENABLE_FLAG)
5186                 tstorm_config.config_flags |=
5187                         TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5188
5189         if (IS_E1HMF(bp))
5190                 tstorm_config.config_flags |=
5191                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5192
5193         tstorm_config.leading_client_id = BP_L_ID(bp);
5194
5195         REG_WR(bp, BAR_TSTRORM_INTMEM +
5196                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5197                (*(u32 *)&tstorm_config));
5198
5199         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5200         bnx2x_set_storm_rx_mode(bp);
5201
5202         for_each_queue(bp, i) {
5203                 u8 cl_id = bp->fp[i].cl_id;
5204
5205                 /* reset xstorm per client statistics */
5206                 offset = BAR_XSTRORM_INTMEM +
5207                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5208                 for (j = 0;
5209                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5210                         REG_WR(bp, offset + j*4, 0);
5211
5212                 /* reset tstorm per client statistics */
5213                 offset = BAR_TSTRORM_INTMEM +
5214                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5215                 for (j = 0;
5216                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5217                         REG_WR(bp, offset + j*4, 0);
5218
5219                 /* reset ustorm per client statistics */
5220                 offset = BAR_USTRORM_INTMEM +
5221                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5222                 for (j = 0;
5223                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5224                         REG_WR(bp, offset + j*4, 0);
5225         }
5226
5227         /* Init statistics related context */
5228         stats_flags.collect_eth = 1;
5229
5230         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5231                ((u32 *)&stats_flags)[0]);
5232         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5233                ((u32 *)&stats_flags)[1]);
5234
5235         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5236                ((u32 *)&stats_flags)[0]);
5237         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5238                ((u32 *)&stats_flags)[1]);
5239
5240         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5241                ((u32 *)&stats_flags)[0]);
5242         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5243                ((u32 *)&stats_flags)[1]);
5244
5245         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5246                ((u32 *)&stats_flags)[0]);
5247         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5248                ((u32 *)&stats_flags)[1]);
5249
5250         REG_WR(bp, BAR_XSTRORM_INTMEM +
5251                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5252                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5253         REG_WR(bp, BAR_XSTRORM_INTMEM +
5254                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5255                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5256
5257         REG_WR(bp, BAR_TSTRORM_INTMEM +
5258                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5259                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5260         REG_WR(bp, BAR_TSTRORM_INTMEM +
5261                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5262                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5263
5264         REG_WR(bp, BAR_USTRORM_INTMEM +
5265                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5266                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5267         REG_WR(bp, BAR_USTRORM_INTMEM +
5268                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5269                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5270
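        /* On E1H chips, tell the storms whether multi-function mode is
         * active and program this function's e1hov value (its outer VLAN
         * tag in MF mode).
         */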
5271         if (CHIP_IS_E1H(bp)) {
5272                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5273                         IS_E1HMF(bp));
5274                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5275                         IS_E1HMF(bp));
5276                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5277                         IS_E1HMF(bp));
5278                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5279                         IS_E1HMF(bp));
5280
5281                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5282                          bp->e1hov);
5283         }
5284
5285         /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5286         max_agg_size =
5287                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5288                           SGE_PAGE_SIZE * PAGES_PER_SGE),
5289                     (u32)0xffff);
5290         for_each_rx_queue(bp, i) {
5291                 struct bnx2x_fastpath *fp = &bp->fp[i];
5292
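                /* Point the USTORM at the first and next pages of this
                 * queue's Rx completion ring.
                 */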
5293                 REG_WR(bp, BAR_USTRORM_INTMEM +
5294                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5295                        U64_LO(fp->rx_comp_mapping));
5296                 REG_WR(bp, BAR_USTRORM_INTMEM +
5297                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5298                        U64_HI(fp->rx_comp_mapping));
5299
5300                 /* Next page */
5301                 REG_WR(bp, BAR_USTRORM_INTMEM +
5302                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5303                        U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5304                 REG_WR(bp, BAR_USTRORM_INTMEM +
5305                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5306                        U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5307
5308                 REG_WR16(bp, BAR_USTRORM_INTMEM +
5309                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5310                          max_agg_size);
5311         }
5312
5313         /* dropless flow control */
5314         if (CHIP_IS_E1H(bp)) {
5315                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5316
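                /* Low/high watermarks used by the FW for the dropless
                 * flow control pause decision on the BD, CQE and SGE
                 * rings; SGE thresholds remain 0 when TPA is disabled.
                 */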
5317                 rx_pause.bd_thr_low = 250;
5318                 rx_pause.cqe_thr_low = 250;
5319                 rx_pause.cos = 1;
5320                 rx_pause.sge_thr_low = 0;
5321                 rx_pause.bd_thr_high = 350;
5322                 rx_pause.cqe_thr_high = 350;
5323                 rx_pause.sge_thr_high = 0;
5324
5325                 for_each_rx_queue(bp, i) {
5326                         struct bnx2x_fastpath *fp = &bp->fp[i];
5327
5328                         if (!fp->disable_tpa) {
5329                                 rx_pause.sge_thr_low = 150;
5330                                 rx_pause.sge_thr_high = 250;
5331                         }
5332
5334                         offset = BAR_USTRORM_INTMEM +
5335                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5336                                                                    fp->cl_id);
5337                         for (j = 0;
5338                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5339                              j++)
5340                                 REG_WR(bp, offset + j*4,
5341                                        ((u32 *)&rx_pause)[j]);
5342                 }
5343         }
5344
5345         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5346
5347         /* Init rate shaping and fairness contexts */
5348         if (IS_E1HMF(bp)) {
5349                 int vn;
5350
5351                 /* During init there is no active link;
5352                    until link is up, set the link rate to 10Gbps */
5353                 bp->link_vars.line_speed = SPEED_10000;
5354                 bnx2x_init_port_minmax(bp);
5355
5356                 bnx2x_calc_vn_weight_sum(bp);
5357
5358                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5359                         bnx2x_init_vn_minmax(bp, 2*vn + port);
5360
5361                 /* Enable rate shaping and fairness */
5362                 bp->cmng.flags.cmng_enables =
5363                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5364                 if (bp->vn_weight_sum)
5365                         bp->cmng.flags.cmng_enables |=
5366                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5367                 else
5368                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
5369                            " fairness will be disabled\n");
5370         } else {
5371                 /* rate shaping and fairness are disabled */
5372                 DP(NETIF_MSG_IFUP,
5373                    "single function mode, minmax will be disabled\n");
5374         }
5375
5377         /* Store it to internal memory */
5378         if (bp->port.pmf)
5379                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5380                         REG_WR(bp, BAR_XSTRORM_INTMEM +
5381                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5382                                ((u32 *)(&bp->cmng))[i]);
5383 }
5384
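/* Dispatch the internal RAM initialization according to the load code
 * returned by the MCP: a COMMON load also performs the port and function
 * stages, and a PORT load also performs the function stage, hence the
 * intentional fall-throughs below.
 */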
5385 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5386 {
5387         switch (load_code) {
5388         case FW_MSG_CODE_DRV_LOAD_COMMON:
5389                 bnx2x_init_internal_common(bp);
5390                 /* no break */
5391
5392         case FW_MSG_CODE_DRV_LOAD_PORT:
5393                 bnx2x_init_internal_port(bp);
5394                 /* no break */
5395
5396         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5397                 bnx2x_init_internal_func(bp);
5398                 break;
5399
5400         default:
5401                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5402                 break;
5403         }
5404 }
5405
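/* Top-level NIC init: set up the per-queue fastpath structures and status
 * blocks, the default status block, the Rx/Tx/slowpath rings and contexts,
 * the internal (storm) memories and statistics, then enable interrupts and
 * check for a pending SPIO5 attention.
 */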
5406 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5407 {
5408         int i;
5409
5410         for_each_queue(bp, i) {
5411                 struct bnx2x_fastpath *fp = &bp->fp[i];
5412
5413                 fp->bp = bp;
5414                 fp->state = BNX2X_FP_STATE_CLOSED;
5415                 fp->index = i;
5416                 fp->cl_id = BP_L_ID(bp) + i;
5417                 fp->sb_id = fp->cl_id;
5418                 /* Suitable Rx and Tx SBs are served by the same client */
5419                 if (i >= bp->num_rx_queues)
5420                         fp->cl_id -= bp->num_rx_queues;
5421                 DP(NETIF_MSG_IFUP,
5422                    "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
5423                    i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5424                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5425                               fp->sb_id);
5426                 bnx2x_update_fpsb_idx(fp);
5427         }
5428
5429         /* ensure status block indices were read */
5430         rmb();
5431
5433         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5434                           DEF_SB_ID);
5435         bnx2x_update_dsb_idx(bp);
5436         bnx2x_update_coalesce(bp);
5437         bnx2x_init_rx_rings(bp);
5438         bnx2x_init_tx_ring(bp);
5439         bnx2x_init_sp_ring(bp);
5440         bnx2x_init_context(bp);
5441         bnx2x_init_internal(bp, load_code);
5442         bnx2x_init_ind_table(bp);
5443         bnx2x_stats_init(bp);
5444
5445         /* At this point, we are ready for interrupts */
5446         atomic_set(&bp->intr_sem, 0);
5447
5448         /* flush all before enabling interrupts */
5449         mb();
5450         mmiowb();
5451
5452         bnx2x_int_enable(bp);
5453
5454         /* Check for SPIO5 */
5455         bnx2x_attn_int_deasserted0(bp,
5456                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5457                                    AEU_INPUTS_ATTN_BITS_SPIO5);
5458 }
5459
5460 /* end of nic init */
5461
5462 /*
5463  * gzip service functions
5464  */
5465
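/* Allocate the DMA-consistent buffer that receives the decompressed
 * firmware data, plus the zlib stream and inflate workspace used to
 * unpack it.
 */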
5466 static int bnx2x_gunzip_init(struct bnx2x *bp)
5467 {
5468         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5469                                               &bp->gunzip_mapping);
5470         if (bp->gunzip_buf == NULL)
5471                 goto gunzip_nomem1;
5472
5473         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5474         if (bp->strm == NULL)
5475                 goto gunzip_nomem2;
5476
5477         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5478                                       GFP_KERNEL);
5479         if (bp->strm->workspace == NULL)
5480                 goto gunzip_nomem3;
5481
5482         return 0;
5483
5484 gunzip_nomem3:
5485         kfree(bp->strm);
5486         bp->strm = NULL;
5487
5488 gunzip_nomem2:
5489         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5490                             bp->gunzip_mapping);
5491         bp->gunzip_buf = NULL;
5492
5493 gunzip_nomem1:
5494         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5495                " decompression\n", bp->dev->name);
5496         return -ENOMEM;
5497 }
5498
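/* Release the zlib stream, its workspace and the decompression buffer
 * allocated by bnx2x_gunzip_init().
 */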
5499 static void bnx2x_gunzip_end(struct bnx2x *bp)
5500 {
5501         kfree(bp->strm->workspace);
5502
5503         kfree(bp->strm);
5504         bp->strm = NULL;
5505
5506         if (bp->gunzip_buf) {
5507                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5508                                     bp->gunzip_mapping);
5509                 bp->gunzip_buf = NULL;
5510         }
5511 }
5512
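/* Decompress a gzip'ed firmware blob into bp->gunzip_buf: validate the
 * gzip magic and compression method, skip the fixed header and optional
 * original file name, then raw-inflate the deflate payload.  On success
 * bp->gunzip_outlen holds the output length in 32-bit words.
 */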
5513 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5514 {
5515         int n, rc;
5516
5517         /* check gzip header */
5518         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5519                 BNX2X_ERR("Bad gzip header\n");
5520                 return -EINVAL;
5521         }
5522
5523         n = 10;
5524
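/* FNAME (bit 3 of the gzip FLG byte) means the member carries a
 * zero-terminated original file name, which is skipped here before the
 * deflate payload is handed to zlib.
 */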
5525 #define FNAME                           0x8
5526
5527         if (zbuf[3] & FNAME)
5528                 while ((zbuf[n++] != 0) && (n < len));
5529
5530         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5531         bp->strm->avail_in = len - n;
5532         bp->strm->next_out = bp->gunzip_buf;
5533         bp->strm->avail_out = FW_BUF_SIZE;
5534
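        /* A negative windowBits value tells zlib to expect a raw deflate
         * stream with no zlib header, since the gzip header was stripped
         * manually above.
         */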
5535         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5536         if (rc != Z_OK)
5537                 return rc;
5538
5539         rc = zlib_inflate(bp->strm, Z_FINISH);
5540         if ((rc != Z_OK) && (rc != Z_STREAM_END))
5541                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5542                        bp->dev->name, bp->strm->msg);
5543
5544         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5545         if (bp->gunzip_outlen & 0x3)
5546                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5547                                     " gunzip_outlen (%d) not aligned\n",
5548                        bp->dev->name, bp->gunzip_outlen);
5549         bp->gunzip_outlen >>= 2;
5550
5551         zlib_inflateEnd(bp->strm);
5552
5553         if (rc == Z_STREAM_END)
5554                 return 0;
5555
5556         return rc;
5557 }
5558
5559 /* nic load/unload */
5560
5561 /*
5562  * General service functions
5563  */
5564
5565 /* send a NIG loopback debug packet */
5566 static void bnx2x_lb_pckt(struct bnx2x *bp)
5567 {
5568         u32 wb_write[3];
5569
5570         /* Ethernet source and destination addresses */
5571         wb_write[0] = 0x55555555;
5572         wb_write[1] = 0x55555555;
5573         wb_write[2] = 0x20;             /* SOP */
5574         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5575
5576         /* NON-IP protocol */
5577         wb_write[0] = 0x09000000;
5578         wb_write[1] = 0x55555555;
5579         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
5580         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5581 }
5582
5583 /* Some of the internal memories are not directly readable from the
5584  * driver; to test them we send debug packets through the NIG loopback
5585  * path and check the relevant block counters.
5586  */
5587 static int bnx2x_int_mem_test(struct bnx2x *bp)
5588 {
5589         int factor;
5590         int count, i;
5591         u32 val = 0;
5592
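        /* FPGA and emulation platforms run much slower than real silicon,
         * so scale the polling budgets and delays accordingly.
         */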
5593         if (CHIP_REV_IS_FPGA(bp))
5594                 factor = 120;
5595         else if (CHIP_REV_IS_EMUL(bp))
5596                 factor = 200;
5597         else
5598                 factor = 1;
5599
5600         DP(NETIF_MSG_HW, "start part1\n");
5601
5602         /* Disable inputs of parser neighbor blocks */
5603         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5604         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5605         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5606         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5607
5608         /* Write 0 to parser credits for CFC search request */
5609         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5610
5611         /* send Ethernet packet */
5612         bnx2x_lb_pckt(bp);
5613
5614         /* TODO: should the NIG statistics be reset here? */
5615         /* Wait until NIG register shows 1 packet of size 0x10 */
5616         count = 1000 * factor;
5617         while (count) {
5618
5619                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5620                 val = *bnx2x_sp(bp, wb_data[0]);
5621                 if (val == 0x10)
5622                         break;
5623
5624                 msleep(10);
5625                 count--;
5626         }
5627         if (val != 0x10) {
5628                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5629                 return -1;
5630         }
5631
5632         /* Wait until PRS register shows 1 packet */
5633         count = 1000 * factor;
5634         while (count) {
5635                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5636                 if (val == 1)
5637                         break;
5638
5639                 msleep(10);
5640                 count--;
5641         }
5642         if (val != 0x1) {
5643                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5644                 return -2;
5645         }
5646
5647         /* Reset and init BRB, PRS */
5648         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5649         msleep(50);
5650         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5651         msleep(50);
5652         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5653         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5654
5655         DP(NETIF_MSG_HW, "part2\n");
5656
5657         /* Disable inputs of parser neighbor blocks */
5658         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5659         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5660         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5661         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5662
5663         /* Write 0 to parser credits for CFC search request */
5664         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5665
5666         /* send 10 Ethernet packets */
5667         for (i = 0; i < 10; i++)
5668                 bnx2x_lb_pckt(bp);
5669
5670         /* Wait until the NIG octet counter shows 10 + 1 packets,
5671            i.e. 11 * 0x10 = 0xb0 bytes in total */
5672         count = 1000 * factor;
5673         while (count) {
5674
5675                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5676                 val = *bnx2x_sp(bp, wb_data[0]);
5677                 if (val == 0xb0)
5678                         break;
5679
5680                 msleep(10);
5681                 count--;
5682         }
5683         if (val != 0xb0) {
5684                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5685                 return -3;
5686         }
5687
5688         /* Wait until PRS register shows 2 packets */
5689         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5690         if (val != 2)
5691                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5692
5693         /* Write 1 to parser credits for CFC search request */
5694         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5695
5696         /* Wait until PRS register shows 3 packets */
5697         msleep(10 * factor);
5699         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5700         if (val != 3)
5701                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5702
5703         /* clear NIG EOP FIFO */
5704         for (i = 0; i < 11; i++)
5705                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5706         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5707         if (val != 1) {
5708                 BNX2X_ERR("clear of NIG failed\n");
5709                 return -4;
5710         }
5711
5712         /* Reset and init BRB, PRS, NIG */
5713         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5714         msleep(50);
5715         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5716         msleep(50);
5717         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5718         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5719 #ifndef BCM_ISCSI
5720         /* set NIC mode */
5721         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5722 #endif
5723
5724         /* Enable inputs of parser neighbor blocks */
5725         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5726         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5727         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5728         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5729
5730         DP(NETIF_MSG_HW, "done\n");
5731
5732         return 0; /* OK */
5733 }
5734
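/* Clear the interrupt mask registers of the individual HW blocks so that
 * their attention signals are not masked (per the function's purpose); the
 * XSEM mask writes are left commented out.
 */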
5735 static void enable_blocks_attention(struct bnx2x *bp)
5736 {
5737         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5738         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5739         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5740         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5741         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5742         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5743         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5744         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5745         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5746 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5747 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5748