bnx2x: Removing redundant macros
[linux-2.6.git] drivers/net/bnx2x_main.c
1 /* bnx2x_main.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2009 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h>  /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
51 #include <linux/io.h>
52
53 #include "bnx2x_reg.h"
54 #include "bnx2x_fw_defs.h"
55 #include "bnx2x_hsi.h"
56 #include "bnx2x_link.h"
57 #include "bnx2x.h"
58 #include "bnx2x_init.h"
59
60 #define DRV_MODULE_VERSION      "1.45.26"
61 #define DRV_MODULE_RELDATE      "2009/01/26"
62 #define BNX2X_BC_VER            0x040200
63
64 /* Time in jiffies before concluding the transmitter is hung */
65 #define TX_TIMEOUT              (5*HZ)
66
67 static char version[] __devinitdata =
68         "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
69         DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
70
71 MODULE_AUTHOR("Eliezer Tamir");
72 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
73 MODULE_LICENSE("GPL");
74 MODULE_VERSION(DRV_MODULE_VERSION);
75
76 static int multi_mode = 1;
77 module_param(multi_mode, int, 0);
78
79 static int disable_tpa;
80 static int poll;
81 static int debug;
82 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
83
84 module_param(disable_tpa, int, 0);
85
86 static int int_mode;
87 module_param(int_mode, int, 0);
88 MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
89
90 module_param(poll, int, 0);
91
92 static int mrrs = -1;
93 module_param(mrrs, int, 0);
94 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
95
96 module_param(debug, int, 0);
97 MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
98 MODULE_PARM_DESC(poll, "use polling (for debug)");
99 MODULE_PARM_DESC(debug, "default debug msglevel");
100
101 static struct workqueue_struct *bnx2x_wq;
102
103 enum bnx2x_board_type {
104         BCM57710 = 0,
105         BCM57711 = 1,
106         BCM57711E = 2,
107 };
108
109 /* indexed by board_type, above */
110 static struct {
111         char *name;
112 } board_info[] __devinitdata = {
113         { "Broadcom NetXtreme II BCM57710 XGb" },
114         { "Broadcom NetXtreme II BCM57711 XGb" },
115         { "Broadcom NetXtreme II BCM57711E XGb" }
116 };
117
118
119 static const struct pci_device_id bnx2x_pci_tbl[] = {
120         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
121                 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
122         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
123                 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
124         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
125                 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
126         { 0 }
127 };
128
129 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
130
131 /****************************************************************************
132 * General service functions
133 ****************************************************************************/
134
135 /* used only at init
136  * locking is done by mcp
137  */
138 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
139 {
140         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
141         pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
142         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
143                                PCICFG_VENDOR_ID_OFFSET);
144 }
145
146 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
147 {
148         u32 val;
149
150         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
151         pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
152         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
153                                PCICFG_VENDOR_ID_OFFSET);
154
155         return val;
156 }
157
158 static const u32 dmae_reg_go_c[] = {
159         DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
160         DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
161         DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
162         DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
163 };
164
165 /* copy command into DMAE command memory and set DMAE command go */
166 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
167                             int idx)
168 {
169         u32 cmd_offset;
170         int i;
171
172         cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
173         for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
174                 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
175
176                 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
177                    idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
178         }
179         REG_WR(bp, dmae_reg_go_c[idx], 1);
180 }
181
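/* Write len32 dwords from the host buffer at dma_addr to GRC address
 * dst_addr using the DMAE engine; falls back to indirect register writes
 * while DMAE is not yet ready, and polls wb_comp for completion.
 */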
182 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
183                       u32 len32)
184 {
185         struct dmae_command *dmae = &bp->init_dmae;
186         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
187         int cnt = 200;
188
189         if (!bp->dmae_ready) {
190                 u32 *data = bnx2x_sp(bp, wb_data[0]);
191
192                 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
193                    "  using indirect\n", dst_addr, len32);
194                 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
195                 return;
196         }
197
198         mutex_lock(&bp->dmae_mutex);
199
200         memset(dmae, 0, sizeof(struct dmae_command));
201
202         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
203                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
204                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
205 #ifdef __BIG_ENDIAN
206                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
207 #else
208                         DMAE_CMD_ENDIANITY_DW_SWAP |
209 #endif
210                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
211                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
212         dmae->src_addr_lo = U64_LO(dma_addr);
213         dmae->src_addr_hi = U64_HI(dma_addr);
214         dmae->dst_addr_lo = dst_addr >> 2;
215         dmae->dst_addr_hi = 0;
216         dmae->len = len32;
217         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
218         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
219         dmae->comp_val = DMAE_COMP_VAL;
220
221         DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
222            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
223                     "dst_addr [%x:%08x (%08x)]\n"
224            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
225            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
226            dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
227            dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
228         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
229            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
230            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
231
232         *wb_comp = 0;
233
234         bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
235
236         udelay(5);
237
238         while (*wb_comp != DMAE_COMP_VAL) {
239                 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
240
241                 if (!cnt) {
242                         BNX2X_ERR("dmae timeout!\n");
243                         break;
244                 }
245                 cnt--;
246                 /* adjust delay for emulation/FPGA */
247                 if (CHIP_REV_IS_SLOW(bp))
248                         msleep(100);
249                 else
250                         udelay(5);
251         }
252
253         mutex_unlock(&bp->dmae_mutex);
254 }
255
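/* Read len32 dwords from GRC address src_addr into the slowpath wb_data
 * buffer using the DMAE engine; falls back to indirect register reads
 * while DMAE is not yet ready.
 */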
256 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
257 {
258         struct dmae_command *dmae = &bp->init_dmae;
259         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
260         int cnt = 200;
261
262         if (!bp->dmae_ready) {
263                 u32 *data = bnx2x_sp(bp, wb_data[0]);
264                 int i;
265
266                 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
267                    "  using indirect\n", src_addr, len32);
268                 for (i = 0; i < len32; i++)
269                         data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
270                 return;
271         }
272
273         mutex_lock(&bp->dmae_mutex);
274
275         memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
276         memset(dmae, 0, sizeof(struct dmae_command));
277
278         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
279                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
280                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
281 #ifdef __BIG_ENDIAN
282                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
283 #else
284                         DMAE_CMD_ENDIANITY_DW_SWAP |
285 #endif
286                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
287                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
288         dmae->src_addr_lo = src_addr >> 2;
289         dmae->src_addr_hi = 0;
290         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
291         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
292         dmae->len = len32;
293         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
294         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
295         dmae->comp_val = DMAE_COMP_VAL;
296
297         DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
298            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
299                     "dst_addr [%x:%08x (%08x)]\n"
300            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
301            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
302            dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
303            dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
304
305         *wb_comp = 0;
306
307         bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
308
309         udelay(5);
310
311         while (*wb_comp != DMAE_COMP_VAL) {
312
313                 if (!cnt) {
314                         BNX2X_ERR("dmae timeout!\n");
315                         break;
316                 }
317                 cnt--;
318                 /* adjust delay for emulation/FPGA */
319                 if (CHIP_REV_IS_SLOW(bp))
320                         msleep(100);
321                 else
322                         udelay(5);
323         }
324         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
325            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
326            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
327
328         mutex_unlock(&bp->dmae_mutex);
329 }
330
331 /* used only for slowpath so not inlined */
332 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
333 {
334         u32 wb_write[2];
335
336         wb_write[0] = val_hi;
337         wb_write[1] = val_lo;
338         REG_WR_DMAE(bp, reg, wb_write, 2);
339 }
340
341 #ifdef USE_WB_RD
342 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
343 {
344         u32 wb_data[2];
345
346         REG_RD_DMAE(bp, reg, wb_data, 2);
347
348         return HILO_U64(wb_data[0], wb_data[1]);
349 }
350 #endif
351
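/* Scan the assert lists of the XSTORM/TSTORM/CSTORM/USTORM processors
 * and print any valid entries; returns the number of asserts found.
 */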
352 static int bnx2x_mc_assert(struct bnx2x *bp)
353 {
354         char last_idx;
355         int i, rc = 0;
356         u32 row0, row1, row2, row3;
357
358         /* XSTORM */
359         last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
360                            XSTORM_ASSERT_LIST_INDEX_OFFSET);
361         if (last_idx)
362                 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
363
364         /* print the asserts */
365         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
366
367                 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
368                               XSTORM_ASSERT_LIST_OFFSET(i));
369                 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
370                               XSTORM_ASSERT_LIST_OFFSET(i) + 4);
371                 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
372                               XSTORM_ASSERT_LIST_OFFSET(i) + 8);
373                 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
374                               XSTORM_ASSERT_LIST_OFFSET(i) + 12);
375
376                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
377                         BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
378                                   " 0x%08x 0x%08x 0x%08x\n",
379                                   i, row3, row2, row1, row0);
380                         rc++;
381                 } else {
382                         break;
383                 }
384         }
385
386         /* TSTORM */
387         last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
388                            TSTORM_ASSERT_LIST_INDEX_OFFSET);
389         if (last_idx)
390                 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
391
392         /* print the asserts */
393         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
394
395                 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
396                               TSTORM_ASSERT_LIST_OFFSET(i));
397                 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
398                               TSTORM_ASSERT_LIST_OFFSET(i) + 4);
399                 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
400                               TSTORM_ASSERT_LIST_OFFSET(i) + 8);
401                 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
402                               TSTORM_ASSERT_LIST_OFFSET(i) + 12);
403
404                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
405                         BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
406                                   " 0x%08x 0x%08x 0x%08x\n",
407                                   i, row3, row2, row1, row0);
408                         rc++;
409                 } else {
410                         break;
411                 }
412         }
413
414         /* CSTORM */
415         last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
416                            CSTORM_ASSERT_LIST_INDEX_OFFSET);
417         if (last_idx)
418                 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
419
420         /* print the asserts */
421         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
422
423                 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
424                               CSTORM_ASSERT_LIST_OFFSET(i));
425                 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
426                               CSTORM_ASSERT_LIST_OFFSET(i) + 4);
427                 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
428                               CSTORM_ASSERT_LIST_OFFSET(i) + 8);
429                 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
430                               CSTORM_ASSERT_LIST_OFFSET(i) + 12);
431
432                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
433                         BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
434                                   " 0x%08x 0x%08x 0x%08x\n",
435                                   i, row3, row2, row1, row0);
436                         rc++;
437                 } else {
438                         break;
439                 }
440         }
441
442         /* USTORM */
443         last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
444                            USTORM_ASSERT_LIST_INDEX_OFFSET);
445         if (last_idx)
446                 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
447
448         /* print the asserts */
449         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
450
451                 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
452                               USTORM_ASSERT_LIST_OFFSET(i));
453                 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
454                               USTORM_ASSERT_LIST_OFFSET(i) + 4);
455                 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
456                               USTORM_ASSERT_LIST_OFFSET(i) + 8);
457                 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
458                               USTORM_ASSERT_LIST_OFFSET(i) + 12);
459
460                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
461                         BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
462                                   " 0x%08x 0x%08x 0x%08x\n",
463                                   i, row3, row2, row1, row0);
464                         rc++;
465                 } else {
466                         break;
467                 }
468         }
469
470         return rc;
471 }
472
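/* Dump the firmware (MCP) trace buffer from the scratchpad to the log,
 * starting at the current mark and wrapping around the trace area.
 */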
473 static void bnx2x_fw_dump(struct bnx2x *bp)
474 {
475         u32 mark, offset;
476         u32 data[9];
477         int word;
478
479         mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
480         mark = ((mark + 0x3) & ~0x3);
481         printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
482
483         for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
484                 for (word = 0; word < 8; word++)
485                         data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
486                                                   offset + 4*word));
487                 data[8] = 0x0;
488                 printk(KERN_CONT "%s", (char *)data);
489         }
490         for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
491                 for (word = 0; word < 8; word++)
492                         data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
493                                                   offset + 4*word));
494                 data[8] = 0x0;
495                 printk(KERN_CONT "%s", (char *)data);
496         }
497         printk("\n" KERN_ERR PFX "end of fw dump\n");
498 }
499
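/* Dump driver state for debugging: per-queue Tx/Rx/SGE/CQE ring contents
 * and indices, the default status block indices, then the firmware trace
 * and any pending storm asserts.
 */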
500 static void bnx2x_panic_dump(struct bnx2x *bp)
501 {
502         int i;
503         u16 j, start, end;
504
505         bp->stats_state = STATS_STATE_DISABLED;
506         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
507
508         BNX2X_ERR("begin crash dump -----------------\n");
509
510         for_each_queue(bp, i) {
511                 struct bnx2x_fastpath *fp = &bp->fp[i];
512                 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
513
514                 BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
515                           "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
516                           i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
517                           fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
518                 BNX2X_ERR("          rx_bd_prod(%x)  rx_bd_cons(%x)"
519                           "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
520                           "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
521                           fp->rx_bd_prod, fp->rx_bd_cons,
522                           le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
523                           fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
524                 BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
525                           "  fp_c_idx(%x)  *sb_c_idx(%x)  fp_u_idx(%x)"
526                           "  *sb_u_idx(%x)  bd data(%x,%x)\n",
527                           fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
528                           fp->status_blk->c_status_block.status_block_index,
529                           fp->fp_u_idx,
530                           fp->status_blk->u_status_block.status_block_index,
531                           hw_prods->packets_prod, hw_prods->bds_prod);
532
533                 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
534                 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
535                 for (j = start; j < end; j++) {
536                         struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
537
538                         BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
539                                   sw_bd->skb, sw_bd->first_bd);
540                 }
541
542                 start = TX_BD(fp->tx_bd_cons - 10);
543                 end = TX_BD(fp->tx_bd_cons + 254);
544                 for (j = start; j < end; j++) {
545                         u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
546
547                         BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
548                                   j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
549                 }
550
551                 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
552                 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
553                 for (j = start; j < end; j++) {
554                         u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
555                         struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
556
557                         BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
558                                   j, rx_bd[1], rx_bd[0], sw_bd->skb);
559                 }
560
561                 start = RX_SGE(fp->rx_sge_prod);
562                 end = RX_SGE(fp->last_max_sge);
563                 for (j = start; j < end; j++) {
564                         u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
565                         struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
566
567                         BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
568                                   j, rx_sge[1], rx_sge[0], sw_page->page);
569                 }
570
571                 start = RCQ_BD(fp->rx_comp_cons - 10);
572                 end = RCQ_BD(fp->rx_comp_cons + 503);
573                 for (j = start; j < end; j++) {
574                         u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
575
576                         BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
577                                   j, cqe[0], cqe[1], cqe[2], cqe[3]);
578                 }
579         }
580
581         BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
582                   "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
583                   "  spq_prod_idx(%u)\n",
584                   bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
585                   bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
586
587         bnx2x_fw_dump(bp);
588         bnx2x_mc_assert(bp);
589         BNX2X_ERR("end crash dump -----------------\n");
590 }
591
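/* Configure the HC block for the interrupt mode in use (MSI-X, MSI or
 * INTx) and enable attentions; on E1H also program the leading/trailing
 * edge attention masks for this port.
 */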
592 static void bnx2x_int_enable(struct bnx2x *bp)
593 {
594         int port = BP_PORT(bp);
595         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
596         u32 val = REG_RD(bp, addr);
597         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
598         int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
599
600         if (msix) {
601                 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
602                          HC_CONFIG_0_REG_INT_LINE_EN_0);
603                 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
604                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
605         } else if (msi) {
606                 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
607                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
608                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
609                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
610         } else {
611                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
612                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
613                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
614                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
615
616                 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
617                    val, port, addr);
618
619                 REG_WR(bp, addr, val);
620
621                 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
622         }
623
624         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
625            val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
626
627         REG_WR(bp, addr, val);
628
629         if (CHIP_IS_E1H(bp)) {
630                 /* init leading/trailing edge */
631                 if (IS_E1HMF(bp)) {
632                         val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
633                         if (bp->port.pmf)
634                                 /* enable nig and gpio3 attention */
635                                 val |= 0x1100;
636                 } else
637                         val = 0xffff;
638
639                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
640                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
641         }
642 }
643
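/* Mask all interrupt sources in the HC config register for this port and
 * verify that the write actually took effect.
 */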
644 static void bnx2x_int_disable(struct bnx2x *bp)
645 {
646         int port = BP_PORT(bp);
647         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
648         u32 val = REG_RD(bp, addr);
649
650         val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
651                  HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
652                  HC_CONFIG_0_REG_INT_LINE_EN_0 |
653                  HC_CONFIG_0_REG_ATTN_BIT_EN_0);
654
655         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
656            val, port, addr);
657
658         /* flush all outstanding writes */
659         mmiowb();
660
661         REG_WR(bp, addr, val);
662         if (REG_RD(bp, addr) != val)
663                 BNX2X_ERR("BUG! proper val not read from IGU!\n");
664 }
665
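/* Stop interrupt handling: bump intr_sem so ISRs become no-ops, optionally
 * mask the HW, wait for any in-flight ISRs to finish and make sure the
 * slowpath task is not running.
 */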
666 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
667 {
668         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
669         int i, offset;
670
671         /* disable interrupt handling */
672         atomic_inc(&bp->intr_sem);
673         if (disable_hw)
674                 /* prevent the HW from sending interrupts */
675                 bnx2x_int_disable(bp);
676
677         /* make sure all ISRs are done */
678         if (msix) {
679                 synchronize_irq(bp->msix_table[0].vector);
680                 offset = 1;
681                 for_each_queue(bp, i)
682                         synchronize_irq(bp->msix_table[i + offset].vector);
683         } else
684                 synchronize_irq(bp->pdev->irq);
685
686         /* make sure sp_task is not running */
687         cancel_delayed_work(&bp->sp_task);
688         flush_workqueue(bnx2x_wq);
689 }
690
691 /* fast path */
692
693 /*
694  * General service functions
695  */
696
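/* Write an ack to the IGU for the given status block: reports the new
 * index for 'storm' and, via 'op' and 'update', controls the interrupt
 * mode and whether the index is actually updated.
 */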
697 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
698                                 u8 storm, u16 index, u8 op, u8 update)
699 {
700         u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
701                        COMMAND_REG_INT_ACK);
702         struct igu_ack_register igu_ack;
703
704         igu_ack.status_block_index = index;
705         igu_ack.sb_id_and_flags =
706                         ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
707                          (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
708                          (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
709                          (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
710
711         DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
712            (*(u32 *)&igu_ack), hc_addr);
713         REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
714 }
715
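/* Refresh the cached CSTORM/USTORM indices from the fastpath status
 * block; returns a bitmask indicating which of them changed.
 */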
716 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
717 {
718         struct host_status_block *fpsb = fp->status_blk;
719         u16 rc = 0;
720
721         barrier(); /* status block is written to by the chip */
722         if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
723                 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
724                 rc |= 1;
725         }
726         if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
727                 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
728                 rc |= 2;
729         }
730         return rc;
731 }
732
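/* Read the pending interrupt status word from the HC SIMD mask register. */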
733 static u16 bnx2x_ack_int(struct bnx2x *bp)
734 {
735         u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
736                        COMMAND_REG_SIMD_MASK);
737         u32 result = REG_RD(bp, hc_addr);
738
739         DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
740            result, hc_addr);
741
742         return result;
743 }
744
745
746 /*
747  * fast path service functions
748  */
749
750 static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
751 {
752         u16 tx_cons_sb;
753
754         /* Tell compiler that status block fields can change */
755         barrier();
756         tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
757         return (fp->tx_pkt_cons != tx_cons_sb);
758 }
759
760 static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
761 {
762         /* Tell compiler that consumer and producer can change */
763         barrier();
764         return (fp->tx_pkt_prod != fp->tx_pkt_cons);
765
766 }
767
768 /* free skb in the packet ring at pos idx
769  * return idx of last bd freed
770  */
771 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
772                              u16 idx)
773 {
774         struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
775         struct eth_tx_bd *tx_bd;
776         struct sk_buff *skb = tx_buf->skb;
777         u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
778         int nbd;
779
780         DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
781            idx, tx_buf, skb);
782
783         /* unmap first bd */
784         DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
785         tx_bd = &fp->tx_desc_ring[bd_idx];
786         pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
787                          BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
788
789         nbd = le16_to_cpu(tx_bd->nbd) - 1;
790         new_cons = nbd + tx_buf->first_bd;
791 #ifdef BNX2X_STOP_ON_ERROR
792         if (nbd > (MAX_SKB_FRAGS + 2)) {
793                 BNX2X_ERR("BAD nbd!\n");
794                 bnx2x_panic();
795         }
796 #endif
797
798         /* Skip a parse bd and the TSO split header bd
799            since they have no mapping */
800         if (nbd)
801                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
802
803         if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
804                                            ETH_TX_BD_FLAGS_TCP_CSUM |
805                                            ETH_TX_BD_FLAGS_SW_LSO)) {
806                 if (--nbd)
807                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
808                 tx_bd = &fp->tx_desc_ring[bd_idx];
809                 /* is this a TSO split header bd? */
810                 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
811                         if (--nbd)
812                                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
813                 }
814         }
815
816         /* now free frags */
817         while (nbd > 0) {
818
819                 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
820                 tx_bd = &fp->tx_desc_ring[bd_idx];
821                 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
822                                BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
823                 if (--nbd)
824                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
825         }
826
827         /* release skb */
828         WARN_ON(!skb);
829         dev_kfree_skb(skb);
830         tx_buf->first_bd = 0;
831         tx_buf->skb = NULL;
832
833         return new_cons;
834 }
835
836 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
837 {
838         s16 used;
839         u16 prod;
840         u16 cons;
841
842         barrier(); /* Tell compiler that prod and cons can change */
843         prod = fp->tx_bd_prod;
844         cons = fp->tx_bd_cons;
845
846         /* NUM_TX_RINGS = number of "next-page" entries
847            It will be used as a threshold */
848         used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
849
850 #ifdef BNX2X_STOP_ON_ERROR
851         WARN_ON(used < 0);
852         WARN_ON(used > fp->bp->tx_ring_size);
853         WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
854 #endif
855
856         return (s16)(fp->bp->tx_ring_size) - used;
857 }
858
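/* Reclaim Tx packets completed by the HW, up to 'work' packets, and wake
 * the Tx queue if it was stopped and enough BDs became available.
 */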
859 static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
860 {
861         struct bnx2x *bp = fp->bp;
862         struct netdev_queue *txq;
863         u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
864         int done = 0;
865
866 #ifdef BNX2X_STOP_ON_ERROR
867         if (unlikely(bp->panic))
868                 return;
869 #endif
870
871         txq = netdev_get_tx_queue(bp->dev, fp->index);
872         hw_cons = le16_to_cpu(*fp->tx_cons_sb);
873         sw_cons = fp->tx_pkt_cons;
874
875         while (sw_cons != hw_cons) {
876                 u16 pkt_cons;
877
878                 pkt_cons = TX_BD(sw_cons);
879
880                 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
881
882                 DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
883                    hw_cons, sw_cons, pkt_cons);
884
885 /*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
886                         rmb();
887                         prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
888                 }
889 */
890                 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
891                 sw_cons++;
892                 done++;
893
894                 if (done == work)
895                         break;
896         }
897
898         fp->tx_pkt_cons = sw_cons;
899         fp->tx_bd_cons = bd_cons;
900
901         /* Need to make the tx_bd_cons update visible to start_xmit()
902          * before checking for netif_tx_queue_stopped().  Without the
903          * memory barrier, there is a small possibility that start_xmit()
904          * will miss it and cause the queue to be stopped forever.
905          */
906         smp_mb();
907
908         /* TBD need a thresh? */
909         if (unlikely(netif_tx_queue_stopped(txq))) {
910
911                 __netif_tx_lock(txq, smp_processor_id());
912
913                 if ((netif_tx_queue_stopped(txq)) &&
914                     (bp->state == BNX2X_STATE_OPEN) &&
915                     (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
916                         netif_tx_wake_queue(txq);
917
918                 __netif_tx_unlock(txq);
919         }
920 }
921
922
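/* Handle a slowpath (ramrod) completion CQE: advance the fastpath or main
 * driver state machine according to the command that completed.
 */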
923 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
924                            union eth_rx_cqe *rr_cqe)
925 {
926         struct bnx2x *bp = fp->bp;
927         int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
928         int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
929
930         DP(BNX2X_MSG_SP,
931            "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
932            fp->index, cid, command, bp->state,
933            rr_cqe->ramrod_cqe.ramrod_type);
934
935         bp->spq_left++;
936
937         if (fp->index) {
938                 switch (command | fp->state) {
939                 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
940                                                 BNX2X_FP_STATE_OPENING):
941                         DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
942                            cid);
943                         fp->state = BNX2X_FP_STATE_OPEN;
944                         break;
945
946                 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
947                         DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
948                            cid);
949                         fp->state = BNX2X_FP_STATE_HALTED;
950                         break;
951
952                 default:
953                         BNX2X_ERR("unexpected MC reply (%d)  "
954                                   "fp->state is %x\n", command, fp->state);
955                         break;
956                 }
957                 mb(); /* force bnx2x_wait_ramrod() to see the change */
958                 return;
959         }
960
961         switch (command | bp->state) {
962         case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
963                 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
964                 bp->state = BNX2X_STATE_OPEN;
965                 break;
966
967         case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
968                 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
969                 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
970                 fp->state = BNX2X_FP_STATE_HALTED;
971                 break;
972
973         case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
974                 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
975                 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
976                 break;
977
978
979         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
980         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
981                 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
982                 bp->set_mac_pending = 0;
983                 break;
984
985         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
986                 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
987                 break;
988
989         default:
990                 BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
991                           command, bp->state);
992                 break;
993         }
994         mb(); /* force bnx2x_wait_ramrod() to see the change */
995 }
996
997 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
998                                      struct bnx2x_fastpath *fp, u16 index)
999 {
1000         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1001         struct page *page = sw_buf->page;
1002         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1003
1004         /* Skip "next page" elements */
1005         if (!page)
1006                 return;
1007
1008         pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
1009                        SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1010         __free_pages(page, PAGES_PER_SGE_SHIFT);
1011
1012         sw_buf->page = NULL;
1013         sge->addr_hi = 0;
1014         sge->addr_lo = 0;
1015 }
1016
1017 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1018                                            struct bnx2x_fastpath *fp, int last)
1019 {
1020         int i;
1021
1022         for (i = 0; i < last; i++)
1023                 bnx2x_free_rx_sge(bp, fp, i);
1024 }
1025
1026 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1027                                      struct bnx2x_fastpath *fp, u16 index)
1028 {
1029         struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1030         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1031         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1032         dma_addr_t mapping;
1033
1034         if (unlikely(page == NULL))
1035                 return -ENOMEM;
1036
1037         mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
1038                                PCI_DMA_FROMDEVICE);
1039         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1040                 __free_pages(page, PAGES_PER_SGE_SHIFT);
1041                 return -ENOMEM;
1042         }
1043
1044         sw_buf->page = page;
1045         pci_unmap_addr_set(sw_buf, mapping, mapping);
1046
1047         sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1048         sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1049
1050         return 0;
1051 }
1052
1053 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1054                                      struct bnx2x_fastpath *fp, u16 index)
1055 {
1056         struct sk_buff *skb;
1057         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1058         struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1059         dma_addr_t mapping;
1060
1061         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1062         if (unlikely(skb == NULL))
1063                 return -ENOMEM;
1064
1065         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
1066                                  PCI_DMA_FROMDEVICE);
1067         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1068                 dev_kfree_skb(skb);
1069                 return -ENOMEM;
1070         }
1071
1072         rx_buf->skb = skb;
1073         pci_unmap_addr_set(rx_buf, mapping, mapping);
1074
1075         rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1076         rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1077
1078         return 0;
1079 }
1080
1081 /* note that we are not allocating a new skb,
1082  * we are just moving one from cons to prod;
1083  * we are not creating a new mapping,
1084  * so there is no need to check for dma_mapping_error().
1085  */
1086 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1087                                struct sk_buff *skb, u16 cons, u16 prod)
1088 {
1089         struct bnx2x *bp = fp->bp;
1090         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1091         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1092         struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1093         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1094
1095         pci_dma_sync_single_for_device(bp->pdev,
1096                                        pci_unmap_addr(cons_rx_buf, mapping),
1097                                        RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1098
1099         prod_rx_buf->skb = cons_rx_buf->skb;
1100         pci_unmap_addr_set(prod_rx_buf, mapping,
1101                            pci_unmap_addr(cons_rx_buf, mapping));
1102         *prod_bd = *cons_bd;
1103 }
1104
1105 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1106                                              u16 idx)
1107 {
1108         u16 last_max = fp->last_max_sge;
1109
1110         if (SUB_S16(idx, last_max) > 0)
1111                 fp->last_max_sge = idx;
1112 }
1113
1114 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1115 {
1116         int i, j;
1117
1118         for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1119                 int idx = RX_SGE_CNT * i - 1;
1120
1121                 for (j = 0; j < 2; j++) {
1122                         SGE_MASK_CLEAR_BIT(fp, idx);
1123                         idx--;
1124                 }
1125         }
1126 }
1127
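/* Mark the SGE pages consumed by this aggregation CQE and advance the SGE
 * producer over mask elements that became fully consumed.
 */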
1128 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1129                                   struct eth_fast_path_rx_cqe *fp_cqe)
1130 {
1131         struct bnx2x *bp = fp->bp;
1132         u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1133                                      le16_to_cpu(fp_cqe->len_on_bd)) >>
1134                       SGE_PAGE_SHIFT;
1135         u16 last_max, last_elem, first_elem;
1136         u16 delta = 0;
1137         u16 i;
1138
1139         if (!sge_len)
1140                 return;
1141
1142         /* First mark all used pages */
1143         for (i = 0; i < sge_len; i++)
1144                 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1145
1146         DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1147            sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1148
1149         /* Here we assume that the last SGE index is the biggest */
1150         prefetch((void *)(fp->sge_mask));
1151         bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1152
1153         last_max = RX_SGE(fp->last_max_sge);
1154         last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1155         first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1156
1157         /* If ring is not full */
1158         if (last_elem + 1 != first_elem)
1159                 last_elem++;
1160
1161         /* Now update the prod */
1162         for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1163                 if (likely(fp->sge_mask[i]))
1164                         break;
1165
1166                 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1167                 delta += RX_SGE_MASK_ELEM_SZ;
1168         }
1169
1170         if (delta > 0) {
1171                 fp->rx_sge_prod += delta;
1172                 /* clear page-end entries */
1173                 bnx2x_clear_sge_mask_next_elems(fp);
1174         }
1175
1176         DP(NETIF_MSG_RX_STATUS,
1177            "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
1178            fp->last_max_sge, fp->rx_sge_prod);
1179 }
1180
1181 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1182 {
1183         /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1184         memset(fp->sge_mask, 0xff,
1185                (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1186
1187         /* Clear the two last indices in the page to 1:
1188            these are the indices that correspond to the "next" element,
1189            hence will never be indicated and should be removed from
1190            the calculations. */
1191         bnx2x_clear_sge_mask_next_elems(fp);
1192 }
1193
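/* Start a TPA aggregation on 'queue': hand the spare skb from the pool to
 * the producer BD and park the currently received skb in the pool until
 * the aggregation is stopped.
 */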
1194 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1195                             struct sk_buff *skb, u16 cons, u16 prod)
1196 {
1197         struct bnx2x *bp = fp->bp;
1198         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1199         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1200         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1201         dma_addr_t mapping;
1202
1203         /* move empty skb from pool to prod and map it */
1204         prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1205         mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1206                                  bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1207         pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1208
1209         /* move partial skb from cons to pool (don't unmap yet) */
1210         fp->tpa_pool[queue] = *cons_rx_buf;
1211
1212         /* mark bin state as start - print error if current state != stop */
1213         if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1214                 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1215
1216         fp->tpa_state[queue] = BNX2X_TPA_START;
1217
1218         /* point prod_bd to new skb */
1219         prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1220         prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1221
1222 #ifdef BNX2X_STOP_ON_ERROR
1223         fp->tpa_queue_used |= (1 << queue);
1224 #ifdef __powerpc64__
1225         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1226 #else
1227         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1228 #endif
1229            fp->tpa_queue_used);
1230 #endif
1231 }
1232
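/* Attach the SGE pages listed in the CQE to the aggregated skb as page
 * fragments, replacing each consumed page with a freshly allocated one.
 */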
1233 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1234                                struct sk_buff *skb,
1235                                struct eth_fast_path_rx_cqe *fp_cqe,
1236                                u16 cqe_idx)
1237 {
1238         struct sw_rx_page *rx_pg, old_rx_pg;
1239         u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1240         u32 i, frag_len, frag_size, pages;
1241         int err;
1242         int j;
1243
1244         frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1245         pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1246
1247         /* This is needed in order to enable forwarding support */
1248         if (frag_size)
1249                 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1250                                                max(frag_size, (u32)len_on_bd));
1251
1252 #ifdef BNX2X_STOP_ON_ERROR
1253         if (pages >
1254             min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
1255                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1256                           pages, cqe_idx);
1257                 BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
1258                           fp_cqe->pkt_len, len_on_bd);
1259                 bnx2x_panic();
1260                 return -EINVAL;
1261         }
1262 #endif
1263
1264         /* Run through the SGL and compose the fragmented skb */
1265         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1266                 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1267
1268                 /* FW gives the indices of the SGE as if the ring is an array
1269                    (meaning that "next" element will consume 2 indices) */
1270                 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1271                 rx_pg = &fp->rx_page_ring[sge_idx];
1272                 old_rx_pg = *rx_pg;
1273
1274                 /* If we fail to allocate a substitute page, we simply stop
1275                    where we are and drop the whole packet */
1276                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1277                 if (unlikely(err)) {
1278                         fp->eth_q_stats.rx_skb_alloc_failed++;
1279                         return err;
1280                 }
1281
1282                 /* Unmap the page as we are going to pass it to the stack */
1283                 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1284                               SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1285
1286                 /* Add one frag and update the appropriate fields in the skb */
1287                 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1288
1289                 skb->data_len += frag_len;
1290                 skb->truesize += frag_len;
1291                 skb->len += frag_len;
1292
1293                 frag_size -= frag_len;
1294         }
1295
1296         return 0;
1297 }
1298
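/* Complete a TPA aggregation: unmap the pooled skb, fix up the IP
 * checksum, attach the SGE fragments and pass the skb up the stack, then
 * put a fresh skb back into the pool.
 */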
1299 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1300                            u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1301                            u16 cqe_idx)
1302 {
1303         struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1304         struct sk_buff *skb = rx_buf->skb;
1305         /* alloc new skb */
1306         struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1307
1308         /* Unmap skb in the pool anyway, as we are going to change
1309            pool entry status to BNX2X_TPA_STOP even if new skb allocation
1310            fails. */
1311         pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1312                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1313
1314         if (likely(new_skb)) {
1315                 /* fix ip xsum and give it to the stack */
1316                 /* (no need to map the new skb) */
1317 #ifdef BCM_VLAN
1318                 int is_vlan_cqe =
1319                         (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1320                          PARSING_FLAGS_VLAN);
1321                 int is_not_hwaccel_vlan_cqe =
1322                         (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1323 #endif
1324
1325                 prefetch(skb);
1326                 prefetch(((char *)(skb)) + 128);
1327
1328 #ifdef BNX2X_STOP_ON_ERROR
1329                 if (pad + len > bp->rx_buf_size) {
1330                         BNX2X_ERR("skb_put is about to fail...  "
1331                                   "pad %d  len %d  rx_buf_size %d\n",
1332                                   pad, len, bp->rx_buf_size);
1333                         bnx2x_panic();
1334                         return;
1335                 }
1336 #endif
1337
1338                 skb_reserve(skb, pad);
1339                 skb_put(skb, len);
1340
1341                 skb->protocol = eth_type_trans(skb, bp->dev);
1342                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1343
1344                 {
1345                         struct iphdr *iph;
1346
1347                         iph = (struct iphdr *)skb->data;
1348 #ifdef BCM_VLAN
1349                         /* If there is no Rx VLAN offloading -
1350                            take VLAN tag into an account */
1351                         if (unlikely(is_not_hwaccel_vlan_cqe))
1352                                 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1353 #endif
1354                         iph->check = 0;
1355                         iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1356                 }
1357
1358                 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1359                                          &cqe->fast_path_cqe, cqe_idx)) {
1360 #ifdef BCM_VLAN
1361                         if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1362                             (!is_not_hwaccel_vlan_cqe))
1363                                 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1364                                                 le16_to_cpu(cqe->fast_path_cqe.
1365                                                             vlan_tag));
1366                         else
1367 #endif
1368                                 netif_receive_skb(skb);
1369                 } else {
1370                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1371                            " - dropping packet!\n");
1372                         dev_kfree_skb(skb);
1373                 }
1374
1375
1376                 /* put new skb in bin */
1377                 fp->tpa_pool[queue].skb = new_skb;
1378
1379         } else {
1380                 /* else drop the packet and keep the buffer in the bin */
1381                 DP(NETIF_MSG_RX_STATUS,
1382                    "Failed to allocate new skb - dropping packet!\n");
1383                 fp->eth_q_stats.rx_skb_alloc_failed++;
1384         }
1385
1386         fp->tpa_state[queue] = BNX2X_TPA_STOP;
1387 }
1388
1389 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1390                                         struct bnx2x_fastpath *fp,
1391                                         u16 bd_prod, u16 rx_comp_prod,
1392                                         u16 rx_sge_prod)
1393 {
1394         struct ustorm_eth_rx_producers rx_prods = {0};
1395         int i;
1396
1397         /* Update producers */
1398         rx_prods.bd_prod = bd_prod;
1399         rx_prods.cqe_prod = rx_comp_prod;
1400         rx_prods.sge_prod = rx_sge_prod;
1401
1402         /*
1403          * Make sure that the BD and SGE data is updated before updating the
1404          * producers since FW might read the BD/SGE right after the producer
1405          * is updated.
1406          * This is only applicable for weak-ordered memory model archs such
1407          * as IA-64. The following barrier is also mandatory since FW will
1408          * assumes BDs must have buffers.
1409          */
1410         wmb();
1411
1412         for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1413                 REG_WR(bp, BAR_USTRORM_INTMEM +
1414                        USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1415                        ((u32 *)&rx_prods)[i]);
1416
1417         mmiowb(); /* keep prod updates ordered */
1418
1419         DP(NETIF_MSG_RX_STATUS,
1420            "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
1421            fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1422 }
1423
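/* Rx completion processing (NAPI poll body): handle up to 'budget'
 * completions, distinguishing slowpath events, TPA aggregations and
 * regular packets.
 */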
1424 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1425 {
1426         struct bnx2x *bp = fp->bp;
1427         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1428         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1429         int rx_pkt = 0;
1430
1431 #ifdef BNX2X_STOP_ON_ERROR
1432         if (unlikely(bp->panic))
1433                 return 0;
1434 #endif
1435
1436         /* The CQ "next element" is the same size as a regular element,
1437            so it is safe to treat it as one here */
1438         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1439         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1440                 hw_comp_cons++;
1441
1442         bd_cons = fp->rx_bd_cons;
1443         bd_prod = fp->rx_bd_prod;
1444         bd_prod_fw = bd_prod;
1445         sw_comp_cons = fp->rx_comp_cons;
1446         sw_comp_prod = fp->rx_comp_prod;
1447
1448         /* Memory barrier necessary as speculative reads of the rx
1449          * buffer can be ahead of the index in the status block
1450          */
1451         rmb();
1452
1453         DP(NETIF_MSG_RX_STATUS,
1454            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1455            fp->index, hw_comp_cons, sw_comp_cons);
1456
1457         while (sw_comp_cons != hw_comp_cons) {
1458                 struct sw_rx_bd *rx_buf = NULL;
1459                 struct sk_buff *skb;
1460                 union eth_rx_cqe *cqe;
1461                 u8 cqe_fp_flags;
1462                 u16 len, pad;
1463
1464                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1465                 bd_prod = RX_BD(bd_prod);
1466                 bd_cons = RX_BD(bd_cons);
1467
1468                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1469                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1470
1471                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1472                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1473                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1474                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1475                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1476                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1477
1478                 /* is this a slowpath msg? */
1479                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1480                         bnx2x_sp_event(fp, cqe);
1481                         goto next_cqe;
1482
1483                 /* this is an rx packet */
1484                 } else {
1485                         rx_buf = &fp->rx_buf_ring[bd_cons];
1486                         skb = rx_buf->skb;
1487                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1488                         pad = cqe->fast_path_cqe.placement_offset;
1489
1490                         /* If CQE is marked both TPA_START and TPA_END
1491                            it is a non-TPA CQE */
1492                         if ((!fp->disable_tpa) &&
1493                             (TPA_TYPE(cqe_fp_flags) !=
1494                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1495                                 u16 queue = cqe->fast_path_cqe.queue_index;
1496
1497                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1498                                         DP(NETIF_MSG_RX_STATUS,
1499                                            "calling tpa_start on queue %d\n",
1500                                            queue);
1501
1502                                         bnx2x_tpa_start(fp, queue, skb,
1503                                                         bd_cons, bd_prod);
1504                                         goto next_rx;
1505                                 }
1506
1507                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1508                                         DP(NETIF_MSG_RX_STATUS,
1509                                            "calling tpa_stop on queue %d\n",
1510                                            queue);
1511
1512                                         if (!BNX2X_RX_SUM_FIX(cqe))
1513                                                 BNX2X_ERR("STOP on non-TCP "
1514                                                           "data\n");
1515
1516                                         /* This is the size of the linear data
1517                                            on this skb */
1518                                         len = le16_to_cpu(cqe->fast_path_cqe.
1519                                                                 len_on_bd);
1520                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1521                                                     len, cqe, comp_ring_cons);
1522 #ifdef BNX2X_STOP_ON_ERROR
1523                                         if (bp->panic)
1524                                                 return -EINVAL;
1525 #endif
1526
1527                                         bnx2x_update_sge_prod(fp,
1528                                                         &cqe->fast_path_cqe);
1529                                         goto next_cqe;
1530                                 }
1531                         }
1532
1533                         pci_dma_sync_single_for_device(bp->pdev,
1534                                         pci_unmap_addr(rx_buf, mapping),
1535                                                        pad + RX_COPY_THRESH,
1536                                                        PCI_DMA_FROMDEVICE);
1537                         prefetch(skb);
1538                         prefetch(((char *)(skb)) + 128);
1539
1540                         /* is this an error packet? */
1541                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1542                                 DP(NETIF_MSG_RX_ERR,
1543                                    "ERROR  flags %x  rx packet %u\n",
1544                                    cqe_fp_flags, sw_comp_cons);
1545                                 fp->eth_q_stats.rx_err_discard_pkt++;
1546                                 goto reuse_rx;
1547                         }
1548
1549                         /* Since we don't have a jumbo ring
1550                          * copy small packets if mtu > 1500
1551                          */
1552                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1553                             (len <= RX_COPY_THRESH)) {
1554                                 struct sk_buff *new_skb;
1555
1556                                 new_skb = netdev_alloc_skb(bp->dev,
1557                                                            len + pad);
1558                                 if (new_skb == NULL) {
1559                                         DP(NETIF_MSG_RX_ERR,
1560                                            "ERROR  packet dropped "
1561                                            "because of alloc failure\n");
1562                                         fp->eth_q_stats.rx_skb_alloc_failed++;
1563                                         goto reuse_rx;
1564                                 }
1565
1566                                 /* aligned copy */
1567                                 skb_copy_from_linear_data_offset(skb, pad,
1568                                                     new_skb->data + pad, len);
1569                                 skb_reserve(new_skb, pad);
1570                                 skb_put(new_skb, len);
1571
1572                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1573
1574                                 skb = new_skb;
1575
1576                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1577                                 pci_unmap_single(bp->pdev,
1578                                         pci_unmap_addr(rx_buf, mapping),
1579                                                  bp->rx_buf_size,
1580                                                  PCI_DMA_FROMDEVICE);
1581                                 skb_reserve(skb, pad);
1582                                 skb_put(skb, len);
1583
1584                         } else {
1585                                 DP(NETIF_MSG_RX_ERR,
1586                                    "ERROR  packet dropped because "
1587                                    "of alloc failure\n");
1588                                 fp->eth_q_stats.rx_skb_alloc_failed++;
1589 reuse_rx:
1590                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1591                                 goto next_rx;
1592                         }
1593
1594                         skb->protocol = eth_type_trans(skb, bp->dev);
1595
1596                         skb->ip_summed = CHECKSUM_NONE;
1597                         if (bp->rx_csum) {
1598                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1599                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1600                                 else
1601                                         fp->eth_q_stats.hw_csum_err++;
1602                         }
1603                 }
1604
1605                 skb_record_rx_queue(skb, fp->index);
1606 #ifdef BCM_VLAN
1607                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1608                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1609                      PARSING_FLAGS_VLAN))
1610                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1611                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1612                 else
1613 #endif
1614                         netif_receive_skb(skb);
1615
1616
1617 next_rx:
1618                 rx_buf->skb = NULL;
1619
1620                 bd_cons = NEXT_RX_IDX(bd_cons);
1621                 bd_prod = NEXT_RX_IDX(bd_prod);
1622                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1623                 rx_pkt++;
1624 next_cqe:
1625                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1626                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1627
1628                 if (rx_pkt == budget)
1629                         break;
1630         } /* while */
1631
1632         fp->rx_bd_cons = bd_cons;
1633         fp->rx_bd_prod = bd_prod_fw;
1634         fp->rx_comp_cons = sw_comp_cons;
1635         fp->rx_comp_prod = sw_comp_prod;
1636
1637         /* Update producers */
1638         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1639                              fp->rx_sge_prod);
1640
1641         fp->rx_pkt += rx_pkt;
1642         fp->rx_calls++;
1643
1644         return rx_pkt;
1645 }
1646
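/*
 * MSI-X handler for a single fast path queue: bail out while interrupts
 * are globally disabled, acknowledge the status block with further IGU
 * interrupts disabled and schedule NAPI for this queue.
 */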
1647 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1648 {
1649         struct bnx2x_fastpath *fp = fp_cookie;
1650         struct bnx2x *bp = fp->bp;
1651         int index = fp->index;
1652
1653         /* Return here if interrupt is disabled */
1654         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1655                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1656                 return IRQ_HANDLED;
1657         }
1658
1659         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1660            index, fp->sb_id);
1661         bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1662
1663 #ifdef BNX2X_STOP_ON_ERROR
1664         if (unlikely(bp->panic))
1665                 return IRQ_HANDLED;
1666 #endif
1667
1668         prefetch(fp->rx_cons_sb);
1669         prefetch(fp->tx_cons_sb);
1670         prefetch(&fp->status_blk->c_status_block.status_block_index);
1671         prefetch(&fp->status_blk->u_status_block.status_block_index);
1672
1673         napi_schedule(&bnx2x_fp(bp, index, napi));
1674
1675         return IRQ_HANDLED;
1676 }
1677
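/*
 * INTx/MSI handler shared by all rings: bit 0 of the acknowledged
 * status selects the slowpath task, while the fast path 0 status block
 * bit schedules NAPI for that queue.
 */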
1678 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1679 {
1680         struct bnx2x *bp = netdev_priv(dev_instance);
1681         u16 status = bnx2x_ack_int(bp);
1682         u16 mask;
1683
1684         /* Return here if interrupt is shared and it's not for us */
1685         if (unlikely(status == 0)) {
1686                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1687                 return IRQ_NONE;
1688         }
1689         DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);
1690
1691         /* Return here if interrupt is disabled */
1692         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1693                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1694                 return IRQ_HANDLED;
1695         }
1696
1697 #ifdef BNX2X_STOP_ON_ERROR
1698         if (unlikely(bp->panic))
1699                 return IRQ_HANDLED;
1700 #endif
1701
1702         mask = 0x2 << bp->fp[0].sb_id;
1703         if (status & mask) {
1704                 struct bnx2x_fastpath *fp = &bp->fp[0];
1705
1706                 prefetch(fp->rx_cons_sb);
1707                 prefetch(fp->tx_cons_sb);
1708                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1709                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1710
1711                 napi_schedule(&bnx2x_fp(bp, 0, napi));
1712
1713                 status &= ~mask;
1714         }
1715
1716
1717         if (unlikely(status & 0x1)) {
1718                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1719
1720                 status &= ~0x1;
1721                 if (!status)
1722                         return IRQ_HANDLED;
1723         }
1724
1725         if (status)
1726                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1727                    status);
1728
1729         return IRQ_HANDLED;
1730 }
1731
1732 /* end of fast path */
1733
1734 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1735
1736 /* Link */
1737
1738 /*
1739  * General service functions
1740  */
1741
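/*
 * Acquire one of the shared hardware resource locks through the MISC
 * driver-control registers, retrying every 5 ms for up to 5 seconds
 * before giving up with -EAGAIN.
 */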
1742 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1743 {
1744         u32 lock_status;
1745         u32 resource_bit = (1 << resource);
1746         int func = BP_FUNC(bp);
1747         u32 hw_lock_control_reg;
1748         int cnt;
1749
1750         /* Validating that the resource is within range */
1751         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1752                 DP(NETIF_MSG_HW,
1753                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1754                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1755                 return -EINVAL;
1756         }
1757
1758         if (func <= 5) {
1759                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1760         } else {
1761                 hw_lock_control_reg =
1762                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1763         }
1764
1765         /* Validating that the resource is not already taken */
1766         lock_status = REG_RD(bp, hw_lock_control_reg);
1767         if (lock_status & resource_bit) {
1768                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1769                    lock_status, resource_bit);
1770                 return -EEXIST;
1771         }
1772
1773         /* Try for 5 seconds, polling every 5 ms */
1774         for (cnt = 0; cnt < 1000; cnt++) {
1775                 /* Try to acquire the lock */
1776                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1777                 lock_status = REG_RD(bp, hw_lock_control_reg);
1778                 if (lock_status & resource_bit)
1779                         return 0;
1780
1781                 msleep(5);
1782         }
1783         DP(NETIF_MSG_HW, "Timeout\n");
1784         return -EAGAIN;
1785 }
1786
1787 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1788 {
1789         u32 lock_status;
1790         u32 resource_bit = (1 << resource);
1791         int func = BP_FUNC(bp);
1792         u32 hw_lock_control_reg;
1793
1794         /* Validating that the resource is within range */
1795         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1796                 DP(NETIF_MSG_HW,
1797                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1798                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1799                 return -EINVAL;
1800         }
1801
1802         if (func <= 5) {
1803                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1804         } else {
1805                 hw_lock_control_reg =
1806                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1807         }
1808
1809         /* Validating that the resource is currently taken */
1810         lock_status = REG_RD(bp, hw_lock_control_reg);
1811         if (!(lock_status & resource_bit)) {
1812                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1813                    lock_status, resource_bit);
1814                 return -EFAULT;
1815         }
1816
1817         REG_WR(bp, hw_lock_control_reg, resource_bit);
1818         return 0;
1819 }
1820
1821 /* HW Lock for shared dual port PHYs */
1822 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1823 {
1824         mutex_lock(&bp->port.phy_mutex);
1825
1826         if (bp->port.need_hw_lock)
1827                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1828 }
1829
1830 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1831 {
1832         if (bp->port.need_hw_lock)
1833                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1834
1835         mutex_unlock(&bp->port.phy_mutex);
1836 }
1837
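/*
 * Read the current value of a single GPIO pin, taking the port-swap
 * strap into account when selecting the per-port pin.
 */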
1838 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1839 {
1840         /* The GPIO should be swapped if swap register is set and active */
1841         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1842                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1843         int gpio_shift = gpio_num +
1844                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1845         u32 gpio_mask = (1 << gpio_shift);
1846         u32 gpio_reg;
1847         int value;
1848
1849         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1850                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1851                 return -EINVAL;
1852         }
1853
1854         /* read GPIO value */
1855         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1856
1857         /* get the requested pin value */
1858         if ((gpio_reg & gpio_mask) == gpio_mask)
1859                 value = 1;
1860         else
1861                 value = 0;
1862
1863         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
1864
1865         return value;
1866 }
1867
1868 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1869 {
1870         /* The GPIO should be swapped if swap register is set and active */
1871         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1872                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1873         int gpio_shift = gpio_num +
1874                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1875         u32 gpio_mask = (1 << gpio_shift);
1876         u32 gpio_reg;
1877
1878         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1879                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1880                 return -EINVAL;
1881         }
1882
1883         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1884         /* read GPIO and mask except the float bits */
1885         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1886
1887         switch (mode) {
1888         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1889                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1890                    gpio_num, gpio_shift);
1891                 /* clear FLOAT and set CLR */
1892                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1893                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1894                 break;
1895
1896         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1897                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1898                    gpio_num, gpio_shift);
1899                 /* clear FLOAT and set SET */
1900                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1901                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1902                 break;
1903
1904         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1905                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1906                    gpio_num, gpio_shift);
1907                 /* set FLOAT */
1908                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1909                 break;
1910
1911         default:
1912                 break;
1913         }
1914
1915         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1916         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1917
1918         return 0;
1919 }
1920
1921 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1922 {
1923         /* The GPIO should be swapped if swap register is set and active */
1924         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1925                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1926         int gpio_shift = gpio_num +
1927                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1928         u32 gpio_mask = (1 << gpio_shift);
1929         u32 gpio_reg;
1930
1931         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1932                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1933                 return -EINVAL;
1934         }
1935
1936         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1937         /* read GPIO int */
1938         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1939
1940         switch (mode) {
1941         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1942                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1943                                    "output low\n", gpio_num, gpio_shift);
1944                 /* clear SET and set CLR */
1945                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1946                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1947                 break;
1948
1949         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1950                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1951                                    "output high\n", gpio_num, gpio_shift);
1952                 /* clear CLR and set SET */
1953                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1954                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1955                 break;
1956
1957         default:
1958                 break;
1959         }
1960
1961         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1962         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1963
1964         return 0;
1965 }
1966
1967 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1968 {
1969         u32 spio_mask = (1 << spio_num);
1970         u32 spio_reg;
1971
1972         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1973             (spio_num > MISC_REGISTERS_SPIO_7)) {
1974                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1975                 return -EINVAL;
1976         }
1977
1978         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1979         /* read SPIO and mask except the float bits */
1980         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1981
1982         switch (mode) {
1983         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1984                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1985                 /* clear FLOAT and set CLR */
1986                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1987                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1988                 break;
1989
1990         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1991                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1992                 /* clear FLOAT and set SET */
1993                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1994                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1995                 break;
1996
1997         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1998                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1999                 /* set FLOAT */
2000                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2001                 break;
2002
2003         default:
2004                 break;
2005         }
2006
2007         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2008         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2009
2010         return 0;
2011 }
2012
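/*
 * Translate the negotiated IEEE pause bits in link_vars into the
 * ethtool-style Pause/Asym_Pause advertising flags in bp->port.
 */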
2013 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2014 {
2015         switch (bp->link_vars.ieee_fc &
2016                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2017         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2018                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2019                                           ADVERTISED_Pause);
2020                 break;
2021         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2022                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2023                                          ADVERTISED_Pause);
2024                 break;
2025         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2026                 bp->port.advertising |= ADVERTISED_Asym_Pause;
2027                 break;
2028         default:
2029                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2030                                           ADVERTISED_Pause);
2031                 break;
2032         }
2033 }
2034
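/*
 * Report the current link state (speed, duplex and flow control) to the
 * kernel log and update the netdev carrier state accordingly.
 */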
2035 static void bnx2x_link_report(struct bnx2x *bp)
2036 {
2037         if (bp->link_vars.link_up) {
2038                 if (bp->state == BNX2X_STATE_OPEN)
2039                         netif_carrier_on(bp->dev);
2040                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2041
2042                 printk("%d Mbps ", bp->link_vars.line_speed);
2043
2044                 if (bp->link_vars.duplex == DUPLEX_FULL)
2045                         printk("full duplex");
2046                 else
2047                         printk("half duplex");
2048
2049                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2050                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2051                                 printk(", receive ");
2052                                 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2053                                         printk("& transmit ");
2054                         } else {
2055                                 printk(", transmit ");
2056                         }
2057                         printk("flow control ON");
2058                 }
2059                 printk("\n");
2060
2061         } else { /* link_down */
2062                 netif_carrier_off(bp->dev);
2063                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2064         }
2065 }
2066
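/*
 * Initial link bring-up at load time: choose the flow control policy to
 * advertise, arm loopback mode when loading for diagnostics and run the
 * common PHY init code under the PHY lock. Requires bootcode.
 */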
2067 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2068 {
2069         if (!BP_NOMCP(bp)) {
2070                 u8 rc;
2071
2072                 /* Initialize link parameters structure variables */
2073                 /* It is recommended to turn off RX FC for jumbo frames
2074                    for better performance */
2075                 if (IS_E1HMF(bp))
2076                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2077                 else if (bp->dev->mtu > 5000)
2078                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2079                 else
2080                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2081
2082                 bnx2x_acquire_phy_lock(bp);
2083
2084                 if (load_mode == LOAD_DIAG)
2085                         bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2086
2087                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2088
2089                 bnx2x_release_phy_lock(bp);
2090
2091                 bnx2x_calc_fc_adv(bp);
2092
2093                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2094                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2095                         bnx2x_link_report(bp);
2096                 }
2097
2098                 return rc;
2099         }
2100         BNX2X_ERR("Bootcode is missing - not initializing link\n");
2101         return -EINVAL;
2102 }
2103
2104 static void bnx2x_link_set(struct bnx2x *bp)
2105 {
2106         if (!BP_NOMCP(bp)) {
2107                 bnx2x_acquire_phy_lock(bp);
2108                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2109                 bnx2x_release_phy_lock(bp);
2110
2111                 bnx2x_calc_fc_adv(bp);
2112         } else
2113                 BNX2X_ERR("Bootcode is missing - not setting link\n");
2114 }
2115
2116 static void bnx2x__link_reset(struct bnx2x *bp)
2117 {
2118         if (!BP_NOMCP(bp)) {
2119                 bnx2x_acquire_phy_lock(bp);
2120                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2121                 bnx2x_release_phy_lock(bp);
2122         } else
2123                 BNX2X_ERR("Bootcode is missing - not resetting link\n");
2124 }
2125
2126 static u8 bnx2x_link_test(struct bnx2x *bp)
2127 {
2128         u8 rc;
2129
2130         bnx2x_acquire_phy_lock(bp);
2131         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2132         bnx2x_release_phy_lock(bp);
2133
2134         return rc;
2135 }
2136
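/*
 * Initialize the per-port rate shaping and fairness contexts from the
 * current line speed; the result in bp->cmng is copied to internal
 * memory by the link attention handler.
 */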
2137 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2138 {
2139         u32 r_param = bp->link_vars.line_speed / 8;
2140         u32 fair_periodic_timeout_usec;
2141         u32 t_fair;
2142
2143         memset(&(bp->cmng.rs_vars), 0,
2144                sizeof(struct rate_shaping_vars_per_port));
2145         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2146
2147         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2148         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2149
2150         /* this is the threshold below which no timer arming will occur.
2151            The 1.25 coefficient makes the threshold a little bigger
2152            than the real time, to compensate for timer inaccuracy */
2153         bp->cmng.rs_vars.rs_threshold =
2154                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2155
2156         /* resolution of fairness timer */
2157         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2158         /* for 10G it is 1000usec. for 1G it is 10000usec. */
2159         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2160
2161         /* this is the threshold below which we won't arm the timer anymore */
2162         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2163
2164         /* we multiply by 1e3/8 to get bytes/msec.
2165            We don't want the credit to exceed
2166            t_fair*FAIR_MEM (the algorithm resolution) */
2167         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2168         /* since each tick is 4 usec */
2169         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2170 }
2171
2172 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2173 {
2174         struct rate_shaping_vars_per_vn m_rs_vn;
2175         struct fairness_vars_per_vn m_fair_vn;
2176         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2177         u16 vn_min_rate, vn_max_rate;
2178         int i;
2179
2180         /* If function is hidden - set min and max to zeroes */
2181         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2182                 vn_min_rate = 0;
2183                 vn_max_rate = 0;
2184
2185         } else {
2186                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2187                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2188                 /* If fairness is enabled (not all min rates are zero) and
2189                    the current min rate is zero, set it to the default
2190                    minimum rate, as the algorithm requires a non-zero value. */
2191                 if (bp->vn_weight_sum && (vn_min_rate == 0))
2192                         vn_min_rate = DEF_MIN_RATE;
2193                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2194                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2195         }
2196
2197         DP(NETIF_MSG_IFUP,
2198            "func %d: vn_min_rate=%d  vn_max_rate=%d  vn_weight_sum=%d\n",
2199            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2200
2201         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2202         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2203
2204         /* global vn counter - maximal Mbps for this vn */
2205         m_rs_vn.vn_counter.rate = vn_max_rate;
2206
2207         /* quota - number of bytes transmitted in this period */
2208         m_rs_vn.vn_counter.quota =
2209                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2210
2211         if (bp->vn_weight_sum) {
2212                 /* credit for each period of the fairness algorithm:
2213                    number of bytes in T_FAIR (each vn shares the port rate).
2214                    vn_weight_sum should not be larger than 10000, thus
2215                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2216                    than zero */
2217                 m_fair_vn.vn_credit_delta =
2218                         max((u32)(vn_min_rate * (T_FAIR_COEF /
2219                                                  (8 * bp->vn_weight_sum))),
2220                             (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2221                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2222                    m_fair_vn.vn_credit_delta);
2223         }
2224
2225         /* Store it to internal memory */
2226         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2227                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2228                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2229                        ((u32 *)(&m_rs_vn))[i]);
2230
2231         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2232                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2233                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2234                        ((u32 *)(&m_fair_vn))[i]);
2235 }
2236
2237
2238 /* This function is called upon link interrupt */
2239 static void bnx2x_link_attn(struct bnx2x *bp)
2240 {
2241         /* Make sure that we are synced with the current statistics */
2242         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2243
2244         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2245
2246         if (bp->link_vars.link_up) {
2247
2248                 /* dropless flow control */
2249                 if (CHIP_IS_E1H(bp)) {
2250                         int port = BP_PORT(bp);
2251                         u32 pause_enabled = 0;
2252
2253                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2254                                 pause_enabled = 1;
2255
2256                         REG_WR(bp, BAR_USTRORM_INTMEM +
2257                                USTORM_PAUSE_ENABLED_OFFSET(port),
2258                                pause_enabled);
2259                 }
2260
2261                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2262                         struct host_port_stats *pstats;
2263
2264                         pstats = bnx2x_sp(bp, port_stats);
2265                         /* reset old bmac stats */
2266                         memset(&(pstats->mac_stx[0]), 0,
2267                                sizeof(struct mac_stx));
2268                 }
2269                 if ((bp->state == BNX2X_STATE_OPEN) ||
2270                     (bp->state == BNX2X_STATE_DISABLED))
2271                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2272         }
2273
2274         /* indicate link status */
2275         bnx2x_link_report(bp);
2276
2277         if (IS_E1HMF(bp)) {
2278                 int port = BP_PORT(bp);
2279                 int func;
2280                 int vn;
2281
2282                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2283                         if (vn == BP_E1HVN(bp))
2284                                 continue;
2285
2286                         func = ((vn << 1) | port);
2287
2288                         /* Set the attention towards other drivers
2289                            on the same port */
2290                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2291                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2292                 }
2293
2294                 if (bp->link_vars.link_up) {
2295                         int i;
2296
2297                         /* Init rate shaping and fairness contexts */
2298                         bnx2x_init_port_minmax(bp);
2299
2300                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2301                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2302
2303                         /* Store it to internal memory */
2304                         for (i = 0;
2305                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
2306                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2307                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2308                                        ((u32 *)(&bp->cmng))[i]);
2309                 }
2310         }
2311 }
2312
2313 static void bnx2x__link_status_update(struct bnx2x *bp)
2314 {
2315         if (bp->state != BNX2X_STATE_OPEN)
2316                 return;
2317
2318         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2319
2320         if (bp->link_vars.link_up)
2321                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2322         else
2323                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2324
2325         /* indicate link status */
2326         bnx2x_link_report(bp);
2327 }
2328
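/*
 * This driver instance has just become the port management function
 * (PMF): note it, enable the NIG attention for this vn and kick the
 * statistics state machine.
 */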
2329 static void bnx2x_pmf_update(struct bnx2x *bp)
2330 {
2331         int port = BP_PORT(bp);
2332         u32 val;
2333
2334         bp->port.pmf = 1;
2335         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2336
2337         /* enable nig attention */
2338         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2339         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2340         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2341
2342         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2343 }
2344
2345 /* end of Link */
2346
2347 /* slow path */
2348
2349 /*
2350  * General service functions
2351  */
2352
2353 /* the slow path queue is odd since completions arrive on the fastpath ring */
2354 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2355                          u32 data_hi, u32 data_lo, int common)
2356 {
2357         int func = BP_FUNC(bp);
2358
2359         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2360            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2361            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2362            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2363            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2364
2365 #ifdef BNX2X_STOP_ON_ERROR
2366         if (unlikely(bp->panic))
2367                 return -EIO;
2368 #endif
2369
2370         spin_lock_bh(&bp->spq_lock);
2371
2372         if (!bp->spq_left) {
2373                 BNX2X_ERR("BUG! SPQ ring full!\n");
2374                 spin_unlock_bh(&bp->spq_lock);
2375                 bnx2x_panic();
2376                 return -EBUSY;
2377         }
2378
2379         /* CID needs the port number to be encoded in it */
2380         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2381                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2382                                      HW_CID(bp, cid)));
2383         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2384         if (common)
2385                 bp->spq_prod_bd->hdr.type |=
2386                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2387
2388         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2389         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2390
2391         bp->spq_left--;
2392
2393         if (bp->spq_prod_bd == bp->spq_last_bd) {
2394                 bp->spq_prod_bd = bp->spq;
2395                 bp->spq_prod_idx = 0;
2396                 DP(NETIF_MSG_TIMER, "end of spq\n");
2397
2398         } else {
2399                 bp->spq_prod_bd++;
2400                 bp->spq_prod_idx++;
2401         }
2402
2403         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2404                bp->spq_prod_idx);
2405
2406         spin_unlock_bh(&bp->spq_lock);
2407         return 0;
2408 }
2409
2410 /* acquire split MCP access lock register */
2411 static int bnx2x_acquire_alr(struct bnx2x *bp)
2412 {
2413         u32 i, j, val;
2414         int rc = 0;
2415
2416         might_sleep();
2417         i = 100;
2418         for (j = 0; j < i*10; j++) {
2419                 val = (1UL << 31);
2420                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2421                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2422                 if (val & (1L << 31))
2423                         break;
2424
2425                 msleep(5);
2426         }
2427         if (!(val & (1L << 31))) {
2428                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2429                 rc = -EBUSY;
2430         }
2431
2432         return rc;
2433 }
2434
2435 /* release split MCP access lock register */
2436 static void bnx2x_release_alr(struct bnx2x *bp)
2437 {
2438         u32 val = 0;
2439
2440         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2441 }
2442
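/*
 * Compare the default status block indices against the driver's cached
 * copies and return a bitmask of which blocks (attn, c, u, x, t) have
 * new data.
 */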
2443 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2444 {
2445         struct host_def_status_block *def_sb = bp->def_status_blk;
2446         u16 rc = 0;
2447
2448         barrier(); /* status block is written to by the chip */
2449         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2450                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2451                 rc |= 1;
2452         }
2453         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2454                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2455                 rc |= 2;
2456         }
2457         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2458                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2459                 rc |= 4;
2460         }
2461         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2462                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2463                 rc |= 8;
2464         }
2465         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2466                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2467                 rc |= 16;
2468         }
2469         return rc;
2470 }
2471
2472 /*
2473  * slow path service functions
2474  */
2475
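/*
 * Handle newly asserted attention bits: mask them in the AEU, service
 * the hard-wired NIG/GPIO/general attentions and finally write the
 * asserted bits to the HC attention-set command register.
 */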
2476 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2477 {
2478         int port = BP_PORT(bp);
2479         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2480                        COMMAND_REG_ATTN_BITS_SET);
2481         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2482                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2483         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2484                                        NIG_REG_MASK_INTERRUPT_PORT0;
2485         u32 aeu_mask;
2486         u32 nig_mask = 0;
2487
2488         if (bp->attn_state & asserted)
2489                 BNX2X_ERR("IGU ERROR\n");
2490
2491         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2492         aeu_mask = REG_RD(bp, aeu_addr);
2493
2494         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2495            aeu_mask, asserted);
2496         aeu_mask &= ~(asserted & 0xff);
2497         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2498
2499         REG_WR(bp, aeu_addr, aeu_mask);
2500         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2501
2502         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2503         bp->attn_state |= asserted;
2504         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2505
2506         if (asserted & ATTN_HARD_WIRED_MASK) {
2507                 if (asserted & ATTN_NIG_FOR_FUNC) {
2508
2509                         bnx2x_acquire_phy_lock(bp);
2510
2511                         /* save nig interrupt mask */
2512                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2513                         REG_WR(bp, nig_int_mask_addr, 0);
2514
2515                         bnx2x_link_attn(bp);
2516
2517                         /* handle unicore attn? */
2518                 }
2519                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2520                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2521
2522                 if (asserted & GPIO_2_FUNC)
2523                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2524
2525                 if (asserted & GPIO_3_FUNC)
2526                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2527
2528                 if (asserted & GPIO_4_FUNC)
2529                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2530
2531                 if (port == 0) {
2532                         if (asserted & ATTN_GENERAL_ATTN_1) {
2533                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2534                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2535                         }
2536                         if (asserted & ATTN_GENERAL_ATTN_2) {
2537                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2538                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2539                         }
2540                         if (asserted & ATTN_GENERAL_ATTN_3) {
2541                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2542                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2543                         }
2544                 } else {
2545                         if (asserted & ATTN_GENERAL_ATTN_4) {
2546                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2547                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2548                         }
2549                         if (asserted & ATTN_GENERAL_ATTN_5) {
2550                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2551                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2552                         }
2553                         if (asserted & ATTN_GENERAL_ATTN_6) {
2554                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2555                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2556                         }
2557                 }
2558
2559         } /* if hardwired */
2560
2561         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2562            asserted, hc_addr);
2563         REG_WR(bp, hc_addr, asserted);
2564
2565         /* now set back the mask */
2566         if (asserted & ATTN_NIG_FOR_FUNC) {
2567                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2568                 bnx2x_release_phy_lock(bp);
2569         }
2570 }
2571
2572 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2573 {
2574         int port = BP_PORT(bp);
2575         int reg_offset;
2576         u32 val;
2577
2578         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2579                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2580
2581         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2582
2583                 val = REG_RD(bp, reg_offset);
2584                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2585                 REG_WR(bp, reg_offset, val);
2586
2587                 BNX2X_ERR("SPIO5 hw attention\n");
2588
2589                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2590                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2591                         /* Fan failure attention */
2592
2593                         /* The PHY reset is controlled by GPIO 1 */
2594                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2595                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2596                         /* Low power mode is controlled by GPIO 2 */
2597                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2598                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2599                         /* mark the failure */
2600                         bp->link_params.ext_phy_config &=
2601                                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2602                         bp->link_params.ext_phy_config |=
2603                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2604                         SHMEM_WR(bp,
2605                                  dev_info.port_hw_config[port].
2606                                                         external_phy_config,
2607                                  bp->link_params.ext_phy_config);
2608                         /* log the failure */
2609                         printk(KERN_ERR PFX "Fan Failure on Network"
2610                                " Controller %s has caused the driver to"
2611                                " shut down the card to prevent permanent"
2612                                " damage.  Please contact Dell Support for"
2613                                " assistance\n", bp->dev->name);
2614                         break;
2615
2616                 default:
2617                         break;
2618                 }
2619         }
2620
2621         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2622                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2623                 bnx2x_acquire_phy_lock(bp);
2624                 bnx2x_handle_module_detect_int(&bp->link_params);
2625                 bnx2x_release_phy_lock(bp);
2626         }
2627
2628         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2629
2630                 val = REG_RD(bp, reg_offset);
2631                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2632                 REG_WR(bp, reg_offset, val);
2633
2634                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2635                           (attn & HW_INTERRUT_ASSERT_SET_0));
2636                 bnx2x_panic();
2637         }
2638 }
2639
2640 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2641 {
2642         u32 val;
2643
2644         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2645
2646                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2647                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2648                 /* DORQ discard attention */
2649                 if (val & 0x2)
2650                         BNX2X_ERR("FATAL error from DORQ\n");
2651         }
2652
2653         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2654
2655                 int port = BP_PORT(bp);
2656                 int reg_offset;
2657
2658                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2659                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2660
2661                 val = REG_RD(bp, reg_offset);
2662                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2663                 REG_WR(bp, reg_offset, val);
2664
2665                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2666                           (attn & HW_INTERRUT_ASSERT_SET_1));
2667                 bnx2x_panic();
2668         }
2669 }
2670
2671 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2672 {
2673         u32 val;
2674
2675         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2676
2677                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2678                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2679                 /* CFC error attention */
2680                 if (val & 0x2)
2681                         BNX2X_ERR("FATAL error from CFC\n");
2682         }
2683
2684         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2685
2686                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2687                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2688                 /* RQ_USDMDP_FIFO_OVERFLOW */
2689                 if (val & 0x18000)
2690                         BNX2X_ERR("FATAL error from PXP\n");
2691         }
2692
2693         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2694
2695                 int port = BP_PORT(bp);
2696                 int reg_offset;
2697
2698                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2699                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2700
2701                 val = REG_RD(bp, reg_offset);
2702                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2703                 REG_WR(bp, reg_offset, val);
2704
2705                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2706                           (attn & HW_INTERRUT_ASSERT_SET_2));
2707                 bnx2x_panic();
2708         }
2709 }
2710
2711 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2712 {
2713         u32 val;
2714
2715         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2716
2717                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2718                         int func = BP_FUNC(bp);
2719
2720                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2721                         bnx2x__link_status_update(bp);
2722                         if (SHMEM_RD(bp, func_mb[func].drv_status) &
2723                                                         DRV_STATUS_PMF)
2724                                 bnx2x_pmf_update(bp);
2725
2726                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2727
2728                         BNX2X_ERR("MC assert!\n");
2729                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2730                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2731                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2732                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2733                         bnx2x_panic();
2734
2735                 } else if (attn & BNX2X_MCP_ASSERT) {
2736
2737                         BNX2X_ERR("MCP assert!\n");
2738                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2739                         bnx2x_fw_dump(bp);
2740
2741                 } else
2742                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2743         }
2744
2745         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2746                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2747                 if (attn & BNX2X_GRC_TIMEOUT) {
2748                         val = CHIP_IS_E1H(bp) ?
2749                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2750                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2751                 }
2752                 if (attn & BNX2X_GRC_RSV) {
2753                         val = CHIP_IS_E1H(bp) ?
2754                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2755                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2756                 }
2757                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2758         }
2759 }
2760
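/*
 * Handle deasserted attention bits: read the after-invert AEU registers
 * and, for every dynamic attention group that deasserted, dispatch the
 * relevant bits to the per-register deasserted0..3 helpers before
 * unmasking them again in the AEU.
 */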
2761 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2762 {
2763         struct attn_route attn;
2764         struct attn_route group_mask;
2765         int port = BP_PORT(bp);
2766         int index;
2767         u32 reg_addr;
2768         u32 val;
2769         u32 aeu_mask;
2770
2771         /* need to take HW lock because MCP or other port might also
2772            try to handle this event */
2773         bnx2x_acquire_alr(bp);
2774
2775         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2776         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2777         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2778         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2779         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2780            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2781
2782         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2783                 if (deasserted & (1 << index)) {
2784                         group_mask = bp->attn_group[index];
2785
2786                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2787                            index, group_mask.sig[0], group_mask.sig[1],
2788                            group_mask.sig[2], group_mask.sig[3]);
2789
2790                         bnx2x_attn_int_deasserted3(bp,
2791                                         attn.sig[3] & group_mask.sig[3]);
2792                         bnx2x_attn_int_deasserted1(bp,
2793                                         attn.sig[1] & group_mask.sig[1]);
2794                         bnx2x_attn_int_deasserted2(bp,
2795                                         attn.sig[2] & group_mask.sig[2]);
2796                         bnx2x_attn_int_deasserted0(bp,
2797                                         attn.sig[0] & group_mask.sig[0]);
2798
2799                         if ((attn.sig[0] & group_mask.sig[0] &
2800                                                 HW_PRTY_ASSERT_SET_0) ||
2801                             (attn.sig[1] & group_mask.sig[1] &
2802                                                 HW_PRTY_ASSERT_SET_1) ||
2803                             (attn.sig[2] & group_mask.sig[2] &
2804                                                 HW_PRTY_ASSERT_SET_2))
2805                                 BNX2X_ERR("FATAL HW block parity attention\n");
2806                 }
2807         }
2808
2809         bnx2x_release_alr(bp);
2810
2811         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2812
2813         val = ~deasserted;
2814         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2815            val, reg_addr);
2816         REG_WR(bp, reg_addr, val);
2817
2818         if (~bp->attn_state & deasserted)
2819                 BNX2X_ERR("IGU ERROR\n");
2820
2821         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2822                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2823
2824         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2825         aeu_mask = REG_RD(bp, reg_addr);
2826
2827         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2828            aeu_mask, deasserted);
2829         aeu_mask |= (deasserted & 0xff);
2830         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2831
2832         REG_WR(bp, reg_addr, aeu_mask);
2833         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2834
2835         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2836         bp->attn_state &= ~deasserted;
2837         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2838 }
2839
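/*
 * Compare the attention bits reported in the default status block with
 * what has already been acknowledged and with the driver's attn_state:
 * bits that are newly set are "asserted", bits that dropped while still
 * acknowledged/tracked are "deasserted", and each set is handled in turn.
 */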
2840 static void bnx2x_attn_int(struct bnx2x *bp)
2841 {
2842         /* read local copy of bits */
2843         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2844                                                                 attn_bits);
2845         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2846                                                                 attn_bits_ack);
2847         u32 attn_state = bp->attn_state;
2848
2849         /* look for changed bits */
2850         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2851         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
2852
2853         DP(NETIF_MSG_HW,
2854            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2855            attn_bits, attn_ack, asserted, deasserted);
2856
2857         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2858                 BNX2X_ERR("BAD attention state\n");
2859
2860         /* handle bits that were raised */
2861         if (asserted)
2862                 bnx2x_attn_int_asserted(bp, asserted);
2863
2864         if (deasserted)
2865                 bnx2x_attn_int_deasserted(bp, deasserted);
2866 }
2867
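/*
 * Slowpath work item: find out which default status block indices have
 * advanced, service HW attentions if needed, and acknowledge each storm
 * index, re-enabling the IGU interrupt only with the final ack.
 */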
2868 static void bnx2x_sp_task(struct work_struct *work)
2869 {
2870         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2871         u16 status;
2872
2873
2874         /* Return here if interrupt is disabled */
2875         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2876                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2877                 return;
2878         }
2879
2880         status = bnx2x_update_dsb_idx(bp);
2881 /*      if (status == 0)                                     */
2882 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2883
2884         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2885
2886         /* HW attentions */
2887         if (status & 0x1)
2888                 bnx2x_attn_int(bp);
2889
2890         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2891                      IGU_INT_NOP, 1);
2892         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2893                      IGU_INT_NOP, 1);
2894         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2895                      IGU_INT_NOP, 1);
2896         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2897                      IGU_INT_NOP, 1);
2898         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2899                      IGU_INT_ENABLE, 1);
2900
2901 }
2902
2903 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2904 {
2905         struct net_device *dev = dev_instance;
2906         struct bnx2x *bp = netdev_priv(dev);
2907
2908         /* Return here if interrupt is disabled */
2909         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2910                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2911                 return IRQ_HANDLED;
2912         }
2913
2914         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2915
2916 #ifdef BNX2X_STOP_ON_ERROR
2917         if (unlikely(bp->panic))
2918                 return IRQ_HANDLED;
2919 #endif
2920
2921         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2922
2923         return IRQ_HANDLED;
2924 }
2925
2926 /* end of slow path */
2927
2928 /* Statistics */
2929
2930 /****************************************************************************
2931 * Macros
2932 ****************************************************************************/
2933
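/*
 * Statistics counters are kept as {hi, lo} pairs of u32.  ADD_64 and
 * DIFF_64 below implement 64-bit add/subtract on such pairs by hand,
 * detecting carry (the low word wrapped) and borrow explicitly;
 * DIFF_64 clamps the result to zero if the subtrahend is larger than
 * the minuend.
 */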
2934 /* sum[hi:lo] += add[hi:lo] */
2935 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2936         do { \
2937                 s_lo += a_lo; \
2938                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2939         } while (0)
2940
2941 /* difference = minuend - subtrahend */
2942 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2943         do { \
2944                 if (m_lo < s_lo) { \
2945                         /* underflow */ \
2946                         d_hi = m_hi - s_hi; \
2947                         if (d_hi > 0) { \
2948                                 /* we can 'borrow' 1 */ \
2949                                 d_hi--; \
2950                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2951                         } else { \
2952                                 /* m_hi <= s_hi */ \
2953                                 d_hi = 0; \
2954                                 d_lo = 0; \
2955                         } \
2956                 } else { \
2957                         /* m_lo >= s_lo */ \
2958                         if (m_hi < s_hi) { \
2959                                 d_hi = 0; \
2960                                 d_lo = 0; \
2961                         } else { \
2962                                 /* m_hi >= s_hi */ \
2963                                 d_hi = m_hi - s_hi; \
2964                                 d_lo = m_lo - s_lo; \
2965                         } \
2966                 } \
2967         } while (0)
2968
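/*
 * UPDATE_STAT64: mac_stx[0] holds the previous raw MAC readout; compute
 * the delta against the new readout and accumulate it into the running
 * totals kept in mac_stx[1].  UPDATE_STAT64_NIG does the same against
 * the saved NIG snapshot, accumulating directly into the eth stats.
 */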
2969 #define UPDATE_STAT64(s, t) \
2970         do { \
2971                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2972                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2973                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2974                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2975                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2976                        pstats->mac_stx[1].t##_lo, diff.lo); \
2977         } while (0)
2978
2979 #define UPDATE_STAT64_NIG(s, t) \
2980         do { \
2981                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2982                         diff.lo, new->s##_lo, old->s##_lo); \
2983                 ADD_64(estats->t##_hi, diff.hi, \
2984                        estats->t##_lo, diff.lo); \
2985         } while (0)
2986
2987 /* sum[hi:lo] += add */
2988 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2989         do { \
2990                 s_lo += a; \
2991                 s_hi += (s_lo < a) ? 1 : 0; \
2992         } while (0)
2993
2994 #define UPDATE_EXTEND_STAT(s) \
2995         do { \
2996                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2997                               pstats->mac_stx[1].s##_lo, \
2998                               new->s); \
2999         } while (0)
3000
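/*
 * The per-client storm counters are only 32 bits wide.  The
 * UPDATE_EXTEND_*STAT macros widen them by adding the delta since the
 * previously saved value to the 64-bit per-queue counter.
 */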
3001 #define UPDATE_EXTEND_TSTAT(s, t) \
3002         do { \
3003                 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
3004                 old_tclient->s = le32_to_cpu(tclient->s); \
3005                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3006         } while (0)
3007
3008 #define UPDATE_EXTEND_USTAT(s, t) \
3009         do { \
3010                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3011                 old_uclient->s = uclient->s; \
3012                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3013         } while (0)
3014
3015 #define UPDATE_EXTEND_XSTAT(s, t) \
3016         do { \
3017                 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
3018                 old_xclient->s = le32_to_cpu(xclient->s); \
3019                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3020         } while (0)
3021
3022 /* minuend -= subtrahend */
3023 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3024         do { \
3025                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3026         } while (0)
3027
3028 /* minuend[hi:lo] -= subtrahend */
3029 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3030         do { \
3031                 SUB_64(m_hi, 0, m_lo, s); \
3032         } while (0)
3033
3034 #define SUB_EXTEND_USTAT(s, t) \
3035         do { \
3036                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3037                 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3038         } while (0)
3039
3040 /*
3041  * General service functions
3042  */
3043
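/* fold a {hi, lo} statistics pair into a long (low word only on 32-bit) */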
3044 static inline long bnx2x_hilo(u32 *hiref)
3045 {
3046         u32 lo = *(hiref + 1);
3047 #if (BITS_PER_LONG == 64)
3048         u32 hi = *hiref;
3049
3050         return HILO_U64(hi, lo);
3051 #else
3052         return lo;
3053 #endif
3054 }
3055
3056 /*
3057  * Init service functions
3058  */
3059
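/*
 * Ask the storms for a fresh statistics snapshot: post a STAT_QUERY
 * ramrod carrying an incrementing driver counter and the bitmask of
 * client ids to collect for.  The counter is later checked against the
 * per-client stats_counter to detect stale snapshots.
 */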
3060 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3061 {
3062         if (!bp->stats_pending) {
3063                 struct eth_query_ramrod_data ramrod_data = {0};
3064                 int i, rc;
3065
3066                 ramrod_data.drv_counter = bp->stats_counter++;
3067                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3068                 for_each_queue(bp, i)
3069                         ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3070
3071                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3072                                    ((u32 *)&ramrod_data)[1],
3073                                    ((u32 *)&ramrod_data)[0], 0);
3074                 if (rc == 0) {
3075                         /* stats ramrod has its own slot on the spq */
3076                         bp->spq_left++;
3077                         bp->stats_pending = 1;
3078                 }
3079         }
3080 }
3081
3082 static void bnx2x_stats_init(struct bnx2x *bp)
3083 {
3084         int port = BP_PORT(bp);
3085         int i;
3086
3087         bp->stats_pending = 0;
3088         bp->executer_idx = 0;
3089         bp->stats_counter = 0;
3090
3091         /* port stats */
3092         if (!BP_NOMCP(bp))
3093                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3094         else
3095                 bp->port.port_stx = 0;
3096         DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3097
3098         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3099         bp->port.old_nig_stats.brb_discard =
3100                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3101         bp->port.old_nig_stats.brb_truncate =
3102                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3103         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3104                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3105         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3106                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3107
3108         /* function stats */
3109         for_each_queue(bp, i) {
3110                 struct bnx2x_fastpath *fp = &bp->fp[i];
3111
3112                 memset(&fp->old_tclient, 0,
3113                        sizeof(struct tstorm_per_client_stats));
3114                 memset(&fp->old_uclient, 0,
3115                        sizeof(struct ustorm_per_client_stats));
3116                 memset(&fp->old_xclient, 0,
3117                        sizeof(struct xstorm_per_client_stats));
3118                 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3119         }
3120
3121         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3122         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3123
3124         bp->stats_state = STATS_STATE_DISABLED;
3125         if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3126                 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3127 }
3128
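/*
 * Start the DMAE transfers prepared by the *_stats_init functions:
 * either hand a prepared command chain to the loader or, with only the
 * per-function block configured, post that single command directly.
 * stats_comp is preset to the completion value so nothing is pending
 * when the post is skipped, and cleared right before posting so
 * bnx2x_stats_comp() can poll for the real DMAE completion.
 */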
3129 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3130 {
3131         struct dmae_command *dmae = &bp->stats_dmae;
3132         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3133
3134         *stats_comp = DMAE_COMP_VAL;
3135         if (CHIP_REV_IS_SLOW(bp))
3136                 return;
3137
3138         /* loader */
3139         if (bp->executer_idx) {
3140                 int loader_idx = PMF_DMAE_C(bp);
3141
3142                 memset(dmae, 0, sizeof(struct dmae_command));
3143
3144                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3145                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3146                                 DMAE_CMD_DST_RESET |
3147 #ifdef __BIG_ENDIAN
3148                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3149 #else
3150                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3151 #endif
3152                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3153                                                DMAE_CMD_PORT_0) |
3154                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3155                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3156                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3157                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3158                                      sizeof(struct dmae_command) *
3159                                      (loader_idx + 1)) >> 2;
3160                 dmae->dst_addr_hi = 0;
3161                 dmae->len = sizeof(struct dmae_command) >> 2;
3162                 if (CHIP_IS_E1(bp))
3163                         dmae->len--;
3164                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3165                 dmae->comp_addr_hi = 0;
3166                 dmae->comp_val = 1;
3167
3168                 *stats_comp = 0;
3169                 bnx2x_post_dmae(bp, dmae, loader_idx);
3170
3171         } else if (bp->func_stx) {
3172                 *stats_comp = 0;
3173                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3174         }
3175 }
3176
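/* poll (msleep(1) per try, up to 10 tries) for the DMAE completion value */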
3177 static int bnx2x_stats_comp(struct bnx2x *bp)
3178 {
3179         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3180         int cnt = 10;
3181
3182         might_sleep();
3183         while (*stats_comp != DMAE_COMP_VAL) {
3184                 if (!cnt) {
3185                         BNX2X_ERR("timeout waiting for stats finished\n");
3186                         break;
3187                 }
3188                 cnt--;
3189                 msleep(1);
3190         }
3191         return 1;
3192 }
3193
3194 /*
3195  * Statistics service functions
3196  */
3197
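/*
 * Called when this function becomes the PMF: read the port statistics
 * image back from shared memory (port_stx) into the host copy, in two
 * DMAE reads because a single read is limited to DMAE_LEN32_RD_MAX
 * dwords, so accumulation continues from the existing port totals.
 */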
3198 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3199 {
3200         struct dmae_command *dmae;
3201         u32 opcode;
3202         int loader_idx = PMF_DMAE_C(bp);
3203         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3204
3205         /* sanity */
3206         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3207                 BNX2X_ERR("BUG!\n");
3208                 return;
3209         }
3210
3211         bp->executer_idx = 0;
3212
3213         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3214                   DMAE_CMD_C_ENABLE |
3215                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3216 #ifdef __BIG_ENDIAN
3217                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3218 #else
3219                   DMAE_CMD_ENDIANITY_DW_SWAP |
3220 #endif
3221                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3222                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3223
3224         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3225         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3226         dmae->src_addr_lo = bp->port.port_stx >> 2;
3227         dmae->src_addr_hi = 0;
3228         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3229         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3230         dmae->len = DMAE_LEN32_RD_MAX;
3231         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3232         dmae->comp_addr_hi = 0;
3233         dmae->comp_val = 1;
3234
3235         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3236         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3237         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3238         dmae->src_addr_hi = 0;
3239         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3240                                    DMAE_LEN32_RD_MAX * 4);
3241         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3242                                    DMAE_LEN32_RD_MAX * 4);
3243         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3244         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3245         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3246         dmae->comp_val = DMAE_COMP_VAL;
3247
3248         *stats_comp = 0;
3249         bnx2x_hw_stats_post(bp);
3250         bnx2x_stats_comp(bp);
3251 }
3252
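/*
 * Build the DMAE command chain executed on every statistics cycle while
 * this function is the PMF: write the host port (and function) stats
 * out to shared memory, read the active MAC (BMAC or EMAC) counters and
 * the NIG counters back into host memory.  Only the last command
 * completes to the stats_comp word; the others chain via GO registers.
 */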
3253 static void bnx2x_port_stats_init(struct bnx2x *bp)
3254 {
3255         struct dmae_command *dmae;
3256         int port = BP_PORT(bp);
3257         int vn = BP_E1HVN(bp);
3258         u32 opcode;
3259         int loader_idx = PMF_DMAE_C(bp);
3260         u32 mac_addr;
3261         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3262
3263         /* sanity */
3264         if (!bp->link_vars.link_up || !bp->port.pmf) {
3265                 BNX2X_ERR("BUG!\n");
3266                 return;
3267         }
3268
3269         bp->executer_idx = 0;
3270
3271         /* MCP */
3272         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3273                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3274                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3275 #ifdef __BIG_ENDIAN
3276                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3277 #else
3278                   DMAE_CMD_ENDIANITY_DW_SWAP |
3279 #endif
3280                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3281                   (vn << DMAE_CMD_E1HVN_SHIFT));
3282
3283         if (bp->port.port_stx) {
3284
3285                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3286                 dmae->opcode = opcode;
3287                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3288                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3289                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3290                 dmae->dst_addr_hi = 0;
3291                 dmae->len = sizeof(struct host_port_stats) >> 2;
3292                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3293                 dmae->comp_addr_hi = 0;
3294                 dmae->comp_val = 1;
3295         }
3296
3297         if (bp->func_stx) {
3298
3299                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3300                 dmae->opcode = opcode;
3301                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3302                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3303                 dmae->dst_addr_lo = bp->func_stx >> 2;
3304                 dmae->dst_addr_hi = 0;
3305                 dmae->len = sizeof(struct host_func_stats) >> 2;
3306                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3307                 dmae->comp_addr_hi = 0;
3308                 dmae->comp_val = 1;
3309         }
3310
3311         /* MAC */
3312         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3313                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3314                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3315 #ifdef __BIG_ENDIAN
3316                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3317 #else
3318                   DMAE_CMD_ENDIANITY_DW_SWAP |
3319 #endif
3320                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3321                   (vn << DMAE_CMD_E1HVN_SHIFT));
3322
3323         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3324
3325                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3326                                    NIG_REG_INGRESS_BMAC0_MEM);
3327
3328                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3329                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3330                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3331                 dmae->opcode = opcode;
3332                 dmae->src_addr_lo = (mac_addr +
3333                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3334                 dmae->src_addr_hi = 0;
3335                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3336                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3337                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3338                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3339                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3340                 dmae->comp_addr_hi = 0;
3341                 dmae->comp_val = 1;
3342
3343                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3344                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3345                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3346                 dmae->opcode = opcode;
3347                 dmae->src_addr_lo = (mac_addr +
3348                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3349                 dmae->src_addr_hi = 0;
3350                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3351                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3352                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3353                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3354                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3355                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3356                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3357                 dmae->comp_addr_hi = 0;
3358                 dmae->comp_val = 1;
3359
3360         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3361
3362                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3363
3364                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3365                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3366                 dmae->opcode = opcode;
3367                 dmae->src_addr_lo = (mac_addr +
3368                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3369                 dmae->src_addr_hi = 0;
3370                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3371                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3372                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3373                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3374                 dmae->comp_addr_hi = 0;
3375                 dmae->comp_val = 1;
3376
3377                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3378                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3379                 dmae->opcode = opcode;
3380                 dmae->src_addr_lo = (mac_addr +
3381                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3382                 dmae->src_addr_hi = 0;
3383                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3384                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3385                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3386                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3387                 dmae->len = 1;
3388                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3389                 dmae->comp_addr_hi = 0;
3390                 dmae->comp_val = 1;
3391
3392                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3393                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3394                 dmae->opcode = opcode;
3395                 dmae->src_addr_lo = (mac_addr +
3396                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3397                 dmae->src_addr_hi = 0;
3398                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3399                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3400                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3401                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3402                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3403                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3404                 dmae->comp_addr_hi = 0;
3405                 dmae->comp_val = 1;
3406         }
3407
3408         /* NIG */
3409         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3410         dmae->opcode = opcode;
3411         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3412                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3413         dmae->src_addr_hi = 0;
3414         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3415         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3416         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3417         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3418         dmae->comp_addr_hi = 0;
3419         dmae->comp_val = 1;
3420
3421         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3422         dmae->opcode = opcode;
3423         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3424                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3425         dmae->src_addr_hi = 0;
3426         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3427                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3428         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3429                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3430         dmae->len = (2*sizeof(u32)) >> 2;
3431         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3432         dmae->comp_addr_hi = 0;
3433         dmae->comp_val = 1;
3434
3435         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3436         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3437                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3438                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3439 #ifdef __BIG_ENDIAN
3440                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3441 #else
3442                         DMAE_CMD_ENDIANITY_DW_SWAP |
3443 #endif
3444                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3445                         (vn << DMAE_CMD_E1HVN_SHIFT));
3446         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3447                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3448         dmae->src_addr_hi = 0;
3449         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3450                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3451         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3452                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3453         dmae->len = (2*sizeof(u32)) >> 2;
3454         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3455         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3456         dmae->comp_val = DMAE_COMP_VAL;
3457
3458         *stats_comp = 0;
3459 }
3460
3461 static void bnx2x_func_stats_init(struct bnx2x *bp)
3462 {
3463         struct dmae_command *dmae = &bp->stats_dmae;
3464         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3465
3466         /* sanity */
3467         if (!bp->func_stx) {
3468                 BNX2X_ERR("BUG!\n");
3469                 return;
3470         }
3471
3472         bp->executer_idx = 0;
3473         memset(dmae, 0, sizeof(struct dmae_command));
3474
3475         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3476                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3477                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3478 #ifdef __BIG_ENDIAN
3479                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3480 #else
3481                         DMAE_CMD_ENDIANITY_DW_SWAP |
3482 #endif
3483                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3484                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3485         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3486         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3487         dmae->dst_addr_lo = bp->func_stx >> 2;
3488         dmae->dst_addr_hi = 0;
3489         dmae->len = sizeof(struct host_func_stats) >> 2;
3490         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3491         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3492         dmae->comp_val = DMAE_COMP_VAL;
3493
3494         *stats_comp = 0;
3495 }
3496
3497 static void bnx2x_stats_start(struct bnx2x *bp)
3498 {
3499         if (bp->port.pmf)
3500                 bnx2x_port_stats_init(bp);
3501
3502         else if (bp->func_stx)
3503                 bnx2x_func_stats_init(bp);
3504
3505         bnx2x_hw_stats_post(bp);
3506         bnx2x_storm_stats_post(bp);
3507 }
3508
3509 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3510 {
3511         bnx2x_stats_comp(bp);
3512         bnx2x_stats_pmf_update(bp);
3513         bnx2x_stats_start(bp);
3514 }
3515
3516 static void bnx2x_stats_restart(struct bnx2x *bp)
3517 {
3518         bnx2x_stats_comp(bp);
3519         bnx2x_stats_start(bp);
3520 }
3521
3522 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3523 {
3524         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3525         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3526         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3527         struct regpair diff;
3528
3529         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3530         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3531         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3532         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3533         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3534         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3535         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3536         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3537         UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3538         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3539         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3540         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3541         UPDATE_STAT64(tx_stat_gt127,
3542                                 tx_stat_etherstatspkts65octetsto127octets);
3543         UPDATE_STAT64(tx_stat_gt255,
3544                                 tx_stat_etherstatspkts128octetsto255octets);
3545         UPDATE_STAT64(tx_stat_gt511,
3546                                 tx_stat_etherstatspkts256octetsto511octets);
3547         UPDATE_STAT64(tx_stat_gt1023,
3548                                 tx_stat_etherstatspkts512octetsto1023octets);
3549         UPDATE_STAT64(tx_stat_gt1518,
3550                                 tx_stat_etherstatspkts1024octetsto1522octets);
3551         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3552         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3553         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3554         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3555         UPDATE_STAT64(tx_stat_gterr,
3556                                 tx_stat_dot3statsinternalmactransmiterrors);
3557         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3558
3559         estats->pause_frames_received_hi =
3560                                 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3561         estats->pause_frames_received_lo =
3562                                 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3563
3564         estats->pause_frames_sent_hi =
3565                                 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3566         estats->pause_frames_sent_lo =
3567                                 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3568 }
3569
3570 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3571 {
3572         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3573         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3574         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3575
3576         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3577         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3578         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3579         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3580         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3581         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3582         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3583         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3584         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3585         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3586         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3587         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3588         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3589         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3590         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3591         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3592         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3593         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3594         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3595         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3596         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3597         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3598         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3599         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3600         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3601         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3602         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3603         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3604         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3605         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3606         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3607
3608         estats->pause_frames_received_hi =
3609                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3610         estats->pause_frames_received_lo =
3611                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3612         ADD_64(estats->pause_frames_received_hi,
3613                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3614                estats->pause_frames_received_lo,
3615                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3616
3617         estats->pause_frames_sent_hi =
3618                         pstats->mac_stx[1].tx_stat_outxonsent_hi;
3619         estats->pause_frames_sent_lo =
3620                         pstats->mac_stx[1].tx_stat_outxonsent_lo;
3621         ADD_64(estats->pause_frames_sent_hi,
3622                pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3623                estats->pause_frames_sent_lo,
3624                pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3625 }
3626
3627 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3628 {
3629         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3630         struct nig_stats *old = &(bp->port.old_nig_stats);
3631         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3632         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3633         struct regpair diff;
3634         u32 nig_timer_max;
3635
3636         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3637                 bnx2x_bmac_stats_update(bp);
3638
3639         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3640                 bnx2x_emac_stats_update(bp);
3641
3642         else { /* unreached */
3643                 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3644                 return -1;
3645         }
3646
3647         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3648                       new->brb_discard - old->brb_discard);
3649         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3650                       new->brb_truncate - old->brb_truncate);
3651
3652         UPDATE_STAT64_NIG(egress_mac_pkt0,
3653                                         etherstatspkts1024octetsto1522octets);
3654         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3655
3656         memcpy(old, new, sizeof(struct nig_stats));
3657
3658         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3659                sizeof(struct mac_stx));
3660         estats->brb_drop_hi = pstats->brb_drop_hi;
3661         estats->brb_drop_lo = pstats->brb_drop_lo;
3662
3663         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3664
3665         nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3666         if (nig_timer_max != estats->nig_timer_max) {
3667                 estats->nig_timer_max = nig_timer_max;
3668                 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3669         }
3670
3671         return 0;
3672 }
3673
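/*
 * Fold the per-client statistics reported by the t/u/x storms into the
 * per-queue and per-function totals.  Every client block carries a
 * stats_counter; if any of them is not exactly one behind the driver's
 * counter the snapshot is stale and a non-zero value is returned.
 */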
3674 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3675 {
3676         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3677         struct tstorm_per_port_stats *tport =
3678                                         &stats->tstorm_common.port_statistics;
3679         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3680         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3681         int i;
3682
3683         memset(&(fstats->total_bytes_received_hi), 0,
3684                sizeof(struct host_func_stats) - 2*sizeof(u32));
3685         estats->error_bytes_received_hi = 0;
3686         estats->error_bytes_received_lo = 0;
3687         estats->etherstatsoverrsizepkts_hi = 0;
3688         estats->etherstatsoverrsizepkts_lo = 0;
3689         estats->no_buff_discard_hi = 0;
3690         estats->no_buff_discard_lo = 0;
3691
3692         for_each_queue(bp, i) {
3693                 struct bnx2x_fastpath *fp = &bp->fp[i];
3694                 int cl_id = fp->cl_id;
3695                 struct tstorm_per_client_stats *tclient =
3696                                 &stats->tstorm_common.client_statistics[cl_id];
3697                 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3698                 struct ustorm_per_client_stats *uclient =
3699                                 &stats->ustorm_common.client_statistics[cl_id];
3700                 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3701                 struct xstorm_per_client_stats *xclient =
3702                                 &stats->xstorm_common.client_statistics[cl_id];
3703                 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3704                 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3705                 u32 diff;
3706
3707                 /* are storm stats valid? */
3708                 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3709                                                         bp->stats_counter) {
3710                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3711                            "  xstorm counter (%d) != stats_counter (%d)\n",
3712                            i, xclient->stats_counter, bp->stats_counter);
3713                         return -1;
3714                 }
3715                 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3716                                                         bp->stats_counter) {
3717                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3718                            "  tstorm counter (%d) != stats_counter (%d)\n",
3719                            i, tclient->stats_counter, bp->stats_counter);
3720                         return -2;
3721                 }
3722                 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3723                                                         bp->stats_counter) {
3724                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3725                            "  ustorm counter (%d) != stats_counter (%d)\n",
3726                            i, uclient->stats_counter, bp->stats_counter);
3727                         return -4;
3728                 }
3729
3730                 qstats->total_bytes_received_hi =
3731                 qstats->valid_bytes_received_hi =
3732                                 le32_to_cpu(tclient->total_rcv_bytes.hi);
3733                 qstats->total_bytes_received_lo =
3734                 qstats->valid_bytes_received_lo =
3735                                 le32_to_cpu(tclient->total_rcv_bytes.lo);
3736
3737                 qstats->error_bytes_received_hi =
3738                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
3739                 qstats->error_bytes_received_lo =
3740                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
3741
3742                 ADD_64(qstats->total_bytes_received_hi,
3743                        qstats->error_bytes_received_hi,
3744                        qstats->total_bytes_received_lo,
3745                        qstats->error_bytes_received_lo);
3746
3747                 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3748                                         total_unicast_packets_received);
3749                 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3750                                         total_multicast_packets_received);
3751                 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3752                                         total_broadcast_packets_received);
3753                 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3754                                         etherstatsoverrsizepkts);
3755                 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3756
3757                 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3758                                         total_unicast_packets_received);
3759                 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3760                                         total_multicast_packets_received);
3761                 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3762                                         total_broadcast_packets_received);
3763                 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3764                 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3765                 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3766
3767                 qstats->total_bytes_transmitted_hi =
3768                                 le32_to_cpu(xclient->total_sent_bytes.hi);
3769                 qstats->total_bytes_transmitted_lo =
3770                                 le32_to_cpu(xclient->total_sent_bytes.lo);
3771
3772                 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3773                                         total_unicast_packets_transmitted);
3774                 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3775                                         total_multicast_packets_transmitted);
3776                 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3777                                         total_broadcast_packets_transmitted);
3778
3779                 old_tclient->checksum_discard = tclient->checksum_discard;
3780                 old_tclient->ttl0_discard = tclient->ttl0_discard;
3781
3782                 ADD_64(fstats->total_bytes_received_hi,
3783                        qstats->total_bytes_received_hi,
3784                        fstats->total_bytes_received_lo,
3785                        qstats->total_bytes_received_lo);
3786                 ADD_64(fstats->total_bytes_transmitted_hi,
3787                        qstats->total_bytes_transmitted_hi,
3788                        fstats->total_bytes_transmitted_lo,
3789                        qstats->total_bytes_transmitted_lo);
3790                 ADD_64(fstats->total_unicast_packets_received_hi,
3791                        qstats->total_unicast_packets_received_hi,
3792                        fstats->total_unicast_packets_received_lo,
3793                        qstats->total_unicast_packets_received_lo);
3794                 ADD_64(fstats->total_multicast_packets_received_hi,
3795                        qstats->total_multicast_packets_received_hi,
3796                        fstats->total_multicast_packets_received_lo,
3797                        qstats->total_multicast_packets_received_lo);
3798                 ADD_64(fstats->total_broadcast_packets_received_hi,
3799                        qstats->total_broadcast_packets_received_hi,
3800                        fstats->total_broadcast_packets_received_lo,
3801                        qstats->total_broadcast_packets_received_lo);
3802                 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3803                        qstats->total_unicast_packets_transmitted_hi,
3804                        fstats->total_unicast_packets_transmitted_lo,
3805                        qstats->total_unicast_packets_transmitted_lo);
3806                 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3807                        qstats->total_multicast_packets_transmitted_hi,
3808                        fstats->total_multicast_packets_transmitted_lo,
3809                        qstats->total_multicast_packets_transmitted_lo);
3810                 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3811                        qstats->total_broadcast_packets_transmitted_hi,
3812                        fstats->total_broadcast_packets_transmitted_lo,
3813                        qstats->total_broadcast_packets_transmitted_lo);
3814                 ADD_64(fstats->valid_bytes_received_hi,
3815                        qstats->valid_bytes_received_hi,
3816                        fstats->valid_bytes_received_lo,
3817                        qstats->valid_bytes_received_lo);
3818
3819                 ADD_64(estats->error_bytes_received_hi,
3820                        qstats->error_bytes_received_hi,
3821                        estats->error_bytes_received_lo,
3822                        qstats->error_bytes_received_lo);
3823                 ADD_64(estats->etherstatsoverrsizepkts_hi,
3824                        qstats->etherstatsoverrsizepkts_hi,
3825                        estats->etherstatsoverrsizepkts_lo,
3826                        qstats->etherstatsoverrsizepkts_lo);
3827                 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3828                        estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3829         }
3830
3831         ADD_64(fstats->total_bytes_received_hi,
3832                estats->rx_stat_ifhcinbadoctets_hi,
3833                fstats->total_bytes_received_lo,
3834                estats->rx_stat_ifhcinbadoctets_lo);
3835
3836         memcpy(estats, &(fstats->total_bytes_received_hi),
3837                sizeof(struct host_func_stats) - 2*sizeof(u32));
3838
3839         ADD_64(estats->etherstatsoverrsizepkts_hi,
3840                estats->rx_stat_dot3statsframestoolong_hi,
3841                estats->etherstatsoverrsizepkts_lo,
3842                estats->rx_stat_dot3statsframestoolong_lo);
3843         ADD_64(estats->error_bytes_received_hi,
3844                estats->rx_stat_ifhcinbadoctets_hi,
3845                estats->error_bytes_received_lo,
3846                estats->rx_stat_ifhcinbadoctets_lo);
3847
3848         if (bp->port.pmf) {
3849                 estats->mac_filter_discard =
3850                                 le32_to_cpu(tport->mac_filter_discard);
3851                 estats->xxoverflow_discard =
3852                                 le32_to_cpu(tport->xxoverflow_discard);
3853                 estats->brb_truncate_discard =
3854                                 le32_to_cpu(tport->brb_truncate_discard);
3855                 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3856         }
3857
3858         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3859
3860         bp->stats_pending = 0;
3861
3862         return 0;
3863 }
3864
3865 static void bnx2x_net_stats_update(struct bnx2x *bp)
3866 {
3867         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3868         struct net_device_stats *nstats = &bp->dev->stats;
3869         int i;
3870
3871         nstats->rx_packets =
3872                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3873                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3874                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3875
3876         nstats->tx_packets =
3877                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3878                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3879                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3880
3881         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
3882
3883         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3884
3885         nstats->rx_dropped = estats->mac_discard;
3886         for_each_queue(bp, i)
3887                 nstats->rx_dropped +=
3888                         le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3889
3890         nstats->tx_dropped = 0;
3891
3892         nstats->multicast =
3893                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
3894
3895         nstats->collisions =
3896                 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
3897
3898         nstats->rx_length_errors =
3899                 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3900                 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3901         nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3902                                  bnx2x_hilo(&estats->brb_truncate_hi);
3903         nstats->rx_crc_errors =
3904                 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3905         nstats->rx_frame_errors =
3906                 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3907         nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
3908         nstats->rx_missed_errors = estats->xxoverflow_discard;
3909
3910         nstats->rx_errors = nstats->rx_length_errors +
3911                             nstats->rx_over_errors +
3912                             nstats->rx_crc_errors +
3913                             nstats->rx_frame_errors +
3914                             nstats->rx_fifo_errors +
3915                             nstats->rx_missed_errors;
3916
3917         nstats->tx_aborted_errors =
3918                 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3919                 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3920         nstats->tx_carrier_errors =
3921                 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
3922         nstats->tx_fifo_errors = 0;
3923         nstats->tx_heartbeat_errors = 0;
3924         nstats->tx_window_errors = 0;
3925
3926         nstats->tx_errors = nstats->tx_aborted_errors +
3927                             nstats->tx_carrier_errors +
3928             bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3929 }
3930
3931 static void bnx2x_drv_stats_update(struct bnx2x *bp)
3932 {
3933         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3934         int i;
3935
3936         estats->driver_xoff = 0;
3937         estats->rx_err_discard_pkt = 0;
3938         estats->rx_skb_alloc_failed = 0;
3939         estats->hw_csum_err = 0;
3940         for_each_queue(bp, i) {
3941                 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
3942
3943                 estats->driver_xoff += qstats->driver_xoff;
3944                 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
3945                 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
3946                 estats->hw_csum_err += qstats->hw_csum_err;
3947         }
3948 }
3949
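/*
 * Periodic statistics update: bail out unless the previous DMAE cycle
 * completed, refresh the MAC/NIG counters when acting as PMF, fold in
 * the storm statistics (panicking if they stay stale for several
 * updates in a row), derive the netdev and driver stats, and finally
 * re-post the HW and storm statistics requests for the next cycle.
 */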
3950 static void bnx2x_stats_update(struct bnx2x *bp)
3951 {
3952         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3953
3954         if (*stats_comp != DMAE_COMP_VAL)
3955                 return;
3956
3957         if (bp->port.pmf)
3958                 bnx2x_hw_stats_update(bp);
3959
3960         if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
3961                 BNX2X_ERR("storm stats were not updated for 3 times\n");
3962                 bnx2x_panic();
3963                 return;
3964         }
3965
3966         bnx2x_net_stats_update(bp);
3967         bnx2x_drv_stats_update(bp);
3968
3969         if (bp->msglevel & NETIF_MSG_TIMER) {
3970                 struct tstorm_per_client_stats *old_tclient =
3971                                                         &bp->fp->old_tclient;
3972                 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
3973                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3974                 struct net_device_stats *nstats = &bp->dev->stats;
3975                 int i;
3976
3977                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3978                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
3979                                   "  tx pkt (%lx)\n",
3980                        bnx2x_tx_avail(bp->fp),
3981                        le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3982                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
3983                                   "  rx pkt (%lx)\n",
3984                        (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3985                              bp->fp->rx_comp_cons),
3986                        le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3987                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
3988                                   "brb truncate %u\n",
3989                        (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
3990                        qstats->driver_xoff,
3991                        estats->brb_drop_lo, estats->brb_truncate_lo);
3992                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
3993                         "packets_too_big_discard %lu  no_buff_discard %lu  "
3994                         "mac_discard %u  mac_filter_discard %u  "
3995                         "xxoverflow_discard %u  brb_truncate_discard %u  "
3996                         "ttl0_discard %u\n",
3997                        old_tclient->checksum_discard,
3998                        bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
3999                        bnx2x_hilo(&qstats->no_buff_discard_hi),
4000                        estats->mac_discard, estats->mac_filter_discard,
4001                        estats->xxoverflow_discard, estats->brb_truncate_discard,
4002                        old_tclient->ttl0_discard);
4003
4004                 for_each_queue(bp, i) {
4005                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4006                                bnx2x_fp(bp, i, tx_pkt),
4007                                bnx2x_fp(bp, i, rx_pkt),
4008                                bnx2x_fp(bp, i, rx_calls));
4009                 }
4010         }
4011
4012         bnx2x_hw_stats_post(bp);
4013         bnx2x_storm_stats_post(bp);
4014 }
4015
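/* Build the DMAE commands that flush the final port statistics (and the
 * per-function statistics, when present) out to their shared memory areas
 * before statistics collection is stopped.
 */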
4016 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4017 {
4018         struct dmae_command *dmae;
4019         u32 opcode;
4020         int loader_idx = PMF_DMAE_C(bp);
4021         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4022
4023         bp->executer_idx = 0;
4024
4025         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4026                   DMAE_CMD_C_ENABLE |
4027                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4028 #ifdef __BIG_ENDIAN
4029                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
4030 #else
4031                   DMAE_CMD_ENDIANITY_DW_SWAP |
4032 #endif
4033                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4034                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4035
4036         if (bp->port.port_stx) {
4037
4038                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4039                 if (bp->func_stx)
4040                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4041                 else
4042                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4043                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4044                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4045                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4046                 dmae->dst_addr_hi = 0;
4047                 dmae->len = sizeof(struct host_port_stats) >> 2;
4048                 if (bp->func_stx) {
4049                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4050                         dmae->comp_addr_hi = 0;
4051                         dmae->comp_val = 1;
4052                 } else {
4053                         dmae->comp_addr_lo =
4054                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4055                         dmae->comp_addr_hi =
4056                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4057                         dmae->comp_val = DMAE_COMP_VAL;
4058
4059                         *stats_comp = 0;
4060                 }
4061         }
4062
4063         if (bp->func_stx) {
4064
4065                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4066                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4067                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4068                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4069                 dmae->dst_addr_lo = bp->func_stx >> 2;
4070                 dmae->dst_addr_hi = 0;
4071                 dmae->len = sizeof(struct host_func_stats) >> 2;
4072                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4073                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4074                 dmae->comp_val = DMAE_COMP_VAL;
4075
4076                 *stats_comp = 0;
4077         }
4078 }
4079
4080 static void bnx2x_stats_stop(struct bnx2x *bp)
4081 {
4082         int update = 0;
4083
4084         bnx2x_stats_comp(bp);
4085
4086         if (bp->port.pmf)
4087                 update = (bnx2x_hw_stats_update(bp) == 0);
4088
4089         update |= (bnx2x_storm_stats_update(bp) == 0);
4090
4091         if (update) {
4092                 bnx2x_net_stats_update(bp);
4093
4094                 if (bp->port.pmf)
4095                         bnx2x_port_stats_stop(bp);
4096
4097                 bnx2x_hw_stats_post(bp);
4098                 bnx2x_stats_comp(bp);
4099         }
4100 }
4101
4102 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4103 {
4104 }
4105
4106 static const struct {
4107         void (*action)(struct bnx2x *bp);
4108         enum bnx2x_stats_state next_state;
4109 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4110 /* state        event   */
4111 {
4112 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4113 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
4114 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4115 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4116 },
4117 {
4118 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
4119 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
4120 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
4121 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
4122 }
4123 };
4124
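/* Drive the statistics state machine: run the action registered for the
 * current state/event pair and move to its next state, e.g.
 * bnx2x_stats_handle(bp, STATS_EVENT_UPDATE) from the periodic timer.
 */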
4125 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4126 {
4127         enum bnx2x_stats_state state = bp->stats_state;
4128
4129         bnx2x_stats_stm[state][event].action(bp);
4130         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4131
4132         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4133                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4134                    state, event, bp->stats_state);
4135 }
4136
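/* Periodic driver timer: in poll mode services queue 0, exchanges the
 * driver/MCP pulse heartbeat through shared memory and kicks a statistics
 * update; re-arms itself every bp->current_interval jiffies.
 */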
4137 static void bnx2x_timer(unsigned long data)
4138 {
4139         struct bnx2x *bp = (struct bnx2x *) data;
4140
4141         if (!netif_running(bp->dev))
4142                 return;
4143
4144         if (atomic_read(&bp->intr_sem) != 0)
4145                 goto timer_restart;
4146
4147         if (poll) {
4148                 struct bnx2x_fastpath *fp = &bp->fp[0];
4149                 int rc;
4150
4151                 bnx2x_tx_int(fp, 1000);
4152                 rc = bnx2x_rx_int(fp, 1000);
4153         }
4154
4155         if (!BP_NOMCP(bp)) {
4156                 int func = BP_FUNC(bp);
4157                 u32 drv_pulse;
4158                 u32 mcp_pulse;
4159
4160                 ++bp->fw_drv_pulse_wr_seq;
4161                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4162                 /* TBD - add SYSTEM_TIME */
4163                 drv_pulse = bp->fw_drv_pulse_wr_seq;
4164                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4165
4166                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4167                              MCP_PULSE_SEQ_MASK);
4168                 /* The delta between driver pulse and mcp response
4169                  * should be 1 (before mcp response) or 0 (after mcp response)
4170                  */
4171                 if ((drv_pulse != mcp_pulse) &&
4172                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4173                         /* someone lost a heartbeat... */
4174                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4175                                   drv_pulse, mcp_pulse);
4176                 }
4177         }
4178
4179         if ((bp->state == BNX2X_STATE_OPEN) ||
4180             (bp->state == BNX2X_STATE_DISABLED))
4181                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4182
4183 timer_restart:
4184         mod_timer(&bp->timer, jiffies + bp->current_interval);
4185 }
4186
4187 /* end of Statistics */
4188
4189 /* nic init */
4190
4191 /*
4192  * nic init service functions
4193  */
4194
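/* Clear the USTORM and CSTORM copies of a fastpath status block in the
 * chip's internal memory.
 */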
4195 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4196 {
4197         int port = BP_PORT(bp);
4198
4199         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4200                         USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4201                         sizeof(struct ustorm_status_block)/4);
4202         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4203                         CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4204                         sizeof(struct cstorm_status_block)/4);
4205 }
4206
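/* Point the USTORM/CSTORM halves of a fastpath status block at its host DMA
 * address, record the owning function, start with host coalescing disabled
 * on every index and enable the IGU interrupt for this status block.
 */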
4207 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4208                           dma_addr_t mapping, int sb_id)
4209 {
4210         int port = BP_PORT(bp);
4211         int func = BP_FUNC(bp);
4212         int index;
4213         u64 section;
4214
4215         /* USTORM */
4216         section = ((u64)mapping) + offsetof(struct host_status_block,
4217                                             u_status_block);
4218         sb->u_status_block.status_block_id = sb_id;
4219
4220         REG_WR(bp, BAR_USTRORM_INTMEM +
4221                USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4222         REG_WR(bp, BAR_USTRORM_INTMEM +
4223                ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4224                U64_HI(section));
4225         REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4226                 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4227
4228         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4229                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4230                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4231
4232         /* CSTORM */
4233         section = ((u64)mapping) + offsetof(struct host_status_block,
4234                                             c_status_block);
4235         sb->c_status_block.status_block_id = sb_id;
4236
4237         REG_WR(bp, BAR_CSTRORM_INTMEM +
4238                CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4239         REG_WR(bp, BAR_CSTRORM_INTMEM +
4240                ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4241                U64_HI(section));
4242         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4243                 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4244
4245         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4246                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4247                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4248
4249         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4250 }
4251
4252 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4253 {
4254         int func = BP_FUNC(bp);
4255
4256         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4257                         USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4258                         sizeof(struct ustorm_def_status_block)/4);
4259         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4260                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4261                         sizeof(struct cstorm_def_status_block)/4);
4262         bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4263                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4264                         sizeof(struct xstorm_def_status_block)/4);
4265         bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4266                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4267                         sizeof(struct tstorm_def_status_block)/4);
4268 }
4269
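/* Initialize the default status block: latch the attention group masks from
 * the AEU registers, program the attention message address/number and set up
 * the USTORM/CSTORM/TSTORM/XSTORM default sections.
 */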
4270 static void bnx2x_init_def_sb(struct bnx2x *bp,
4271                               struct host_def_status_block *def_sb,
4272                               dma_addr_t mapping, int sb_id)
4273 {
4274         int port = BP_PORT(bp);
4275         int func = BP_FUNC(bp);
4276         int index, val, reg_offset;
4277         u64 section;
4278
4279         /* ATTN */
4280         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4281                                             atten_status_block);
4282         def_sb->atten_status_block.status_block_id = sb_id;
4283
4284         bp->attn_state = 0;
4285
4286         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4287                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4288
4289         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4290                 bp->attn_group[index].sig[0] = REG_RD(bp,
4291                                                      reg_offset + 0x10*index);
4292                 bp->attn_group[index].sig[1] = REG_RD(bp,
4293                                                reg_offset + 0x4 + 0x10*index);
4294                 bp->attn_group[index].sig[2] = REG_RD(bp,
4295                                                reg_offset + 0x8 + 0x10*index);
4296                 bp->attn_group[index].sig[3] = REG_RD(bp,
4297                                                reg_offset + 0xc + 0x10*index);
4298         }
4299
4300         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4301                              HC_REG_ATTN_MSG0_ADDR_L);
4302
4303         REG_WR(bp, reg_offset, U64_LO(section));
4304         REG_WR(bp, reg_offset + 4, U64_HI(section));
4305
4306         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4307
4308         val = REG_RD(bp, reg_offset);
4309         val |= sb_id;
4310         REG_WR(bp, reg_offset, val);
4311
4312         /* USTORM */
4313         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4314                                             u_def_status_block);
4315         def_sb->u_def_status_block.status_block_id = sb_id;
4316
4317         REG_WR(bp, BAR_USTRORM_INTMEM +
4318                USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4319         REG_WR(bp, BAR_USTRORM_INTMEM +
4320                ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4321                U64_HI(section));
4322         REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4323                 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4324
4325         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4326                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4327                          USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4328
4329         /* CSTORM */
4330         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4331                                             c_def_status_block);
4332         def_sb->c_def_status_block.status_block_id = sb_id;
4333
4334         REG_WR(bp, BAR_CSTRORM_INTMEM +
4335                CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4336         REG_WR(bp, BAR_CSTRORM_INTMEM +
4337                ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4338                U64_HI(section));
4339         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4340                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4341
4342         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4343                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4344                          CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4345
4346         /* TSTORM */
4347         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4348                                             t_def_status_block);
4349         def_sb->t_def_status_block.status_block_id = sb_id;
4350
4351         REG_WR(bp, BAR_TSTRORM_INTMEM +
4352                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4353         REG_WR(bp, BAR_TSTRORM_INTMEM +
4354                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4355                U64_HI(section));
4356         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4357                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4358
4359         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4360                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4361                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4362
4363         /* XSTORM */
4364         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4365                                             x_def_status_block);
4366         def_sb->x_def_status_block.status_block_id = sb_id;
4367
4368         REG_WR(bp, BAR_XSTRORM_INTMEM +
4369                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4370         REG_WR(bp, BAR_XSTRORM_INTMEM +
4371                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4372                U64_HI(section));
4373         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4374                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4375
4376         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4377                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4378                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4379
4380         bp->stats_pending = 0;
4381         bp->set_mac_pending = 0;
4382
4383         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4384 }
4385
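/* Program the rx/tx interrupt coalescing timeouts into the USTORM/CSTORM
 * host coalescing registers of every queue's status block; a tick value of
 * zero disables coalescing on that index.
 */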
4386 static void bnx2x_update_coalesce(struct bnx2x *bp)
4387 {
4388         int port = BP_PORT(bp);
4389         int i;
4390
4391         for_each_queue(bp, i) {
4392                 int sb_id = bp->fp[i].sb_id;
4393
4394                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4395                 REG_WR8(bp, BAR_USTRORM_INTMEM +
4396                         USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4397                                                     U_SB_ETH_RX_CQ_INDEX),
4398                         bp->rx_ticks/12);
4399                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4400                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4401                                                      U_SB_ETH_RX_CQ_INDEX),
4402                          bp->rx_ticks ? 0 : 1);
4403
4404                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4405                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4406                         CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4407                                                     C_SB_ETH_TX_CQ_INDEX),
4408                         bp->tx_ticks/12);
4409                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4410                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4411                                                      C_SB_ETH_TX_CQ_INDEX),
4412                          bp->tx_ticks ? 0 : 1);
4413         }
4414 }
4415
4416 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4417                                        struct bnx2x_fastpath *fp, int last)
4418 {
4419         int i;
4420
4421         for (i = 0; i < last; i++) {
4422                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4423                 struct sk_buff *skb = rx_buf->skb;
4424
4425                 if (skb == NULL) {
4426                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4427                         continue;
4428                 }
4429
4430                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4431                         pci_unmap_single(bp->pdev,
4432                                          pci_unmap_addr(rx_buf, mapping),
4433                                          bp->rx_buf_size,
4434                                          PCI_DMA_FROMDEVICE);
4435
4436                 dev_kfree_skb(skb);
4437                 rx_buf->skb = NULL;
4438         }
4439 }
4440
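/* Size the rx buffers from the MTU, pre-allocate the per-queue TPA skb
 * pools, chain the "next page" entries of the SGE, BD and CQ rings and fill
 * the rings before publishing the initial producers to the chip.
 */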
4441 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4442 {
4443         int func = BP_FUNC(bp);
4444         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4445                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4446         u16 ring_prod, cqe_ring_prod;
4447         int i, j;
4448
4449         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4450         DP(NETIF_MSG_IFUP,
4451            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4452
4453         if (bp->flags & TPA_ENABLE_FLAG) {
4454
4455                 for_each_rx_queue(bp, j) {
4456                         struct bnx2x_fastpath *fp = &bp->fp[j];
4457
4458                         for (i = 0; i < max_agg_queues; i++) {
4459                                 fp->tpa_pool[i].skb =
4460                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4461                                 if (!fp->tpa_pool[i].skb) {
4462                                         BNX2X_ERR("Failed to allocate TPA "
4463                                                   "skb pool for queue[%d] - "
4464                                                   "disabling TPA on this "
4465                                                   "queue!\n", j);
4466                                         bnx2x_free_tpa_pool(bp, fp, i);
4467                                         fp->disable_tpa = 1;
4468                                         break;
4469                                 }
4470                                 pci_unmap_addr_set((struct sw_rx_bd *)
4471                                                         &fp->tpa_pool[i],
4472                                                    mapping, 0);
4473                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4474                         }
4475                 }
4476         }
4477
4478         for_each_rx_queue(bp, j) {
4479                 struct bnx2x_fastpath *fp = &bp->fp[j];
4480
4481                 fp->rx_bd_cons = 0;
4482                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4483                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4484
4485                 /* "next page" elements initialization */
4486                 /* SGE ring */
4487                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4488                         struct eth_rx_sge *sge;
4489
4490                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4491                         sge->addr_hi =
4492                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4493                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4494                         sge->addr_lo =
4495                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4496                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4497                 }
4498
4499                 bnx2x_init_sge_ring_bit_mask(fp);
4500
4501                 /* RX BD ring */
4502                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4503                         struct eth_rx_bd *rx_bd;
4504
4505                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4506                         rx_bd->addr_hi =
4507                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4508                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4509                         rx_bd->addr_lo =
4510                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4511                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4512                 }
4513
4514                 /* CQ ring */
4515                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4516                         struct eth_rx_cqe_next_page *nextpg;
4517
4518                         nextpg = (struct eth_rx_cqe_next_page *)
4519                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4520                         nextpg->addr_hi =
4521                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4522                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4523                         nextpg->addr_lo =
4524                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4525                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4526                 }
4527
4528                 /* Allocate SGEs and initialize the ring elements */
4529                 for (i = 0, ring_prod = 0;
4530                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4531
4532                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4533                                 BNX2X_ERR("was only able to allocate "
4534                                           "%d rx sges\n", i);
4535                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4536                                 /* Cleanup already allocated elements */
4537                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4538                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4539                                 fp->disable_tpa = 1;
4540                                 ring_prod = 0;
4541                                 break;
4542                         }
4543                         ring_prod = NEXT_SGE_IDX(ring_prod);
4544                 }
4545                 fp->rx_sge_prod = ring_prod;
4546
4547                 /* Allocate BDs and initialize BD ring */
4548                 fp->rx_comp_cons = 0;
4549                 cqe_ring_prod = ring_prod = 0;
4550                 for (i = 0; i < bp->rx_ring_size; i++) {
4551                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4552                                 BNX2X_ERR("was only able to allocate "
4553                                           "%d rx skbs on queue[%d]\n", i, j);
4554                                 fp->eth_q_stats.rx_skb_alloc_failed++;
4555                                 break;
4556                         }
4557                         ring_prod = NEXT_RX_IDX(ring_prod);
4558                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4559                         WARN_ON(ring_prod <= i);
4560                 }
4561
4562                 fp->rx_bd_prod = ring_prod;
4563                 /* must not have more available CQEs than BDs */
4564                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4565                                        cqe_ring_prod);
4566                 fp->rx_pkt = fp->rx_calls = 0;
4567
4568                 /* Warning!
4569                  * this will generate an interrupt (to the TSTORM)
4570                  * must only be done after chip is initialized
4571                  */
4572                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4573                                      fp->rx_sge_prod);
4574                 if (j != 0)
4575                         continue;
4576
4577                 REG_WR(bp, BAR_USTRORM_INTMEM +
4578                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4579                        U64_LO(fp->rx_comp_mapping));
4580                 REG_WR(bp, BAR_USTRORM_INTMEM +
4581                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4582                        U64_HI(fp->rx_comp_mapping));
4583         }
4584 }
4585
4586 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4587 {
4588         int i, j;
4589
4590         for_each_tx_queue(bp, j) {
4591                 struct bnx2x_fastpath *fp = &bp->fp[j];
4592
4593                 for (i = 1; i <= NUM_TX_RINGS; i++) {
4594                         struct eth_tx_bd *tx_bd =
4595                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4596
4597                         tx_bd->addr_hi =
4598                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4599                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4600                         tx_bd->addr_lo =
4601                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4602                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4603                 }
4604
4605                 fp->tx_pkt_prod = 0;
4606                 fp->tx_pkt_cons = 0;
4607                 fp->tx_bd_prod = 0;
4608                 fp->tx_bd_cons = 0;
4609                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4610                 fp->tx_pkt = 0;
4611         }
4612 }
4613
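/* Initialize the slow-path (SPQ) ring: reset the producer bookkeeping and
 * tell XSTORM where the ring lives and where its producer starts.
 */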
4614 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4615 {
4616         int func = BP_FUNC(bp);
4617
4618         spin_lock_init(&bp->spq_lock);
4619
4620         bp->spq_left = MAX_SPQ_PENDING;
4621         bp->spq_prod_idx = 0;
4622         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4623         bp->spq_prod_bd = bp->spq;
4624         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4625
4626         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4627                U64_LO(bp->spq_mapping));
4628         REG_WR(bp,
4629                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4630                U64_HI(bp->spq_mapping));
4631
4632         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4633                bp->spq_prod_idx);
4634 }
4635
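/* Fill the per-connection ETH context for every queue: the rx side (USTORM:
 * BD/SGE page bases, buffer sizes, TPA flags) and the tx side (XSTORM/
 * CSTORM: BD page base, doorbell data address, status block index).
 */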
4636 static void bnx2x_init_context(struct bnx2x *bp)
4637 {
4638         int i;
4639
4640         for_each_queue(bp, i) {
4641                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4642                 struct bnx2x_fastpath *fp = &bp->fp[i];
4643                 u8 cl_id = fp->cl_id;
4644                 u8 sb_id = fp->sb_id;
4645
4646                 context->ustorm_st_context.common.sb_index_numbers =
4647                                                 BNX2X_RX_SB_INDEX_NUM;
4648                 context->ustorm_st_context.common.clientId = cl_id;
4649                 context->ustorm_st_context.common.status_block_id = sb_id;
4650                 context->ustorm_st_context.common.flags =
4651                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4652                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4653                 context->ustorm_st_context.common.statistics_counter_id =
4654                                                 cl_id;
4655                 context->ustorm_st_context.common.mc_alignment_log_size =
4656                                                 BNX2X_RX_ALIGN_SHIFT;
4657                 context->ustorm_st_context.common.bd_buff_size =
4658                                                 bp->rx_buf_size;
4659                 context->ustorm_st_context.common.bd_page_base_hi =
4660                                                 U64_HI(fp->rx_desc_mapping);
4661                 context->ustorm_st_context.common.bd_page_base_lo =
4662                                                 U64_LO(fp->rx_desc_mapping);
4663                 if (!fp->disable_tpa) {
4664                         context->ustorm_st_context.common.flags |=
4665                                 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4666                                  USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4667                         context->ustorm_st_context.common.sge_buff_size =
4668                                 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4669                                          (u32)0xffff);
4670                         context->ustorm_st_context.common.sge_page_base_hi =
4671                                                 U64_HI(fp->rx_sge_mapping);
4672                         context->ustorm_st_context.common.sge_page_base_lo =
4673                                                 U64_LO(fp->rx_sge_mapping);
4674                 }
4675
4676                 context->ustorm_ag_context.cdu_usage =
4677                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4678                                                CDU_REGION_NUMBER_UCM_AG,
4679                                                ETH_CONNECTION_TYPE);
4680
4681                 context->xstorm_st_context.tx_bd_page_base_hi =
4682                                                 U64_HI(fp->tx_desc_mapping);
4683                 context->xstorm_st_context.tx_bd_page_base_lo =
4684                                                 U64_LO(fp->tx_desc_mapping);
4685                 context->xstorm_st_context.db_data_addr_hi =
4686                                                 U64_HI(fp->tx_prods_mapping);
4687                 context->xstorm_st_context.db_data_addr_lo =
4688                                                 U64_LO(fp->tx_prods_mapping);
4689                 context->xstorm_st_context.statistics_data = (cl_id |
4690                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4691                 context->cstorm_st_context.sb_index_number =
4692                                                 C_SB_ETH_TX_CQ_INDEX;
4693                 context->cstorm_st_context.status_block_id = sb_id;
4694
4695                 context->xstorm_ag_context.cdu_reserved =
4696                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4697                                                CDU_REGION_NUMBER_XCM_AG,
4698                                                ETH_CONNECTION_TYPE);
4699         }
4700 }
4701
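/* Fill the RSS indirection table, spreading the rx queues' client IDs
 * round-robin over all entries (e.g. with 4 rx queues the pattern
 * cl_id+0..cl_id+3 repeats across the table).
 */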
4702 static void bnx2x_init_ind_table(struct bnx2x *bp)
4703 {
4704         int func = BP_FUNC(bp);
4705         int i;
4706
4707         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4708                 return;
4709
4710         DP(NETIF_MSG_IFUP,
4711            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
4712         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4713                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4714                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4715                         bp->fp->cl_id + (i % bp->num_rx_queues));
4716 }
4717
4718 static void bnx2x_set_client_config(struct bnx2x *bp)
4719 {
4720         struct tstorm_eth_client_config tstorm_client = {0};
4721         int port = BP_PORT(bp);
4722         int i;
4723
4724         tstorm_client.mtu = bp->dev->mtu;
4725         tstorm_client.config_flags =
4726                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4727                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
4728 #ifdef BCM_VLAN
4729         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4730                 tstorm_client.config_flags |=
4731                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
4732                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4733         }
4734 #endif
4735
4736         if (bp->flags & TPA_ENABLE_FLAG) {
4737                 tstorm_client.max_sges_for_packet =
4738                         SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4739                 tstorm_client.max_sges_for_packet =
4740                         ((tstorm_client.max_sges_for_packet +
4741                           PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4742                         PAGES_PER_SGE_SHIFT;
4743
4744                 tstorm_client.config_flags |=
4745                                 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4746         }
4747
4748         for_each_queue(bp, i) {
4749                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4750
4751                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4752                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4753                        ((u32 *)&tstorm_client)[0]);
4754                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4755                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4756                        ((u32 *)&tstorm_client)[1]);
4757         }
4758
4759         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4760            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4761 }
4762
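/* Translate the driver rx_mode (none/normal/allmulti/promisc) into TSTORM
 * MAC filter drop-all/accept-all masks for this function and, unless rx is
 * disabled, push the per-client configuration as well.
 */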
4763 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4764 {
4765         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4766         int mode = bp->rx_mode;
4767         int mask = (1 << BP_L_ID(bp));
4768         int func = BP_FUNC(bp);
4769         int i;
4770
4771         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
4772
4773         switch (mode) {
4774         case BNX2X_RX_MODE_NONE: /* no Rx */
4775                 tstorm_mac_filter.ucast_drop_all = mask;
4776                 tstorm_mac_filter.mcast_drop_all = mask;
4777                 tstorm_mac_filter.bcast_drop_all = mask;
4778                 break;
4779         case BNX2X_RX_MODE_NORMAL:
4780                 tstorm_mac_filter.bcast_accept_all = mask;
4781                 break;
4782         case BNX2X_RX_MODE_ALLMULTI:
4783                 tstorm_mac_filter.mcast_accept_all = mask;
4784                 tstorm_mac_filter.bcast_accept_all = mask;
4785                 break;
4786         case BNX2X_RX_MODE_PROMISC:
4787                 tstorm_mac_filter.ucast_accept_all = mask;
4788                 tstorm_mac_filter.mcast_accept_all = mask;
4789                 tstorm_mac_filter.bcast_accept_all = mask;
4790                 break;
4791         default:
4792                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4793                 break;
4794         }
4795
4796         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4797                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4798                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4799                        ((u32 *)&tstorm_mac_filter)[i]);
4800
4801 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4802                    ((u32 *)&tstorm_mac_filter)[i]); */
4803         }
4804
4805         if (mode != BNX2X_RX_MODE_NONE)
4806                 bnx2x_set_client_config(bp);
4807 }
4808
4809 static void bnx2x_init_internal_common(struct bnx2x *bp)
4810 {
4811         int i;
4812
4813         if (bp->flags & TPA_ENABLE_FLAG) {
4814                 struct tstorm_eth_tpa_exist tpa = {0};
4815
4816                 tpa.tpa_exist = 1;
4817
4818                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4819                        ((u32 *)&tpa)[0]);
4820                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4821                        ((u32 *)&tpa)[1]);
4822         }
4823
4824         /* Zero this manually as its initialization is
4825            currently missing in the initTool */
4826         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4827                 REG_WR(bp, BAR_USTRORM_INTMEM +
4828                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4829 }
4830
4831 static void bnx2x_init_internal_port(struct bnx2x *bp)
4832 {
4833         int port = BP_PORT(bp);
4834
4835         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4836         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4837         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4838         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4839 }
4840
4841 /* Calculates the sum of vn_min_rates.
4842    It's needed for further normalizing of the min_rates.
4843    Returns:
4844      sum of vn_min_rates.
4845        or
4846      0 - if all the min_rates are 0.
4847      In the latter case the fairness algorithm should be deactivated.
4848      If not all min_rates are zero then those that are zeroes will be set to 1.
4849  */
4850 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4851 {
4852         int all_zero = 1;
4853         int port = BP_PORT(bp);
4854         int vn;
4855
4856         bp->vn_weight_sum = 0;
4857         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4858                 int func = 2*vn + port;
4859                 u32 vn_cfg =
4860                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4861                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4862                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4863
4864                 /* Skip hidden vns */
4865                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4866                         continue;
4867
4868                 /* If min rate is zero - set it to 1 */
4869                 if (!vn_min_rate)
4870                         vn_min_rate = DEF_MIN_RATE;
4871                 else
4872                         all_zero = 0;
4873
4874                 bp->vn_weight_sum += vn_min_rate;
4875         }
4876
4877         /* ... only if all min rates are zeros - disable fairness */
4878         if (all_zero)
4879                 bp->vn_weight_sum = 0;
4880 }
4881
4882 static void bnx2x_init_internal_func(struct bnx2x *bp)
4883 {
4884         struct tstorm_eth_function_common_config tstorm_config = {0};
4885         struct stats_indication_flags stats_flags = {0};
4886         int port = BP_PORT(bp);
4887         int func = BP_FUNC(bp);
4888         int i, j;
4889         u32 offset;
4890         u16 max_agg_size;
4891
4892         if (is_multi(bp)) {
4893                 tstorm_config.config_flags = MULTI_FLAGS(bp);
4894                 tstorm_config.rss_result_mask = MULTI_MASK;
4895         }
4896         if (IS_E1HMF(bp))
4897                 tstorm_config.config_flags |=
4898                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
4899
4900         tstorm_config.leading_client_id = BP_L_ID(bp);
4901
4902         REG_WR(bp, BAR_TSTRORM_INTMEM +
4903                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4904                (*(u32 *)&tstorm_config));
4905
4906         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4907         bnx2x_set_storm_rx_mode(bp);
4908
4909         for_each_queue(bp, i) {
4910                 u8 cl_id = bp->fp[i].cl_id;
4911
4912                 /* reset xstorm per client statistics */
4913                 offset = BAR_XSTRORM_INTMEM +
4914                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4915                 for (j = 0;
4916                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4917                         REG_WR(bp, offset + j*4, 0);
4918
4919                 /* reset tstorm per client statistics */
4920                 offset = BAR_TSTRORM_INTMEM +
4921                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4922                 for (j = 0;
4923                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4924                         REG_WR(bp, offset + j*4, 0);
4925
4926                 /* reset ustorm per client statistics */
4927                 offset = BAR_USTRORM_INTMEM +
4928                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4929                 for (j = 0;
4930                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4931                         REG_WR(bp, offset + j*4, 0);
4932         }
4933
4934         /* Init statistics related context */
4935         stats_flags.collect_eth = 1;
4936
4937         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4938                ((u32 *)&stats_flags)[0]);
4939         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4940                ((u32 *)&stats_flags)[1]);
4941
4942         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4943                ((u32 *)&stats_flags)[0]);
4944         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4945                ((u32 *)&stats_flags)[1]);
4946
4947         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
4948                ((u32 *)&stats_flags)[0]);
4949         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
4950                ((u32 *)&stats_flags)[1]);
4951
4952         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4953                ((u32 *)&stats_flags)[0]);
4954         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4955                ((u32 *)&stats_flags)[1]);
4956
4957         REG_WR(bp, BAR_XSTRORM_INTMEM +
4958                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4959                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4960         REG_WR(bp, BAR_XSTRORM_INTMEM +
4961                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4962                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4963
4964         REG_WR(bp, BAR_TSTRORM_INTMEM +
4965                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4966                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4967         REG_WR(bp, BAR_TSTRORM_INTMEM +
4968                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4969                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4970
4971         REG_WR(bp, BAR_USTRORM_INTMEM +
4972                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4973                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4974         REG_WR(bp, BAR_USTRORM_INTMEM +
4975                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4976                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4977
4978         if (CHIP_IS_E1H(bp)) {
4979                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4980                         IS_E1HMF(bp));
4981                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4982                         IS_E1HMF(bp));
4983                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4984                         IS_E1HMF(bp));
4985                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4986                         IS_E1HMF(bp));
4987
4988                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4989                          bp->e1hov);
4990         }
4991
4992         /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
4993         max_agg_size =
4994                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
4995                           SGE_PAGE_SIZE * PAGES_PER_SGE),
4996                     (u32)0xffff);
4997         for_each_rx_queue(bp, i) {
4998                 struct bnx2x_fastpath *fp = &bp->fp[i];
4999
5000                 REG_WR(bp, BAR_USTRORM_INTMEM +
5001                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5002                        U64_LO(fp->rx_comp_mapping));
5003                 REG_WR(bp, BAR_USTRORM_INTMEM +
5004                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5005                        U64_HI(fp->rx_comp_mapping));
5006
5007                 REG_WR16(bp, BAR_USTRORM_INTMEM +
5008                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5009                          max_agg_size);
5010         }
5011
5012         /* dropless flow control */
5013         if (CHIP_IS_E1H(bp)) {
5014                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5015
5016                 rx_pause.bd_thr_low = 250;
5017                 rx_pause.cqe_thr_low = 250;
5018                 rx_pause.cos = 1;
5019                 rx_pause.sge_thr_low = 0;
5020                 rx_pause.bd_thr_high = 350;
5021                 rx_pause.cqe_thr_high = 350;
5022                 rx_pause.sge_thr_high = 0;
5023
5024                 for_each_rx_queue(bp, i) {
5025                         struct bnx2x_fastpath *fp = &bp->fp[i];
5026
5027                         if (!fp->disable_tpa) {
5028                                 rx_pause.sge_thr_low = 150;
5029                                 rx_pause.sge_thr_high = 250;
5030                         }
5031
5032
5033                         offset = BAR_USTRORM_INTMEM +
5034                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5035                                                                    fp->cl_id);
5036                         for (j = 0;
5037                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5038                              j++)
5039                                 REG_WR(bp, offset + j*4,
5040                                        ((u32 *)&rx_pause)[j]);
5041                 }
5042         }
5043
5044         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5045
5046         /* Init rate shaping and fairness contexts */
5047         if (IS_E1HMF(bp)) {
5048                 int vn;
5049
5050                 /* During init there is no active link.
5051                    Until link is up, set the link rate to 10Gbps */
5052                 bp->link_vars.line_speed = SPEED_10000;
5053                 bnx2x_init_port_minmax(bp);
5054
5055                 bnx2x_calc_vn_weight_sum(bp);
5056
5057                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5058                         bnx2x_init_vn_minmax(bp, 2*vn + port);
5059
5060                 /* Enable rate shaping and fairness */
5061                 bp->cmng.flags.cmng_enables =
5062                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5063                 if (bp->vn_weight_sum)
5064                         bp->cmng.flags.cmng_enables |=
5065                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5066                 else
5067                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5068                            "  fairness will be disabled\n");
5069         } else {
5070                 /* rate shaping and fairness are disabled */
5071                 DP(NETIF_MSG_IFUP,
5072                    "single function mode  minmax will be disabled\n");
5073         }
5074
5075
5076         /* Store it to internal memory */
5077         if (bp->port.pmf)
5078                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5079                         REG_WR(bp, BAR_XSTRORM_INTMEM +
5080                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5081                                ((u32 *)(&bp->cmng))[i]);
5082 }
5083
5084 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5085 {
5086         switch (load_code) {
5087         case FW_MSG_CODE_DRV_LOAD_COMMON:
5088                 bnx2x_init_internal_common(bp);
5089                 /* no break */
5090
5091         case FW_MSG_CODE_DRV_LOAD_PORT:
5092                 bnx2x_init_internal_port(bp);
5093                 /* no break */
5094
5095         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5096                 bnx2x_init_internal_func(bp);
5097                 break;
5098
5099         default:
5100                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5101                 break;
5102         }
5103 }
5104
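/* Top-level NIC init: per-queue and default status blocks, coalescing,
 * rx/tx/slow-path rings, connection contexts, internal (storm) memories,
 * the indirection table and statistics; finally enable interrupts.
 */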
5105 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5106 {
5107         int i;
5108
5109         for_each_queue(bp, i) {
5110                 struct bnx2x_fastpath *fp = &bp->fp[i];
5111
5112                 fp->bp = bp;
5113                 fp->state = BNX2X_FP_STATE_CLOSED;
5114                 fp->index = i;
5115                 fp->cl_id = BP_L_ID(bp) + i;
5116                 fp->sb_id = fp->cl_id;
5117                 DP(NETIF_MSG_IFUP,
5118                    "bnx2x_init_sb(%p,%p) index %d  cl_id %d  sb %d\n",
5119                    bp, fp->status_blk, i, fp->cl_id, fp->sb_id);
5120                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5121                               fp->sb_id);
5122                 bnx2x_update_fpsb_idx(fp);
5123         }
5124
5125         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5126                           DEF_SB_ID);
5127         bnx2x_update_dsb_idx(bp);
5128         bnx2x_update_coalesce(bp);
5129         bnx2x_init_rx_rings(bp);
5130         bnx2x_init_tx_ring(bp);
5131         bnx2x_init_sp_ring(bp);
5132         bnx2x_init_context(bp);
5133         bnx2x_init_internal(bp, load_code);
5134         bnx2x_init_ind_table(bp);
5135         bnx2x_stats_init(bp);
5136
5137         /* At this point, we are ready for interrupts */
5138         atomic_set(&bp->intr_sem, 0);
5139
5140         /* flush all before enabling interrupts */
5141         mb();
5142         mmiowb();
5143
5144         bnx2x_int_enable(bp);
5145 }
5146
5147 /* end of nic init */
5148
5149 /*
5150  * gzip service functions
5151  */
5152
5153 static int bnx2x_gunzip_init(struct bnx2x *bp)
5154 {
5155         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5156                                               &bp->gunzip_mapping);
5157         if (bp->gunzip_buf  == NULL)
5158                 goto gunzip_nomem1;
5159
5160         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5161         if (bp->strm  == NULL)
5162                 goto gunzip_nomem2;
5163
5164         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5165                                       GFP_KERNEL);
5166         if (bp->strm->workspace == NULL)
5167                 goto gunzip_nomem3;
5168
5169         return 0;
5170
5171 gunzip_nomem3:
5172         kfree(bp->strm);
5173         bp->strm = NULL;
5174
5175 gunzip_nomem2:
5176         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5177                             bp->gunzip_mapping);
5178         bp->gunzip_buf = NULL;
5179
5180 gunzip_nomem1:
5181         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5182                " decompression\n", bp->dev->name);
5183         return -ENOMEM;
5184 }
5185
5186 static void bnx2x_gunzip_end(struct bnx2x *bp)
5187 {
5188         kfree(bp->strm->workspace);
5189
5190         kfree(bp->strm);
5191         bp->strm = NULL;
5192
5193         if (bp->gunzip_buf) {
5194                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5195                                     bp->gunzip_mapping);
5196                 bp->gunzip_buf = NULL;
5197         }
5198 }
5199
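/* Inflate a gzip-wrapped firmware blob into gunzip_buf: validate the gzip
 * magic, skip the 10-byte header (plus an optional FNAME field) and run raw
 * zlib inflate (-MAX_WBITS) on the deflate payload.
 */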
5200 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
5201 {
5202         int n, rc;
5203
5204         /* check gzip header */
5205         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
5206                 return -EINVAL;
5207
5208         n = 10;
5209
5210 #define FNAME                           0x8
5211
5212         if (zbuf[3] & FNAME)
5213                 while ((zbuf[n++] != 0) && (n < len));
5214
5215         bp->strm->next_in = zbuf + n;
5216         bp->strm->avail_in = len - n;
5217         bp->strm->next_out = bp->gunzip_buf;
5218         bp->strm->avail_out = FW_BUF_SIZE;
5219
5220         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5221         if (rc != Z_OK)
5222                 return rc;
5223
5224         rc = zlib_inflate(bp->strm, Z_FINISH);
5225         if ((rc != Z_OK) && (rc != Z_STREAM_END))
5226                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5227                        bp->dev->name, bp->strm->msg);
5228
5229         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5230         if (bp->gunzip_outlen & 0x3)
5231                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5232                                     " gunzip_outlen (%d) not aligned\n",
5233                        bp->dev->name, bp->gunzip_outlen);
5234         bp->gunzip_outlen >>= 2;
5235
5236         zlib_inflateEnd(bp->strm);
5237
5238         if (rc == Z_STREAM_END)
5239                 return 0;
5240
5241         return rc;
5242 }
5243
5244 /* nic load/unload */
5245
5246 /*
5247  * General service functions
5248  */
5249
5250 /* send a NIG loopback debug packet */
5251 static void bnx2x_lb_pckt(struct bnx2x *bp)
5252 {
5253         u32 wb_write[3];
5254
5255         /* Ethernet source and destination addresses */
5256         wb_write[0] = 0x55555555;
5257         wb_write[1] = 0x55555555;
5258         wb_write[2] = 0x20;             /* SOP */
5259         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5260
5261         /* NON-IP protocol */
5262         wb_write[0] = 0x09000000;
5263         wb_write[1] = 0x55555555;
5264         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
5265         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5266 }
5267
5268 /* some of the internal memories
5269  * are not directly readable from the driver;
5270  * to test them we send debug packets
5271  */
5272 static int bnx2x_int_mem_test(struct bnx2x *bp)
5273 {
5274         int factor;
5275         int count, i;
5276         u32 val = 0;
5277
5278         if (CHIP_REV_IS_FPGA(bp))
5279                 factor = 120;
5280         else if (CHIP_REV_IS_EMUL(bp))
5281                 factor = 200;
5282         else
5283                 factor = 1;
5284
5285         DP(NETIF_MSG_HW, "start part1\n");
5286
5287         /* Disable inputs of parser neighbor blocks */
5288         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5289         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5290         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5291         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5292
5293         /*  Write 0 to parser credits for CFC search request */
5294         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5295
5296         /* send Ethernet packet */
5297         bnx2x_lb_pckt(bp);
5298
5299         /* TODO: should the NIG statistics be reset here? */
5300         /* Wait until NIG register shows 1 packet of size 0x10 */
5301         count = 1000 * factor;
5302         while (count) {
5303
5304                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5305                 val = *bnx2x_sp(bp, wb_data[0]);
5306                 if (val == 0x10)
5307                         break;
5308
5309                 msleep(10);
5310                 count--;
5311         }
5312         if (val != 0x10) {
5313                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5314                 return -1;
5315         }
5316
5317         /* Wait until PRS register shows 1 packet */
5318         count = 1000 * factor;
5319         while (count) {
5320                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5321                 if (val == 1)
5322                         break;
5323
5324                 msleep(10);
5325                 count--;
5326         }
5327         if (val != 0x1) {
5328                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5329                 return -2;
5330         }
5331
5332         /* Reset and init BRB, PRS */
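        /* writing a block's bit to RESET_REG_1_CLEAR puts it into reset;
         * the matching write to RESET_REG_1_SET below takes it back out
         */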
5333         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5334         msleep(50);
5335         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5336         msleep(50);
5337         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5338         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5339
5340         DP(NETIF_MSG_HW, "part2\n");
5341
5342         /* Disable inputs of parser neighbor blocks */
5343         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5344         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5345         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5346         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5347
5348         /* Write 0 to parser credits for CFC search request */
5349         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5350
5351         /* send 10 Ethernet packets */
5352         for (i = 0; i < 10; i++)
5353                 bnx2x_lb_pckt(bp);
5354
5355         /* Wait until NIG register shows 10 + 1
5356            packets of size 11*0x10 = 0xb0 */
5357         count = 1000 * factor;
5358         while (count) {
5359
5360                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5361                 val = *bnx2x_sp(bp, wb_data[0]);
5362                 if (val == 0xb0)
5363                         break;
5364
5365                 msleep(10);
5366                 count--;
5367         }
5368         if (val != 0xb0) {
5369                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5370                 return -3;
5371         }
5372
5373         /* Wait until PRS register shows 2 packets */
5374         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5375         if (val != 2)
5376                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5377
5378         /* Write 1 to parser credits for CFC search request */
5379         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5380
5381         /* Wait until PRS register shows 3 packets */
5382         msleep(10 * factor);
5383         /* check that the PRS packet counter has advanced to 3 */
5384         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5385         if (val != 3)
5386                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5387
5388         /* clear NIG EOP FIFO */
5389         for (i = 0; i < 11; i++)
5390                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5391         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5392         if (val != 1) {
5393                 BNX2X_ERR("clear of NIG failed\n");
5394                 return -4;
5395         }
5396
5397         /* Reset and init BRB, PRS, NIG */
5398         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5399         msleep(50);
5400         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5401         msleep(50);
5402         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5403         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5404 #ifndef BCM_ISCSI
5405         /* set NIC mode */
5406         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5407 #endif
5408
5409         /* Enable inputs of parser neighbor blocks */
5410         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5411         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5412         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5413         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5414
5415         DP(NETIF_MSG_HW, "done\n");
5416
5417         return 0; /* OK */
5418 }
5419
5420 static void enable_blocks_attention(struct bnx2x *bp)
5421 {
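        /* a 0 bit in a block's INT_MASK register leaves the corresponding
         * attention unmasked; only the PBF keeps two sources masked (0x18)
         */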
5422         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5423         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5424         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5425         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5426         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5427         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5428         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5429         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5430         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5431 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5432 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5433         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5434         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5435         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5436 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5437 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5438         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5439         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5440         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5441         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5442 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5443 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5444         if (CHIP_REV_IS_FPGA(bp))
5445                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5446         else
5447                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5448         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5449         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5450         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5451 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5452 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5453         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5454         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5455 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5456         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3 and 4 masked */
5457 }
5458
5459
5460 static void bnx2x_reset_common(struct bnx2x *bp)
5461 {
5462         /* reset_common */
5463         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5464                0xd3ffff7f);
5465         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5466 }
5467
5468 static int bnx2x_init_common(struct bnx2x *bp)
5469 {
5470         u32 val, i;
5471
5472         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
5473
5474         bnx2x_reset_common(bp);
5475         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5476         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5477
5478         bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5479         if (CHIP_IS_E1H(bp))
5480                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5481
5482         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5483         msleep(30);
5484         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5485
5486         bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5487         if (CHIP_IS_E1(bp)) {
5488                 /* enable HW interrupt from PXP on USDM overflow
5489                    bit 16 on INT_MASK_0 */
5490                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5491         }
5492
5493         bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5494         bnx2x_init_pxp(bp);
5495
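        /* on big-endian hosts the PXP2 request/read clients are switched
         * into swap mode so that data exchanged with host memory keeps
         * the expected byte order
         */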
5496 #ifdef __BIG_ENDIAN
5497         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5498         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5499         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5500         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5501         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5502         /* make sure this value is 0 */
5503         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5504
5505 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5506         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5507         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5508         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5509         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5510 #endif
5511
5512         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5513 #ifdef BCM_ISCSI
5514         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5515         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5516         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5517 #endif
5518
5519         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5520                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5521
5522         /* let the HW do its magic ... */
5523         msleep(100);
5524         /* finish PXP init */
5525         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5526         if (val != 1) {
5527                 BNX2X_ERR("PXP2 CFG failed\n");
5528                 return -EBUSY;
5529         }
5530         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5531         if (val != 1) {
5532                 BNX2X_ERR("PXP2 RD_INIT failed\n");
5533                 return -EBUSY;
5534         }
5535
5536         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5537         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5538
5539         bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5540
5541         /* clean the DMAE memory */
5542         bp->dmae_ready = 1;
5543         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5544
5545         bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5546         bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5547         bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5548         bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5549
5550         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5551         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5552         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5553         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5554
5555         bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5556         /* soft reset pulse */
5557         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5558         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5559
5560 #ifdef BCM_ISCSI
5561         bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5562 #endif
5563
5564         bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5565         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5566         if (!CHIP_REV_IS_SLOW(bp)) {
5567                 /* enable hw interrupt from doorbell Q */
5568                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5569         }
5570
5571         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5572         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5573         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5574         /* set NIC mode */
5575         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5576         if (CHIP_IS_E1H(bp))
5577                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5578
5579         bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5580         bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5581         bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5582         bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5583
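        /* zero the T/C/X/U storm processors' internal memories
         * (done in two halves on E1H, in a single pass on E1)
         */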
5584         if (CHIP_IS_E1H(bp)) {
5585                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5586                                 STORM_INTMEM_SIZE_E1H/2);
5587                 bnx2x_init_fill(bp,
5588                                 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5589                                 0, STORM_INTMEM_SIZE_E1H/2);
5590                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5591                                 STORM_INTMEM_SIZE_E1H/2);
5592                 bnx2x_init_fill(bp,
5593                                 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5594                                 0, STORM_INTMEM_SIZE_E1H/2);
5595                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5596                                 STORM_INTMEM_SIZE_E1H/2);
5597                 bnx2x_init_fill(bp,
5598                                 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5599                                 0, STORM_INTMEM_SIZE_E1H/2);
5600                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5601                                 STORM_INTMEM_SIZE_E1H/2);
5602                 bnx2x_init_fill(bp,
5603                                 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5604                                 0, STORM_INTMEM_SIZE_E1H/2);
5605         } else { /* E1 */
5606                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5607                                 STORM_INTMEM_SIZE_E1);
5608                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5609                                 STORM_INTMEM_SIZE_E1);
5610                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5611                                 STORM_INTMEM_SIZE_E1);
5612                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5613                                 STORM_INTMEM_SIZE_E1);
5614         }
5615
5616         bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5617         bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5618         bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5619         bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5620
5621         /* sync semi rtc */
5622         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5623                0x80000000);
5624         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5625                0x80000000);
5626
5627         bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5628         bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5629         bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5630
5631         REG_WR(bp, SRC_REG_SOFT_RST, 1);
5632         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5633                 REG_WR(bp, i, 0xc0cac01a);
5634                 /* TODO: replace with something meaningful */
5635         }
5636         bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5637         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5638
5639         if (sizeof(union cdu_context) != 1024)
5640                 /* we currently assume that a context is 1024 bytes */
5641                 printk(KERN_ALERT PFX "please adjust the size of"
5642                        " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5643
5644         bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5645         val = (4 << 24) + (0 << 12) + 1024;
5646         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5647         if (CHIP_IS_E1(bp)) {
5648                 /* !!! fix pxp client credit until excel update */
5649                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5650                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5651         }
5652
5653         bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5654         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5655         /* enable context validation interrupt from CFC */
5656         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5657
5658         /* set the thresholds to prevent CFC/CDU race */
5659         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5660
5661         bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5662         bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5663
5664         /* PXPCS COMMON comes here */
5665         /* Reset PCIE errors for debug */
5666         REG_WR(bp, 0x2814, 0xffffffff);
5667         REG_WR(bp, 0x3820, 0xffffffff);
5668
5669         /* EMAC0 COMMON comes here */
5670         /* EMAC1 COMMON comes here */
5671         /* DBU COMMON comes here */
5672         /* DBG COMMON comes here */
5673
5674         bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5675         if (CHIP_IS_E1H(bp)) {
5676                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5677                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5678         }
5679
5680         if (CHIP_REV_IS_SLOW(bp))
5681                 msleep(200);
5682
5683         /* finish CFC init */
5684         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5685         if (val != 1) {
5686                 BNX2X_ERR("CFC LL_INIT failed\n");
5687                 return -EBUSY;
5688         }
5689         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5690         if (val != 1) {
5691                 BNX2X_ERR("CFC AC_INIT failed\n");
5692                 return -EBUSY;
5693         }
5694         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5695         if (val != 1) {
5696                 BNX2X_ERR("CFC CAM_INIT failed\n");
5697                 return -EBUSY;
5698         }
5699         REG_WR(bp, CFC_REG_DEBUG0, 0);
5700
5701         /* read NIG statistic
5702            to see if this is our first up since powerup */
5703         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5704         val = *bnx2x_sp(bp, wb_data[0]);
5705
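        /* the memory self test below resets BRB and PRS, so it is run only
         * on E1 and only when the NIG octet counter is still zero, i.e. on
         * the first load since power-up
         */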
5706         /* do internal memory self test */
5707         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5708                 BNX2X_ERR("internal mem self test failed\n");
5709                 return -EBUSY;
5710         }
5711
5712         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5713         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5714         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5715         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5716                 bp->port.need_hw_lock = 1;
5717                 break;
5718
5719         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5720                 /* Fan failure is indicated by SPIO 5 */
5721                 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5722                                MISC_REGISTERS_SPIO_INPUT_HI_Z);
5723
5724                 /* set to active low mode */
5725                 val = REG_RD(bp, MISC_REG_SPIO_INT);
5726                 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5727                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5728                 REG_WR(bp, MISC_REG_SPIO_INT, val);
5729
5730                 /* enable interrupt to signal the IGU */
5731                 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5732                 val |= (1 << MISC_REGISTERS_SPIO_5);
5733                 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5734                 break;
5735
5736         default:
5737                 break;
5738         }
5739
5740         /* clear PXP2 attentions */
5741         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5742
5743         enable_blocks_attention(bp);
5744
5745         if (!BP_NOMCP(bp)) {
5746                 bnx2x_acquire_phy_lock(bp);
5747                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5748