1 /* bnx2x_main.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2008 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h>  /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #ifdef NETIF_F_HW_VLAN_TX
42         #include <linux/if_vlan.h>
43 #endif
44 #include <net/ip.h>
45 #include <net/tcp.h>
46 #include <net/checksum.h>
47 #include <net/ip6_checksum.h>
48 #include <linux/workqueue.h>
49 #include <linux/crc32.h>
50 #include <linux/crc32c.h>
51 #include <linux/prefetch.h>
52 #include <linux/zlib.h>
53 #include <linux/io.h>
54
55 #include "bnx2x_reg.h"
56 #include "bnx2x_fw_defs.h"
57 #include "bnx2x_hsi.h"
58 #include "bnx2x_link.h"
59 #include "bnx2x.h"
60 #include "bnx2x_init.h"
61
62 #define DRV_MODULE_VERSION      "1.45.17"
63 #define DRV_MODULE_RELDATE      "2008/08/13"
64 #define BNX2X_BC_VER            0x040200
65
66 /* Time in jiffies before concluding the transmitter is hung */
67 #define TX_TIMEOUT              (5*HZ)
68
69 static char version[] __devinitdata =
70         "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
71         DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
72
73 MODULE_AUTHOR("Eliezer Tamir");
74 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
75 MODULE_LICENSE("GPL");
76 MODULE_VERSION(DRV_MODULE_VERSION);
77
78 static int disable_tpa;
79 static int use_inta;
80 static int poll;
81 static int debug;
82 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
83 static int use_multi;
84
85 module_param(disable_tpa, int, 0);
86 module_param(use_inta, int, 0);
87 module_param(poll, int, 0);
88 module_param(debug, int, 0);
89 MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
90 MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
91 MODULE_PARM_DESC(poll, "use polling (for debug)");
92 MODULE_PARM_DESC(debug, "default debug msglevel");
93
94 #ifdef BNX2X_MULTI
95 module_param(use_multi, int, 0);
96 MODULE_PARM_DESC(use_multi, "use per-CPU queues");
97 #endif
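/* Example usage (hypothetical command line): the parameters above can be set
 * at load time, e.g. "modprobe bnx2x disable_tpa=1 use_inta=1 debug=0x1".
 * The 0 permission passed to module_param() means they are not exposed under
 * /sys/module/bnx2x/parameters/ once the driver is loaded.
 */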
98
99 enum bnx2x_board_type {
100         BCM57710 = 0,
101         BCM57711 = 1,
102         BCM57711E = 2,
103 };
104
105 /* indexed by board_type, above */
106 static struct {
107         char *name;
108 } board_info[] __devinitdata = {
109         { "Broadcom NetXtreme II BCM57710 XGb" },
110         { "Broadcom NetXtreme II BCM57711 XGb" },
111         { "Broadcom NetXtreme II BCM57711E XGb" }
112 };
113
114
115 static const struct pci_device_id bnx2x_pci_tbl[] = {
116         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
117                 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
118         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
119                 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
120         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
121                 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
122         { 0 }
123 };
124
125 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
126
127 /****************************************************************************
128 * General service functions
129 ****************************************************************************/
130
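/* The two helpers below reach GRC registers indirectly through the
 * PCICFG_GRC_ADDRESS/PCICFG_GRC_DATA window in PCI config space, for use
 * before memory-mapped access is available.  The trailing write of
 * PCICFG_VENDOR_ID_OFFSET presumably just parks the window at a benign
 * location.
 */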
131 /* used only at init
132  * locking is done by mcp
133  */
134 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
135 {
136         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
137         pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
138         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
139                                PCICFG_VENDOR_ID_OFFSET);
140 }
141
142 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
143 {
144         u32 val;
145
146         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
147         pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
148         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
149                                PCICFG_VENDOR_ID_OFFSET);
150
151         return val;
152 }
153
154 static const u32 dmae_reg_go_c[] = {
155         DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
156         DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
157         DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
158         DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
159 };
160
161 /* copy command into DMAE command memory and set DMAE command go */
162 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
163                             int idx)
164 {
165         u32 cmd_offset;
166         int i;
167
168         cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
169         for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
170                 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
171
172                 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
173                    idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
174         }
175         REG_WR(bp, dmae_reg_go_c[idx], 1);
176 }
177
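/* bnx2x_write_dmae - copy len32 dwords from the host buffer at dma_addr to
 * GRC address dst_addr using the DMAE engine.  Completion is detected by
 * polling the wb_comp word in the slowpath area (about 200 iterations max);
 * if the engine is not ready yet, the data is written with indirect register
 * writes instead.
 */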
178 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
179                       u32 len32)
180 {
181         struct dmae_command *dmae = &bp->init_dmae;
182         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
183         int cnt = 200;
184
185         if (!bp->dmae_ready) {
186                 u32 *data = bnx2x_sp(bp, wb_data[0]);
187
188                 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
189                    "  using indirect\n", dst_addr, len32);
190                 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
191                 return;
192         }
193
194         mutex_lock(&bp->dmae_mutex);
195
196         memset(dmae, 0, sizeof(struct dmae_command));
197
198         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
199                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
200                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
201 #ifdef __BIG_ENDIAN
202                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
203 #else
204                         DMAE_CMD_ENDIANITY_DW_SWAP |
205 #endif
206                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
207                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
208         dmae->src_addr_lo = U64_LO(dma_addr);
209         dmae->src_addr_hi = U64_HI(dma_addr);
210         dmae->dst_addr_lo = dst_addr >> 2;
211         dmae->dst_addr_hi = 0;
212         dmae->len = len32;
213         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
214         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
215         dmae->comp_val = DMAE_COMP_VAL;
216
217         DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
218            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
219                     "dst_addr [%x:%08x (%08x)]\n"
220            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
221            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
222            dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
223            dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
224         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
225            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
226            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
227
228         *wb_comp = 0;
229
230         bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
231
232         udelay(5);
233
234         while (*wb_comp != DMAE_COMP_VAL) {
235                 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
236
237                 if (!cnt) {
238                         BNX2X_ERR("dmae timeout!\n");
239                         break;
240                 }
241                 cnt--;
242                 /* adjust delay for emulation/FPGA */
243                 if (CHIP_REV_IS_SLOW(bp))
244                         msleep(100);
245                 else
246                         udelay(5);
247         }
248
249         mutex_unlock(&bp->dmae_mutex);
250 }
251
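/* bnx2x_read_dmae - the mirror image of bnx2x_write_dmae: copy len32 dwords
 * from GRC address src_addr into the slowpath wb_data[] buffer, falling back
 * to indirect register reads while DMAE is not ready.
 */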
252 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
253 {
254         struct dmae_command *dmae = &bp->init_dmae;
255         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
256         int cnt = 200;
257
258         if (!bp->dmae_ready) {
259                 u32 *data = bnx2x_sp(bp, wb_data[0]);
260                 int i;
261
262                 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
263                    "  using indirect\n", src_addr, len32);
264                 for (i = 0; i < len32; i++)
265                         data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
266                 return;
267         }
268
269         mutex_lock(&bp->dmae_mutex);
270
271         memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
272         memset(dmae, 0, sizeof(struct dmae_command));
273
274         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
275                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
276                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
277 #ifdef __BIG_ENDIAN
278                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
279 #else
280                         DMAE_CMD_ENDIANITY_DW_SWAP |
281 #endif
282                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
283                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
284         dmae->src_addr_lo = src_addr >> 2;
285         dmae->src_addr_hi = 0;
286         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
287         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
288         dmae->len = len32;
289         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
290         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
291         dmae->comp_val = DMAE_COMP_VAL;
292
293         DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
294            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
295                     "dst_addr [%x:%08x (%08x)]\n"
296            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
297            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
298            dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
299            dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
300
301         *wb_comp = 0;
302
303         bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
304
305         udelay(5);
306
307         while (*wb_comp != DMAE_COMP_VAL) {
308
309                 if (!cnt) {
310                         BNX2X_ERR("dmae timeout!\n");
311                         break;
312                 }
313                 cnt--;
314                 /* adjust delay for emulation/FPGA */
315                 if (CHIP_REV_IS_SLOW(bp))
316                         msleep(100);
317                 else
318                         udelay(5);
319         }
320         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
321            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
322            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
323
324         mutex_unlock(&bp->dmae_mutex);
325 }
326
327 /* used only for slowpath so not inlined */
328 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
329 {
330         u32 wb_write[2];
331
332         wb_write[0] = val_hi;
333         wb_write[1] = val_lo;
334         REG_WR_DMAE(bp, reg, wb_write, 2);
335 }
336
337 #ifdef USE_WB_RD
338 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
339 {
340         u32 wb_data[2];
341
342         REG_RD_DMAE(bp, reg, wb_data, 2);
343
344         return HILO_U64(wb_data[0], wb_data[1]);
345 }
346 #endif
347
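/* Walk the assert list of each STORM processor (X/T/C/U), printing every
 * recorded assert (four dwords per entry) until an entry whose first dword is
 * COMMON_ASM_INVALID_ASSERT_OPCODE is found; the return value is the number
 * of asserts seen.
 */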
348 static int bnx2x_mc_assert(struct bnx2x *bp)
349 {
350         char last_idx;
351         int i, rc = 0;
352         u32 row0, row1, row2, row3;
353
354         /* XSTORM */
355         last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
356                            XSTORM_ASSERT_LIST_INDEX_OFFSET);
357         if (last_idx)
358                 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
359
360         /* print the asserts */
361         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
362
363                 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
364                               XSTORM_ASSERT_LIST_OFFSET(i));
365                 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
366                               XSTORM_ASSERT_LIST_OFFSET(i) + 4);
367                 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
368                               XSTORM_ASSERT_LIST_OFFSET(i) + 8);
369                 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
370                               XSTORM_ASSERT_LIST_OFFSET(i) + 12);
371
372                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
373                         BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
374                                   " 0x%08x 0x%08x 0x%08x\n",
375                                   i, row3, row2, row1, row0);
376                         rc++;
377                 } else {
378                         break;
379                 }
380         }
381
382         /* TSTORM */
383         last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
384                            TSTORM_ASSERT_LIST_INDEX_OFFSET);
385         if (last_idx)
386                 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
387
388         /* print the asserts */
389         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
390
391                 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
392                               TSTORM_ASSERT_LIST_OFFSET(i));
393                 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
394                               TSTORM_ASSERT_LIST_OFFSET(i) + 4);
395                 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
396                               TSTORM_ASSERT_LIST_OFFSET(i) + 8);
397                 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
398                               TSTORM_ASSERT_LIST_OFFSET(i) + 12);
399
400                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
401                         BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
402                                   " 0x%08x 0x%08x 0x%08x\n",
403                                   i, row3, row2, row1, row0);
404                         rc++;
405                 } else {
406                         break;
407                 }
408         }
409
410         /* CSTORM */
411         last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
412                            CSTORM_ASSERT_LIST_INDEX_OFFSET);
413         if (last_idx)
414                 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
415
416         /* print the asserts */
417         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
418
419                 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
420                               CSTORM_ASSERT_LIST_OFFSET(i));
421                 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
422                               CSTORM_ASSERT_LIST_OFFSET(i) + 4);
423                 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
424                               CSTORM_ASSERT_LIST_OFFSET(i) + 8);
425                 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
426                               CSTORM_ASSERT_LIST_OFFSET(i) + 12);
427
428                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
429                         BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
430                                   " 0x%08x 0x%08x 0x%08x\n",
431                                   i, row3, row2, row1, row0);
432                         rc++;
433                 } else {
434                         break;
435                 }
436         }
437
438         /* USTORM */
439         last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
440                            USTORM_ASSERT_LIST_INDEX_OFFSET);
441         if (last_idx)
442                 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
443
444         /* print the asserts */
445         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
446
447                 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
448                               USTORM_ASSERT_LIST_OFFSET(i));
449                 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
450                               USTORM_ASSERT_LIST_OFFSET(i) + 4);
451                 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
452                               USTORM_ASSERT_LIST_OFFSET(i) + 8);
453                 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
454                               USTORM_ASSERT_LIST_OFFSET(i) + 12);
455
456                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
457                         BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
458                                   " 0x%08x 0x%08x 0x%08x\n",
459                                   i, row3, row2, row1, row0);
460                         rc++;
461                 } else {
462                         break;
463                 }
464         }
465
466         return rc;
467 }
468
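/* Dump the firmware trace kept in the MCP scratchpad.  The mark read from
 * scratch+0xf104 appears to be a GRC pointer into a circular text buffer at
 * scratch offsets 0xF108-0xF900, so the buffer is printed in two halves:
 * from the mark to the end, then from the start up to the mark.
 */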
469 static void bnx2x_fw_dump(struct bnx2x *bp)
470 {
471         u32 mark, offset;
472         u32 data[9];
473         int word;
474
475         mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
476         mark = ((mark + 0x3) & ~0x3);
477         printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
478
479         for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
480                 for (word = 0; word < 8; word++)
481                         data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
482                                                   offset + 4*word));
483                 data[8] = 0x0;
484                 printk(KERN_CONT "%s", (char *)data);
485         }
486         for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
487                 for (word = 0; word < 8; word++)
488                         data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
489                                                   offset + 4*word));
490                 data[8] = 0x0;
491                 printk(KERN_CONT "%s", (char *)data);
492         }
493         printk("\n" KERN_ERR PFX "end of fw dump\n");
494 }
495
496 static void bnx2x_panic_dump(struct bnx2x *bp)
497 {
498         int i;
499         u16 j, start, end;
500
501         bp->stats_state = STATS_STATE_DISABLED;
502         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
503
504         BNX2X_ERR("begin crash dump -----------------\n");
505
506         for_each_queue(bp, i) {
507                 struct bnx2x_fastpath *fp = &bp->fp[i];
508                 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
509
510                 BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
511                           "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
512                           i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
513                           fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
514                 BNX2X_ERR("          rx_bd_prod(%x)  rx_bd_cons(%x)"
515                           "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
516                           "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
517                           fp->rx_bd_prod, fp->rx_bd_cons,
518                           le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
519                           fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
520                 BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
521                           "  fp_c_idx(%x)  *sb_c_idx(%x)  fp_u_idx(%x)"
522                           "  *sb_u_idx(%x)  bd data(%x,%x)\n",
523                           fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
524                           fp->status_blk->c_status_block.status_block_index,
525                           fp->fp_u_idx,
526                           fp->status_blk->u_status_block.status_block_index,
527                           hw_prods->packets_prod, hw_prods->bds_prod);
528
529                 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
530                 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
531                 for (j = start; j < end; j++) {
532                         struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
533
534                         BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
535                                   sw_bd->skb, sw_bd->first_bd);
536                 }
537
538                 start = TX_BD(fp->tx_bd_cons - 10);
539                 end = TX_BD(fp->tx_bd_cons + 254);
540                 for (j = start; j < end; j++) {
541                         u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
542
543                         BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
544                                   j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
545                 }
546
547                 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
548                 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
549                 for (j = start; j < end; j++) {
550                         u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
551                         struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
552
553                         BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
554                                   j, rx_bd[1], rx_bd[0], sw_bd->skb);
555                 }
556
557                 start = RX_SGE(fp->rx_sge_prod);
558                 end = RX_SGE(fp->last_max_sge);
559                 for (j = start; j < end; j++) {
560                         u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
561                         struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
562
563                         BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
564                                   j, rx_sge[1], rx_sge[0], sw_page->page);
565                 }
566
567                 start = RCQ_BD(fp->rx_comp_cons - 10);
568                 end = RCQ_BD(fp->rx_comp_cons + 503);
569                 for (j = start; j < end; j++) {
570                         u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
571
572                         BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
573                                   j, cqe[0], cqe[1], cqe[2], cqe[3]);
574                 }
575         }
576
577         BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
578                   "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
579                   "  spq_prod_idx(%u)\n",
580                   bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
581                   bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
582
583         bnx2x_fw_dump(bp);
584         bnx2x_mc_assert(bp);
585         BNX2X_ERR("end crash dump -----------------\n");
586 }
587
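/* Enable host coalescing interrupts for this port.  Note that in INTA mode
 * the HC config register is written twice: first with the MSI/MSI-X enable
 * bit set together with the INTA line, then again with the MSI/MSI-X bit
 * cleared (presumably a hardware ordering requirement).
 */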
588 static void bnx2x_int_enable(struct bnx2x *bp)
589 {
590         int port = BP_PORT(bp);
591         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
592         u32 val = REG_RD(bp, addr);
593         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
594
595         if (msix) {
596                 val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
597                 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
598                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
599         } else {
600                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
601                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
602                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
603                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
604
605                 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
606                    val, port, addr, msix);
607
608                 REG_WR(bp, addr, val);
609
610                 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
611         }
612
613         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
614            val, port, addr, msix);
615
616         REG_WR(bp, addr, val);
617
618         if (CHIP_IS_E1H(bp)) {
619                 /* init leading/trailing edge */
620                 if (IS_E1HMF(bp)) {
621                         val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
622                         if (bp->port.pmf)
623                                 /* enable nig attention */
624                                 val |= 0x0100;
625                 } else
626                         val = 0xffff;
627
628                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
629                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
630         }
631 }
632
633 static void bnx2x_int_disable(struct bnx2x *bp)
634 {
635         int port = BP_PORT(bp);
636         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
637         u32 val = REG_RD(bp, addr);
638
639         val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
640                  HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
641                  HC_CONFIG_0_REG_INT_LINE_EN_0 |
642                  HC_CONFIG_0_REG_ATTN_BIT_EN_0);
643
644         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
645            val, port, addr);
646
647         REG_WR(bp, addr, val);
648         if (REG_RD(bp, addr) != val)
649                 BNX2X_ERR("BUG! proper val not read from IGU!\n");
650 }
651
652 static void bnx2x_int_disable_sync(struct bnx2x *bp)
653 {
654         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
655         int i;
656
657         /* disable interrupt handling */
658         atomic_inc(&bp->intr_sem);
659         /* prevent the HW from sending interrupts */
660         bnx2x_int_disable(bp);
661
662         /* make sure all ISRs are done */
663         if (msix) {
664                 for_each_queue(bp, i)
665                         synchronize_irq(bp->msix_table[i].vector);
666
667                 /* one more for the Slow Path IRQ */
668                 synchronize_irq(bp->msix_table[i].vector);
669         } else
670                 synchronize_irq(bp->pdev->irq);
671
672         /* make sure sp_task is not running */
673         cancel_work_sync(&bp->sp_task);
674 }
675
676 /* fast path */
677
678 /*
679  * General service functions
680  */
681
682 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
683                                 u8 storm, u16 index, u8 op, u8 update)
684 {
685         u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
686                        COMMAND_REG_INT_ACK);
687         struct igu_ack_register igu_ack;
688
689         igu_ack.status_block_index = index;
690         igu_ack.sb_id_and_flags =
691                         ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
692                          (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
693                          (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
694                          (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
695
696         DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
697            (*(u32 *)&igu_ack), hc_addr);
698         REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
699 }
700
701 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
702 {
703         struct host_status_block *fpsb = fp->status_blk;
704         u16 rc = 0;
705
706         barrier(); /* status block is written to by the chip */
707         if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
708                 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
709                 rc |= 1;
710         }
711         if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
712                 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
713                 rc |= 2;
714         }
715         return rc;
716 }
717
718 static u16 bnx2x_ack_int(struct bnx2x *bp)
719 {
720         u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
721                        COMMAND_REG_SIMD_MASK);
722         u32 result = REG_RD(bp, hc_addr);
723
724         DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
725            result, hc_addr);
726
727         return result;
728 }
729
730
731 /*
732  * fast path service functions
733  */
734
735 /* free skb in the packet ring at pos idx
736  * return idx of last bd freed
737  */
738 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
739                              u16 idx)
740 {
741         struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
742         struct eth_tx_bd *tx_bd;
743         struct sk_buff *skb = tx_buf->skb;
744         u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
745         int nbd;
746
747         DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
748            idx, tx_buf, skb);
749
750         /* unmap first bd */
751         DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
752         tx_bd = &fp->tx_desc_ring[bd_idx];
753         pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
754                          BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
755
756         nbd = le16_to_cpu(tx_bd->nbd) - 1;
757         new_cons = nbd + tx_buf->first_bd;
758 #ifdef BNX2X_STOP_ON_ERROR
759         if (nbd > (MAX_SKB_FRAGS + 2)) {
760                 BNX2X_ERR("BAD nbd!\n");
761                 bnx2x_panic();
762         }
763 #endif
764
765         /* Skip a parse bd and the TSO split header bd
766            since they have no mapping */
767         if (nbd)
768                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
769
770         if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
771                                            ETH_TX_BD_FLAGS_TCP_CSUM |
772                                            ETH_TX_BD_FLAGS_SW_LSO)) {
773                 if (--nbd)
774                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
775                 tx_bd = &fp->tx_desc_ring[bd_idx];
776                 /* is this a TSO split header bd? */
777                 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
778                         if (--nbd)
779                                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
780                 }
781         }
782
783         /* now free frags */
784         while (nbd > 0) {
785
786                 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
787                 tx_bd = &fp->tx_desc_ring[bd_idx];
788                 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
789                                BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
790                 if (--nbd)
791                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
792         }
793
794         /* release skb */
795         WARN_ON(!skb);
796         dev_kfree_skb(skb);
797         tx_buf->first_bd = 0;
798         tx_buf->skb = NULL;
799
800         return new_cons;
801 }
802
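/* Worked example for the availability math below: the NUM_TX_RINGS
 * "next-page" BDs are always counted as used, so even with prod == cons
 * (an empty ring) the function reports tx_ring_size - NUM_TX_RINGS free BDs,
 * never the full ring size.
 */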
803 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
804 {
805         s16 used;
806         u16 prod;
807         u16 cons;
808
809         barrier(); /* Tell compiler that prod and cons can change */
810         prod = fp->tx_bd_prod;
811         cons = fp->tx_bd_cons;
812
813         /* NUM_TX_RINGS = number of "next-page" entries;
814            it is used as a threshold */
815         used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
816
817 #ifdef BNX2X_STOP_ON_ERROR
818         WARN_ON(used < 0);
819         WARN_ON(used > fp->bp->tx_ring_size);
820         WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
821 #endif
822
823         return (s16)(fp->bp->tx_ring_size) - used;
824 }
825
826 static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
827 {
828         struct bnx2x *bp = fp->bp;
829         u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
830         int done = 0;
831
832 #ifdef BNX2X_STOP_ON_ERROR
833         if (unlikely(bp->panic))
834                 return;
835 #endif
836
837         hw_cons = le16_to_cpu(*fp->tx_cons_sb);
838         sw_cons = fp->tx_pkt_cons;
839
840         while (sw_cons != hw_cons) {
841                 u16 pkt_cons;
842
843                 pkt_cons = TX_BD(sw_cons);
844
845                 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
846
847                 DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
848                    hw_cons, sw_cons, pkt_cons);
849
850 /*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
851                         rmb();
852                         prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
853                 }
854 */
855                 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
856                 sw_cons++;
857                 done++;
858
859                 if (done == work)
860                         break;
861         }
862
863         fp->tx_pkt_cons = sw_cons;
864         fp->tx_bd_cons = bd_cons;
865
866         /* Need to make the tx_cons update visible to start_xmit()
867          * before checking for netif_queue_stopped().  Without the
868          * memory barrier, there is a small possibility that start_xmit()
869          * will miss it and cause the queue to be stopped forever.
870          */
871         smp_mb();
872
873         /* TBD need a thresh? */
874         if (unlikely(netif_queue_stopped(bp->dev))) {
875
876                 netif_tx_lock(bp->dev);
877
878                 if (netif_queue_stopped(bp->dev) &&
879                     (bp->state == BNX2X_STATE_OPEN) &&
880                     (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
881                         netif_wake_queue(bp->dev);
882
883                 netif_tx_unlock(bp->dev);
884         }
885 }
886
887
888 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
889                            union eth_rx_cqe *rr_cqe)
890 {
891         struct bnx2x *bp = fp->bp;
892         int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
893         int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
894
895         DP(BNX2X_MSG_SP,
896            "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
897            FP_IDX(fp), cid, command, bp->state,
898            rr_cqe->ramrod_cqe.ramrod_type);
899
900         bp->spq_left++;
901
902         if (FP_IDX(fp)) {
903                 switch (command | fp->state) {
904                 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
905                                                 BNX2X_FP_STATE_OPENING):
906                         DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
907                            cid);
908                         fp->state = BNX2X_FP_STATE_OPEN;
909                         break;
910
911                 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
912                         DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
913                            cid);
914                         fp->state = BNX2X_FP_STATE_HALTED;
915                         break;
916
917                 default:
918                         BNX2X_ERR("unexpected MC reply (%d)  "
919                                   "fp->state is %x\n", command, fp->state);
920                         break;
921                 }
922                 mb(); /* force bnx2x_wait_ramrod() to see the change */
923                 return;
924         }
925
926         switch (command | bp->state) {
927         case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
928                 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
929                 bp->state = BNX2X_STATE_OPEN;
930                 break;
931
932         case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
933                 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
934                 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
935                 fp->state = BNX2X_FP_STATE_HALTED;
936                 break;
937
938         case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
939                 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
940                 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
941                 break;
942
943
944         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
945         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
946                 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
947                 bp->set_mac_pending = 0;
948                 break;
949
950         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
951                 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
952                 break;
953
954         default:
955                 BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
956                           command, bp->state);
957                 break;
958         }
959         mb(); /* force bnx2x_wait_ramrod() to see the change */
960 }
961
962 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
963                                      struct bnx2x_fastpath *fp, u16 index)
964 {
965         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
966         struct page *page = sw_buf->page;
967         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
968
969         /* Skip "next page" elements */
970         if (!page)
971                 return;
972
973         pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
974                        BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
975         __free_pages(page, PAGES_PER_SGE_SHIFT);
976
977         sw_buf->page = NULL;
978         sge->addr_hi = 0;
979         sge->addr_lo = 0;
980 }
981
982 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
983                                            struct bnx2x_fastpath *fp, int last)
984 {
985         int i;
986
987         for (i = 0; i < last; i++)
988                 bnx2x_free_rx_sge(bp, fp, i);
989 }
990
991 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
992                                      struct bnx2x_fastpath *fp, u16 index)
993 {
994         struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
995         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
996         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
997         dma_addr_t mapping;
998
999         if (unlikely(page == NULL))
1000                 return -ENOMEM;
1001
1002         mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
1003                                PCI_DMA_FROMDEVICE);
1004         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1005                 __free_pages(page, PAGES_PER_SGE_SHIFT);
1006                 return -ENOMEM;
1007         }
1008
1009         sw_buf->page = page;
1010         pci_unmap_addr_set(sw_buf, mapping, mapping);
1011
1012         sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1013         sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1014
1015         return 0;
1016 }
1017
1018 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1019                                      struct bnx2x_fastpath *fp, u16 index)
1020 {
1021         struct sk_buff *skb;
1022         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1023         struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1024         dma_addr_t mapping;
1025
1026         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1027         if (unlikely(skb == NULL))
1028                 return -ENOMEM;
1029
1030         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1031                                  PCI_DMA_FROMDEVICE);
1032         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1033                 dev_kfree_skb(skb);
1034                 return -ENOMEM;
1035         }
1036
1037         rx_buf->skb = skb;
1038         pci_unmap_addr_set(rx_buf, mapping, mapping);
1039
1040         rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1041         rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1042
1043         return 0;
1044 }
1045
1046 /* note that we are not allocating a new skb,
1047  * we are just moving one from cons to prod
1048  * we are not creating a new mapping,
1049  * so there is no need to check for dma_mapping_error().
1050  */
1051 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1052                                struct sk_buff *skb, u16 cons, u16 prod)
1053 {
1054         struct bnx2x *bp = fp->bp;
1055         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1056         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1057         struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1058         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1059
1060         pci_dma_sync_single_for_device(bp->pdev,
1061                                        pci_unmap_addr(cons_rx_buf, mapping),
1062                                        bp->rx_offset + RX_COPY_THRESH,
1063                                        PCI_DMA_FROMDEVICE);
1064
1065         prod_rx_buf->skb = cons_rx_buf->skb;
1066         pci_unmap_addr_set(prod_rx_buf, mapping,
1067                            pci_unmap_addr(cons_rx_buf, mapping));
1068         *prod_bd = *cons_bd;
1069 }
1070
1071 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1072                                              u16 idx)
1073 {
1074         u16 last_max = fp->last_max_sge;
1075
1076         if (SUB_S16(idx, last_max) > 0)
1077                 fp->last_max_sge = idx;
1078 }
1079
1080 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1081 {
1082         int i, j;
1083
1084         for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1085                 int idx = RX_SGE_CNT * i - 1;
1086
1087                 for (j = 0; j < 2; j++) {
1088                         SGE_MASK_CLEAR_BIT(fp, idx);
1089                         idx--;
1090                 }
1091         }
1092 }
1093
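/* Advance the SGE producer after a TPA aggregation completes: a mask bit is
 * cleared here for every page the firmware reports consumed in the CQE, and
 * the producer is moved forward only over 64-bit mask elements that have gone
 * completely to zero (those elements are then reset to all ones).
 */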
1094 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1095                                   struct eth_fast_path_rx_cqe *fp_cqe)
1096 {
1097         struct bnx2x *bp = fp->bp;
1098         u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1099                                      le16_to_cpu(fp_cqe->len_on_bd)) >>
1100                       BCM_PAGE_SHIFT;
1101         u16 last_max, last_elem, first_elem;
1102         u16 delta = 0;
1103         u16 i;
1104
1105         if (!sge_len)
1106                 return;
1107
1108         /* First mark all used pages */
1109         for (i = 0; i < sge_len; i++)
1110                 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1111
1112         DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1113            sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1114
1115         /* Here we assume that the last SGE index is the biggest */
1116         prefetch((void *)(fp->sge_mask));
1117         bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1118
1119         last_max = RX_SGE(fp->last_max_sge);
1120         last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1121         first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1122
1123         /* If ring is not full */
1124         if (last_elem + 1 != first_elem)
1125                 last_elem++;
1126
1127         /* Now update the prod */
1128         for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1129                 if (likely(fp->sge_mask[i]))
1130                         break;
1131
1132                 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1133                 delta += RX_SGE_MASK_ELEM_SZ;
1134         }
1135
1136         if (delta > 0) {
1137                 fp->rx_sge_prod += delta;
1138                 /* clear page-end entries */
1139                 bnx2x_clear_sge_mask_next_elems(fp);
1140         }
1141
1142         DP(NETIF_MSG_RX_STATUS,
1143            "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
1144            fp->last_max_sge, fp->rx_sge_prod);
1145 }
1146
1147 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1148 {
1149         /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1150         memset(fp->sge_mask, 0xff,
1151                (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1152
1153         /* Clear the two last indices in the page to 1:
1154            these are the indices that correspond to the "next" element,
1155            hence will never be indicated and should be removed from
1156            the calculations. */
1157         bnx2x_clear_sge_mask_next_elems(fp);
1158 }
1159
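/* Start of a TPA aggregation: the spare skb parked in tpa_pool[queue] is
 * mapped and installed at the producer slot, while the buffer currently at
 * the consumer (holding the start of the aggregated frame) is moved into the
 * pool, still mapped, until bnx2x_tpa_stop() completes it.
 */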
1160 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1161                             struct sk_buff *skb, u16 cons, u16 prod)
1162 {
1163         struct bnx2x *bp = fp->bp;
1164         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1165         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1166         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1167         dma_addr_t mapping;
1168
1169         /* move empty skb from pool to prod and map it */
1170         prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1171         mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1172                                  bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
1173         pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1174
1175         /* move partial skb from cons to pool (don't unmap yet) */
1176         fp->tpa_pool[queue] = *cons_rx_buf;
1177
1178         /* mark bin state as start - print error if current state != stop */
1179         if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1180                 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1181
1182         fp->tpa_state[queue] = BNX2X_TPA_START;
1183
1184         /* point prod_bd to new skb */
1185         prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1186         prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1187
1188 #ifdef BNX2X_STOP_ON_ERROR
1189         fp->tpa_queue_used |= (1 << queue);
1190 #ifdef __powerpc64__
1191         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1192 #else
1193         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1194 #endif
1195            fp->tpa_queue_used);
1196 #endif
1197 }
1198
1199 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1200                                struct sk_buff *skb,
1201                                struct eth_fast_path_rx_cqe *fp_cqe,
1202                                u16 cqe_idx)
1203 {
1204         struct sw_rx_page *rx_pg, old_rx_pg;
1205         struct page *sge;
1206         u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1207         u32 i, frag_len, frag_size, pages;
1208         int err;
1209         int j;
1210
1211         frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1212         pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT;
1213
1214         /* This is needed in order to enable forwarding support */
1215         if (frag_size)
1216                 skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE,
1217                                                max(frag_size, (u32)len_on_bd));
1218
1219 #ifdef BNX2X_STOP_ON_ERROR
1220         if (pages > 8*PAGES_PER_SGE) {
1221                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1222                           pages, cqe_idx);
1223                 BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
1224                           fp_cqe->pkt_len, len_on_bd);
1225                 bnx2x_panic();
1226                 return -EINVAL;
1227         }
1228 #endif
1229
1230         /* Run through the SGL and compose the fragmented skb */
1231         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1232                 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1233
1234                 /* FW gives the indices of the SGE as if the ring is an array
1235                    (meaning that "next" element will consume 2 indices) */
1236                 frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE));
1237                 rx_pg = &fp->rx_page_ring[sge_idx];
1238                 sge = rx_pg->page;
1239                 old_rx_pg = *rx_pg;
1240
1241                 /* If we fail to allocate a substitute page, we simply stop
1242                    where we are and drop the whole packet */
1243                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1244                 if (unlikely(err)) {
1245                         bp->eth_stats.rx_skb_alloc_failed++;
1246                         return err;
1247                 }
1248
1249                 /* Unmap the page as we are going to pass it to the stack */
1250                 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1251                               BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1252
1253                 /* Add one frag and update the appropriate fields in the skb */
1254                 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1255
1256                 skb->data_len += frag_len;
1257                 skb->truesize += frag_len;
1258                 skb->len += frag_len;
1259
1260                 frag_size -= frag_len;
1261         }
1262
1263         return 0;
1264 }
1265
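/* End of a TPA aggregation: unmap the pooled buffer, fix up the IP header
 * checksum (the header was modified during aggregation, so the on-wire
 * checksum is presumably stale), attach the SGE pages as frags and hand the
 * skb to the stack.  If a replacement skb cannot be allocated, the packet is
 * dropped and the old buffer stays in the bin.
 */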
1266 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1267                            u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1268                            u16 cqe_idx)
1269 {
1270         struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1271         struct sk_buff *skb = rx_buf->skb;
1272         /* alloc new skb */
1273         struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1274
1275         /* Unmap skb in the pool anyway, as we are going to change
1276            pool entry status to BNX2X_TPA_STOP even if new skb allocation
1277            fails. */
1278         pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1279                          bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
1280
1281         if (likely(new_skb)) {
1282                 /* fix ip xsum and give it to the stack */
1283                 /* (no need to map the new skb) */
1284
1285                 prefetch(skb);
1286                 prefetch(((char *)(skb)) + 128);
1287
1288 #ifdef BNX2X_STOP_ON_ERROR
1289                 if (pad + len > bp->rx_buf_size) {
1290                         BNX2X_ERR("skb_put is about to fail...  "
1291                                   "pad %d  len %d  rx_buf_size %d\n",
1292                                   pad, len, bp->rx_buf_size);
1293                         bnx2x_panic();
1294                         return;
1295                 }
1296 #endif
1297
1298                 skb_reserve(skb, pad);
1299                 skb_put(skb, len);
1300
1301                 skb->protocol = eth_type_trans(skb, bp->dev);
1302                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1303
1304                 {
1305                         struct iphdr *iph;
1306
1307                         iph = (struct iphdr *)skb->data;
1308                         iph->check = 0;
1309                         iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1310                 }
1311
1312                 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1313                                          &cqe->fast_path_cqe, cqe_idx)) {
1314 #ifdef BCM_VLAN
1315                         if ((bp->vlgrp != NULL) &&
1316                             (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1317                              PARSING_FLAGS_VLAN))
1318                                 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1319                                                 le16_to_cpu(cqe->fast_path_cqe.
1320                                                             vlan_tag));
1321                         else
1322 #endif
1323                                 netif_receive_skb(skb);
1324                 } else {
1325                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1326                            " - dropping packet!\n");
1327                         dev_kfree_skb(skb);
1328                 }
1329
1330                 bp->dev->last_rx = jiffies;
1331
1332                 /* put new skb in bin */
1333                 fp->tpa_pool[queue].skb = new_skb;
1334
1335         } else {
1336                 /* else drop the packet and keep the buffer in the bin */
1337                 DP(NETIF_MSG_RX_STATUS,
1338                    "Failed to allocate new skb - dropping packet!\n");
1339                 bp->eth_stats.rx_skb_alloc_failed++;
1340         }
1341
1342         fp->tpa_state[queue] = BNX2X_TPA_STOP;
1343 }
1344
1345 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1346                                         struct bnx2x_fastpath *fp,
1347                                         u16 bd_prod, u16 rx_comp_prod,
1348                                         u16 rx_sge_prod)
1349 {
1350         struct tstorm_eth_rx_producers rx_prods = {0};
1351         int i;
1352
1353         /* Update producers */
1354         rx_prods.bd_prod = bd_prod;
1355         rx_prods.cqe_prod = rx_comp_prod;
1356         rx_prods.sge_prod = rx_sge_prod;
1357
1358         for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
1359                 REG_WR(bp, BAR_TSTRORM_INTMEM +
1360                        TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
1361                        ((u32 *)&rx_prods)[i]);
1362
1363         DP(NETIF_MSG_RX_STATUS,
1364            "Wrote: bd_prod %u  cqe_prod %u  sge_prod %u\n",
1365            bd_prod, rx_comp_prod, rx_sge_prod);
1366 }
1367
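/* Main RX completion handler: walk the RCQ until the hardware completion
 * consumer is reached, dispatching each CQE either to bnx2x_sp_event()
 * (slowpath ramrod completions), to the TPA start/stop handlers above, or
 * through the regular single-skb receive path.
 */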
1368 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1369 {
1370         struct bnx2x *bp = fp->bp;
1371         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1372         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1373         int rx_pkt = 0;
1374
1375 #ifdef BNX2X_STOP_ON_ERROR
1376         if (unlikely(bp->panic))
1377                 return 0;
1378 #endif
1379
1380         /* CQ "next element" is of the size of the regular element,
1381            that's why it's ok here */
1382         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1383         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1384                 hw_comp_cons++;
1385
1386         bd_cons = fp->rx_bd_cons;
1387         bd_prod = fp->rx_bd_prod;
1388         bd_prod_fw = bd_prod;
1389         sw_comp_cons = fp->rx_comp_cons;
1390         sw_comp_prod = fp->rx_comp_prod;
1391
1392         /* Memory barrier necessary as speculative reads of the rx
1393          * buffer can be ahead of the index in the status block
1394          */
1395         rmb();
1396
1397         DP(NETIF_MSG_RX_STATUS,
1398            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1399            FP_IDX(fp), hw_comp_cons, sw_comp_cons);
1400
1401         while (sw_comp_cons != hw_comp_cons) {
1402                 struct sw_rx_bd *rx_buf = NULL;
1403                 struct sk_buff *skb;
1404                 union eth_rx_cqe *cqe;
1405                 u8 cqe_fp_flags;
1406                 u16 len, pad;
1407
1408                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1409                 bd_prod = RX_BD(bd_prod);
1410                 bd_cons = RX_BD(bd_cons);
1411
1412                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1413                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1414
1415                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1416                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1417                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1418                    cqe->fast_path_cqe.rss_hash_result,
1419                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1420                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1421
1422                 /* is this a slowpath msg? */
1423                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1424                         bnx2x_sp_event(fp, cqe);
1425                         goto next_cqe;
1426
1427                 /* this is an rx packet */
1428                 } else {
1429                         rx_buf = &fp->rx_buf_ring[bd_cons];
1430                         skb = rx_buf->skb;
1431                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1432                         pad = cqe->fast_path_cqe.placement_offset;
1433
1434                         /* If the CQE is marked both TPA_START and TPA_END,
1435                            it is a non-TPA CQE */
1436                         if ((!fp->disable_tpa) &&
1437                             (TPA_TYPE(cqe_fp_flags) !=
1438                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1439                                 u16 queue = cqe->fast_path_cqe.queue_index;
1440
1441                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1442                                         DP(NETIF_MSG_RX_STATUS,
1443                                            "calling tpa_start on queue %d\n",
1444                                            queue);
1445
1446                                         bnx2x_tpa_start(fp, queue, skb,
1447                                                         bd_cons, bd_prod);
1448                                         goto next_rx;
1449                                 }
1450
1451                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1452                                         DP(NETIF_MSG_RX_STATUS,
1453                                            "calling tpa_stop on queue %d\n",
1454                                            queue);
1455
1456                                         if (!BNX2X_RX_SUM_FIX(cqe))
1457                                                 BNX2X_ERR("STOP on non-TCP "
1458                                                           "data\n");
1459
1460                                         /* This is the size of the linear data
1461                                            on this skb */
1462                                         len = le16_to_cpu(cqe->fast_path_cqe.
1463                                                                 len_on_bd);
1464                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1465                                                     len, cqe, comp_ring_cons);
1466 #ifdef BNX2X_STOP_ON_ERROR
1467                                         if (bp->panic)
1468                                                 return -EINVAL;
1469 #endif
1470
1471                                         bnx2x_update_sge_prod(fp,
1472                                                         &cqe->fast_path_cqe);
1473                                         goto next_cqe;
1474                                 }
1475                         }
1476
1477                         pci_dma_sync_single_for_device(bp->pdev,
1478                                         pci_unmap_addr(rx_buf, mapping),
1479                                                        pad + RX_COPY_THRESH,
1480                                                        PCI_DMA_FROMDEVICE);
1481                         prefetch(skb);
1482                         prefetch(((char *)(skb)) + 128);
1483
1484                         /* is this an error packet? */
1485                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1486                                 DP(NETIF_MSG_RX_ERR,
1487                                    "ERROR  flags %x  rx packet %u\n",
1488                                    cqe_fp_flags, sw_comp_cons);
1489                                 bp->eth_stats.rx_err_discard_pkt++;
1490                                 goto reuse_rx;
1491                         }
1492
1493                         /* Since we don't have a jumbo ring,
1494                          * copy small packets if mtu > 1500
1495                          */
1496                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1497                             (len <= RX_COPY_THRESH)) {
1498                                 struct sk_buff *new_skb;
1499
1500                                 new_skb = netdev_alloc_skb(bp->dev,
1501                                                            len + pad);
1502                                 if (new_skb == NULL) {
1503                                         DP(NETIF_MSG_RX_ERR,
1504                                            "ERROR  packet dropped "
1505                                            "because of alloc failure\n");
1506                                         bp->eth_stats.rx_skb_alloc_failed++;
1507                                         goto reuse_rx;
1508                                 }
1509
1510                                 /* aligned copy */
1511                                 skb_copy_from_linear_data_offset(skb, pad,
1512                                                     new_skb->data + pad, len);
1513                                 skb_reserve(new_skb, pad);
1514                                 skb_put(new_skb, len);
1515
1516                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1517
1518                                 skb = new_skb;
1519
1520                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1521                                 pci_unmap_single(bp->pdev,
1522                                         pci_unmap_addr(rx_buf, mapping),
1523                                                  bp->rx_buf_use_size,
1524                                                  PCI_DMA_FROMDEVICE);
1525                                 skb_reserve(skb, pad);
1526                                 skb_put(skb, len);
1527
1528                         } else {
1529                                 DP(NETIF_MSG_RX_ERR,
1530                                    "ERROR  packet dropped because "
1531                                    "of alloc failure\n");
1532                                 bp->eth_stats.rx_skb_alloc_failed++;
1533 reuse_rx:
1534                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1535                                 goto next_rx;
1536                         }
1537
1538                         skb->protocol = eth_type_trans(skb, bp->dev);
1539
1540                         skb->ip_summed = CHECKSUM_NONE;
1541                         if (bp->rx_csum) {
1542                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1543                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1544                                 else
1545                                         bp->eth_stats.hw_csum_err++;
1546                         }
1547                 }
1548
1549 #ifdef BCM_VLAN
1550                 if ((bp->vlgrp != NULL) &&
1551                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1552                      PARSING_FLAGS_VLAN))
1553                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1554                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1555                 else
1556 #endif
1557                         netif_receive_skb(skb);
1558
1559                 bp->dev->last_rx = jiffies;
1560
1561 next_rx:
1562                 rx_buf->skb = NULL;
1563
1564                 bd_cons = NEXT_RX_IDX(bd_cons);
1565                 bd_prod = NEXT_RX_IDX(bd_prod);
1566                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1567                 rx_pkt++;
1568 next_cqe:
1569                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1570                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1571
1572                 if (rx_pkt == budget)
1573                         break;
1574         } /* while */
1575
1576         fp->rx_bd_cons = bd_cons;
1577         fp->rx_bd_prod = bd_prod_fw;
1578         fp->rx_comp_cons = sw_comp_cons;
1579         fp->rx_comp_prod = sw_comp_prod;
1580
1581         /* Update producers */
1582         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1583                              fp->rx_sge_prod);
1584         mmiowb(); /* keep prod updates ordered */
1585
1586         fp->rx_pkt += rx_pkt;
1587         fp->rx_calls++;
1588
1589         return rx_pkt;
1590 }
1591
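/* MSI-X fastpath interrupt - disables the status block interrupt in the
   IGU and defers the RX/TX work to NAPI */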
1592 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1593 {
1594         struct bnx2x_fastpath *fp = fp_cookie;
1595         struct bnx2x *bp = fp->bp;
1596         struct net_device *dev = bp->dev;
1597         int index = FP_IDX(fp);
1598
1599         /* Return here if interrupt is disabled */
1600         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1601                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1602                 return IRQ_HANDLED;
1603         }
1604
1605         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1606            index, FP_SB_ID(fp));
1607         bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
1608
1609 #ifdef BNX2X_STOP_ON_ERROR
1610         if (unlikely(bp->panic))
1611                 return IRQ_HANDLED;
1612 #endif
1613
1614         prefetch(fp->rx_cons_sb);
1615         prefetch(fp->tx_cons_sb);
1616         prefetch(&fp->status_blk->c_status_block.status_block_index);
1617         prefetch(&fp->status_blk->u_status_block.status_block_index);
1618
1619         netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));
1620
1621         return IRQ_HANDLED;
1622 }
1623
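/* INT#A (possibly shared) interrupt handler - acks the interrupt status,
   schedules NAPI for the fastpath and defers slowpath handling to sp_task */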
1624 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1625 {
1626         struct net_device *dev = dev_instance;
1627         struct bnx2x *bp = netdev_priv(dev);
1628         u16 status = bnx2x_ack_int(bp);
1629         u16 mask;
1630
1631         /* Return here if interrupt is shared and it's not for us */
1632         if (unlikely(status == 0)) {
1633                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1634                 return IRQ_NONE;
1635         }
1636         DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);
1637
1638         /* Return here if interrupt is disabled */
1639         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1640                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1641                 return IRQ_HANDLED;
1642         }
1643
1644 #ifdef BNX2X_STOP_ON_ERROR
1645         if (unlikely(bp->panic))
1646                 return IRQ_HANDLED;
1647 #endif
1648
1649         mask = 0x2 << bp->fp[0].sb_id;
1650         if (status & mask) {
1651                 struct bnx2x_fastpath *fp = &bp->fp[0];
1652
1653                 prefetch(fp->rx_cons_sb);
1654                 prefetch(fp->tx_cons_sb);
1655                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1656                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1657
1658                 netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));
1659
1660                 status &= ~mask;
1661         }
1662
1663
1664         if (unlikely(status & 0x1)) {
1665                 schedule_work(&bp->sp_task);
1666
1667                 status &= ~0x1;
1668                 if (!status)
1669                         return IRQ_HANDLED;
1670         }
1671
1672         if (status)
1673                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1674                    status);
1675
1676         return IRQ_HANDLED;
1677 }
1678
1679 /* end of fast path */
1680
1681 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1682
1683 /* Link */
1684
1685 /*
1686  * General service functions
1687  */
1688
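/* Acquire a HW resource lock - write the resource bit to the "set"
   register and poll the control register every 5ms for up to 1 second */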
1689 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1690 {
1691         u32 lock_status;
1692         u32 resource_bit = (1 << resource);
1693         int func = BP_FUNC(bp);
1694         u32 hw_lock_control_reg;
1695         int cnt;
1696
1697         /* Validating that the resource is within range */
1698         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1699                 DP(NETIF_MSG_HW,
1700                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1701                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1702                 return -EINVAL;
1703         }
1704
1705         if (func <= 5) {
1706                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1707         } else {
1708                 hw_lock_control_reg =
1709                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1710         }
1711
1712         /* Validating that the resource is not already taken */
1713         lock_status = REG_RD(bp, hw_lock_control_reg);
1714         if (lock_status & resource_bit) {
1715                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1716                    lock_status, resource_bit);
1717                 return -EEXIST;
1718         }
1719
1720         /* Try for 1 second, polling every 5ms */
1721         for (cnt = 0; cnt < 200; cnt++) {
1722                 /* Try to acquire the lock */
1723                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1724                 lock_status = REG_RD(bp, hw_lock_control_reg);
1725                 if (lock_status & resource_bit)
1726                         return 0;
1727
1728                 msleep(5);
1729         }
1730         DP(NETIF_MSG_HW, "Timeout\n");
1731         return -EAGAIN;
1732 }
1733
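/* Release a previously acquired HW resource lock by writing the resource
   bit back to the lock control register */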
1734 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1735 {
1736         u32 lock_status;
1737         u32 resource_bit = (1 << resource);
1738         int func = BP_FUNC(bp);
1739         u32 hw_lock_control_reg;
1740
1741         /* Validating that the resource is within range */
1742         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1743                 DP(NETIF_MSG_HW,
1744                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1745                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1746                 return -EINVAL;
1747         }
1748
1749         if (func <= 5) {
1750                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1751         } else {
1752                 hw_lock_control_reg =
1753                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1754         }
1755
1756         /* Validating that the resource is currently taken */
1757         lock_status = REG_RD(bp, hw_lock_control_reg);
1758         if (!(lock_status & resource_bit)) {
1759                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1760                    lock_status, resource_bit);
1761                 return -EFAULT;
1762         }
1763
1764         REG_WR(bp, hw_lock_control_reg, resource_bit);
1765         return 0;
1766 }
1767
1768 /* HW Lock for shared dual port PHYs */
1769 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1770 {
1771         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1772
1773         mutex_lock(&bp->port.phy_mutex);
1774
1775         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1776             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1777                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1778 }
1779
1780 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1781 {
1782         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1783
1784         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1785             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1786                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1787
1788         mutex_unlock(&bp->port.phy_mutex);
1789 }
1790
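/* Drive a GPIO pin (output low/high or floating input) under the GPIO HW
   lock, taking the port swap strap into account */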
1791 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1792 {
1793         /* The GPIO should be swapped if swap register is set and active */
1794         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1795                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1796         int gpio_shift = gpio_num +
1797                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1798         u32 gpio_mask = (1 << gpio_shift);
1799         u32 gpio_reg;
1800
1801         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1802                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1803                 return -EINVAL;
1804         }
1805
1806         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1807         /* read GPIO and mask out everything except the float bits */
1808         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1809
1810         switch (mode) {
1811         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1812                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1813                    gpio_num, gpio_shift);
1814                 /* clear FLOAT and set CLR */
1815                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1816                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1817                 break;
1818
1819         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1820                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1821                    gpio_num, gpio_shift);
1822                 /* clear FLOAT and set SET */
1823                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1824                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1825                 break;
1826
1827         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1828                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1829                    gpio_num, gpio_shift);
1830                 /* set FLOAT */
1831                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1832                 break;
1833
1834         default:
1835                 break;
1836         }
1837
1838         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1839         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1840
1841         return 0;
1842 }
1843
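/* Drive one of the shared SPIO pins (4-7) as output low/high or floating
   input under the SPIO HW lock */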
1844 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1845 {
1846         u32 spio_mask = (1 << spio_num);
1847         u32 spio_reg;
1848
1849         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1850             (spio_num > MISC_REGISTERS_SPIO_7)) {
1851                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1852                 return -EINVAL;
1853         }
1854
1855         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1856         /* read SPIO and mask out everything except the float bits */
1857         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1858
1859         switch (mode) {
1860         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1861                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1862                 /* clear FLOAT and set CLR */
1863                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1864                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1865                 break;
1866
1867         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1868                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1869                 /* clear FLOAT and set SET */
1870                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1871                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1872                 break;
1873
1874         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1875                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1876                 /* set FLOAT */
1877                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1878                 break;
1879
1880         default:
1881                 break;
1882         }
1883
1884         REG_WR(bp, MISC_REG_SPIO, spio_reg);
1885         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1886
1887         return 0;
1888 }
1889
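/* Map the IEEE flow control setting onto the ethtool Pause/Asym_Pause
   advertising bits */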
1890 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
1891 {
1892         switch (bp->link_vars.ieee_fc) {
1893         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1894                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1895                                           ADVERTISED_Pause);
1896                 break;
1897         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1898                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
1899                                          ADVERTISED_Pause);
1900                 break;
1901         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1902                 bp->port.advertising |= ADVERTISED_Asym_Pause;
1903                 break;
1904         default:
1905                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1906                                           ADVERTISED_Pause);
1907                 break;
1908         }
1909 }
1910
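/* Update the carrier state and log the link status (speed, duplex and
   flow control) */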
1911 static void bnx2x_link_report(struct bnx2x *bp)
1912 {
1913         if (bp->link_vars.link_up) {
1914                 if (bp->state == BNX2X_STATE_OPEN)
1915                         netif_carrier_on(bp->dev);
1916                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
1917
1918                 printk("%d Mbps ", bp->link_vars.line_speed);
1919
1920                 if (bp->link_vars.duplex == DUPLEX_FULL)
1921                         printk("full duplex");
1922                 else
1923                         printk("half duplex");
1924
1925                 if (bp->link_vars.flow_ctrl != FLOW_CTRL_NONE) {
1926                         if (bp->link_vars.flow_ctrl & FLOW_CTRL_RX) {
1927                                 printk(", receive ");
1928                                 if (bp->link_vars.flow_ctrl & FLOW_CTRL_TX)
1929                                         printk("& transmit ");
1930                         } else {
1931                                 printk(", transmit ");
1932                         }
1933                         printk("flow control ON");
1934                 }
1935                 printk("\n");
1936
1937         } else { /* link_down */
1938                 netif_carrier_off(bp->dev);
1939                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
1940         }
1941 }
1942
1943 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1944 {
1945         if (!BP_NOMCP(bp)) {
1946                 u8 rc;
1947
1948                 /* Initialize link parameters structure variables */
1949                 /* It is recommended to turn off RX FC for jumbo frames
1950                    for better performance */
1951                 if (IS_E1HMF(bp))
1952                         bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;
1953                 else if (bp->dev->mtu > 5000)
1954                         bp->link_params.req_fc_auto_adv = FLOW_CTRL_TX;
1955                 else
1956                         bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;
1957
1958                 bnx2x_acquire_phy_lock(bp);
1959                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1960                 bnx2x_release_phy_lock(bp);
1961
1962                 if (bp->link_vars.link_up)
1963                         bnx2x_link_report(bp);
1964
1965                 bnx2x_calc_fc_adv(bp);
1966
1967                 return rc;
1968         }
1969         BNX2X_ERR("Bootcode is missing - not initializing link\n");
1970         return -EINVAL;
1971 }
1972
1973 static void bnx2x_link_set(struct bnx2x *bp)
1974 {
1975         if (!BP_NOMCP(bp)) {
1976                 bnx2x_acquire_phy_lock(bp);
1977                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1978                 bnx2x_release_phy_lock(bp);
1979
1980                 bnx2x_calc_fc_adv(bp);
1981         } else
1982                 BNX2X_ERR("Bootcode is missing - not setting link\n");
1983 }
1984
1985 static void bnx2x__link_reset(struct bnx2x *bp)
1986 {
1987         if (!BP_NOMCP(bp)) {
1988                 bnx2x_acquire_phy_lock(bp);
1989                 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
1990                 bnx2x_release_phy_lock(bp);
1991         } else
1992                 BNX2X_ERR("Bootcode is missing - not resetting link\n");
1993 }
1994
1995 static u8 bnx2x_link_test(struct bnx2x *bp)
1996 {
1997         u8 rc;
1998
1999         bnx2x_acquire_phy_lock(bp);
2000         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2001         bnx2x_release_phy_lock(bp);
2002
2003         return rc;
2004 }
2005
2006 /* Calculates the sum of vn_min_rates.
2007    It's needed for further normalizing of the min_rates.
2008
2009    Returns:
2010      sum of vn_min_rates
2011        or
2012      0 - if all the min_rates are 0.
2013      In the later case fairness algorithm should be deactivated.
2014      In the latter case the fairness algorithm should be deactivated.
2015      If not all min_rates are zero, then those that are zero will
2016      be set to 1.
2017 static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2018 {
2019         int i, port = BP_PORT(bp);
2020         u32 wsum = 0;
2021         int all_zero = 1;
2022
2023         for (i = 0; i < E1HVN_MAX; i++) {
2024                 u32 vn_cfg =
2025                         SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2026                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2027                                      FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2028                 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2029                         /* If min rate is zero - set it to 1 */
2030                         if (!vn_min_rate)
2031                                 vn_min_rate = DEF_MIN_RATE;
2032                         else
2033                                 all_zero = 0;
2034
2035                         wsum += vn_min_rate;
2036                 }
2037         }
2038
2039         /* ... only if all min rates are zeros - disable FAIRNESS */
2040         if (all_zero)
2041                 return 0;
2042
2043         return wsum;
2044 }
2045
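/* Set up the per-port rate shaping and fairness context and store it in
   the XSTORM internal memory (minmax is only enabled in E1HMF mode) */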
2046 static void bnx2x_init_port_minmax(struct bnx2x *bp,
2047                                    int en_fness,
2048                                    u16 port_rate,
2049                                    struct cmng_struct_per_port *m_cmng_port)
2050 {
2051         u32 r_param = port_rate / 8;
2052         int port = BP_PORT(bp);
2053         int i;
2054
2055         memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2056
2057         /* Enable minmax only if we are in e1hmf mode */
2058         if (IS_E1HMF(bp)) {
2059                 u32 fair_periodic_timeout_usec;
2060                 u32 t_fair;
2061
2062                 /* Enable rate shaping and fairness */
2063                 m_cmng_port->flags.cmng_vn_enable = 1;
2064                 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2065                 m_cmng_port->flags.rate_shaping_enable = 1;
2066
2067                 if (!en_fness)
2068                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2069                            "  fairness will be disabled\n");
2070
2071                 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2072                 m_cmng_port->rs_vars.rs_periodic_timeout =
2073                                                 RS_PERIODIC_TIMEOUT_USEC / 4;
2074
2075                 /* this is the threshold below which no timer arming will occur;
2076                    the 1.25 coefficient makes the threshold a little bigger
2077                    than the real time, to compensate for timer inaccuracy */
2078                 m_cmng_port->rs_vars.rs_threshold =
2079                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2080
2081                 /* resolution of fairness timer */
2082                 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2083                 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2084                 t_fair = T_FAIR_COEF / port_rate;
2085
2086                 /* this is the threshold below which we won't arm
2087                    the timer anymore */
2088                 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2089
2090                 /* we multiply by 1e3/8 to get bytes/msec.
2091                    We don't want the credits to exceed a credit
2092                    of T_FAIR*FAIR_MEM (the algorithm resolution) */
2093                 m_cmng_port->fair_vars.upper_bound =
2094                                                 r_param * t_fair * FAIR_MEM;
2095                 /* since each tick is 4 usec */
2096                 m_cmng_port->fair_vars.fairness_timeout =
2097                                                 fair_periodic_timeout_usec / 4;
2098
2099         } else {
2100                 /* Disable rate shaping and fairness */
2101                 m_cmng_port->flags.cmng_vn_enable = 0;
2102                 m_cmng_port->flags.fairness_enable = 0;
2103                 m_cmng_port->flags.rate_shaping_enable = 0;
2104
2105                 DP(NETIF_MSG_IFUP,
2106                    "Single function mode  minmax will be disabled\n");
2107         }
2108
2109         /* Store it to internal memory */
2110         for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2111                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2112                        XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2113                        ((u32 *)(m_cmng_port))[i]);
2114 }
2115
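/* Set up the per-vn rate shaping and fairness contexts from the function's
   MF configuration and store them in the XSTORM internal memory */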
2116 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2117                                    u32 wsum, u16 port_rate,
2118                                  struct cmng_struct_per_port *m_cmng_port)
2119 {
2120         struct rate_shaping_vars_per_vn m_rs_vn;
2121         struct fairness_vars_per_vn m_fair_vn;
2122         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2123         u16 vn_min_rate, vn_max_rate;
2124         int i;
2125
2126         /* If function is hidden - set min and max to zeroes */
2127         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2128                 vn_min_rate = 0;
2129                 vn_max_rate = 0;
2130
2131         } else {
2132                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2133                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2134                 /* If FAIRNESS is enabled (not all min rates are zero) and
2135                    the current min rate is zero - set it to 1.
2136                    This is a requirement of the algorithm. */
2137                 if ((vn_min_rate == 0) && wsum)
2138                         vn_min_rate = DEF_MIN_RATE;
2139                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2140                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2141         }
2142
2143         DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d  vn_max_rate=%d  "
2144            "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2145
2146         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2147         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2148
2149         /* global vn counter - maximal Mbps for this vn */
2150         m_rs_vn.vn_counter.rate = vn_max_rate;
2151
2152         /* quota - number of bytes transmitted in this period */
2153         m_rs_vn.vn_counter.quota =
2154                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2155
2156 #ifdef BNX2X_PER_PROT_QOS
2157         /* per protocol counter */
2158         for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2159                 /* maximal Mbps for this protocol */
2160                 m_rs_vn.protocol_counters[protocol].rate =
2161                                                 protocol_max_rate[protocol];
2162                 /* the quota in each timer period -
2163                    number of bytes transmitted in this period */
2164                 m_rs_vn.protocol_counters[protocol].quota =
2165                         (u32)(rs_periodic_timeout_usec *
2166                           ((double)m_rs_vn.
2167                                    protocol_counters[protocol].rate/8));
2168         }
2169 #endif
2170
2171         if (wsum) {
2172                 /* credit for each period of the fairness algorithm:
2173                    number of bytes in T_FAIR (the vns share the port rate).
2174                    wsum should not be larger than 10000, thus
2175                    T_FAIR_COEF / (8 * wsum) will always be greater than zero */
2176                 m_fair_vn.vn_credit_delta =
2177                         max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2178                             (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2179                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2180                    m_fair_vn.vn_credit_delta);
2181         }
2182
2183 #ifdef BNX2X_PER_PROT_QOS
2184         do {
2185                 u32 protocolWeightSum = 0;
2186
2187                 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2188                         protocolWeightSum +=
2189                                         drvInit.protocol_min_rate[protocol];
2190                 /* per protocol counter -
2191                    NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2192                 if (protocolWeightSum > 0) {
2193                         for (protocol = 0;
2194                              protocol < NUM_OF_PROTOCOLS; protocol++)
2195                                 /* credit for each period of the
2196                                    fairness algorithm - number of bytes in
2197                                    T_FAIR (the protocols share the vn rate) */
2198                                 m_fair_vn.protocol_credit_delta[protocol] =
2199                                         (u32)((vn_min_rate / 8) * t_fair *
2200                                         protocol_min_rate / protocolWeightSum);
2201                 }
2202         } while (0);
2203 #endif
2204
2205         /* Store it to internal memory */
2206         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2207                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2208                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2209                        ((u32 *)(&m_rs_vn))[i]);
2210
2211         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2212                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2213                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2214                        ((u32 *)(&m_fair_vn))[i]);
2215 }
2216
2217 /* This function is called upon link interrupt */
2218 static void bnx2x_link_attn(struct bnx2x *bp)
2219 {
2220         int vn;
2221
2222         /* Make sure that we are synced with the current statistics */
2223         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2224
2225         bnx2x_acquire_phy_lock(bp);
2226         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2227         bnx2x_release_phy_lock(bp);
2228
2229         if (bp->link_vars.link_up) {
2230
2231                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2232                         struct host_port_stats *pstats;
2233
2234                         pstats = bnx2x_sp(bp, port_stats);
2235                         /* reset old bmac stats */
2236                         memset(&(pstats->mac_stx[0]), 0,
2237                                sizeof(struct mac_stx));
2238                 }
2239                 if ((bp->state == BNX2X_STATE_OPEN) ||
2240                     (bp->state == BNX2X_STATE_DISABLED))
2241                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2242         }
2243
2244         /* indicate link status */
2245         bnx2x_link_report(bp);
2246
2247         if (IS_E1HMF(bp)) {
2248                 int func;
2249
2250                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2251                         if (vn == BP_E1HVN(bp))
2252                                 continue;
2253
2254                         func = ((vn << 1) | BP_PORT(bp));
2255
2256                         /* Set the attention towards other drivers
2257                            on the same port */
2258                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2259                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2260                 }
2261         }
2262
2263         if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2264                 struct cmng_struct_per_port m_cmng_port;
2265                 u32 wsum;
2266                 int port = BP_PORT(bp);
2267
2268                 /* Init RATE SHAPING and FAIRNESS contexts */
2269                 wsum = bnx2x_calc_vn_wsum(bp);
2270                 bnx2x_init_port_minmax(bp, (int)wsum,
2271                                         bp->link_vars.line_speed,
2272                                         &m_cmng_port);
2273                 if (IS_E1HMF(bp))
2274                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2275                                 bnx2x_init_vn_minmax(bp, 2*vn + port,
2276                                         wsum, bp->link_vars.line_speed,
2277                                                      &m_cmng_port);
2278         }
2279 }
2280
2281 static void bnx2x__link_status_update(struct bnx2x *bp)
2282 {
2283         if (bp->state != BNX2X_STATE_OPEN)
2284                 return;
2285
2286         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2287
2288         if (bp->link_vars.link_up)
2289                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2290         else
2291                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2292
2293         /* indicate link status */
2294         bnx2x_link_report(bp);
2295 }
2296
2297 static void bnx2x_pmf_update(struct bnx2x *bp)
2298 {
2299         int port = BP_PORT(bp);
2300         u32 val;
2301
2302         bp->port.pmf = 1;
2303         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2304
2305         /* enable nig attention */
2306         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2307         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2308         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2309
2310         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2311 }
2312
2313 /* end of Link */
2314
2315 /* slow path */
2316
2317 /*
2318  * General service functions
2319  */
2320
2321 /* the slow path queue is odd since completions arrive on the fastpath ring */
2322 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2323                          u32 data_hi, u32 data_lo, int common)
2324 {
2325         int func = BP_FUNC(bp);
2326
2327         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2328            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2329            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2330            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2331            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2332
2333 #ifdef BNX2X_STOP_ON_ERROR
2334         if (unlikely(bp->panic))
2335                 return -EIO;
2336 #endif
2337
2338         spin_lock_bh(&bp->spq_lock);
2339
2340         if (!bp->spq_left) {
2341                 BNX2X_ERR("BUG! SPQ ring full!\n");
2342                 spin_unlock_bh(&bp->spq_lock);
2343                 bnx2x_panic();
2344                 return -EBUSY;
2345         }
2346
2347         /* CID needs the port number to be encoded in it */
2348         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2349                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2350                                      HW_CID(bp, cid)));
2351         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2352         if (common)
2353                 bp->spq_prod_bd->hdr.type |=
2354                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2355
2356         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2357         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2358
2359         bp->spq_left--;
2360
2361         if (bp->spq_prod_bd == bp->spq_last_bd) {
2362                 bp->spq_prod_bd = bp->spq;
2363                 bp->spq_prod_idx = 0;
2364                 DP(NETIF_MSG_TIMER, "end of spq\n");
2365
2366         } else {
2367                 bp->spq_prod_bd++;
2368                 bp->spq_prod_idx++;
2369         }
2370
2371         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2372                bp->spq_prod_idx);
2373
2374         spin_unlock_bh(&bp->spq_lock);
2375         return 0;
2376 }
2377
2378 /* acquire split MCP access lock register */
2379 static int bnx2x_acquire_alr(struct bnx2x *bp)
2380 {
2381         u32 i, j, val;
2382         int rc = 0;
2383
2384         might_sleep();
2385         i = 100;
2386         for (j = 0; j < i*10; j++) {
2387                 val = (1UL << 31);
2388                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2389                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2390                 if (val & (1L << 31))
2391                         break;
2392
2393                 msleep(5);
2394         }
2395         if (!(val & (1L << 31))) {
2396                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2397                 rc = -EBUSY;
2398         }
2399
2400         return rc;
2401 }
2402
2403 /* release split MCP access lock register */
2404 static void bnx2x_release_alr(struct bnx2x *bp)
2405 {
2406         u32 val = 0;
2407
2408         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2409 }
2410
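/* Compare the default status block indices with the cached copies and
   return a bit mask of the blocks that have been updated */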
2411 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2412 {
2413         struct host_def_status_block *def_sb = bp->def_status_blk;
2414         u16 rc = 0;
2415
2416         barrier(); /* status block is written to by the chip */
2417         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2418                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2419                 rc |= 1;
2420         }
2421         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2422                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2423                 rc |= 2;
2424         }
2425         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2426                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2427                 rc |= 4;
2428         }
2429         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2430                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2431                 rc |= 8;
2432         }
2433         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2434                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2435                 rc |= 16;
2436         }
2437         return rc;
2438 }
2439
2440 /*
2441  * slow path service functions
2442  */
2443
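/* Handle newly asserted attention bits - mask them in the AEU, update the
   attention state, service the hard-wired NIG/GPIO/timer attentions and
   acknowledge the bits towards the HC */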
2444 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2445 {
2446         int port = BP_PORT(bp);
2447         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2448                        COMMAND_REG_ATTN_BITS_SET);
2449         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2450                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2451         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2452                                        NIG_REG_MASK_INTERRUPT_PORT0;
2453         u32 aeu_mask;
2454
2455         if (bp->attn_state & asserted)
2456                 BNX2X_ERR("IGU ERROR\n");
2457
2458         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2459         aeu_mask = REG_RD(bp, aeu_addr);
2460
2461         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2462            aeu_mask, asserted);
2463         aeu_mask &= ~(asserted & 0xff);
2464         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2465
2466         REG_WR(bp, aeu_addr, aeu_mask);
2467         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2468
2469         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2470         bp->attn_state |= asserted;
2471         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2472
2473         if (asserted & ATTN_HARD_WIRED_MASK) {
2474                 if (asserted & ATTN_NIG_FOR_FUNC) {
2475
2476                         /* save nig interrupt mask */
2477                         bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2478                         REG_WR(bp, nig_int_mask_addr, 0);
2479
2480                         bnx2x_link_attn(bp);
2481
2482                         /* handle unicore attn? */
2483                 }
2484                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2485                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2486
2487                 if (asserted & GPIO_2_FUNC)
2488                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2489
2490                 if (asserted & GPIO_3_FUNC)
2491                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2492
2493                 if (asserted & GPIO_4_FUNC)
2494                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2495
2496                 if (port == 0) {
2497                         if (asserted & ATTN_GENERAL_ATTN_1) {
2498                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2499                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2500                         }
2501                         if (asserted & ATTN_GENERAL_ATTN_2) {
2502                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2503                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2504                         }
2505                         if (asserted & ATTN_GENERAL_ATTN_3) {
2506                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2507                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2508                         }
2509                 } else {
2510                         if (asserted & ATTN_GENERAL_ATTN_4) {
2511                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2512                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2513                         }
2514                         if (asserted & ATTN_GENERAL_ATTN_5) {
2515                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2516                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2517                         }
2518                         if (asserted & ATTN_GENERAL_ATTN_6) {
2519                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2520                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2521                         }
2522                 }
2523
2524         } /* if hardwired */
2525
2526         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2527            asserted, hc_addr);
2528         REG_WR(bp, hc_addr, asserted);
2529
2530         /* now set back the mask */
2531         if (asserted & ATTN_NIG_FOR_FUNC)
2532                 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2533 }
2534
2535 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2536 {
2537         int port = BP_PORT(bp);
2538         int reg_offset;
2539         u32 val;
2540
2541         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2542                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2543
2544         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2545
2546                 val = REG_RD(bp, reg_offset);
2547                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2548                 REG_WR(bp, reg_offset, val);
2549
2550                 BNX2X_ERR("SPIO5 hw attention\n");
2551
2552                 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2553                 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2554                         /* Fan failure attention */
2555
2556                         /* The PHY reset is controlled by GPIO 1 */
2557                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2558                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2559                         /* Low power mode is controlled by GPIO 2 */
2560                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2561                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2562                         /* mark the failure */
2563                         bp->link_params.ext_phy_config &=
2564                                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2565                         bp->link_params.ext_phy_config |=
2566                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2567                         SHMEM_WR(bp,
2568                                  dev_info.port_hw_config[port].
2569                                                         external_phy_config,
2570                                  bp->link_params.ext_phy_config);
2571                         /* log the failure */
2572                         printk(KERN_ERR PFX "Fan Failure on Network"
2573                                " Controller %s has caused the driver to"
2574                                " shutdown the card to prevent permanent"
2575                                " damage.  Please contact Dell Support for"
2576                                " assistance\n", bp->dev->name);
2577                         break;
2578
2579                 default:
2580                         break;
2581                 }
2582         }
2583
2584         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2585
2586                 val = REG_RD(bp, reg_offset);
2587                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2588                 REG_WR(bp, reg_offset, val);
2589
2590                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2591                           (attn & HW_INTERRUT_ASSERT_SET_0));
2592                 bnx2x_panic();
2593         }
2594 }
2595
2596 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2597 {
2598         u32 val;
2599
2600         if (attn & BNX2X_DOORQ_ASSERT) {
2601
2602                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2603                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2604                 /* DORQ discard attention */
2605                 if (val & 0x2)
2606                         BNX2X_ERR("FATAL error from DORQ\n");
2607         }
2608
2609         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2610
2611                 int port = BP_PORT(bp);
2612                 int reg_offset;
2613
2614                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2615                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2616
2617                 val = REG_RD(bp, reg_offset);
2618                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2619                 REG_WR(bp, reg_offset, val);
2620
2621                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2622                           (attn & HW_INTERRUT_ASSERT_SET_1));
2623                 bnx2x_panic();
2624         }
2625 }
2626
2627 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2628 {
2629         u32 val;
2630
2631         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2632
2633                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2634                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2635                 /* CFC error attention */
2636                 if (val & 0x2)
2637                         BNX2X_ERR("FATAL error from CFC\n");
2638         }
2639
2640         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2641
2642                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2643                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2644                 /* RQ_USDMDP_FIFO_OVERFLOW */
2645                 if (val & 0x18000)
2646                         BNX2X_ERR("FATAL error from PXP\n");
2647         }
2648
2649         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2650
2651                 int port = BP_PORT(bp);
2652                 int reg_offset;
2653
2654                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2655                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2656
2657                 val = REG_RD(bp, reg_offset);
2658                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2659                 REG_WR(bp, reg_offset, val);
2660
2661                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2662                           (attn & HW_INTERRUT_ASSERT_SET_2));
2663                 bnx2x_panic();
2664         }
2665 }
2666
2667 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2668 {
2669         u32 val;
2670
2671         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2672
2673                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2674                         int func = BP_FUNC(bp);
2675
2676                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2677                         bnx2x__link_status_update(bp);
2678                         if (SHMEM_RD(bp, func_mb[func].drv_status) &
2679                                                         DRV_STATUS_PMF)
2680                                 bnx2x_pmf_update(bp);
2681
2682                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2683
2684                         BNX2X_ERR("MC assert!\n");
2685                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2686                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2687                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2688                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2689                         bnx2x_panic();
2690
2691                 } else if (attn & BNX2X_MCP_ASSERT) {
2692
2693                         BNX2X_ERR("MCP assert!\n");
2694                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2695                         bnx2x_fw_dump(bp);
2696
2697                 } else
2698                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2699         }
2700
2701         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2702                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2703                 if (attn & BNX2X_GRC_TIMEOUT) {
2704                         val = CHIP_IS_E1H(bp) ?
2705                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2706                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2707                 }
2708                 if (attn & BNX2X_GRC_RSV) {
2709                         val = CHIP_IS_E1H(bp) ?
2710                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2711                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2712                 }
2713                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2714         }
2715 }
2716
2717 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2718 {
2719         struct attn_route attn;
2720         struct attn_route group_mask;
2721         int port = BP_PORT(bp);
2722         int index;
2723         u32 reg_addr;
2724         u32 val;
2725         u32 aeu_mask;
2726
2727         /* need to take the HW lock because the MCP or the other port
2728            might also try to handle this event */
2729         bnx2x_acquire_alr(bp);
2730
2731         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2732         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2733         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2734         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2735         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2736            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2737
2738         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2739                 if (deasserted & (1 << index)) {
2740                         group_mask = bp->attn_group[index];
2741
2742                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2743                            index, group_mask.sig[0], group_mask.sig[1],
2744                            group_mask.sig[2], group_mask.sig[3]);
2745
2746                         bnx2x_attn_int_deasserted3(bp,
2747                                         attn.sig[3] & group_mask.sig[3]);
2748                         bnx2x_attn_int_deasserted1(bp,
2749                                         attn.sig[1] & group_mask.sig[1]);
2750                         bnx2x_attn_int_deasserted2(bp,
2751                                         attn.sig[2] & group_mask.sig[2]);
2752                         bnx2x_attn_int_deasserted0(bp,
2753                                         attn.sig[0] & group_mask.sig[0]);
2754
2755                         if ((attn.sig[0] & group_mask.sig[0] &
2756                                                 HW_PRTY_ASSERT_SET_0) ||
2757                             (attn.sig[1] & group_mask.sig[1] &
2758                                                 HW_PRTY_ASSERT_SET_1) ||
2759                             (attn.sig[2] & group_mask.sig[2] &
2760                                                 HW_PRTY_ASSERT_SET_2))
2761                                 BNX2X_ERR("FATAL HW block parity attention\n");
2762                 }
2763         }
2764
2765         bnx2x_release_alr(bp);
2766
2767         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2768
2769         val = ~deasserted;
2770         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2771            val, reg_addr);
2772         REG_WR(bp, reg_addr, val);
2773
2774         if (~bp->attn_state & deasserted)
2775                 BNX2X_ERR("IGU ERROR\n");
2776
2777         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2778                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2779
2780         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2781         aeu_mask = REG_RD(bp, reg_addr);
2782
2783         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2784            aeu_mask, deasserted);
2785         aeu_mask |= (deasserted & 0xff);
2786         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2787
2788         REG_WR(bp, reg_addr, aeu_mask);
2789         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2790
2791         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2792         bp->attn_state &= ~deasserted;
2793         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2794 }
2795
2796 static void bnx2x_attn_int(struct bnx2x *bp)
2797 {
2798         /* read local copy of bits */
2799         u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
2800         u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
2801         u32 attn_state = bp->attn_state;
2802
2803         /* look for changed bits */
2804         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2805         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
2806
2807         DP(NETIF_MSG_HW,
2808            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2809            attn_bits, attn_ack, asserted, deasserted);
2810
2811         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2812                 BNX2X_ERR("BAD attention state\n");
2813
2814         /* handle bits that were raised */
2815         if (asserted)
2816                 bnx2x_attn_int_asserted(bp, asserted);
2817
2818         if (deasserted)
2819                 bnx2x_attn_int_deasserted(bp, deasserted);
2820 }
2821
2822 static void bnx2x_sp_task(struct work_struct *work)
2823 {
2824         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
2825         u16 status;
2826
2827
2828         /* Return here if interrupt is disabled */
2829         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2830                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2831                 return;
2832         }
2833
2834         status = bnx2x_update_dsb_idx(bp);
2835 /*      if (status == 0)                                     */
2836 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2837
2838         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2839
2840         /* HW attentions */
2841         if (status & 0x1)
2842                 bnx2x_attn_int(bp);
2843
2844         /* CStorm events: query_stats, port delete ramrod */
2845         if (status & 0x2)
2846                 bp->stats_pending = 0;
2847
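        /* acknowledge the default status block indices (attention and all
         * storms); only the final ack, for TSTORM, re-enables the IGU
         * interrupt
         */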
2848         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
2849                      IGU_INT_NOP, 1);
2850         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2851                      IGU_INT_NOP, 1);
2852         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2853                      IGU_INT_NOP, 1);
2854         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2855                      IGU_INT_NOP, 1);
2856         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2857                      IGU_INT_ENABLE, 1);
2858
2859 }
2860
2861 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2862 {
2863         struct net_device *dev = dev_instance;
2864         struct bnx2x *bp = netdev_priv(dev);
2865
2866         /* Return here if interrupt is disabled */
2867         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2868                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2869                 return IRQ_HANDLED;
2870         }
2871
2872         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
2873
2874 #ifdef BNX2X_STOP_ON_ERROR
2875         if (unlikely(bp->panic))
2876                 return IRQ_HANDLED;
2877 #endif
2878
2879         schedule_work(&bp->sp_task);
2880
2881         return IRQ_HANDLED;
2882 }
2883
2884 /* end of slow path */
2885
2886 /* Statistics */
2887
2888 /****************************************************************************
2889 * Macros
2890 ****************************************************************************/
2891
2892 /* sum[hi:lo] += add[hi:lo] */
2893 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2894         do { \
2895                 s_lo += a_lo; \
2896                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2897         } while (0)
2898
2899 /* difference = minuend - subtrahend */
2900 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2901         do { \
2902                 if (m_lo < s_lo) { \
2903                         /* underflow */ \
2904                         d_hi = m_hi - s_hi; \
2905                         if (d_hi > 0) { \
2906                                 /* we can 'loan' 1 */ \
2907                                 d_hi--; \
2908                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2909                         } else { \
2910                                 /* m_hi <= s_hi */ \
2911                                 d_hi = 0; \
2912                                 d_lo = 0; \
2913                         } \
2914                 } else { \
2915                         /* m_lo >= s_lo */ \
2916                         if (m_hi < s_hi) { \
2917                                 d_hi = 0; \
2918                                 d_lo = 0; \
2919                         } else { \
2920                                 /* m_hi >= s_hi */ \
2921                                 d_hi = m_hi - s_hi; \
2922                                 d_lo = m_lo - s_lo; \
2923                         } \
2924                 } \
2925         } while (0)
2926
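/* UPDATE_STAT64: compute the delta of a 64-bit BMAC counter against the
 * previous snapshot kept in mac_stx[0], refresh the snapshot, and
 * accumulate the delta into the running totals in mac_stx[1]
 */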
2927 #define UPDATE_STAT64(s, t) \
2928         do { \
2929                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2930                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2931                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2932                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2933                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2934                        pstats->mac_stx[1].t##_lo, diff.lo); \
2935         } while (0)
2936
2937 #define UPDATE_STAT64_NIG(s, t) \
2938         do { \
2939                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2940                         diff.lo, new->s##_lo, old->s##_lo); \
2941                 ADD_64(estats->t##_hi, diff.hi, \
2942                        estats->t##_lo, diff.lo); \
2943         } while (0)
2944
2945 /* sum[hi:lo] += add */
2946 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2947         do { \
2948                 s_lo += a; \
2949                 s_hi += (s_lo < a) ? 1 : 0; \
2950         } while (0)
2951
2952 #define UPDATE_EXTEND_STAT(s) \
2953         do { \
2954                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2955                               pstats->mac_stx[1].s##_lo, \
2956                               new->s); \
2957         } while (0)
2958
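/* UPDATE_EXTEND_TSTAT/UPDATE_EXTEND_XSTAT: the storm counters are only
 * 32 bits wide, so extend them to 64 bits by accumulating the delta since
 * the previously saved value into the 64-bit function statistics
 */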
2959 #define UPDATE_EXTEND_TSTAT(s, t) \
2960         do { \
2961                 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2962                 old_tclient->s = le32_to_cpu(tclient->s); \
2963                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2964         } while (0)
2965
2966 #define UPDATE_EXTEND_XSTAT(s, t) \
2967         do { \
2968                 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2969                 old_xclient->s = le32_to_cpu(xclient->s); \
2970                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2971         } while (0)
2972
2973 /*
2974  * General service functions
2975  */
2976
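/* hiref points at the high 32-bit half of a {hi, lo} counter pair; on
 * 64-bit kernels both halves are combined, on 32-bit kernels only the low
 * half fits into the returned long
 */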
2977 static inline long bnx2x_hilo(u32 *hiref)
2978 {
2979         u32 lo = *(hiref + 1);
2980 #if (BITS_PER_LONG == 64)
2981         u32 hi = *hiref;
2982
2983         return HILO_U64(hi, lo);
2984 #else
2985         return lo;
2986 #endif
2987 }
2988
2989 /*
2990  * Init service functions
2991  */
2992
2993 static void bnx2x_storm_stats_post(struct bnx2x *bp)
2994 {
2995         if (!bp->stats_pending) {
2996                 struct eth_query_ramrod_data ramrod_data = {0};
2997                 int rc;
2998
2999                 ramrod_data.drv_counter = bp->stats_counter++;
3000                 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
3001                 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3002
3003                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3004                                    ((u32 *)&ramrod_data)[1],
3005                                    ((u32 *)&ramrod_data)[0], 0);
3006                 if (rc == 0) {
3007                         /* stats ramrod has its own slot on the spq */
3008                         bp->spq_left++;
3009                         bp->stats_pending = 1;
3010                 }
3011         }
3012 }
3013
3014 static void bnx2x_stats_init(struct bnx2x *bp)
3015 {
3016         int port = BP_PORT(bp);
3017
3018         bp->executer_idx = 0;
3019         bp->stats_counter = 0;
3020
3021         /* port stats */
3022         if (!BP_NOMCP(bp))
3023                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3024         else
3025                 bp->port.port_stx = 0;
3026         DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3027
3028         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3029         bp->port.old_nig_stats.brb_discard =
3030                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3031         bp->port.old_nig_stats.brb_truncate =
3032                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3033         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3034                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3035         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3036                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3037
3038         /* function stats */
3039         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3040         memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3041         memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3042         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3043
3044         bp->stats_state = STATS_STATE_DISABLED;
3045         if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3046                 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3047 }
3048
3049 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3050 {
3051         struct dmae_command *dmae = &bp->stats_dmae;
3052         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3053
3054         *stats_comp = DMAE_COMP_VAL;
3055
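        /* if DMAE commands were queued in the slowpath buffer
         * (executer_idx != 0), build a "loader" command that copies the
         * first queued command into DMAE command memory and kicks it off;
         * otherwise post the single prepared function-stats command
         * directly
         */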
3056         /* loader */
3057         if (bp->executer_idx) {
3058                 int loader_idx = PMF_DMAE_C(bp);
3059
3060                 memset(dmae, 0, sizeof(struct dmae_command));
3061
3062                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3063                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3064                                 DMAE_CMD_DST_RESET |
3065 #ifdef __BIG_ENDIAN
3066                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3067 #else
3068                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3069 #endif
3070                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3071                                                DMAE_CMD_PORT_0) |
3072                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3073                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3074                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3075                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3076                                      sizeof(struct dmae_command) *
3077                                      (loader_idx + 1)) >> 2;
3078                 dmae->dst_addr_hi = 0;
3079                 dmae->len = sizeof(struct dmae_command) >> 2;
3080                 if (CHIP_IS_E1(bp))
3081                         dmae->len--;
3082                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3083                 dmae->comp_addr_hi = 0;
3084                 dmae->comp_val = 1;
3085
3086                 *stats_comp = 0;
3087                 bnx2x_post_dmae(bp, dmae, loader_idx);
3088
3089         } else if (bp->func_stx) {
3090                 *stats_comp = 0;
3091                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3092         }
3093 }
3094
3095 static int bnx2x_stats_comp(struct bnx2x *bp)
3096 {
3097         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3098         int cnt = 10;
3099
3100         might_sleep();
3101         while (*stats_comp != DMAE_COMP_VAL) {
3102                 if (!cnt) {
3103                         BNX2X_ERR("timeout waiting for stats finished\n");
3104                         break;
3105                 }
3106                 cnt--;
3107                 msleep(1);
3108         }
3109         return 1;
3110 }
3111
3112 /*
3113  * Statistics service functions
3114  */
3115
3116 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3117 {
3118         struct dmae_command *dmae;
3119         u32 opcode;
3120         int loader_idx = PMF_DMAE_C(bp);
3121         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3122
3123         /* sanity */
3124         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3125                 BNX2X_ERR("BUG!\n");
3126                 return;
3127         }
3128
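        /* read the current port statistics image from shared memory
         * (port_stx) into the local port_stats buffer in two DMAE reads,
         * since a single read is limited to DMAE_LEN32_RD_MAX dwords
         */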
3129         bp->executer_idx = 0;
3130
3131         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3132                   DMAE_CMD_C_ENABLE |
3133                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3134 #ifdef __BIG_ENDIAN
3135                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3136 #else
3137                   DMAE_CMD_ENDIANITY_DW_SWAP |
3138 #endif
3139                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3140                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3141
3142         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3143         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3144         dmae->src_addr_lo = bp->port.port_stx >> 2;
3145         dmae->src_addr_hi = 0;
3146         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3147         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3148         dmae->len = DMAE_LEN32_RD_MAX;
3149         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3150         dmae->comp_addr_hi = 0;
3151         dmae->comp_val = 1;
3152
3153         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3154         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3155         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3156         dmae->src_addr_hi = 0;
3157         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3158                                    DMAE_LEN32_RD_MAX * 4);
3159         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3160                                    DMAE_LEN32_RD_MAX * 4);
3161         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3162         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3163         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3164         dmae->comp_val = DMAE_COMP_VAL;
3165
3166         *stats_comp = 0;
3167         bnx2x_hw_stats_post(bp);
3168         bnx2x_stats_comp(bp);
3169 }
3170
3171 static void bnx2x_port_stats_init(struct bnx2x *bp)
3172 {
3173         struct dmae_command *dmae;
3174         int port = BP_PORT(bp);
3175         int vn = BP_E1HVN(bp);
3176         u32 opcode;
3177         int loader_idx = PMF_DMAE_C(bp);
3178         u32 mac_addr;
3179         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3180
3181         /* sanity */
3182         if (!bp->link_vars.link_up || !bp->port.pmf) {
3183                 BNX2X_ERR("BUG!\n");
3184                 return;
3185         }
3186
3187         bp->executer_idx = 0;
3188
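        /* queue the DMAE commands that copy the driver port/function stats
         * out to MCP shared memory and read the MAC and NIG hardware
         * counters back into the slowpath buffers; the chain is executed
         * later by bnx2x_hw_stats_post()
         */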
3189         /* MCP */
3190         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3191                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3192                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3193 #ifdef __BIG_ENDIAN
3194                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3195 #else
3196                   DMAE_CMD_ENDIANITY_DW_SWAP |
3197 #endif
3198                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3199                   (vn << DMAE_CMD_E1HVN_SHIFT));
3200
3201         if (bp->port.port_stx) {
3202
3203                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3204                 dmae->opcode = opcode;
3205                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3206                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3207                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3208                 dmae->dst_addr_hi = 0;
3209                 dmae->len = sizeof(struct host_port_stats) >> 2;
3210                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3211                 dmae->comp_addr_hi = 0;
3212                 dmae->comp_val = 1;
3213         }
3214
3215         if (bp->func_stx) {
3216
3217                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3218                 dmae->opcode = opcode;
3219                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3220                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3221                 dmae->dst_addr_lo = bp->func_stx >> 2;
3222                 dmae->dst_addr_hi = 0;
3223                 dmae->len = sizeof(struct host_func_stats) >> 2;
3224                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3225                 dmae->comp_addr_hi = 0;
3226                 dmae->comp_val = 1;
3227         }
3228
3229         /* MAC */
3230         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3231                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3232                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3233 #ifdef __BIG_ENDIAN
3234                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3235 #else
3236                   DMAE_CMD_ENDIANITY_DW_SWAP |
3237 #endif
3238                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3239                   (vn << DMAE_CMD_E1HVN_SHIFT));
3240
3241         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3242
3243                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3244                                    NIG_REG_INGRESS_BMAC0_MEM);
3245
3246                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3247                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3248                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3249                 dmae->opcode = opcode;
3250                 dmae->src_addr_lo = (mac_addr +
3251                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3252                 dmae->src_addr_hi = 0;
3253                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3254                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3255                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3256                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3257                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3258                 dmae->comp_addr_hi = 0;
3259                 dmae->comp_val = 1;
3260
3261                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3262                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3263                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3264                 dmae->opcode = opcode;
3265                 dmae->src_addr_lo = (mac_addr +
3266                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3267                 dmae->src_addr_hi = 0;
3268                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3269                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3270                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3271                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3272                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3273                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3274                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3275                 dmae->comp_addr_hi = 0;
3276                 dmae->comp_val = 1;
3277
3278         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3279
3280                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3281
3282                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3283                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3284                 dmae->opcode = opcode;
3285                 dmae->src_addr_lo = (mac_addr +
3286                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3287                 dmae->src_addr_hi = 0;
3288                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3289                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3290                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3291                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3292                 dmae->comp_addr_hi = 0;
3293                 dmae->comp_val = 1;
3294
3295                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3296                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3297                 dmae->opcode = opcode;
3298                 dmae->src_addr_lo = (mac_addr +
3299                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3300                 dmae->src_addr_hi = 0;
3301                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3302                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3303                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3304                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3305                 dmae->len = 1;
3306                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3307                 dmae->comp_addr_hi = 0;
3308                 dmae->comp_val = 1;
3309
3310                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3311                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3312                 dmae->opcode = opcode;
3313                 dmae->src_addr_lo = (mac_addr +
3314                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3315                 dmae->src_addr_hi = 0;
3316                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3317                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3318                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3319                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3320                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3321                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3322                 dmae->comp_addr_hi = 0;
3323                 dmae->comp_val = 1;
3324         }
3325
3326         /* NIG */
3327         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3328         dmae->opcode = opcode;
3329         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3330                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3331         dmae->src_addr_hi = 0;
3332         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3333         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3334         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3335         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3336         dmae->comp_addr_hi = 0;
3337         dmae->comp_val = 1;
3338
3339         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3340         dmae->opcode = opcode;
3341         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3342                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3343         dmae->src_addr_hi = 0;
3344         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3345                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3346         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3347                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3348         dmae->len = (2*sizeof(u32)) >> 2;
3349         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3350         dmae->comp_addr_hi = 0;
3351         dmae->comp_val = 1;
3352
3353         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3354         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3355                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3356                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3357 #ifdef __BIG_ENDIAN
3358                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3359 #else
3360                         DMAE_CMD_ENDIANITY_DW_SWAP |
3361 #endif
3362                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3363                         (vn << DMAE_CMD_E1HVN_SHIFT));
3364         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3365                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3366         dmae->src_addr_hi = 0;
3367         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3368                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3369         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3370                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3371         dmae->len = (2*sizeof(u32)) >> 2;
3372         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3373         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3374         dmae->comp_val = DMAE_COMP_VAL;
3375
3376         *stats_comp = 0;
3377 }
3378
3379 static void bnx2x_func_stats_init(struct bnx2x *bp)
3380 {
3381         struct dmae_command *dmae = &bp->stats_dmae;
3382         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3383
3384         /* sanity */
3385         if (!bp->func_stx) {
3386                 BNX2X_ERR("BUG!\n");
3387                 return;
3388         }
3389
3390         bp->executer_idx = 0;
3391         memset(dmae, 0, sizeof(struct dmae_command));
3392
3393         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3394                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3395                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3396 #ifdef __BIG_ENDIAN
3397                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3398 #else
3399                         DMAE_CMD_ENDIANITY_DW_SWAP |
3400 #endif
3401                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3402                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3403         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3404         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3405         dmae->dst_addr_lo = bp->func_stx >> 2;
3406         dmae->dst_addr_hi = 0;
3407         dmae->len = sizeof(struct host_func_stats) >> 2;
3408         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3409         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3410         dmae->comp_val = DMAE_COMP_VAL;
3411
3412         *stats_comp = 0;
3413 }
3414
3415 static void bnx2x_stats_start(struct bnx2x *bp)
3416 {
3417         if (bp->port.pmf)
3418                 bnx2x_port_stats_init(bp);
3419
3420         else if (bp->func_stx)
3421                 bnx2x_func_stats_init(bp);
3422
3423         bnx2x_hw_stats_post(bp);
3424         bnx2x_storm_stats_post(bp);
3425 }
3426
3427 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3428 {
3429         bnx2x_stats_comp(bp);
3430         bnx2x_stats_pmf_update(bp);
3431         bnx2x_stats_start(bp);
3432 }
3433
3434 static void bnx2x_stats_restart(struct bnx2x *bp)
3435 {
3436         bnx2x_stats_comp(bp);
3437         bnx2x_stats_start(bp);
3438 }
3439
3440 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3441 {
3442         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3443         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3444         struct regpair diff;
3445
3446         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3447         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3448         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3449         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3450         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3451         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3452         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3453         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3454         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3455         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3456         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3457         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3458         UPDATE_STAT64(tx_stat_gt127,
3459                                 tx_stat_etherstatspkts65octetsto127octets);
3460         UPDATE_STAT64(tx_stat_gt255,
3461                                 tx_stat_etherstatspkts128octetsto255octets);
3462         UPDATE_STAT64(tx_stat_gt511,
3463                                 tx_stat_etherstatspkts256octetsto511octets);
3464         UPDATE_STAT64(tx_stat_gt1023,
3465                                 tx_stat_etherstatspkts512octetsto1023octets);
3466         UPDATE_STAT64(tx_stat_gt1518,
3467                                 tx_stat_etherstatspkts1024octetsto1522octets);
3468         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3469         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3470         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3471         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3472         UPDATE_STAT64(tx_stat_gterr,
3473                                 tx_stat_dot3statsinternalmactransmiterrors);
3474         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3475 }
3476
3477 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3478 {
3479         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3480         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3481
3482         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3483         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3484         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3485         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3486         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3487         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3488         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3489         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3490         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3491         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3492         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3493         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3494         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3495         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3496         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3497         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3498         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3499         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3500         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3501         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3502         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3503         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3504         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3505         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3506         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3507         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3508         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3509         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3510         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3511         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3512         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3513 }
3514
3515 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3516 {
3517         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3518         struct nig_stats *old = &(bp->port.old_nig_stats);
3519         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3520         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3521         struct regpair diff;
3522
3523         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3524                 bnx2x_bmac_stats_update(bp);
3525
3526         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3527                 bnx2x_emac_stats_update(bp);
3528
3529         else { /* unreached */
3530                 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3531                 return -1;
3532         }
3533
3534         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3535                       new->brb_discard - old->brb_discard);
3536         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3537                       new->brb_truncate - old->brb_truncate);
3538
3539         UPDATE_STAT64_NIG(egress_mac_pkt0,
3540                                         etherstatspkts1024octetsto1522octets);
3541         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3542
3543         memcpy(old, new, sizeof(struct nig_stats));
3544
3545         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3546                sizeof(struct mac_stx));
3547         estats->brb_drop_hi = pstats->brb_drop_hi;
3548         estats->brb_drop_lo = pstats->brb_drop_lo;
3549
3550         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3551
3552         return 0;
3553 }
3554
3555 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3556 {
3557         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3558         int cl_id = BP_CL_ID(bp);
3559         struct tstorm_per_port_stats *tport =
3560                                 &stats->tstorm_common.port_statistics;
3561         struct tstorm_per_client_stats *tclient =
3562                         &stats->tstorm_common.client_statistics[cl_id];
3563         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3564         struct xstorm_per_client_stats *xclient =
3565                         &stats->xstorm_common.client_statistics[cl_id];
3566         struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3567         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3568         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3569         u32 diff;
3570
3571         /* are storm stats valid? */
3572         if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3573                                                         bp->stats_counter) {
3574                 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3575                    "  tstorm counter (%d) != stats_counter (%d)\n",
3576                    tclient->stats_counter, bp->stats_counter);
3577                 return -1;
3578         }
3579         if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3580                                                         bp->stats_counter) {
3581                 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3582                    "  xstorm counter (%d) != stats_counter (%d)\n",
3583                    xclient->stats_counter, bp->stats_counter);
3584                 return -2;
3585         }
3586
3587         fstats->total_bytes_received_hi =
3588         fstats->valid_bytes_received_hi =
3589                                 le32_to_cpu(tclient->total_rcv_bytes.hi);
3590         fstats->total_bytes_received_lo =
3591         fstats->valid_bytes_received_lo =
3592                                 le32_to_cpu(tclient->total_rcv_bytes.lo);
3593
3594         estats->error_bytes_received_hi =
3595                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
3596         estats->error_bytes_received_lo =
3597                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
3598         ADD_64(estats->error_bytes_received_hi,
3599                estats->rx_stat_ifhcinbadoctets_hi,
3600                estats->error_bytes_received_lo,
3601                estats->rx_stat_ifhcinbadoctets_lo);
3602
3603         ADD_64(fstats->total_bytes_received_hi,
3604                estats->error_bytes_received_hi,
3605                fstats->total_bytes_received_lo,
3606                estats->error_bytes_received_lo);
3607
3608         UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
3609         UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3610                                 total_multicast_packets_received);
3611         UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3612                                 total_broadcast_packets_received);
3613
3614         fstats->total_bytes_transmitted_hi =
3615                                 le32_to_cpu(xclient->total_sent_bytes.hi);
3616         fstats->total_bytes_transmitted_lo =
3617                                 le32_to_cpu(xclient->total_sent_bytes.lo);
3618
3619         UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3620                                 total_unicast_packets_transmitted);
3621         UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3622                                 total_multicast_packets_transmitted);
3623         UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3624                                 total_broadcast_packets_transmitted);
3625
3626         memcpy(estats, &(fstats->total_bytes_received_hi),
3627                sizeof(struct host_func_stats) - 2*sizeof(u32));
3628
3629         estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3630         estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3631         estats->brb_truncate_discard =
3632                                 le32_to_cpu(tport->brb_truncate_discard);
3633         estats->mac_discard = le32_to_cpu(tport->mac_discard);
3634
3635         old_tclient->rcv_unicast_bytes.hi =
3636                                 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
3637         old_tclient->rcv_unicast_bytes.lo =
3638                                 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
3639         old_tclient->rcv_broadcast_bytes.hi =
3640                                 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3641         old_tclient->rcv_broadcast_bytes.lo =
3642                                 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3643         old_tclient->rcv_multicast_bytes.hi =
3644                                 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
3645         old_tclient->rcv_multicast_bytes.lo =
3646                                 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
3647         old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
3648
3649         old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3650         old_tclient->packets_too_big_discard =
3651                                 le32_to_cpu(tclient->packets_too_big_discard);
3652         estats->no_buff_discard =
3653         old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3654         old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3655
3656         old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3657         old_xclient->unicast_bytes_sent.hi =
3658                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3659         old_xclient->unicast_bytes_sent.lo =
3660                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3661         old_xclient->multicast_bytes_sent.hi =
3662                                 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3663         old_xclient->multicast_bytes_sent.lo =
3664                                 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3665         old_xclient->broadcast_bytes_sent.hi =
3666                                 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3667         old_xclient->broadcast_bytes_sent.lo =
3668                                 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3669
3670         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3671
3672         return 0;
3673 }
3674
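/* fold the accumulated driver statistics into the standard
 * struct net_device_stats counters reported to the network stack
 */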
3675 static void bnx2x_net_stats_update(struct bnx2x *bp)
3676 {
3677         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3678         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3679         struct net_device_stats *nstats = &bp->dev->stats;
3680
3681         nstats->rx_packets =
3682                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3683                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3684                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3685
3686         nstats->tx_packets =
3687                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3688                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3689                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3690
3691         nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
3692
3693         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3694
3695         nstats->rx_dropped = old_tclient->checksum_discard +
3696                              estats->mac_discard;
3697         nstats->tx_dropped = 0;
3698
3699         nstats->multicast =
3700                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
3701
3702         nstats->collisions =
3703                         estats->tx_stat_dot3statssinglecollisionframes_lo +
3704                         estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3705                         estats->tx_stat_dot3statslatecollisions_lo +
3706                         estats->tx_stat_dot3statsexcessivecollisions_lo;
3707
3708         estats->jabber_packets_received =
3709                                 old_tclient->packets_too_big_discard +
3710                                 estats->rx_stat_dot3statsframestoolong_lo;
3711
3712         nstats->rx_length_errors =
3713                                 estats->rx_stat_etherstatsundersizepkts_lo +
3714                                 estats->jabber_packets_received;
3715         nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3716         nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3717         nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3718         nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3719         nstats->rx_missed_errors = estats->xxoverflow_discard;
3720
3721         nstats->rx_errors = nstats->rx_length_errors +
3722                             nstats->rx_over_errors +
3723                             nstats->rx_crc_errors +
3724                             nstats->rx_frame_errors +
3725                             nstats->rx_fifo_errors +
3726                             nstats->rx_missed_errors;
3727
3728         nstats->tx_aborted_errors =
3729                         estats->tx_stat_dot3statslatecollisions_lo +
3730                         estats->tx_stat_dot3statsexcessivecollisions_lo;
3731         nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3732         nstats->tx_fifo_errors = 0;
3733         nstats->tx_heartbeat_errors = 0;
3734         nstats->tx_window_errors = 0;
3735
3736         nstats->tx_errors = nstats->tx_aborted_errors +
3737                             nstats->tx_carrier_errors;
3738 }
3739
3740 static void bnx2x_stats_update(struct bnx2x *bp)
3741 {
3742         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3743         int update = 0;
3744
3745         if (*stats_comp != DMAE_COMP_VAL)
3746                 return;
3747
3748         if (bp->port.pmf)
3749                 update = (bnx2x_hw_stats_update(bp) == 0);
3750
3751         update |= (bnx2x_storm_stats_update(bp) == 0);
3752
3753         if (update)
3754                 bnx2x_net_stats_update(bp);
3755
3756         else {
3757                 if (bp->stats_pending) {
3758                         bp->stats_pending++;
3759                         if (bp->stats_pending == 3) {
3760                                 BNX2X_ERR("stats not updated for 3 times\n");
3761                                 bnx2x_panic();
3762                                 return;
3763                         }
3764                 }
3765         }
3766
3767         if (bp->msglevel & NETIF_MSG_TIMER) {
3768                 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3769                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3770                 struct net_device_stats *nstats = &bp->dev->stats;
3771                 int i;
3772
3773                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3774                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
3775                                   "  tx pkt (%lx)\n",
3776                        bnx2x_tx_avail(bp->fp),
3777                        le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3778                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
3779                                   "  rx pkt (%lx)\n",
3780                        (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3781                              bp->fp->rx_comp_cons),
3782                        le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3783                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u\n",
3784                        netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3785                        estats->driver_xoff, estats->brb_drop_lo);
3786                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
3787                         "packets_too_big_discard %u  no_buff_discard %u  "
3788                         "mac_discard %u  mac_filter_discard %u  "
3789                         "xxoverflow_discard %u  brb_truncate_discard %u  "
3790                         "ttl0_discard %u\n",
3791                        old_tclient->checksum_discard,
3792                        old_tclient->packets_too_big_discard,
3793                        old_tclient->no_buff_discard, estats->mac_discard,
3794                        estats->mac_filter_discard, estats->xxoverflow_discard,
3795                        estats->brb_truncate_discard,
3796                        old_tclient->ttl0_discard);
3797
3798                 for_each_queue(bp, i) {
3799                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3800                                bnx2x_fp(bp, i, tx_pkt),
3801                                bnx2x_fp(bp, i, rx_pkt),
3802                                bnx2x_fp(bp, i, rx_calls));
3803                 }
3804         }
3805
3806         bnx2x_hw_stats_post(bp);
3807         bnx2x_storm_stats_post(bp);
3808 }
3809
3810 static void bnx2x_port_stats_stop(struct bnx2x *bp)
3811 {
3812         struct dmae_command *dmae;
3813         u32 opcode;
3814         int loader_idx = PMF_DMAE_C(bp);
3815         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3816
3817         bp->executer_idx = 0;
3818
3819         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3820                   DMAE_CMD_C_ENABLE |
3821                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3822 #ifdef __BIG_ENDIAN
3823                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3824 #else
3825                   DMAE_CMD_ENDIANITY_DW_SWAP |
3826 #endif
3827                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3828                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3829
3830         if (bp->port.port_stx) {
3831
3832                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3833                 if (bp->func_stx)
3834                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3835                 else
3836                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3837                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3838                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3839                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3840                 dmae->dst_addr_hi = 0;
3841                 dmae->len = sizeof(struct host_port_stats) >> 2;
3842                 if (bp->func_stx) {
3843                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3844                         dmae->comp_addr_hi = 0;
3845                         dmae->comp_val = 1;
3846                 } else {
3847                         dmae->comp_addr_lo =
3848                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3849                         dmae->comp_addr_hi =
3850                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3851                         dmae->comp_val = DMAE_COMP_VAL;
3852
3853                         *stats_comp = 0;
3854                 }
3855         }
3856
3857         if (bp->func_stx) {
3858
3859                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3860                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3861                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3862                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3863                 dmae->dst_addr_lo = bp->func_stx >> 2;
3864                 dmae->dst_addr_hi = 0;
3865                 dmae->len = sizeof(struct host_func_stats) >> 2;
3866                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3867                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3868                 dmae->comp_val = DMAE_COMP_VAL;
3869
3870                 *stats_comp = 0;
3871         }
3872 }
3873
3874 static void bnx2x_stats_stop(struct bnx2x *bp)
3875 {
3876         int update = 0;
3877
3878         bnx2x_stats_comp(bp);
3879
3880         if (bp->port.pmf)
3881                 update = (bnx2x_hw_stats_update(bp) == 0);
3882
3883         update |= (bnx2x_storm_stats_update(bp) == 0);
3884
3885         if (update) {
3886                 bnx2x_net_stats_update(bp);
3887
3888                 if (bp->port.pmf)
3889                         bnx2x_port_stats_stop(bp);
3890
3891                 bnx2x_hw_stats_post(bp);
3892                 bnx2x_stats_comp(bp);
3893         }
3894 }
3895
3896 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3897 {
3898 }
3899
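/* statistics state machine: indexed by current state and event, each entry
 * names the handler to run and the state to move to
 */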
3900 static const struct {
3901         void (*action)(struct bnx2x *bp);
3902         enum bnx2x_stats_state next_state;
3903 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3904 /* state        event   */
3905 {
3906 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3907 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
3908 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3909 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3910 },
3911 {
3912 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
3913 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
3914 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
3915 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
3916 }
3917 };
3918
3919 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3920 {
3921         enum bnx2x_stats_state state = bp->stats_state;
3922
3923         bnx2x_stats_stm[state][event].action(bp);
3924         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3925
3926         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3927                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3928                    state, event, bp->stats_state);
3929 }
3930
3931 static void bnx2x_timer(unsigned long data)
3932 {
3933         struct bnx2x *bp = (struct bnx2x *) data;
3934
3935         if (!netif_running(bp->dev))
3936                 return;
3937
3938         if (atomic_read(&bp->intr_sem) != 0)
3939                 goto timer_restart;
3940
3941         if (poll) {
3942                 struct bnx2x_fastpath *fp = &bp->fp[0];
3943                 int rc;
3944
3945                 bnx2x_tx_int(fp, 1000);
3946                 rc = bnx2x_rx_int(fp, 1000);
3947         }
3948
3949         if (!BP_NOMCP(bp)) {
3950                 int func = BP_FUNC(bp);
3951                 u32 drv_pulse;
3952                 u32 mcp_pulse;
3953
3954                 ++bp->fw_drv_pulse_wr_seq;
3955                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3956                 /* TBD - add SYSTEM_TIME */
3957                 drv_pulse = bp->fw_drv_pulse_wr_seq;
3958                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
3959
3960                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
3961                              MCP_PULSE_SEQ_MASK);
3962                 /* The delta between driver pulse and mcp response
3963                  * should be 1 (before mcp response) or 0 (after mcp response)
3964                  */
3965                 if ((drv_pulse != mcp_pulse) &&
3966                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3967                         /* someone lost a heartbeat... */
3968                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3969                                   drv_pulse, mcp_pulse);
3970                 }
3971         }
3972
3973         if ((bp->state == BNX2X_STATE_OPEN) ||
3974             (bp->state == BNX2X_STATE_DISABLED))
3975                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
3976
3977 timer_restart:
3978         mod_timer(&bp->timer, jiffies + bp->current_interval);
3979 }
3980
3981 /* end of Statistics */
3982
3983 /* nic init */
3984
3985 /*
3986  * nic init service functions
3987  */
3988
3989 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
3990 {
3991         int port = BP_PORT(bp);
3992
3993         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
3994                         USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3995                         sizeof(struct ustorm_status_block)/4);
3996         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
3997                         CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3998                         sizeof(struct cstorm_status_block)/4);
3999 }
4000
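/* point the USTORM and CSTORM halves of a fastpath status block at the host
 * DMA buffer, record the owning function and start with host coalescing
 * disabled for every index
 */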
4001 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4002                           dma_addr_t mapping, int sb_id)
4003 {
4004         int port = BP_PORT(bp);
4005         int func = BP_FUNC(bp);
4006         int index;
4007         u64 section;
4008
4009         /* USTORM */
4010         section = ((u64)mapping) + offsetof(struct host_status_block,
4011                                             u_status_block);
4012         sb->u_status_block.status_block_id = sb_id;
4013
4014         REG_WR(bp, BAR_USTRORM_INTMEM +
4015                USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4016         REG_WR(bp, BAR_USTRORM_INTMEM +
4017                ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4018                U64_HI(section));
4019         REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4020                 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4021
4022         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4023                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4024                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4025
4026         /* CSTORM */
4027         section = ((u64)mapping) + offsetof(struct host_status_block,
4028                                             c_status_block);
4029         sb->c_status_block.status_block_id = sb_id;
4030
4031         REG_WR(bp, BAR_CSTRORM_INTMEM +
4032                CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4033         REG_WR(bp, BAR_CSTRORM_INTMEM +
4034                ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4035                U64_HI(section));
4036         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4037                 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4038
4039         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4040                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4041                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4042
4043         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4044 }
4045
4046 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4047 {
4048         int func = BP_FUNC(bp);
4049
4050         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4051                         USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4052                         sizeof(struct ustorm_def_status_block)/4);
4053         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4054                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4055                         sizeof(struct cstorm_def_status_block)/4);
4056         bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4057                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4058                         sizeof(struct xstorm_def_status_block)/4);
4059         bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4060                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4061                         sizeof(struct tstorm_def_status_block)/4);
4062 }
4063
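/* Set up the default status block: latch the four AEU signal registers of
 * each dynamic attention group, program the attention message address and
 * attention number in the HC, and write the host addresses of the U/C/T/X
 * storm default SB sections with host coalescing disabled for all indices.
 */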
4064 static void bnx2x_init_def_sb(struct bnx2x *bp,
4065                               struct host_def_status_block *def_sb,
4066                               dma_addr_t mapping, int sb_id)
4067 {
4068         int port = BP_PORT(bp);
4069         int func = BP_FUNC(bp);
4070         int index, val, reg_offset;
4071         u64 section;
4072
4073         /* ATTN */
4074         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4075                                             atten_status_block);
4076         def_sb->atten_status_block.status_block_id = sb_id;
4077
4078         bp->attn_state = 0;
4079
4080         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4081                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4082
4083         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4084                 bp->attn_group[index].sig[0] = REG_RD(bp,
4085                                                      reg_offset + 0x10*index);
4086                 bp->attn_group[index].sig[1] = REG_RD(bp,
4087                                                reg_offset + 0x4 + 0x10*index);
4088                 bp->attn_group[index].sig[2] = REG_RD(bp,
4089                                                reg_offset + 0x8 + 0x10*index);
4090                 bp->attn_group[index].sig[3] = REG_RD(bp,
4091                                                reg_offset + 0xc + 0x10*index);
4092         }
4093
4094         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4095                              HC_REG_ATTN_MSG0_ADDR_L);
4096
4097         REG_WR(bp, reg_offset, U64_LO(section));
4098         REG_WR(bp, reg_offset + 4, U64_HI(section));
4099
4100         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4101
4102         val = REG_RD(bp, reg_offset);
4103         val |= sb_id;
4104         REG_WR(bp, reg_offset, val);
4105
4106         /* USTORM */
4107         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4108                                             u_def_status_block);
4109         def_sb->u_def_status_block.status_block_id = sb_id;
4110
4111         REG_WR(bp, BAR_USTRORM_INTMEM +
4112                USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4113         REG_WR(bp, BAR_USTRORM_INTMEM +
4114                ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4115                U64_HI(section));
4116         REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4117                 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4118
4119         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4120                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4121                          USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4122
4123         /* CSTORM */
4124         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4125                                             c_def_status_block);
4126         def_sb->c_def_status_block.status_block_id = sb_id;
4127
4128         REG_WR(bp, BAR_CSTRORM_INTMEM +
4129                CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4130         REG_WR(bp, BAR_CSTRORM_INTMEM +
4131                ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4132                U64_HI(section));
4133         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4134                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4135
4136         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4137                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4138                          CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4139
4140         /* TSTORM */
4141         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4142                                             t_def_status_block);
4143         def_sb->t_def_status_block.status_block_id = sb_id;
4144
4145         REG_WR(bp, BAR_TSTRORM_INTMEM +
4146                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4147         REG_WR(bp, BAR_TSTRORM_INTMEM +
4148                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4149                U64_HI(section));
4150         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4151                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4152
4153         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4154                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4155                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4156
4157         /* XSTORM */
4158         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4159                                             x_def_status_block);
4160         def_sb->x_def_status_block.status_block_id = sb_id;
4161
4162         REG_WR(bp, BAR_XSTRORM_INTMEM +
4163                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4164         REG_WR(bp, BAR_XSTRORM_INTMEM +
4165                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4166                U64_HI(section));
4167         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4168                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4169
4170         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4171                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4172                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4173
4174         bp->stats_pending = 0;
4175         bp->set_mac_pending = 0;
4176
4177         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4178 }
4179
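/* Program the host coalescing timeouts for the Rx and Tx completion queue
 * indices of every queue's status block; a tick value of 0 disables host
 * coalescing for that index (the HC_DISABLE flag is set instead).
 */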
4180 static void bnx2x_update_coalesce(struct bnx2x *bp)
4181 {
4182         int port = BP_PORT(bp);
4183         int i;
4184
4185         for_each_queue(bp, i) {
4186                 int sb_id = bp->fp[i].sb_id;
4187
4188                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4189                 REG_WR8(bp, BAR_USTRORM_INTMEM +
4190                         USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4191                                                     U_SB_ETH_RX_CQ_INDEX),
4192                         bp->rx_ticks/12);
4193                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4194                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4195                                                      U_SB_ETH_RX_CQ_INDEX),
4196                          bp->rx_ticks ? 0 : 1);
4197                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4198                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4199                                                      U_SB_ETH_RX_BD_INDEX),
4200                          bp->rx_ticks ? 0 : 1);
4201
4202                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4203                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4204                         CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4205                                                     C_SB_ETH_TX_CQ_INDEX),
4206                         bp->tx_ticks/12);
4207                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4208                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4209                                                      C_SB_ETH_TX_CQ_INDEX),
4210                          bp->tx_ticks ? 0 : 1);
4211         }
4212 }
4213
4214 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4215                                        struct bnx2x_fastpath *fp, int last)
4216 {
4217         int i;
4218
4219         for (i = 0; i < last; i++) {
4220                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4221                 struct sk_buff *skb = rx_buf->skb;
4222
4223                 if (skb == NULL) {
4224                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4225                         continue;
4226                 }
4227
4228                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4229                         pci_unmap_single(bp->pdev,
4230                                          pci_unmap_addr(rx_buf, mapping),
4231                                          bp->rx_buf_use_size,
4232                                          PCI_DMA_FROMDEVICE);
4233
4234                 dev_kfree_skb(skb);
4235                 rx_buf->skb = NULL;
4236         }
4237 }
4238
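/* Build the Rx rings of every queue: derive the Rx buffer sizes from the MTU,
 * pre-allocate the per-queue TPA skb pool (disabling TPA on a queue if the
 * allocation fails), chain the "next page" elements of the SGE, BD and CQE
 * rings, fill the rings with SGE pages and skbs, and publish the initial
 * producers to the chip.
 */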
4239 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4240 {
4241         int func = BP_FUNC(bp);
4242         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4243                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4244         u16 ring_prod, cqe_ring_prod;
4245         int i, j;
4246
4247         bp->rx_buf_use_size = bp->dev->mtu;
4248         bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
4249         bp->rx_buf_size = bp->rx_buf_use_size + 64;
4250
4251         if (bp->flags & TPA_ENABLE_FLAG) {
4252                 DP(NETIF_MSG_IFUP,
4253                    "rx_buf_use_size %d  rx_buf_size %d  effective_mtu %d\n",
4254                    bp->rx_buf_use_size, bp->rx_buf_size,
4255                    bp->dev->mtu + ETH_OVREHEAD);
4256
4257                 for_each_queue(bp, j) {
4258                         struct bnx2x_fastpath *fp = &bp->fp[j];
4259
4260                         for (i = 0; i < max_agg_queues; i++) {
4261                                 fp->tpa_pool[i].skb =
4262                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4263                                 if (!fp->tpa_pool[i].skb) {
4264                                         BNX2X_ERR("Failed to allocate TPA "
4265                                                   "skb pool for queue[%d] - "
4266                                                   "disabling TPA on this "
4267                                                   "queue!\n", j);
4268                                         bnx2x_free_tpa_pool(bp, fp, i);
4269                                         fp->disable_tpa = 1;
4270                                         break;
4271                                 }
4272                                 pci_unmap_addr_set((struct sw_rx_bd *)
4273                                                         &fp->tpa_pool[i],
4274                                                    mapping, 0);
4275                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4276                         }
4277                 }
4278         }
4279
4280         for_each_queue(bp, j) {
4281                 struct bnx2x_fastpath *fp = &bp->fp[j];
4282
4283                 fp->rx_bd_cons = 0;
4284                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4285                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4286
4287                 /* "next page" elements initialization */
4288                 /* SGE ring */
4289                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4290                         struct eth_rx_sge *sge;
4291
4292                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4293                         sge->addr_hi =
4294                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4295                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4296                         sge->addr_lo =
4297                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4298                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4299                 }
4300
4301                 bnx2x_init_sge_ring_bit_mask(fp);
4302
4303                 /* RX BD ring */
4304                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4305                         struct eth_rx_bd *rx_bd;
4306
4307                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4308                         rx_bd->addr_hi =
4309                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4310                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4311                         rx_bd->addr_lo =
4312                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4313                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4314                 }
4315
4316                 /* CQ ring */
4317                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4318                         struct eth_rx_cqe_next_page *nextpg;
4319
4320                         nextpg = (struct eth_rx_cqe_next_page *)
4321                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4322                         nextpg->addr_hi =
4323                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4324                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4325                         nextpg->addr_lo =
4326                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4327                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4328                 }
4329
4330                 /* Allocate SGEs and initialize the ring elements */
4331                 for (i = 0, ring_prod = 0;
4332                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4333
4334                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4335                                 BNX2X_ERR("was only able to allocate "
4336                                           "%d rx sges\n", i);
4337                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4338                                 /* Cleanup already allocated elements */
4339                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4340                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4341                                 fp->disable_tpa = 1;
4342                                 ring_prod = 0;
4343                                 break;
4344                         }
4345                         ring_prod = NEXT_SGE_IDX(ring_prod);
4346                 }
4347                 fp->rx_sge_prod = ring_prod;
4348
4349                 /* Allocate BDs and initialize BD ring */
4350                 fp->rx_comp_cons = 0;
4351                 cqe_ring_prod = ring_prod = 0;
4352                 for (i = 0; i < bp->rx_ring_size; i++) {
4353                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4354                                 BNX2X_ERR("was only able to allocate "
4355                                           "%d rx skbs\n", i);
4356                                 bp->eth_stats.rx_skb_alloc_failed++;
4357                                 break;
4358                         }
4359                         ring_prod = NEXT_RX_IDX(ring_prod);
4360                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4361                         WARN_ON(ring_prod <= i);
4362                 }
4363
4364                 fp->rx_bd_prod = ring_prod;
4365                 /* must not have more available CQEs than BDs */
4366                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4367                                        cqe_ring_prod);
4368                 fp->rx_pkt = fp->rx_calls = 0;
4369
4370                 /* Warning!
4371                  * this will generate an interrupt (to the TSTORM);
4372                  * it must only be done after the chip is initialized
4373                  */
4374                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4375                                      fp->rx_sge_prod);
4376                 if (j != 0)
4377                         continue;
4378
4379                 REG_WR(bp, BAR_USTRORM_INTMEM +
4380                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4381                        U64_LO(fp->rx_comp_mapping));
4382                 REG_WR(bp, BAR_USTRORM_INTMEM +
4383                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4384                        U64_HI(fp->rx_comp_mapping));
4385         }
4386 }
4387
4388 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4389 {
4390         int i, j;
4391
4392         for_each_queue(bp, j) {
4393                 struct bnx2x_fastpath *fp = &bp->fp[j];
4394
4395                 for (i = 1; i <= NUM_TX_RINGS; i++) {
4396                         struct eth_tx_bd *tx_bd =
4397                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4398
4399                         tx_bd->addr_hi =
4400                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4401                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4402                         tx_bd->addr_lo =
4403                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4404                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4405                 }
4406
4407                 fp->tx_pkt_prod = 0;
4408                 fp->tx_pkt_cons = 0;
4409                 fp->tx_bd_prod = 0;
4410                 fp->tx_bd_cons = 0;
4411                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4412                 fp->tx_pkt = 0;
4413         }
4414 }
4415
4416 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4417 {
4418         int func = BP_FUNC(bp);
4419
4420         spin_lock_init(&bp->spq_lock);
4421
4422         bp->spq_left = MAX_SPQ_PENDING;
4423         bp->spq_prod_idx = 0;
4424         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4425         bp->spq_prod_bd = bp->spq;
4426         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4427
4428         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4429                U64_LO(bp->spq_mapping));
4430         REG_WR(bp,
4431                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4432                U64_HI(bp->spq_mapping));
4433
4434         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4435                bp->spq_prod_idx);
4436 }
4437
4438 static void bnx2x_init_context(struct bnx2x *bp)
4439 {
4440         int i;
4441
4442         for_each_queue(bp, i) {
4443                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4444                 struct bnx2x_fastpath *fp = &bp->fp[i];
4445                 u8 sb_id = FP_SB_ID(fp);
4446
4447                 context->xstorm_st_context.tx_bd_page_base_hi =
4448                                                 U64_HI(fp->tx_desc_mapping);
4449                 context->xstorm_st_context.tx_bd_page_base_lo =
4450                                                 U64_LO(fp->tx_desc_mapping);
4451                 context->xstorm_st_context.db_data_addr_hi =
4452                                                 U64_HI(fp->tx_prods_mapping);
4453                 context->xstorm_st_context.db_data_addr_lo =
4454                                                 U64_LO(fp->tx_prods_mapping);
4455                 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4456                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4457
4458                 context->ustorm_st_context.common.sb_index_numbers =
4459                                                 BNX2X_RX_SB_INDEX_NUM;
4460                 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4461                 context->ustorm_st_context.common.status_block_id = sb_id;
4462                 context->ustorm_st_context.common.flags =
4463                         USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4464                 context->ustorm_st_context.common.mc_alignment_size = 64;
4465                 context->ustorm_st_context.common.bd_buff_size =
4466                                                 bp->rx_buf_use_size;
4467                 context->ustorm_st_context.common.bd_page_base_hi =
4468                                                 U64_HI(fp->rx_desc_mapping);
4469                 context->ustorm_st_context.common.bd_page_base_lo =
4470                                                 U64_LO(fp->rx_desc_mapping);
4471                 if (!fp->disable_tpa) {
4472                         context->ustorm_st_context.common.flags |=
4473                                 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4474                                  USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4475                         context->ustorm_st_context.common.sge_buff_size =
4476                                         (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4477                         context->ustorm_st_context.common.sge_page_base_hi =
4478                                                 U64_HI(fp->rx_sge_mapping);
4479                         context->ustorm_st_context.common.sge_page_base_lo =
4480                                                 U64_LO(fp->rx_sge_mapping);
4481                 }
4482
4483                 context->cstorm_st_context.sb_index_number =
4484                                                 C_SB_ETH_TX_CQ_INDEX;
4485                 context->cstorm_st_context.status_block_id = sb_id;
4486
4487                 context->xstorm_ag_context.cdu_reserved =
4488                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4489                                                CDU_REGION_NUMBER_XCM_AG,
4490                                                ETH_CONNECTION_TYPE);
4491                 context->ustorm_ag_context.cdu_usage =
4492                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4493                                                CDU_REGION_NUMBER_UCM_AG,
4494                                                ETH_CONNECTION_TYPE);
4495         }
4496 }
4497
4498 static void bnx2x_init_ind_table(struct bnx2x *bp)
4499 {
4500         int port = BP_PORT(bp);
4501         int i;
4502
4503         if (!is_multi(bp))
4504                 return;
4505
4506         DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
4507         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4508                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4509                         TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
4510                         i % bp->num_queues);
4511
4512         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4513 }
4514
4515 static void bnx2x_set_client_config(struct bnx2x *bp)
4516 {
4517         struct tstorm_eth_client_config tstorm_client = {0};
4518         int port = BP_PORT(bp);
4519         int i;
4520
4521         tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
4522         tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4523         tstorm_client.config_flags =
4524                                 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4525 #ifdef BCM_VLAN
4526         if (bp->rx_mode && bp->vlgrp) {
4527                 tstorm_client.config_flags |=
4528                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4529                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4530         }
4531 #endif
4532
4533         if (bp->flags & TPA_ENABLE_FLAG) {
4534                 tstorm_client.max_sges_for_packet =
4535                         BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT;
4536                 tstorm_client.max_sges_for_packet =
4537                         ((tstorm_client.max_sges_for_packet +
4538                           PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4539                         PAGES_PER_SGE_SHIFT;
4540
4541                 tstorm_client.config_flags |=
4542                                 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4543         }
4544
4545         for_each_queue(bp, i) {
4546                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4547                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4548                        ((u32 *)&tstorm_client)[0]);
4549                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4550                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4551                        ((u32 *)&tstorm_client)[1]);
4552         }
4553
4554         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4555            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4556 }
4557
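/* Translate the driver Rx mode into the per-function TSTORM MAC filter masks
 * (drop-all when Rx is off, the accept-all variants for all-multi and
 * promiscuous) and write them to the chip; unless Rx is disabled, the
 * per-client configuration is refreshed as well.
 */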
4558 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4559 {
4560         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4561         int mode = bp->rx_mode;
4562         int mask = (1 << BP_L_ID(bp));
4563         int func = BP_FUNC(bp);
4564         int i;
4565
4566         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
4567
4568         switch (mode) {
4569         case BNX2X_RX_MODE_NONE: /* no Rx */
4570                 tstorm_mac_filter.ucast_drop_all = mask;
4571                 tstorm_mac_filter.mcast_drop_all = mask;
4572                 tstorm_mac_filter.bcast_drop_all = mask;
4573                 break;
4574         case BNX2X_RX_MODE_NORMAL:
4575                 tstorm_mac_filter.bcast_accept_all = mask;
4576                 break;
4577         case BNX2X_RX_MODE_ALLMULTI:
4578                 tstorm_mac_filter.mcast_accept_all = mask;
4579                 tstorm_mac_filter.bcast_accept_all = mask;
4580                 break;
4581         case BNX2X_RX_MODE_PROMISC:
4582                 tstorm_mac_filter.ucast_accept_all = mask;
4583                 tstorm_mac_filter.mcast_accept_all = mask;
4584                 tstorm_mac_filter.bcast_accept_all = mask;
4585                 break;
4586         default:
4587                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4588                 break;
4589         }
4590
4591         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4592                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4593                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4594                        ((u32 *)&tstorm_mac_filter)[i]);
4595
4596 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4597                    ((u32 *)&tstorm_mac_filter)[i]); */
4598         }
4599
4600         if (mode != BNX2X_RX_MODE_NONE)
4601                 bnx2x_set_client_config(bp);
4602 }
4603
4604 static void bnx2x_init_internal_common(struct bnx2x *bp)
4605 {
4606         int i;
4607
4608         /* Zero this manually as its initialization is
4609            currently missing in the initTool */
4610         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4611                 REG_WR(bp, BAR_USTRORM_INTMEM +
4612                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4613 }
4614
4615 static void bnx2x_init_internal_port(struct bnx2x *bp)
4616 {
4617         int port = BP_PORT(bp);
4618
4619         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4620         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4621         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4622         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4623 }
4624
4625 static void bnx2x_init_internal_func(struct bnx2x *bp)
4626 {
4627         struct tstorm_eth_function_common_config tstorm_config = {0};
4628         struct stats_indication_flags stats_flags = {0};
4629         int port = BP_PORT(bp);
4630         int func = BP_FUNC(bp);
4631         int i;
4632         u16 max_agg_size;
4633
4634         if (is_multi(bp)) {
4635                 tstorm_config.config_flags = MULTI_FLAGS;
4636                 tstorm_config.rss_result_mask = MULTI_MASK;
4637         }
4638
4639         tstorm_config.leading_client_id = BP_L_ID(bp);
4640
4641         REG_WR(bp, BAR_TSTRORM_INTMEM +
4642                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4643                (*(u32 *)&tstorm_config));
4644
4645         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4646         bnx2x_set_storm_rx_mode(bp);
4647
4648         /* reset xstorm per client statistics */
4649         for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4650                 REG_WR(bp, BAR_XSTRORM_INTMEM +
4651                        XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4652                        i*4, 0);
4653         }
4654         /* reset tstorm per client statistics */
4655         for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4656                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4657                        TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4658                        i*4, 0);
4659         }
4660
4661         /* Init statistics related context */
4662         stats_flags.collect_eth = 1;
4663
4664         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4665                ((u32 *)&stats_flags)[0]);
4666         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4667                ((u32 *)&stats_flags)[1]);
4668
4669         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4670                ((u32 *)&stats_flags)[0]);
4671         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4672                ((u32 *)&stats_flags)[1]);
4673
4674         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4675                ((u32 *)&stats_flags)[0]);
4676         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4677                ((u32 *)&stats_flags)[1]);
4678
4679         REG_WR(bp, BAR_XSTRORM_INTMEM +
4680                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4681                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4682         REG_WR(bp, BAR_XSTRORM_INTMEM +
4683                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4684                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4685
4686         REG_WR(bp, BAR_TSTRORM_INTMEM +
4687                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4688                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4689         REG_WR(bp, BAR_TSTRORM_INTMEM +
4690                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4691                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4692
4693         if (CHIP_IS_E1H(bp)) {
4694                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4695                         IS_E1HMF(bp));
4696                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4697                         IS_E1HMF(bp));
4698                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4699                         IS_E1HMF(bp));
4700                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4701                         IS_E1HMF(bp));
4702
4703                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4704                          bp->e1hov);
4705         }
4706
4707         /* Init CQ ring mapping and aggregation size */
4708         max_agg_size = min((u32)(bp->rx_buf_use_size +
4709                                  8*BCM_PAGE_SIZE*PAGES_PER_SGE),
4710                            (u32)0xffff);
4711         for_each_queue(bp, i) {
4712                 struct bnx2x_fastpath *fp = &bp->fp[i];
4713
4714                 REG_WR(bp, BAR_USTRORM_INTMEM +
4715                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4716                        U64_LO(fp->rx_comp_mapping));
4717                 REG_WR(bp, BAR_USTRORM_INTMEM +
4718                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4719                        U64_HI(fp->rx_comp_mapping));
4720
4721                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4722                          USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4723                          max_agg_size);
4724         }
4725 }
4726
4727 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4728 {
4729         switch (load_code) {
4730         case FW_MSG_CODE_DRV_LOAD_COMMON:
4731                 bnx2x_init_internal_common(bp);
4732                 /* no break */
4733
4734         case FW_MSG_CODE_DRV_LOAD_PORT:
4735                 bnx2x_init_internal_port(bp);
4736                 /* no break */
4737
4738         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4739                 bnx2x_init_internal_func(bp);
4740                 break;
4741
4742         default:
4743                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4744                 break;
4745         }
4746 }
4747
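/* Top-level NIC init: set up every fast path and its status block, the
 * default status block, coalescing, the Rx/Tx/slow-path rings, the connection
 * contexts, the internal (storm RAM) configuration according to the MCP load
 * code, the RSS indirection table, and finally enable interrupts.
 */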
4748 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4749 {
4750         int i;
4751
4752         for_each_queue(bp, i) {
4753                 struct bnx2x_fastpath *fp = &bp->fp[i];
4754
4755                 fp->bp = bp;
4756                 fp->state = BNX2X_FP_STATE_CLOSED;
4757                 fp->index = i;
4758                 fp->cl_id = BP_L_ID(bp) + i;
4759                 fp->sb_id = fp->cl_id;
4760                 DP(NETIF_MSG_IFUP,
4761                    "bnx2x_init_sb(%p,%p) index %d  cl_id %d  sb %d\n",
4762                    bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4763                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4764                               FP_SB_ID(fp));
4765                 bnx2x_update_fpsb_idx(fp);
4766         }
4767
4768         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4769                           DEF_SB_ID);
4770         bnx2x_update_dsb_idx(bp);
4771         bnx2x_update_coalesce(bp);
4772         bnx2x_init_rx_rings(bp);
4773         bnx2x_init_tx_ring(bp);
4774         bnx2x_init_sp_ring(bp);
4775         bnx2x_init_context(bp);
4776         bnx2x_init_internal(bp, load_code);
4777         bnx2x_init_ind_table(bp);
4778         bnx2x_int_enable(bp);
4779 }
4780
4781 /* end of nic init */
4782
4783 /*
4784  * gzip service functions
4785  */
4786
4787 static int bnx2x_gunzip_init(struct bnx2x *bp)
4788 {
4789         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4790                                               &bp->gunzip_mapping);
4791         if (bp->gunzip_buf  == NULL)
4792                 goto gunzip_nomem1;
4793
4794         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4795         if (bp->strm  == NULL)
4796                 goto gunzip_nomem2;
4797
4798         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4799                                       GFP_KERNEL);
4800         if (bp->strm->workspace == NULL)
4801                 goto gunzip_nomem3;
4802
4803         return 0;
4804
4805 gunzip_nomem3:
4806         kfree(bp->strm);
4807         bp->strm = NULL;
4808
4809 gunzip_nomem2:
4810         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4811                             bp->gunzip_mapping);
4812         bp->gunzip_buf = NULL;
4813
4814 gunzip_nomem1:
4815         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
4816                " decompression\n", bp->dev->name);
4817         return -ENOMEM;
4818 }
4819
4820 static void bnx2x_gunzip_end(struct bnx2x *bp)
4821 {
4822         kfree(bp->strm->workspace);
4823
4824         kfree(bp->strm);
4825         bp->strm = NULL;
4826
4827         if (bp->gunzip_buf) {
4828                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4829                                     bp->gunzip_mapping);
4830                 bp->gunzip_buf = NULL;
4831         }
4832 }
4833
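/* Decompress a gzip-wrapped firmware blob into bp->gunzip_buf: validate the
 * gzip magic and deflate method, skip the fixed header and the optional
 * original-file-name field, and run a raw zlib inflate (negative windowBits,
 * no zlib header) on the payload; gunzip_outlen is stored in 32-bit words.
 */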
4834 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4835 {
4836         int n, rc;
4837
4838         /* check gzip header */
4839         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4840                 return -EINVAL;
4841
4842         n = 10;
4843
4844 #define FNAME                           0x8
4845
4846         if (zbuf[3] & FNAME)
4847                 while ((zbuf[n++] != 0) && (n < len));
4848
4849         bp->strm->next_in = zbuf + n;
4850         bp->strm->avail_in = len - n;
4851         bp->strm->next_out = bp->gunzip_buf;
4852         bp->strm->avail_out = FW_BUF_SIZE;
4853
4854         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4855         if (rc != Z_OK)
4856                 return rc;
4857
4858         rc = zlib_inflate(bp->strm, Z_FINISH);
4859         if ((rc != Z_OK) && (rc != Z_STREAM_END))
4860                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4861                        bp->dev->name, bp->strm->msg);
4862
4863         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4864         if (bp->gunzip_outlen & 0x3)
4865                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4866                                     " gunzip_outlen (%d) not aligned\n",
4867                        bp->dev->name, bp->gunzip_outlen);
4868         bp->gunzip_outlen >>= 2;
4869
4870         zlib_inflateEnd(bp->strm);
4871
4872         if (rc == Z_STREAM_END)
4873                 return 0;
4874
4875         return rc;
4876 }
4877
4878 /* nic load/unload */
4879
4880 /*
4881  * General service functions
4882  */
4883
4884 /* send a NIG loopback debug packet */
4885 static void bnx2x_lb_pckt(struct bnx2x *bp)
4886 {
4887         u32 wb_write[3];
4888
4889         /* Ethernet source and destination addresses */
4890         wb_write[0] = 0x55555555;
4891         wb_write[1] = 0x55555555;
4892         wb_write[2] = 0x20;             /* SOP */
4893         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4894
4895         /* NON-IP protocol */
4896         wb_write[0] = 0x09000000;
4897         wb_write[1] = 0x55555555;
4898         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
4899         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4900 }
4901
4902 /* Some of the internal memories
4903  * are not directly readable from the driver;
4904  * to test them we send debug packets
4905  */
4906 static int bnx2x_int_mem_test(struct bnx2x *bp)
4907 {
4908         int factor;
4909         int count, i;
4910         u32 val = 0;
4911
4912         if (CHIP_REV_IS_FPGA(bp))
4913                 factor = 120;
4914         else if (CHIP_REV_IS_EMUL(bp))
4915                 factor = 200;
4916         else
4917                 factor = 1;
4918
4919         DP(NETIF_MSG_HW, "start part1\n");
4920
4921         /* Disable inputs of parser neighbor blocks */
4922         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4923         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4924         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4925         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4926
4927         /*  Write 0 to parser credits for CFC search request */
4928         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4929
4930         /* send Ethernet packet */
4931         bnx2x_lb_pckt(bp);
4932
4933         /* TODO: should the NIG statistics be reset here? */
4934         /* Wait until NIG register shows 1 packet of size 0x10 */
4935         count = 1000 * factor;
4936         while (count) {
4937
4938                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4939                 val = *bnx2x_sp(bp, wb_data[0]);
4940                 if (val == 0x10)
4941                         break;
4942
4943                 msleep(10);
4944                 count--;
4945         }
4946         if (val != 0x10) {
4947                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
4948                 return -1;
4949         }
4950
4951         /* Wait until PRS register shows 1 packet */
4952         count = 1000 * factor;
4953         while (count) {
4954                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4955                 if (val == 1)
4956                         break;
4957
4958                 msleep(10);
4959                 count--;
4960         }
4961         if (val != 0x1) {
4962                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4963                 return -2;
4964         }
4965
4966         /* Reset and init BRB, PRS */
4967         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4968         msleep(50);
4969         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4970         msleep(50);
4971         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
4972         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
4973
4974         DP(NETIF_MSG_HW, "part2\n");
4975
4976         /* Disable inputs of parser neighbor blocks */
4977         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4978         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4979         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4980         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4981
4982         /* Write 0 to parser credits for CFC search request */
4983         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4984
4985         /* send 10 Ethernet packets */
4986         for (i = 0; i < 10; i++)
4987                 bnx2x_lb_pckt(bp);
4988
4989         /* Wait until NIG register shows 10 + 1
4990            packets of size 11*0x10 = 0xb0 */
4991         count = 1000 * factor;
4992         while (count) {
4993
4994                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4995                 val = *bnx2x_sp(bp, wb_data[0]);
4996                 if (val == 0xb0)
4997                         break;
4998
4999                 msleep(10);
5000                 count--;
5001         }
5002         if (val != 0xb0) {
5003                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5004                 return -3;
5005         }
5006
5007         /* Wait until PRS register shows 2 packets */
5008         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5009         if (val != 2)
5010                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5011
5012         /* Write 1 to parser credits for CFC search request */
5013         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5014
5015         /* Wait until PRS register shows 3 packets */
5016         msleep(10 * factor);
5017         /* the PRS register should now show 3 packets */
5018         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5019         if (val != 3)
5020                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5021
5022         /* clear NIG EOP FIFO */
5023         for (i = 0; i < 11; i++)
5024                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5025         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5026         if (val != 1) {
5027                 BNX2X_ERR("clear of NIG failed\n");
5028                 return -4;
5029         }
5030
5031         /* Reset and init BRB, PRS, NIG */
5032         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5033         msleep(50);
5034         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5035         msleep(50);
5036         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5037         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5038 #ifndef BCM_ISCSI
5039         /* set NIC mode */
5040         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5041 #endif
5042
5043         /* Enable inputs of parser neighbor blocks */
5044         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5045         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5046         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5047         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5048
5049         DP(NETIF_MSG_HW, "done\n");
5050
5051         return 0; /* OK */
5052 }
5053
5054 static void enable_blocks_attention(struct bnx2x *bp)
5055 {
5056         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5057         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5058         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5059         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5060         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5061         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5062         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5063         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5064         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5065 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5066 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5067         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5068         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5069         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5070 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5071 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5072         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5073         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5074         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5075         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5076 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5077 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5078         if (CHIP_REV_IS_FPGA(bp))
5079                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5080         else
5081                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5082         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5083         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5084         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5085 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5086 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5087         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5088         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5089 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5090         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);         /* bit 3,4 masked */
5091 }
5092
5093
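/* Chip-wide (common) hardware init: take the blocks out of reset, initialize
 * PXP/PXP2, DMAE and the storm/CM blocks, clear the storm internal memories,
 * run the internal memory self test on E1 after a power-up, set up the fan
 * failure SPIO where the board requires it, enable block attentions,
 * advertise TPA to the TSTORM and initialize the common PHY through the MCP.
 */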
5094 static int bnx2x_init_common(struct bnx2x *bp)
5095 {
5096         u32 val, i;
5097
5098         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
5099
5100         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5101         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5102
5103         bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5104         if (CHIP_IS_E1H(bp))
5105                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5106
5107         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5108         msleep(30);
5109         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5110
5111         bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5112         if (CHIP_IS_E1(bp)) {
5113                 /* enable HW interrupt from PXP on USDM overflow
5114                    bit 16 on INT_MASK_0 */
5115                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5116         }
5117
5118         bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5119         bnx2x_init_pxp(bp);
5120
5121 #ifdef __BIG_ENDIAN
5122         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5123         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5124         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5125         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5126         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5127         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
5128
5129 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5130         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5131         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5132         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5133         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5134 #endif
5135
5136         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5137 #ifdef BCM_ISCSI
5138         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5139         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5140         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5141 #endif
5142
5143         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5144                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5145
5146         /* let the HW do its magic ... */
5147         msleep(100);
5148         /* finish PXP init */
5149         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5150         if (val != 1) {
5151                 BNX2X_ERR("PXP2 CFG failed\n");
5152                 return -EBUSY;
5153         }
5154         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5155         if (val != 1) {
5156                 BNX2X_ERR("PXP2 RD_INIT failed\n");
5157                 return -EBUSY;
5158         }
5159
5160         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5161         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5162
5163         bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5164
5165         /* clean the DMAE memory */
5166         bp->dmae_ready = 1;
5167         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5168
5169         bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5170         bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5171         bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5172         bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5173
5174         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5175         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5176         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5177         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5178
5179         bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5180         /* soft reset pulse */
5181         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5182         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5183
5184 #ifdef BCM_ISCSI
5185         bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5186 #endif
5187
5188         bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5189         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5190         if (!CHIP_REV_IS_SLOW(bp)) {
5191                 /* enable hw interrupt from doorbell Q */
5192                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5193         }
5194
5195         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5196         if (CHIP_REV_IS_SLOW(bp)) {
5197                 /* fix for emulation and FPGA for no pause */
5198                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5199                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5200                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5201                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5202         }
5203
5204         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5205         /* set NIC mode */
5206         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5207         if (CHIP_IS_E1H(bp))
5208                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5209
5210         bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5211         bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5212         bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5213         bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5214
5215         if (CHIP_IS_E1H(bp)) {
5216                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5217                                 STORM_INTMEM_SIZE_E1H/2);
5218                 bnx2x_init_fill(bp,
5219                                 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5220                                 0, STORM_INTMEM_SIZE_E1H/2);
5221                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5222                                 STORM_INTMEM_SIZE_E1H/2);
5223                 bnx2x_init_fill(bp,
5224                                 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5225                                 0, STORM_INTMEM_SIZE_E1H/2);
5226                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5227                                 STORM_INTMEM_SIZE_E1H/2);
5228                 bnx2x_init_fill(bp,
5229                                 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5230                                 0, STORM_INTMEM_SIZE_E1H/2);
5231                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5232                                 STORM_INTMEM_SIZE_E1H/2);
5233                 bnx2x_init_fill(bp,
5234                                 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5235                                 0, STORM_INTMEM_SIZE_E1H/2);
5236         } else { /* E1 */
5237                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5238                                 STORM_INTMEM_SIZE_E1);
5239                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5240                                 STORM_INTMEM_SIZE_E1);
5241                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5242                                 STORM_INTMEM_SIZE_E1);
5243                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5244                                 STORM_INTMEM_SIZE_E1);
5245         }
5246
5247         bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5248         bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5249         bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5250         bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5251
5252         /* sync semi rtc */
5253         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5254                0x80000000);
5255         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5256                0x80000000);
5257
5258         bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5259         bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5260         bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5261
5262         REG_WR(bp, SRC_REG_SOFT_RST, 1);
5263         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5264                 REG_WR(bp, i, 0xc0cac01a);
5265                 /* TODO: replace with something meaningful */
5266         }
5267         if (CHIP_IS_E1H(bp))
5268                 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5269         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5270
5271         if (sizeof(union cdu_context) != 1024)
5272                 /* we currently assume that a context is 1024 bytes */
5273                 printk(KERN_ALERT PFX "please adjust the size of"
5274                        " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5275
5276         bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5277         val = (4 << 24) + (0 << 12) + 1024;
5278         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5279         if (CHIP_IS_E1(bp)) {
5280                 /* !!! fix pxp client credit until excel update */
5281                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5282                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5283         }
5284
5285         bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5286         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5287
5288         bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5289         bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5290
5291         /* PXPCS COMMON comes here */
5292         /* Reset PCIE errors for debug */
5293         REG_WR(bp, 0x2814, 0xffffffff);
5294         REG_WR(bp, 0x3820, 0xffffffff);
5295
5296         /* EMAC0 COMMON comes here */
5297         /* EMAC1 COMMON comes here */
5298         /* DBU COMMON comes here */
5299         /* DBG COMMON comes here */
5300
5301         bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5302         if (CHIP_IS_E1H(bp)) {
5303                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5304                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5305         }
5306
5307         if (CHIP_REV_IS_SLOW(bp))
5308                 msleep(200);
5309
5310         /* finish CFC init */
5311         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5312         if (val != 1) {
5313                 BNX2X_ERR("CFC LL_INIT failed\n");
5314                 return -EBUSY;
5315         }
5316         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5317         if (val != 1) {
5318                 BNX2X_ERR("CFC AC_INIT failed\n");
5319                 return -EBUSY;
5320         }
5321         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5322         if (val != 1) {
5323                 BNX2X_ERR("CFC CAM_INIT failed\n");
5324                 return -EBUSY;
5325         }
5326         REG_WR(bp, CFC_REG_DEBUG0, 0);
5327
5328         /* read NIG statistic
5329            to see if this is the first time up since power-on */
5330         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5331         val = *bnx2x_sp(bp, wb_data[0]);
5332
5333         /* do internal memory self test */
5334         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5335                 BNX2X_ERR("internal mem self test failed\n");
5336                 return -EBUSY;
5337         }
5338
5339         switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5340         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5341                 /* Fan failure is indicated by SPIO 5 */
5342                 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5343                                MISC_REGISTERS_SPIO_INPUT_HI_Z);
5344
5345                 /* set to active low mode */
5346                 val = REG_RD(bp, MISC_REG_SPIO_INT);
5347                 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5348                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5349                 REG_WR(bp, MISC_REG_SPIO_INT, val);
5350
5351                 /* enable interrupt to signal the IGU */
5352                 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5353                 val |= (1 << MISC_REGISTERS_SPIO_5);
5354                 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5355                 break;
5356
5357         default:
5358                 break;
5359         }
5360
5361         /* clear PXP2 attentions */
5362         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5363
5364         enable_blocks_attention(bp);
5365
5366         if (bp->flags & TPA_ENABLE_FLAG) {
5367                 struct tstorm_eth_tpa_exist tmp = {0};
5368
5369                 tmp.tpa_exist = 1;
5370
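                /* the structure is pushed into TSTORM internal memory as two
                 * consecutive 32-bit words (assuming, as the two writes below
                 * do, that it is 8 bytes long) */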
5371                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
5372                        ((u32 *)&tmp)[0]);
5373                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
5374                        ((u32 *)&tmp)[1]);
5375         }
5376
5377         if (!BP_NOMCP(bp)) {
5378                 bnx2x_acquire_phy_lock(bp);
5379                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5380                 bnx2x_release_phy_lock(bp);
5381         } else
5382                 BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
5383
5384         return 0;
5385 }
5386
5387 static int bnx2x_init_port(struct bnx2x *bp)
5388 {
5389         int port = BP_PORT(bp);
5390         u32 val;
5391
5392         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
5393
5394         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5395
5396         /* Port PXP comes here */
5397         /* Port PXP2 comes here */
5398 #ifdef BCM_ISCSI
5399         /* Port0  1
5400          * Port1  385 */
5401         i++;
5402         wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5403         wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5404         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5405         REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5406
5407         /* Port0  2
5408          * Port1  386 */
5409         i++;
5410         wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5411         wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5412         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5413         REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5414
5415         /* Port0  3
5416          * Port1  387 */
5417         i++;
5418         wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5419         wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5420         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5421         REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5422 #endif
5423         /* Port CMs come here */
5424
5425         /* Port QM comes here */
5426 #ifdef BCM_ISCSI
5427         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5428         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5429
5430         bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5431                              func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5432 #endif
5433         /* Port DQ comes here */
5434         /* Port BRB1 comes here */
5435         /* Port PRS comes here */
5436         /* Port TSDM comes here */
5437         /* Port CSDM comes here */
5438         /* Port USDM comes here */
5439         /* Port XSDM comes here */
5440         bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5441                              port ? TSEM_PORT1_END : TSEM_PORT0_END);
5442         bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5443                              port ? USEM_PORT1_END : USEM_PORT0_END);
5444         bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5445                              port ? CSEM_PORT1_END : CSEM_PORT0_END);
5446         bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5447                              port ? XSEM_PORT1_END : XSEM_PORT0_END);
5448         /* Port UPB comes here */
5449         /* Port XPB comes here */
5450
5451         bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5452                              port ? PBF_PORT1_END : PBF_PORT0_END);
5453
5454         /* configure PBF to work without PAUSE for MTU 9000 */
5455         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5456
5457         /* update threshold */
5458         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5459         /* update init credit */
5460         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
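        /* The arithmetic above, spelled out: the 9040-byte threshold
         * (MTU 9000 plus some header/CRC slack - an assumption, the
         * constant is taken as-is) is programmed in 16-byte units,
         * i.e. 9040/16 = 565; the initial credit then works out to
         * 565 + 553 - 22 = 1096 units.
         */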
5461
5462         /* probe changes */
5463         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5464         msleep(5);
5465         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5466
5467 #ifdef BCM_ISCSI
5468         /* tell the searcher where the T2 table is */
5469         REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5470
5471         wb_write[0] = U64_LO(bp->t2_mapping);
5472         wb_write[1] = U64_HI(bp->t2_mapping);
5473         REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5474         wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5475         wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5476         REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5477
5478         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5479         /* Port SRCH comes here */
5480 #endif
5481         /* Port CDU comes here */
5482         /* Port CFC comes here */
5483
5484         if (CHIP_IS_E1(bp)) {
5485                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5486                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5487         }
5488         bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5489                              port ? HC_PORT1_END : HC_PORT0_END);
5490
5491         bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5492                                     MISC_AEU_PORT0_START,
5493                              port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5494         /* init aeu_mask_attn_func_0/1:
5495          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5496          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5497          *             bits 4-7 are used for "per vn group attention" */
5498         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5499                (IS_E1HMF(bp) ? 0xF7 : 0x7));
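        /* Decoding the two mask values written above (worked example):
         *   0x07 = 0000 0111b - SF mode: bits 0-2 set (in use), bits 3-7 clear (masked)
         *   0xF7 = 1111 0111b - MF mode: bits 0-2 and 4-7 set, only bit 3 stays masked
         */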
5500
5501         /* Port PXPCS comes here */
5502         /* Port EMAC0 comes here */
5503         /* Port EMAC1 comes here */
5504         /* Port DBU comes here */
5505         /* Port DBG comes here */
5506         bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5507                              port ? NIG_PORT1_END : NIG_PORT0_END);
5508
5509         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5510
5511         if (CHIP_IS_E1H(bp)) {
5512                 u32 wsum;
5513                 struct cmng_struct_per_port m_cmng_port;
5514                 int vn;
5515
5516                 /* 0x2 disable e1hov, 0x1 enable */
5517                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5518                        (IS_E1HMF(bp) ? 0x1 : 0x2));
5519
5520                 /* Init RATE SHAPING and FAIRNESS contexts.
5521                    Initialize as if there is a 10G link. */
5522                 wsum = bnx2x_calc_vn_wsum(bp);
5523                 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5524                 if (IS_E1HMF(bp))
5525                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
5526                                 bnx2x_init_vn_minmax(bp, 2*vn + port,
5527                                         wsum, 10000, &m_cmng_port);
5528         }
5529
5530         /* Port MCP comes here */
5531         /* Port DMAE comes here */
5532
5533         switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5534         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5535                 /* add SPIO 5 to group 0 */
5536                 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5537                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5538                 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5539                 break;
5540
5541         default:
5542                 break;
5543         }
5544
5545         bnx2x__link_reset(bp);
5546
5547         return 0;
5548 }
5549
5550 #define ILT_PER_FUNC            (768/2)
5551 #define FUNC_ILT_BASE(func)     ((func) * ILT_PER_FUNC)
5552 /* the physical address is shifted right 12 bits and has a 1=valid bit
5553    added at bit 52 (the 53rd bit); since this is a wide register(TM)
5554    we split it into two 32-bit writes
5555    (a worked example follows the macro definitions below)
5556  */
5557 #define ONCHIP_ADDR1(x)         ((u32)(((u64)(x) >> 12) & 0xFFFFFFFF))
5558 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)(x) >> 44)))
5559 #define PXP_ONE_ILT(x)          (((x) << 10) | (x))
5560 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | (f))
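/* Worked example (illustrative values only): with ILT_PER_FUNC = 768/2 = 384,
 * function 1 gets ILT lines 384..767 (FUNC_ILT_BASE(1) = 384).  For a
 * hypothetical DMA address addr = 0x0000001234567000:
 *      ONCHIP_ADDR1(addr) = (addr >> 12) & 0xFFFFFFFF = 0x01234567
 *      ONCHIP_ADDR2(addr) = (1 << 20) | (addr >> 44)  = 0x00100000
 * i.e. the low word carries the page-aligned address and the high word
 * carries the remaining address bits plus the valid bit at bit 52.
 * PXP_ONE_ILT(5) = (5 << 10) | 5 = 0x1405 encodes a one-line range whose
 * first and last ILT line are both 5, matching PXP_ILT_RANGE(5, 5).
 */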
5561
5562 #define CNIC_ILT_LINES          0
5563
5564 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5565 {
5566         int reg;
5567
5568         if (CHIP_IS_E1H(bp))
5569                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5570         else /* E1 */
5571                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5572
5573         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5574 }
5575
5576 static int bnx2x_init_func(struct bnx2x *bp)
5577 {
5578         int port = BP_PORT(bp);
5579         int func = BP_FUNC(bp);
5580         int i;
5581
5582         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
5583
5584         i = FUNC_ILT_BASE(func);
5585
5586         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5587         if (CHIP_IS_E1H(bp)) {
5588                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5589                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5590         } else /* E1 */
5591                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5592                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
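        /* With CNIC_ILT_LINES defined as 0 above, both the E1H first/last
         * registers and the E1 range collapse to a single ILT line per
         * function - the one programmed by bnx2x_ilt_wr() just before.
         */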
5593
5594
5595         if (CHIP_IS_E1H(bp)) {
5596                 for (i = 0; i < 9; i++)
5597                         bnx2x_init_block(bp,
5598                                          cm_start[func][i], cm_end[func][i]);
5599
5600                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5601                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5602         }
5603
5604         /* HC init per function */
5605         if (CHIP_IS_E1H(bp)) {
5606                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5607
5608                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5609                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5610         }
5611         bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5612
5613         if (CHIP_IS_E1H(bp))
5614                 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5615
5616         /* Reset PCIE errors for debug */
5617         REG_WR(bp, 0x2114, 0xffffffff);
5618         REG_WR(bp, 0x2120, 0xffffffff);
5619
5620         return 0;
5621 }
5622
5623 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5624 {
5625         int i, rc = 0;
5626
5627         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
5628            BP_FUNC(bp), load_code);
5629
5630         bp->dmae_ready = 0;
5631         mutex_init(&bp->dmae_mutex);
5632         bnx2x_gunzip_init(bp);
5633
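        /* The cases below deliberately fall through (note the "no break"
         * comments): a COMMON load also performs the PORT and FUNCTION
         * init stages, and a PORT load also performs the FUNCTION stage.
         */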
5634         switch (load_code) {
5635         case FW_MSG_CODE_DRV_LOAD_COMMON:
5636                 rc = bnx2x_init_common(bp);
5637                 if (rc)
5638                         goto init_hw_err;
5639                 /* no break */
5640
5641         case FW_MSG_CODE_DRV_LOAD_PORT:
5642                 bp->dmae_ready = 1;
5643                 rc = bnx2x_init_port(bp);
5644                 if (rc)
5645                         goto init_hw_err;
5646                 /* no break */
5647
5648         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5649                 bp->dmae_ready = 1;
5650                 rc = bnx2x_init_func(bp);
5651                 if (rc)
5652                         goto init_hw_err;
5653                 break;
5654
5655         default:
5656                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5657                 break;
5658         }
5659
5660         if (!BP_NOMCP(bp)) {
5661                 int func = BP_FUNC(bp);
5662
5663                 bp->fw_drv_pulse_wr_seq =
5664                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
5665                                  DRV_PULSE_SEQ_MASK);
5666                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5667                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
5668                    bp->fw_drv_pulse_wr_seq, bp->func_stx);
5669         } else
5670                 bp->func_stx = 0;
5671
5672         /* this needs to be done before gunzip end */
5673         bnx2x_zero_def_sb(bp);
5674         for_each_queue(bp, i)
5675                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5676
5677 init_hw_err:
5678         bnx2x_gunzip_end(bp);
5679
5680         return rc;
5681 }
5682
5683 /* send the MCP a request, block until there is a reply */
5684 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5685 {
5686         int func = BP_FUNC(bp);
5687         u32 seq = ++bp->fw_seq;
5688         u32 rc = 0;
5689         u32 cnt = 1;
5690         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
5691
5692         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
5693         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
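        /* The value written above combines the command code with a rolling
         * sequence number (the bits selected by FW_MSG_SEQ_NUMBER_MASK).
         * The MCP is expected to echo the sequence number back in
         * fw_mb_header, which is how the polling loop below matches the
         * reply to this request; the response code itself is then
         * extracted with FW_MSG_CODE_MASK.
         */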
5694
5695         do {
5696                 /* let the FW do its magic ... */
5697                 msleep(delay);
5698
5699                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
5700
5701                 /* Give the FW up to 2 seconds (200*10ms) */
5702         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5703
5704         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5705            cnt*delay, rc, seq);
5706
5707         /* is this a reply to our command? */
5708         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5709                 rc &= FW_MSG_CODE_MASK;
5710
5711         } else {
5712                 /* FW BUG! */
5713                 BNX2X_ERR("FW failed to respond!\n");
5714                 bnx2x_fw_dump(bp);
5715                 rc = 0;
5716         }
5717
5718         return rc;
5719 }
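/* Typical usage (sketch only - the exact call sites live elsewhere in this
 * file): during nic load the driver issues something like
 *      load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
 * and treats a returned 0 (no or invalid MCP reply) as a fatal load error.
 */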
5720
5721 static void bnx2x_free_mem(struct bnx2x *bp)
5722 {
5723
5724 #define BNX2X_PCI_FREE(x, y, size) \
5725         do { \
5726                 if (x) { \
5727                         pci_free_consistent(bp->pdev, size, x, y); \
5728                         x = NULL; \
5729                         y = 0; \
5730                 } \
5731         } while (0)
5732
5733 #define BNX2X_FREE(x) \
5734         do { \
5735                 if (x) { \
5736                         vfree(x); \
5737                         x = NULL; \
5738                 } \
5739         } while (0)
5740
5741         int i;
5742
5743         /* fastpath */
5744         for_each_queue(bp, i) {
5745
5746                 /* Status blocks */
5747                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5748                                bnx2x_fp(bp, i, status_blk_mapping),
5749                                sizeof(struct host_status_block) +
5750                                sizeof(struct eth_tx_db_data));
5751
5752                 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5753                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5754                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5755                                bnx2x_fp(bp, i, tx_desc_mapping),
5756                                sizeof(struct eth_tx_bd) * NUM_TX_BD);
5757
5758                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5759                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5760                                bnx2x_fp(bp, i, rx_desc_mapping),
5761                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
5762
5763                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5764                                bnx2x_fp(bp, i, rx_comp_mapping),
5765                                sizeof(struct eth_fast_path_rx_cqe) *
5766                                NUM_RCQ_BD);
5767
5768                 /* SGE ring */
5769                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5770                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5771                                bnx2x_fp(bp, i, rx_sge_mapping),
5772                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5773         }
5774         /* end of fastpath */
5775
5776         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5777                        sizeof(struct host_def_status_block));
5778
5779         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5780                        sizeof(struct bnx2x_slowpath));
5781
5782 #ifdef BCM_ISCSI
5783         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5784         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5785         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5786         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5787 #endif
5788         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5789
5790 #undef BNX2X_PCI_FREE
5791 #undef BNX2X_FREE
5792 }
5793
5794 static int bnx2