bnx2x: New FW
drivers/net/bnx2x_main.c (from linux-2.6.git)
1 /* bnx2x_main.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2009 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h>  /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
51 #include <linux/io.h>
52
53 #include "bnx2x_reg.h"
54 #include "bnx2x_fw_defs.h"
55 #include "bnx2x_hsi.h"
56 #include "bnx2x_link.h"
57 #include "bnx2x.h"
58 #include "bnx2x_init.h"
59
60 #define DRV_MODULE_VERSION      "1.45.26"
61 #define DRV_MODULE_RELDATE      "2009/01/26"
62 #define BNX2X_BC_VER            0x040200
63
64 /* Time in jiffies before concluding the transmitter is hung */
65 #define TX_TIMEOUT              (5*HZ)
66
67 static char version[] __devinitdata =
68         "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
69         DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
70
71 MODULE_AUTHOR("Eliezer Tamir");
72 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
73 MODULE_LICENSE("GPL");
74 MODULE_VERSION(DRV_MODULE_VERSION);
75
76 static int disable_tpa;
77 static int use_inta;
78 static int poll;
79 static int debug;
80 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
81 static int use_multi;
82
83 module_param(disable_tpa, int, 0);
84 module_param(use_inta, int, 0);
85 module_param(poll, int, 0);
86 module_param(debug, int, 0);
87 MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
88 MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
89 MODULE_PARM_DESC(poll, "use polling (for debug)");
90 MODULE_PARM_DESC(debug, "default debug msglevel");
91
92 #ifdef BNX2X_MULTI
93 module_param(use_multi, int, 0);
94 MODULE_PARM_DESC(use_multi, "use per-CPU queues");
95 #endif
96 static struct workqueue_struct *bnx2x_wq;
97
98 enum bnx2x_board_type {
99         BCM57710 = 0,
100         BCM57711 = 1,
101         BCM57711E = 2,
102 };
103
104 /* indexed by board_type, above */
105 static struct {
106         char *name;
107 } board_info[] __devinitdata = {
108         { "Broadcom NetXtreme II BCM57710 XGb" },
109         { "Broadcom NetXtreme II BCM57711 XGb" },
110         { "Broadcom NetXtreme II BCM57711E XGb" }
111 };
112
113
114 static const struct pci_device_id bnx2x_pci_tbl[] = {
115         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
116                 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
117         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
118                 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
119         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
120                 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
121         { 0 }
122 };
123
124 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
125
126 /****************************************************************************
127 * General service functions
128 ****************************************************************************/
129
130 /* used only at init
131  * locking is done by mcp
132  */
133 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
134 {
135         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
136         pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
137         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
138                                PCICFG_VENDOR_ID_OFFSET);
139 }
140
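/* Companion to bnx2x_reg_wr_ind(): read a GRC register indirectly through
 * the PCICFG_GRC_ADDRESS/PCICFG_GRC_DATA window in PCI config space, then
 * park the window back at the vendor ID offset.
 */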
141 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
142 {
143         u32 val;
144
145         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
146         pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
147         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
148                                PCICFG_VENDOR_ID_OFFSET);
149
150         return val;
151 }
152
153 static const u32 dmae_reg_go_c[] = {
154         DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
155         DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
156         DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
157         DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
158 };
159
160 /* copy command into DMAE command memory and set DMAE command go */
161 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
162                             int idx)
163 {
164         u32 cmd_offset;
165         int i;
166
167         cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
168         for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
169                 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
170
171                 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
172                    idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
173         }
174         REG_WR(bp, dmae_reg_go_c[idx], 1);
175 }
176
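/* Copy len32 dwords from host memory (dma_addr) to GRC address dst_addr
 * using the DMAE block.  Falls back to indirect register writes while the
 * DMAE is not yet initialized, and otherwise polls the write-back
 * completion word (wb_comp) under dmae_mutex, with a longer poll interval
 * on emulation/FPGA.
 */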
177 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
178                       u32 len32)
179 {
180         struct dmae_command *dmae = &bp->init_dmae;
181         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
182         int cnt = 200;
183
184         if (!bp->dmae_ready) {
185                 u32 *data = bnx2x_sp(bp, wb_data[0]);
186
187                 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
188                    "  using indirect\n", dst_addr, len32);
189                 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
190                 return;
191         }
192
193         mutex_lock(&bp->dmae_mutex);
194
195         memset(dmae, 0, sizeof(struct dmae_command));
196
197         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
198                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
199                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
200 #ifdef __BIG_ENDIAN
201                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
202 #else
203                         DMAE_CMD_ENDIANITY_DW_SWAP |
204 #endif
205                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
206                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
207         dmae->src_addr_lo = U64_LO(dma_addr);
208         dmae->src_addr_hi = U64_HI(dma_addr);
209         dmae->dst_addr_lo = dst_addr >> 2;
210         dmae->dst_addr_hi = 0;
211         dmae->len = len32;
212         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
213         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
214         dmae->comp_val = DMAE_COMP_VAL;
215
216         DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
217            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
218                     "dst_addr [%x:%08x (%08x)]\n"
219            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
220            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
221            dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
222            dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
223         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
224            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
225            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
226
227         *wb_comp = 0;
228
229         bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
230
231         udelay(5);
232
233         while (*wb_comp != DMAE_COMP_VAL) {
234                 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
235
236                 if (!cnt) {
237                         BNX2X_ERR("dmae timeout!\n");
238                         break;
239                 }
240                 cnt--;
241                 /* adjust delay for emulation/FPGA */
242                 if (CHIP_REV_IS_SLOW(bp))
243                         msleep(100);
244                 else
245                         udelay(5);
246         }
247
248         mutex_unlock(&bp->dmae_mutex);
249 }
250
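/* Mirror of bnx2x_write_dmae(): DMA len32 dwords from GRC address src_addr
 * into the slowpath wb_data buffer, using indirect register reads when the
 * DMAE is not ready.
 */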
251 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
252 {
253         struct dmae_command *dmae = &bp->init_dmae;
254         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
255         int cnt = 200;
256
257         if (!bp->dmae_ready) {
258                 u32 *data = bnx2x_sp(bp, wb_data[0]);
259                 int i;
260
261                 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
262                    "  using indirect\n", src_addr, len32);
263                 for (i = 0; i < len32; i++)
264                         data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
265                 return;
266         }
267
268         mutex_lock(&bp->dmae_mutex);
269
270         memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
271         memset(dmae, 0, sizeof(struct dmae_command));
272
273         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
274                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
275                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
276 #ifdef __BIG_ENDIAN
277                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
278 #else
279                         DMAE_CMD_ENDIANITY_DW_SWAP |
280 #endif
281                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
282                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
283         dmae->src_addr_lo = src_addr >> 2;
284         dmae->src_addr_hi = 0;
285         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
286         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
287         dmae->len = len32;
288         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
289         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
290         dmae->comp_val = DMAE_COMP_VAL;
291
292         DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
293            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
294                     "dst_addr [%x:%08x (%08x)]\n"
295            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
296            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
297            dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
298            dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
299
300         *wb_comp = 0;
301
302         bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
303
304         udelay(5);
305
306         while (*wb_comp != DMAE_COMP_VAL) {
307
308                 if (!cnt) {
309                         BNX2X_ERR("dmae timeout!\n");
310                         break;
311                 }
312                 cnt--;
313                 /* adjust delay for emulation/FPGA */
314                 if (CHIP_REV_IS_SLOW(bp))
315                         msleep(100);
316                 else
317                         udelay(5);
318         }
319         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
320            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
321            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
322
323         mutex_unlock(&bp->dmae_mutex);
324 }
325
326 /* used only for slowpath so not inlined */
327 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
328 {
329         u32 wb_write[2];
330
331         wb_write[0] = val_hi;
332         wb_write[1] = val_lo;
333         REG_WR_DMAE(bp, reg, wb_write, 2);
334 }
335
336 #ifdef USE_WB_RD
337 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
338 {
339         u32 wb_data[2];
340
341         REG_RD_DMAE(bp, reg, wb_data, 2);
342
343         return HILO_U64(wb_data[0], wb_data[1]);
344 }
345 #endif
346
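/* Scan the assert lists of the four STORM processors (X/T/C/U) in internal
 * memory and print any valid entries; returns the number of asserts found.
 * Each list is walked until the first invalid-opcode entry.
 */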
347 static int bnx2x_mc_assert(struct bnx2x *bp)
348 {
349         char last_idx;
350         int i, rc = 0;
351         u32 row0, row1, row2, row3;
352
353         /* XSTORM */
354         last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
355                            XSTORM_ASSERT_LIST_INDEX_OFFSET);
356         if (last_idx)
357                 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
358
359         /* print the asserts */
360         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
361
362                 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
363                               XSTORM_ASSERT_LIST_OFFSET(i));
364                 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
365                               XSTORM_ASSERT_LIST_OFFSET(i) + 4);
366                 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
367                               XSTORM_ASSERT_LIST_OFFSET(i) + 8);
368                 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
369                               XSTORM_ASSERT_LIST_OFFSET(i) + 12);
370
371                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
372                         BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
373                                   " 0x%08x 0x%08x 0x%08x\n",
374                                   i, row3, row2, row1, row0);
375                         rc++;
376                 } else {
377                         break;
378                 }
379         }
380
381         /* TSTORM */
382         last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
383                            TSTORM_ASSERT_LIST_INDEX_OFFSET);
384         if (last_idx)
385                 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
386
387         /* print the asserts */
388         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
389
390                 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
391                               TSTORM_ASSERT_LIST_OFFSET(i));
392                 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
393                               TSTORM_ASSERT_LIST_OFFSET(i) + 4);
394                 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
395                               TSTORM_ASSERT_LIST_OFFSET(i) + 8);
396                 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
397                               TSTORM_ASSERT_LIST_OFFSET(i) + 12);
398
399                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
400                         BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
401                                   " 0x%08x 0x%08x 0x%08x\n",
402                                   i, row3, row2, row1, row0);
403                         rc++;
404                 } else {
405                         break;
406                 }
407         }
408
409         /* CSTORM */
410         last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
411                            CSTORM_ASSERT_LIST_INDEX_OFFSET);
412         if (last_idx)
413                 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
414
415         /* print the asserts */
416         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
417
418                 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
419                               CSTORM_ASSERT_LIST_OFFSET(i));
420                 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
421                               CSTORM_ASSERT_LIST_OFFSET(i) + 4);
422                 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
423                               CSTORM_ASSERT_LIST_OFFSET(i) + 8);
424                 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
425                               CSTORM_ASSERT_LIST_OFFSET(i) + 12);
426
427                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
428                         BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
429                                   " 0x%08x 0x%08x 0x%08x\n",
430                                   i, row3, row2, row1, row0);
431                         rc++;
432                 } else {
433                         break;
434                 }
435         }
436
437         /* USTORM */
438         last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
439                            USTORM_ASSERT_LIST_INDEX_OFFSET);
440         if (last_idx)
441                 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
442
443         /* print the asserts */
444         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
445
446                 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
447                               USTORM_ASSERT_LIST_OFFSET(i));
448                 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
449                               USTORM_ASSERT_LIST_OFFSET(i) + 4);
450                 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
451                               USTORM_ASSERT_LIST_OFFSET(i) + 8);
452                 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
453                               USTORM_ASSERT_LIST_OFFSET(i) + 12);
454
455                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
456                         BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
457                                   " 0x%08x 0x%08x 0x%08x\n",
458                                   i, row3, row2, row1, row0);
459                         rc++;
460                 } else {
461                         break;
462                 }
463         }
464
465         return rc;
466 }
467
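/* Dump the management firmware trace from MCP scratch memory.  'mark' is
 * read from the scratchpad and gives the wrap point inside the cyclic
 * buffer, so the dump is printed in two chunks: from mark to the end of
 * the buffer, then from the start of the buffer back up to mark.
 */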
468 static void bnx2x_fw_dump(struct bnx2x *bp)
469 {
470         u32 mark, offset;
471         u32 data[9];
472         int word;
473
474         mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
475         mark = ((mark + 0x3) & ~0x3);
476         printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
477
478         for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
479                 for (word = 0; word < 8; word++)
480                         data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
481                                                   offset + 4*word));
482                 data[8] = 0x0;
483                 printk(KERN_CONT "%s", (char *)data);
484         }
485         for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
486                 for (word = 0; word < 8; word++)
487                         data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
488                                                   offset + 4*word));
489                 data[8] = 0x0;
490                 printk(KERN_CONT "%s", (char *)data);
491         }
492         printk("\n" KERN_ERR PFX "end of fw dump\n");
493 }
494
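/* Emergency state dump: freeze statistics and print, for every queue, the
 * Tx/Rx producer and consumer indices plus a window of Tx BDs, Rx BDs,
 * SGEs and CQEs around the current consumers, followed by the FW trace and
 * the STORM assert lists.
 */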
495 static void bnx2x_panic_dump(struct bnx2x *bp)
496 {
497         int i;
498         u16 j, start, end;
499
500         bp->stats_state = STATS_STATE_DISABLED;
501         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
502
503         BNX2X_ERR("begin crash dump -----------------\n");
504
505         for_each_queue(bp, i) {
506                 struct bnx2x_fastpath *fp = &bp->fp[i];
507                 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
508
509                 BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
510                           "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
511                           i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
512                           fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
513                 BNX2X_ERR("          rx_bd_prod(%x)  rx_bd_cons(%x)"
514                           "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
515                           "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
516                           fp->rx_bd_prod, fp->rx_bd_cons,
517                           le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
518                           fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
519                 BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
520                           "  fp_c_idx(%x)  *sb_c_idx(%x)  fp_u_idx(%x)"
521                           "  *sb_u_idx(%x)  bd data(%x,%x)\n",
522                           fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
523                           fp->status_blk->c_status_block.status_block_index,
524                           fp->fp_u_idx,
525                           fp->status_blk->u_status_block.status_block_index,
526                           hw_prods->packets_prod, hw_prods->bds_prod);
527
528                 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
529                 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
530                 for (j = start; j < end; j++) {
531                         struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
532
533                         BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
534                                   sw_bd->skb, sw_bd->first_bd);
535                 }
536
537                 start = TX_BD(fp->tx_bd_cons - 10);
538                 end = TX_BD(fp->tx_bd_cons + 254);
539                 for (j = start; j < end; j++) {
540                         u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
541
542                         BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
543                                   j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
544                 }
545
546                 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
547                 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
548                 for (j = start; j < end; j++) {
549                         u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
550                         struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
551
552                         BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
553                                   j, rx_bd[1], rx_bd[0], sw_bd->skb);
554                 }
555
556                 start = RX_SGE(fp->rx_sge_prod);
557                 end = RX_SGE(fp->last_max_sge);
558                 for (j = start; j < end; j++) {
559                         u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
560                         struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
561
562                         BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
563                                   j, rx_sge[1], rx_sge[0], sw_page->page);
564                 }
565
566                 start = RCQ_BD(fp->rx_comp_cons - 10);
567                 end = RCQ_BD(fp->rx_comp_cons + 503);
568                 for (j = start; j < end; j++) {
569                         u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
570
571                         BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
572                                   j, cqe[0], cqe[1], cqe[2], cqe[3]);
573                 }
574         }
575
576         BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
577                   "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
578                   "  spq_prod_idx(%u)\n",
579                   bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
580                   bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
581
582         bnx2x_fw_dump(bp);
583         bnx2x_mc_assert(bp);
584         BNX2X_ERR("end crash dump -----------------\n");
585 }
586
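/* Enable interrupts in the host coalescing (HC) block for this port.
 * MSI-X mode clears the single-ISR bit and enables the MSI/MSI-X and
 * attention bits; INT#A mode enables the INTx line as well, writing the
 * configuration once with the MSI/MSI-X bit set and once more with it
 * cleared.  On E1H the leading/trailing edge masks are also programmed.
 */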
587 static void bnx2x_int_enable(struct bnx2x *bp)
588 {
589         int port = BP_PORT(bp);
590         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
591         u32 val = REG_RD(bp, addr);
592         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
593
594         if (msix) {
595                 val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
596                 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
597                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
598         } else {
599                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
600                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
601                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
602                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
603
604                 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
605                    val, port, addr, msix);
606
607                 REG_WR(bp, addr, val);
608
609                 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
610         }
611
612         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
613            val, port, addr, msix);
614
615         REG_WR(bp, addr, val);
616
617         if (CHIP_IS_E1H(bp)) {
618                 /* init leading/trailing edge */
619                 if (IS_E1HMF(bp)) {
620                         val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
621                         if (bp->port.pmf)
622                                 /* enable nig attention */
623                                 val |= 0x0100;
624                 } else
625                         val = 0xffff;
626
627                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
628                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
629         }
630 }
631
632 static void bnx2x_int_disable(struct bnx2x *bp)
633 {
634         int port = BP_PORT(bp);
635         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
636         u32 val = REG_RD(bp, addr);
637
638         val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
639                  HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
640                  HC_CONFIG_0_REG_INT_LINE_EN_0 |
641                  HC_CONFIG_0_REG_ATTN_BIT_EN_0);
642
643         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
644            val, port, addr);
645
646         REG_WR(bp, addr, val);
647         if (REG_RD(bp, addr) != val)
648                 BNX2X_ERR("BUG! proper val not read from IGU!\n");
649 }
650
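/* Quiesce interrupt processing: bump intr_sem so ISRs bail out early,
 * optionally mask interrupts in the HC block, wait for any running ISRs
 * (all MSI-X vectors plus the slowpath vector, or the single INT#A line)
 * to finish, and make sure the slowpath task is not left running.
 */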
651 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
652 {
653         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
654         int i;
655
656         /* disable interrupt handling */
657         atomic_inc(&bp->intr_sem);
658         if (disable_hw)
659                 /* prevent the HW from sending interrupts */
660                 bnx2x_int_disable(bp);
661
662         /* make sure all ISRs are done */
663         if (msix) {
664                 for_each_queue(bp, i)
665                         synchronize_irq(bp->msix_table[i].vector);
666
667                 /* one more for the Slow Path IRQ */
668                 synchronize_irq(bp->msix_table[i].vector);
669         } else
670                 synchronize_irq(bp->pdev->irq);
671
672         /* make sure sp_task is not running */
673         cancel_delayed_work(&bp->sp_task);
674         flush_workqueue(bnx2x_wq);
675 }
676
677 /* fast path */
678
679 /*
680  * General service functions
681  */
682
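/* Acknowledge a status block to the IGU: compose one igu_ack_register
 * word (status block id, storm id, new index, interrupt mode and update
 * flag) and write it to the per-port INT_ACK command register.
 */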
683 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
684                                 u8 storm, u16 index, u8 op, u8 update)
685 {
686         u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
687                        COMMAND_REG_INT_ACK);
688         struct igu_ack_register igu_ack;
689
690         igu_ack.status_block_index = index;
691         igu_ack.sb_id_and_flags =
692                         ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
693                          (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
694                          (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
695                          (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
696
697         DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
698            (*(u32 *)&igu_ack), hc_addr);
699         REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
700 }
701
702 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
703 {
704         struct host_status_block *fpsb = fp->status_blk;
705         u16 rc = 0;
706
707         barrier(); /* status block is written to by the chip */
708         if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
709                 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
710                 rc |= 1;
711         }
712         if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
713                 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
714                 rc |= 2;
715         }
716         return rc;
717 }
718
719 static u16 bnx2x_ack_int(struct bnx2x *bp)
720 {
721         u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
722                        COMMAND_REG_SIMD_MASK);
723         u32 result = REG_RD(bp, hc_addr);
724
725         DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
726            result, hc_addr);
727
728         return result;
729 }
730
731
732 /*
733  * fast path service functions
734  */
735
736 static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
737 {
738         u16 tx_cons_sb;
739
740         /* Tell compiler that status block fields can change */
741         barrier();
742         tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
743         return (fp->tx_pkt_cons != tx_cons_sb);
744 }
745
746 static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
747 {
748         /* Tell compiler that consumer and producer can change */
749         barrier();
750         return (fp->tx_pkt_prod != fp->tx_pkt_cons);
751
752 }
753
754 /* free skb in the packet ring at pos idx
755  * return idx of last bd freed
756  */
757 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
758                              u16 idx)
759 {
760         struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
761         struct eth_tx_bd *tx_bd;
762         struct sk_buff *skb = tx_buf->skb;
763         u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
764         int nbd;
765
766         DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
767            idx, tx_buf, skb);
768
769         /* unmap first bd */
770         DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
771         tx_bd = &fp->tx_desc_ring[bd_idx];
772         pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
773                          BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
774
775         nbd = le16_to_cpu(tx_bd->nbd) - 1;
776         new_cons = nbd + tx_buf->first_bd;
777 #ifdef BNX2X_STOP_ON_ERROR
778         if (nbd > (MAX_SKB_FRAGS + 2)) {
779                 BNX2X_ERR("BAD nbd!\n");
780                 bnx2x_panic();
781         }
782 #endif
783
784         /* Skip a parse bd and the TSO split header bd
785            since they have no mapping */
786         if (nbd)
787                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
788
789         if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
790                                            ETH_TX_BD_FLAGS_TCP_CSUM |
791                                            ETH_TX_BD_FLAGS_SW_LSO)) {
792                 if (--nbd)
793                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
794                 tx_bd = &fp->tx_desc_ring[bd_idx];
795                 /* is this a TSO split header bd? */
796                 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
797                         if (--nbd)
798                                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
799                 }
800         }
801
802         /* now free frags */
803         while (nbd > 0) {
804
805                 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
806                 tx_bd = &fp->tx_desc_ring[bd_idx];
807                 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
808                                BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
809                 if (--nbd)
810                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
811         }
812
813         /* release skb */
814         WARN_ON(!skb);
815         dev_kfree_skb(skb);
816         tx_buf->first_bd = 0;
817         tx_buf->skb = NULL;
818
819         return new_cons;
820 }
821
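/* Return the number of free Tx BDs.  The NUM_TX_RINGS "next page" BDs can
 * never carry packet data, so they are counted as used and double as a
 * small reserve threshold.
 */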
822 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
823 {
824         s16 used;
825         u16 prod;
826         u16 cons;
827
828         barrier(); /* Tell compiler that prod and cons can change */
829         prod = fp->tx_bd_prod;
830         cons = fp->tx_bd_cons;
831
832         /* NUM_TX_RINGS = number of "next-page" entries
833            It will be used as a threshold */
834         used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
835
836 #ifdef BNX2X_STOP_ON_ERROR
837         WARN_ON(used < 0);
838         WARN_ON(used > fp->bp->tx_ring_size);
839         WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
840 #endif
841
842         return (s16)(fp->bp->tx_ring_size) - used;
843 }
844
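/* Tx completion: walk the packets the chip has completed (per the status
 * block Tx consumer), free their BDs and skbs, and wake the netdev queue
 * if it was stopped and enough BDs are available again.
 */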
845 static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
846 {
847         struct bnx2x *bp = fp->bp;
848         u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
849         int done = 0;
850
851 #ifdef BNX2X_STOP_ON_ERROR
852         if (unlikely(bp->panic))
853                 return;
854 #endif
855
856         hw_cons = le16_to_cpu(*fp->tx_cons_sb);
857         sw_cons = fp->tx_pkt_cons;
858
859         while (sw_cons != hw_cons) {
860                 u16 pkt_cons;
861
862                 pkt_cons = TX_BD(sw_cons);
863
864                 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
865
866                 DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
867                    hw_cons, sw_cons, pkt_cons);
868
869 /*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
870                         rmb();
871                         prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
872                 }
873 */
874                 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
875                 sw_cons++;
876                 done++;
877
878                 if (done == work)
879                         break;
880         }
881
882         fp->tx_pkt_cons = sw_cons;
883         fp->tx_bd_cons = bd_cons;
884
885         /* Need to make the tx_cons update visible to start_xmit()
886          * before checking for netif_queue_stopped().  Without the
887          * memory barrier, there is a small possibility that start_xmit()
888          * will miss it and cause the queue to be stopped forever.
889          */
890         smp_mb();
891
892         /* TBD need a thresh? */
893         if (unlikely(netif_queue_stopped(bp->dev))) {
894
895                 netif_tx_lock(bp->dev);
896
897                 if (netif_queue_stopped(bp->dev) &&
898                     (bp->state == BNX2X_STATE_OPEN) &&
899                     (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
900                         netif_wake_queue(bp->dev);
901
902                 netif_tx_unlock(bp->dev);
903         }
904 }
905
906
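/* Handle a slowpath (ramrod) completion delivered on the Rx CQ: credit
 * spq_left back and advance either the per-fastpath state or the global
 * bp->state machine according to which command completed.
 */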
907 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
908                            union eth_rx_cqe *rr_cqe)
909 {
910         struct bnx2x *bp = fp->bp;
911         int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
912         int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
913
914         DP(BNX2X_MSG_SP,
915            "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
916            FP_IDX(fp), cid, command, bp->state,
917            rr_cqe->ramrod_cqe.ramrod_type);
918
919         bp->spq_left++;
920
921         if (FP_IDX(fp)) {
922                 switch (command | fp->state) {
923                 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
924                                                 BNX2X_FP_STATE_OPENING):
925                         DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
926                            cid);
927                         fp->state = BNX2X_FP_STATE_OPEN;
928                         break;
929
930                 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
931                         DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
932                            cid);
933                         fp->state = BNX2X_FP_STATE_HALTED;
934                         break;
935
936                 default:
937                         BNX2X_ERR("unexpected MC reply (%d)  "
938                                   "fp->state is %x\n", command, fp->state);
939                         break;
940                 }
941                 mb(); /* force bnx2x_wait_ramrod() to see the change */
942                 return;
943         }
944
945         switch (command | bp->state) {
946         case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
947                 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
948                 bp->state = BNX2X_STATE_OPEN;
949                 break;
950
951         case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
952                 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
953                 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
954                 fp->state = BNX2X_FP_STATE_HALTED;
955                 break;
956
957         case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
958                 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
959                 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
960                 break;
961
962
963         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
964         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
965                 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
966                 bp->set_mac_pending = 0;
967                 break;
968
969         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
970                 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
971                 break;
972
973         default:
974                 BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
975                           command, bp->state);
976                 break;
977         }
978         mb(); /* force bnx2x_wait_ramrod() to see the change */
979 }
980
981 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
982                                      struct bnx2x_fastpath *fp, u16 index)
983 {
984         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
985         struct page *page = sw_buf->page;
986         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
987
988         /* Skip "next page" elements */
989         if (!page)
990                 return;
991
992         pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
993                        SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
994         __free_pages(page, PAGES_PER_SGE_SHIFT);
995
996         sw_buf->page = NULL;
997         sge->addr_hi = 0;
998         sge->addr_lo = 0;
999 }
1000
1001 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1002                                            struct bnx2x_fastpath *fp, int last)
1003 {
1004         int i;
1005
1006         for (i = 0; i < last; i++)
1007                 bnx2x_free_rx_sge(bp, fp, i);
1008 }
1009
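/* Allocate a PAGES_PER_SGE page cluster (GFP_ATOMIC), DMA-map it and
 * publish its address in the SGE ring entry at 'index'.
 */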
1010 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1011                                      struct bnx2x_fastpath *fp, u16 index)
1012 {
1013         struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1014         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1015         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1016         dma_addr_t mapping;
1017
1018         if (unlikely(page == NULL))
1019                 return -ENOMEM;
1020
1021         mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
1022                                PCI_DMA_FROMDEVICE);
1023         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1024                 __free_pages(page, PAGES_PER_SGE_SHIFT);
1025                 return -ENOMEM;
1026         }
1027
1028         sw_buf->page = page;
1029         pci_unmap_addr_set(sw_buf, mapping, mapping);
1030
1031         sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1032         sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1033
1034         return 0;
1035 }
1036
1037 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1038                                      struct bnx2x_fastpath *fp, u16 index)
1039 {
1040         struct sk_buff *skb;
1041         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1042         struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1043         dma_addr_t mapping;
1044
1045         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1046         if (unlikely(skb == NULL))
1047                 return -ENOMEM;
1048
1049         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
1050                                  PCI_DMA_FROMDEVICE);
1051         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1052                 dev_kfree_skb(skb);
1053                 return -ENOMEM;
1054         }
1055
1056         rx_buf->skb = skb;
1057         pci_unmap_addr_set(rx_buf, mapping, mapping);
1058
1059         rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1060         rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1061
1062         return 0;
1063 }
1064
1065 /* note that we are not allocating a new skb,
1066  * we are just moving one from cons to prod
1067  * we are not creating a new mapping,
1068  * so there is no need to check for dma_mapping_error().
1069  */
1070 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1071                                struct sk_buff *skb, u16 cons, u16 prod)
1072 {
1073         struct bnx2x *bp = fp->bp;
1074         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1075         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1076         struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1077         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1078
1079         pci_dma_sync_single_for_device(bp->pdev,
1080                                        pci_unmap_addr(cons_rx_buf, mapping),
1081                                        bp->rx_offset + RX_COPY_THRESH,
1082                                        PCI_DMA_FROMDEVICE);
1083
1084         prod_rx_buf->skb = cons_rx_buf->skb;
1085         pci_unmap_addr_set(prod_rx_buf, mapping,
1086                            pci_unmap_addr(cons_rx_buf, mapping));
1087         *prod_bd = *cons_bd;
1088 }
1089
1090 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1091                                              u16 idx)
1092 {
1093         u16 last_max = fp->last_max_sge;
1094
1095         if (SUB_S16(idx, last_max) > 0)
1096                 fp->last_max_sge = idx;
1097 }
1098
1099 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1100 {
1101         int i, j;
1102
1103         for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1104                 int idx = RX_SGE_CNT * i - 1;
1105
1106                 for (j = 0; j < 2; j++) {
1107                         SGE_MASK_CLEAR_BIT(fp, idx);
1108                         idx--;
1109                 }
1110         }
1111 }
1112
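/* Advance the SGE producer after a TPA completion.  Every SGE consumed by
 * the CQE is cleared in the sge_mask bitmap; the producer is then moved
 * forward over fully consumed 64-bit mask elements, and the page-end
 * ("next page") entries are re-cleared so they are never handed out as
 * data buffers.
 */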
1113 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1114                                   struct eth_fast_path_rx_cqe *fp_cqe)
1115 {
1116         struct bnx2x *bp = fp->bp;
1117         u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1118                                      le16_to_cpu(fp_cqe->len_on_bd)) >>
1119                       SGE_PAGE_SHIFT;
1120         u16 last_max, last_elem, first_elem;
1121         u16 delta = 0;
1122         u16 i;
1123
1124         if (!sge_len)
1125                 return;
1126
1127         /* First mark all used pages */
1128         for (i = 0; i < sge_len; i++)
1129                 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1130
1131         DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1132            sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1133
1134         /* Here we assume that the last SGE index is the biggest */
1135         prefetch((void *)(fp->sge_mask));
1136         bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1137
1138         last_max = RX_SGE(fp->last_max_sge);
1139         last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1140         first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1141
1142         /* If ring is not full */
1143         if (last_elem + 1 != first_elem)
1144                 last_elem++;
1145
1146         /* Now update the prod */
1147         for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1148                 if (likely(fp->sge_mask[i]))
1149                         break;
1150
1151                 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1152                 delta += RX_SGE_MASK_ELEM_SZ;
1153         }
1154
1155         if (delta > 0) {
1156                 fp->rx_sge_prod += delta;
1157                 /* clear page-end entries */
1158                 bnx2x_clear_sge_mask_next_elems(fp);
1159         }
1160
1161         DP(NETIF_MSG_RX_STATUS,
1162            "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
1163            fp->last_max_sge, fp->rx_sge_prod);
1164 }
1165
1166 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1167 {
1168         /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1169         memset(fp->sge_mask, 0xff,
1170                (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1171
1172         /* Clear the two last indices in the page to 1:
1173            these are the indices that correspond to the "next" element,
1174            hence will never be indicated and should be removed from
1175            the calculations. */
1176         bnx2x_clear_sge_mask_next_elems(fp);
1177 }
1178
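/* Start of a TPA (LRO) aggregation: the spare skb parked in the per-queue
 * tpa_pool takes the place of the Rx BD at 'prod', while the skb that just
 * received the first segment is moved into the pool to be completed later
 * by bnx2x_tpa_stop().
 */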
1179 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1180                             struct sk_buff *skb, u16 cons, u16 prod)
1181 {
1182         struct bnx2x *bp = fp->bp;
1183         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1184         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1185         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1186         dma_addr_t mapping;
1187
1188         /* move empty skb from pool to prod and map it */
1189         prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1190         mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1191                                  bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1192         pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1193
1194         /* move partial skb from cons to pool (don't unmap yet) */
1195         fp->tpa_pool[queue] = *cons_rx_buf;
1196
1197         /* mark bin state as start - print error if current state != stop */
1198         if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1199                 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1200
1201         fp->tpa_state[queue] = BNX2X_TPA_START;
1202
1203         /* point prod_bd to new skb */
1204         prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1205         prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1206
1207 #ifdef BNX2X_STOP_ON_ERROR
1208         fp->tpa_queue_used |= (1 << queue);
1209 #ifdef __powerpc64__
1210         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1211 #else
1212         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1213 #endif
1214            fp->tpa_queue_used);
1215 #endif
1216 }
1217
1218 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1219                                struct sk_buff *skb,
1220                                struct eth_fast_path_rx_cqe *fp_cqe,
1221                                u16 cqe_idx)
1222 {
1223         struct sw_rx_page *rx_pg, old_rx_pg;
1224         u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1225         u32 i, frag_len, frag_size, pages;
1226         int err;
1227         int j;
1228
1229         frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1230         pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1231
1232         /* This is needed in order to enable forwarding support */
1233         if (frag_size)
1234                 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1235                                                max(frag_size, (u32)len_on_bd));
1236
1237 #ifdef BNX2X_STOP_ON_ERROR
1238         if (pages >
1239             min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
1240                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1241                           pages, cqe_idx);
1242                 BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
1243                           fp_cqe->pkt_len, len_on_bd);
1244                 bnx2x_panic();
1245                 return -EINVAL;
1246         }
1247 #endif
1248
1249         /* Run through the SGL and compose the fragmented skb */
1250         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1251                 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1252
1253                 /* FW gives the indices of the SGE as if the ring is an array
1254                    (meaning that "next" element will consume 2 indices) */
1255                 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1256                 rx_pg = &fp->rx_page_ring[sge_idx];
1257                 old_rx_pg = *rx_pg;
1258
1259                 /* If we fail to allocate a substitute page, we simply stop
1260                    where we are and drop the whole packet */
1261                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1262                 if (unlikely(err)) {
1263                         bp->eth_stats.rx_skb_alloc_failed++;
1264                         return err;
1265                 }
1266
1267                 /* Unmap the page as we r going to pass it to the stack */
1268                 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1269                               SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1270
1271                 /* Add one frag and update the appropriate fields in the skb */
1272                 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1273
1274                 skb->data_len += frag_len;
1275                 skb->truesize += frag_len;
1276                 skb->len += frag_len;
1277
1278                 frag_size -= frag_len;
1279         }
1280
1281         return 0;
1282 }
1283
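/* End of a TPA aggregation: unmap the aggregated skb, attach the SGE pages
 * as fragments, fix up the IP checksum and hand the packet to the stack
 * (with VLAN acceleration when applicable).  A replacement skb is
 * allocated for the pool; if that fails, the packet is dropped and the
 * existing buffer stays in the bin.
 */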
1284 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1285                            u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1286                            u16 cqe_idx)
1287 {
1288         struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1289         struct sk_buff *skb = rx_buf->skb;
1290         /* alloc new skb */
1291         struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1292
1293         /* Unmap skb in the pool anyway, as we are going to change
1294            pool entry status to BNX2X_TPA_STOP even if new skb allocation
1295            fails. */
1296         pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1297                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1298
1299         if (likely(new_skb)) {
1300                 /* fix ip xsum and give it to the stack */
1301                 /* (no need to map the new skb) */
1302 #ifdef BCM_VLAN
1303                 int is_vlan_cqe =
1304                         (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1305                          PARSING_FLAGS_VLAN);
1306                 int is_not_hwaccel_vlan_cqe =
1307                         (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1308 #endif
1309
1310                 prefetch(skb);
1311                 prefetch(((char *)(skb)) + 128);
1312
1313 #ifdef BNX2X_STOP_ON_ERROR
1314                 if (pad + len > bp->rx_buf_size) {
1315                         BNX2X_ERR("skb_put is about to fail...  "
1316                                   "pad %d  len %d  rx_buf_size %d\n",
1317                                   pad, len, bp->rx_buf_size);
1318                         bnx2x_panic();
1319                         return;
1320                 }
1321 #endif
1322
1323                 skb_reserve(skb, pad);
1324                 skb_put(skb, len);
1325
1326                 skb->protocol = eth_type_trans(skb, bp->dev);
1327                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1328                 skb_record_rx_queue(skb, queue);
1329
1330                 {
1331                         struct iphdr *iph;
1332
1333                         iph = (struct iphdr *)skb->data;
1334 #ifdef BCM_VLAN
1335                         /* If there is no Rx VLAN offloading -
1336                            take VLAN tag into an account */
1337                         if (unlikely(is_not_hwaccel_vlan_cqe))
1338                                 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1339 #endif
1340                         iph->check = 0;
1341                         iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1342                 }
1343
1344                 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1345                                          &cqe->fast_path_cqe, cqe_idx)) {
1346 #ifdef BCM_VLAN
1347                         if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1348                             (!is_not_hwaccel_vlan_cqe))
1349                                 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1350                                                 le16_to_cpu(cqe->fast_path_cqe.
1351                                                             vlan_tag));
1352                         else
1353 #endif
1354                                 netif_receive_skb(skb);
1355                 } else {
1356                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1357                            " - dropping packet!\n");
1358                         dev_kfree_skb(skb);
1359                 }
1360
1361
1362                 /* put new skb in bin */
1363                 fp->tpa_pool[queue].skb = new_skb;
1364
1365         } else {
1366                 /* else drop the packet and keep the buffer in the bin */
1367                 DP(NETIF_MSG_RX_STATUS,
1368                    "Failed to allocate new skb - dropping packet!\n");
1369                 bp->eth_stats.rx_skb_alloc_failed++;
1370         }
1371
1372         fp->tpa_state[queue] = BNX2X_TPA_STOP;
1373 }
1374
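/* Publish new Rx BD/CQE/SGE producer values to USTORM internal memory.
 * The wmb() guarantees the ring contents are visible before the firmware
 * sees the updated producers.
 */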
1375 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1376                                         struct bnx2x_fastpath *fp,
1377                                         u16 bd_prod, u16 rx_comp_prod,
1378                                         u16 rx_sge_prod)
1379 {
1380         struct ustorm_eth_rx_producers rx_prods = {0};
1381         int i;
1382
1383         /* Update producers */
1384         rx_prods.bd_prod = bd_prod;
1385         rx_prods.cqe_prod = rx_comp_prod;
1386         rx_prods.sge_prod = rx_sge_prod;
1387
1388         /*
1389          * Make sure that the BD and SGE data is updated before updating the
1390          * producers since FW might read the BD/SGE right after the producer
1391          * is updated.
1392          * This is only applicable for weak-ordered memory model archs such
1393          * as IA-64. The following barrier is also mandatory since FW will
1394          * assumes BDs must have buffers.
1395          */
1396         wmb();
1397
1398         for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1399                 REG_WR(bp, BAR_USTRORM_INTMEM +
1400                        USTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
1401                        ((u32 *)&rx_prods)[i]);
1402
1403         mmiowb(); /* keep prod updates ordered */
1404
1405         DP(NETIF_MSG_RX_STATUS,
1406            "Wrote: bd_prod %u  cqe_prod %u  sge_prod %u\n",
1407            bd_prod, rx_comp_prod, rx_sge_prod);
1408 }
1409
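/* Rx fast-path poll routine: walk the completion queue for up to 'budget'
 * packets, dispatching slowpath CQEs to bnx2x_sp_event() and TPA
 * start/stop CQEs to the aggregation handlers, and advancing the BD, CQE
 * and SGE rings as packets are processed.
 */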
1410 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1411 {
1412         struct bnx2x *bp = fp->bp;
1413         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1414         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1415         int rx_pkt = 0;
1416
1417 #ifdef BNX2X_STOP_ON_ERROR
1418         if (unlikely(bp->panic))
1419                 return 0;
1420 #endif
1421
1422         /* CQ "next element" is of the size of the regular element,
1423            that's why it's ok here */
1424         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1425         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1426                 hw_comp_cons++;
1427
1428         bd_cons = fp->rx_bd_cons;
1429         bd_prod = fp->rx_bd_prod;
1430         bd_prod_fw = bd_prod;
1431         sw_comp_cons = fp->rx_comp_cons;
1432         sw_comp_prod = fp->rx_comp_prod;
1433
1434         /* Memory barrier necessary as speculative reads of the rx
1435          * buffer can be ahead of the index in the status block
1436          */
1437         rmb();
1438
1439         DP(NETIF_MSG_RX_STATUS,
1440            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1441            FP_IDX(fp), hw_comp_cons, sw_comp_cons);
1442
1443         while (sw_comp_cons != hw_comp_cons) {
1444                 struct sw_rx_bd *rx_buf = NULL;
1445                 struct sk_buff *skb;
1446                 union eth_rx_cqe *cqe;
1447                 u8 cqe_fp_flags;
1448                 u16 len, pad;
1449
1450                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1451                 bd_prod = RX_BD(bd_prod);
1452                 bd_cons = RX_BD(bd_cons);
1453
1454                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1455                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1456
1457                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1458                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1459                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1460                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1461                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1462                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1463
1464                 /* is this a slowpath msg? */
1465                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1466                         bnx2x_sp_event(fp, cqe);
1467                         goto next_cqe;
1468
1469                 /* this is an rx packet */
1470                 } else {
1471                         rx_buf = &fp->rx_buf_ring[bd_cons];
1472                         skb = rx_buf->skb;
1473                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1474                         pad = cqe->fast_path_cqe.placement_offset;
1475
1476                         /* If CQE is marked both TPA_START and TPA_END
1477                            it is a non-TPA CQE */
1478                         if ((!fp->disable_tpa) &&
1479                             (TPA_TYPE(cqe_fp_flags) !=
1480                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1481                                 u16 queue = cqe->fast_path_cqe.queue_index;
1482
1483                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1484                                         DP(NETIF_MSG_RX_STATUS,
1485                                            "calling tpa_start on queue %d\n",
1486                                            queue);
1487
1488                                         bnx2x_tpa_start(fp, queue, skb,
1489                                                         bd_cons, bd_prod);
1490                                         goto next_rx;
1491                                 }
1492
1493                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1494                                         DP(NETIF_MSG_RX_STATUS,
1495                                            "calling tpa_stop on queue %d\n",
1496                                            queue);
1497
1498                                         if (!BNX2X_RX_SUM_FIX(cqe))
1499                                                 BNX2X_ERR("STOP on non-TCP "
1500                                                           "data\n");
1501
1502                                         /* This is the size of the linear data
1503                                            on this skb */
1504                                         len = le16_to_cpu(cqe->fast_path_cqe.
1505                                                                 len_on_bd);
1506                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1507                                                     len, cqe, comp_ring_cons);
1508 #ifdef BNX2X_STOP_ON_ERROR
1509                                         if (bp->panic)
1510                                                 return -EINVAL;
1511 #endif
1512
1513                                         bnx2x_update_sge_prod(fp,
1514                                                         &cqe->fast_path_cqe);
1515                                         goto next_cqe;
1516                                 }
1517                         }
1518
1519                         pci_dma_sync_single_for_device(bp->pdev,
1520                                         pci_unmap_addr(rx_buf, mapping),
1521                                                        pad + RX_COPY_THRESH,
1522                                                        PCI_DMA_FROMDEVICE);
1523                         prefetch(skb);
1524                         prefetch(((char *)(skb)) + 128);
1525
1526                         /* is this an error packet? */
1527                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1528                                 DP(NETIF_MSG_RX_ERR,
1529                                    "ERROR  flags %x  rx packet %u\n",
1530                                    cqe_fp_flags, sw_comp_cons);
1531                                 bp->eth_stats.rx_err_discard_pkt++;
1532                                 goto reuse_rx;
1533                         }
1534
1535                         /* Since we don't have a jumbo ring,
1536                          * copy small packets if the MTU is above 1500
1537                          */
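                        /* Copying into a freshly allocated small skb lets us
                         * hand the original full-size buffer back to the ring
                         * (bnx2x_reuse_rx_skb) instead of unmapping and
                         * reallocating it.
                         */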
1538                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1539                             (len <= RX_COPY_THRESH)) {
1540                                 struct sk_buff *new_skb;
1541
1542                                 new_skb = netdev_alloc_skb(bp->dev,
1543                                                            len + pad);
1544                                 if (new_skb == NULL) {
1545                                         DP(NETIF_MSG_RX_ERR,
1546                                            "ERROR  packet dropped "
1547                                            "because of alloc failure\n");
1548                                         bp->eth_stats.rx_skb_alloc_failed++;
1549                                         goto reuse_rx;
1550                                 }
1551
1552                                 /* aligned copy */
1553                                 skb_copy_from_linear_data_offset(skb, pad,
1554                                                     new_skb->data + pad, len);
1555                                 skb_reserve(new_skb, pad);
1556                                 skb_put(new_skb, len);
1557
1558                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1559
1560                                 skb = new_skb;
1561
1562                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1563                                 pci_unmap_single(bp->pdev,
1564                                         pci_unmap_addr(rx_buf, mapping),
1565                                                  bp->rx_buf_size,
1566                                                  PCI_DMA_FROMDEVICE);
1567                                 skb_reserve(skb, pad);
1568                                 skb_put(skb, len);
1569
1570                         } else {
1571                                 DP(NETIF_MSG_RX_ERR,
1572                                    "ERROR  packet dropped because "
1573                                    "of alloc failure\n");
1574                                 bp->eth_stats.rx_skb_alloc_failed++;
1575 reuse_rx:
1576                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1577                                 goto next_rx;
1578                         }
1579
1580                         skb->protocol = eth_type_trans(skb, bp->dev);
1581
1582                         skb->ip_summed = CHECKSUM_NONE;
1583                         if (bp->rx_csum) {
1584                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1585                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1586                                 else
1587                                         bp->eth_stats.hw_csum_err++;
1588                         }
1589                 }
1590
1591 #ifdef BCM_VLAN
1592                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1593                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1594                      PARSING_FLAGS_VLAN))
1595                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1596                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1597                 else
1598 #endif
1599                         netif_receive_skb(skb);
1600
1601
1602 next_rx:
1603                 rx_buf->skb = NULL;
1604
1605                 bd_cons = NEXT_RX_IDX(bd_cons);
1606                 bd_prod = NEXT_RX_IDX(bd_prod);
1607                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1608                 rx_pkt++;
1609 next_cqe:
1610                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1611                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1612
1613                 if (rx_pkt == budget)
1614                         break;
1615         } /* while */
1616
1617         fp->rx_bd_cons = bd_cons;
1618         fp->rx_bd_prod = bd_prod_fw;
1619         fp->rx_comp_cons = sw_comp_cons;
1620         fp->rx_comp_prod = sw_comp_prod;
1621
1622         /* Update producers */
1623         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1624                              fp->rx_sge_prod);
1625
1626         fp->rx_pkt += rx_pkt;
1627         fp->rx_calls++;
1628
1629         return rx_pkt;
1630 }
1631
1632 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1633 {
1634         struct bnx2x_fastpath *fp = fp_cookie;
1635         struct bnx2x *bp = fp->bp;
1636         int index = FP_IDX(fp);
1637
1638         /* Return here if interrupt is disabled */
1639         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1640                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1641                 return IRQ_HANDLED;
1642         }
1643
1644         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1645            index, FP_SB_ID(fp));
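        /* Ack the SB and disable further interrupts from it before handing
         * the work off to NAPI.
         */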
1646         bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
1647
1648 #ifdef BNX2X_STOP_ON_ERROR
1649         if (unlikely(bp->panic))
1650                 return IRQ_HANDLED;
1651 #endif
1652
1653         prefetch(fp->rx_cons_sb);
1654         prefetch(fp->tx_cons_sb);
1655         prefetch(&fp->status_blk->c_status_block.status_block_index);
1656         prefetch(&fp->status_blk->u_status_block.status_block_index);
1657
1658         napi_schedule(&bnx2x_fp(bp, index, napi));
1659
1660         return IRQ_HANDLED;
1661 }
1662
1663 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1664 {
1665         struct net_device *dev = dev_instance;
1666         struct bnx2x *bp = netdev_priv(dev);
1667         u16 status = bnx2x_ack_int(bp);
1668         u16 mask;
1669
1670         /* Return here if interrupt is shared and it's not for us */
1671         if (unlikely(status == 0)) {
1672                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1673                 return IRQ_NONE;
1674         }
1675         DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);
1676
1677         /* Return here if interrupt is disabled */
1678         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1679                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1680                 return IRQ_HANDLED;
1681         }
1682
1683 #ifdef BNX2X_STOP_ON_ERROR
1684         if (unlikely(bp->panic))
1685                 return IRQ_HANDLED;
1686 #endif
1687
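        /* A fastpath SB reports through bit (sb_id + 1) of the ACKed status */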
1688         mask = 0x2 << bp->fp[0].sb_id;
1689         if (status & mask) {
1690                 struct bnx2x_fastpath *fp = &bp->fp[0];
1691
1692                 prefetch(fp->rx_cons_sb);
1693                 prefetch(fp->tx_cons_sb);
1694                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1695                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1696
1697                 napi_schedule(&bnx2x_fp(bp, 0, napi));
1698
1699                 status &= ~mask;
1700         }
1701
1702
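        /* Bit 0 indicates a slowpath event on the default status block;
         * defer it to the slowpath task.
         */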
1703         if (unlikely(status & 0x1)) {
1704                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1705
1706                 status &= ~0x1;
1707                 if (!status)
1708                         return IRQ_HANDLED;
1709         }
1710
1711         if (status)
1712                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1713                    status);
1714
1715         return IRQ_HANDLED;
1716 }
1717
1718 /* end of fast path */
1719
1720 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1721
1722 /* Link */
1723
1724 /*
1725  * General service functions
1726  */
1727
1728 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1729 {
1730         u32 lock_status;
1731         u32 resource_bit = (1 << resource);
1732         int func = BP_FUNC(bp);
1733         u32 hw_lock_control_reg;
1734         int cnt;
1735
1736         /* Validating that the resource is within range */
1737         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1738                 DP(NETIF_MSG_HW,
1739                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1740                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1741                 return -EINVAL;
1742         }
1743
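        /* Functions 0-5 use the lock registers starting at DRIVER_CONTROL_1,
         * functions 6-7 the ones starting at DRIVER_CONTROL_7; each function's
         * register is 8 bytes further on.
         */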
1744         if (func <= 5) {
1745                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1746         } else {
1747                 hw_lock_control_reg =
1748                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1749         }
1750
1751         /* Validating that the resource is not already taken */
1752         lock_status = REG_RD(bp, hw_lock_control_reg);
1753         if (lock_status & resource_bit) {
1754                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1755                    lock_status, resource_bit);
1756                 return -EEXIST;
1757         }
1758
1759         /* Try for 5 seconds, once every 5ms */
1760         for (cnt = 0; cnt < 1000; cnt++) {
1761                 /* Try to acquire the lock */
1762                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1763                 lock_status = REG_RD(bp, hw_lock_control_reg);
1764                 if (lock_status & resource_bit)
1765                         return 0;
1766
1767                 msleep(5);
1768         }
1769         DP(NETIF_MSG_HW, "Timeout\n");
1770         return -EAGAIN;
1771 }
1772
1773 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1774 {
1775         u32 lock_status;
1776         u32 resource_bit = (1 << resource);
1777         int func = BP_FUNC(bp);
1778         u32 hw_lock_control_reg;
1779
1780         /* Validating that the resource is within range */
1781         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1782                 DP(NETIF_MSG_HW,
1783                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1784                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1785                 return -EINVAL;
1786         }
1787
1788         if (func <= 5) {
1789                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1790         } else {
1791                 hw_lock_control_reg =
1792                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1793         }
1794
1795         /* Validating that the resource is currently taken */
1796         lock_status = REG_RD(bp, hw_lock_control_reg);
1797         if (!(lock_status & resource_bit)) {
1798                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1799                    lock_status, resource_bit);
1800                 return -EFAULT;
1801         }
1802
1803         REG_WR(bp, hw_lock_control_reg, resource_bit);
1804         return 0;
1805 }
1806
1807 /* HW Lock for shared dual port PHYs */
1808 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1809 {
1810         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1811
1812         mutex_lock(&bp->port.phy_mutex);
1813
1814         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1815             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1816                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1817 }
1818
1819 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1820 {
1821         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1822
1823         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1824             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1825                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1826
1827         mutex_unlock(&bp->port.phy_mutex);
1828 }
1829
1830 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1831 {
1832         /* The GPIO should be swapped if swap register is set and active */
1833         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1834                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1835         int gpio_shift = gpio_num +
1836                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1837         u32 gpio_mask = (1 << gpio_shift);
1838         u32 gpio_reg;
1839
1840         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1841                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1842                 return -EINVAL;
1843         }
1844
1845         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1846         /* read GPIO and mask off everything except the float bits */
1847         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1848
1849         switch (mode) {
1850         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1851                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1852                    gpio_num, gpio_shift);
1853                 /* clear FLOAT and set CLR */
1854                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1855                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1856                 break;
1857
1858         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1859                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1860                    gpio_num, gpio_shift);
1861                 /* clear FLOAT and set SET */
1862                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1863                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1864                 break;
1865
1866         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1867                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1868                    gpio_num, gpio_shift);
1869                 /* set FLOAT */
1870                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1871                 break;
1872
1873         default:
1874                 break;
1875         }
1876
1877         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1878         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1879
1880         return 0;
1881 }
1882
1883 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1884 {
1885         u32 spio_mask = (1 << spio_num);
1886         u32 spio_reg;
1887
1888         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1889             (spio_num > MISC_REGISTERS_SPIO_7)) {
1890                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1891                 return -EINVAL;
1892         }
1893
1894         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1895         /* read SPIO and mask off everything except the float bits */
1896         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1897
1898         switch (mode) {
1899         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1900                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1901                 /* clear FLOAT and set CLR */
1902                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1903                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1904                 break;
1905
1906         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1907                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1908                 /* clear FLOAT and set SET */
1909                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1910                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1911                 break;
1912
1913         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1914                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1915                 /* set FLOAT */
1916                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1917                 break;
1918
1919         default:
1920                 break;
1921         }
1922
1923         REG_WR(bp, MISC_REG_SPIO, spio_reg);
1924         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1925
1926         return 0;
1927 }
1928
1929 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
1930 {
1931         switch (bp->link_vars.ieee_fc &
1932                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
1933         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1934                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1935                                           ADVERTISED_Pause);
1936                 break;
1937         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1938                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
1939                                          ADVERTISED_Pause);
1940                 break;
1941         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1942                 bp->port.advertising |= ADVERTISED_Asym_Pause;
1943                 break;
1944         default:
1945                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1946                                           ADVERTISED_Pause);
1947                 break;
1948         }
1949 }
1950
1951 static void bnx2x_link_report(struct bnx2x *bp)
1952 {
1953         if (bp->link_vars.link_up) {
1954                 if (bp->state == BNX2X_STATE_OPEN)
1955                         netif_carrier_on(bp->dev);
1956                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
1957
1958                 printk("%d Mbps ", bp->link_vars.line_speed);
1959
1960                 if (bp->link_vars.duplex == DUPLEX_FULL)
1961                         printk("full duplex");
1962                 else
1963                         printk("half duplex");
1964
1965                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
1966                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
1967                                 printk(", receive ");
1968                                 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1969                                         printk("& transmit ");
1970                         } else {
1971                                 printk(", transmit ");
1972                         }
1973                         printk("flow control ON");
1974                 }
1975                 printk("\n");
1976
1977         } else { /* link_down */
1978                 netif_carrier_off(bp->dev);
1979                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
1980         }
1981 }
1982
1983 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1984 {
1985         if (!BP_NOMCP(bp)) {
1986                 u8 rc;
1987
1988                 /* Initialize link parameters structure variables */
1989                 /* It is recommended to turn off RX FC for jumbo frames
1990                    for better performance */
1991                 if (IS_E1HMF(bp))
1992                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1993                 else if (bp->dev->mtu > 5000)
1994                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
1995                 else
1996                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1997
1998                 bnx2x_acquire_phy_lock(bp);
1999                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2000                 bnx2x_release_phy_lock(bp);
2001
2002                 bnx2x_calc_fc_adv(bp);
2003
2004                 if (bp->link_vars.link_up)
2005                         bnx2x_link_report(bp);
2006
2007
2008                 return rc;
2009         }
2010         BNX2X_ERR("Bootcode is missing - not initializing link\n");
2011         return -EINVAL;
2012 }
2013
2014 static void bnx2x_link_set(struct bnx2x *bp)
2015 {
2016         if (!BP_NOMCP(bp)) {
2017                 bnx2x_acquire_phy_lock(bp);
2018                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2019                 bnx2x_release_phy_lock(bp);
2020
2021                 bnx2x_calc_fc_adv(bp);
2022         } else
2023                 BNX2X_ERR("Bootcode is missing - not setting link\n");
2024 }
2025
2026 static void bnx2x__link_reset(struct bnx2x *bp)
2027 {
2028         if (!BP_NOMCP(bp)) {
2029                 bnx2x_acquire_phy_lock(bp);
2030                 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
2031                 bnx2x_release_phy_lock(bp);
2032         } else
2033                 BNX2X_ERR("Bootcode is missing - not resetting link\n");
2034 }
2035
2036 static u8 bnx2x_link_test(struct bnx2x *bp)
2037 {
2038         u8 rc;
2039
2040         bnx2x_acquire_phy_lock(bp);
2041         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2042         bnx2x_release_phy_lock(bp);
2043
2044         return rc;
2045 }
2046
2047 /* Calculates the sum of vn_min_rates.
2048    It's needed for further normalizing of the min_rates.
2049
2050    Returns:
2051      sum of vn_min_rates
2052        or
2053      0 - if all the min_rates are 0.
2054      In the latter case the fairness algorithm should be deactivated.
2055      If not all min_rates are zero, then those that are zero will
2056      be set to 1.
2057  */
2058 static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2059 {
2060         int i, port = BP_PORT(bp);
2061         u32 wsum = 0;
2062         int all_zero = 1;
2063
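        /* Walk all vns on this port; function numbers are (2 * vn + port) */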
2064         for (i = 0; i < E1HVN_MAX; i++) {
2065                 u32 vn_cfg =
2066                         SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2067                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2068                                      FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2069                 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2070                         /* If min rate is zero - set it to 1 */
2071                         if (!vn_min_rate)
2072                                 vn_min_rate = DEF_MIN_RATE;
2073                         else
2074                                 all_zero = 0;
2075
2076                         wsum += vn_min_rate;
2077                 }
2078         }
2079
2080         /* disable fairness only if all the min rates are zero */
2081         if (all_zero)
2082                 return 0;
2083
2084         return wsum;
2085 }
2086
2087 static void bnx2x_init_port_minmax(struct bnx2x *bp,
2088                                    int en_fness,
2089                                    u16 port_rate,
2090                                    struct cmng_struct_per_port *m_cmng_port)
2091 {
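        /* port_rate (line_speed) is in Mbps, i.e. bits per usec, so
         * port_rate / 8 gives bytes per usec */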
2092         u32 r_param = port_rate / 8;
2093         int port = BP_PORT(bp);
2094         int i;
2095
2096         memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2097
2098         /* Enable minmax only if we are in e1hmf mode */
2099         if (IS_E1HMF(bp)) {
2100                 u32 fair_periodic_timeout_usec;
2101                 u32 t_fair;
2102
2103                 /* Enable rate shaping and fairness */
2104                 m_cmng_port->flags.cmng_vn_enable = 1;
2105                 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2106                 m_cmng_port->flags.rate_shaping_enable = 1;
2107
2108                 if (!en_fness)
2109                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2110                            "  fairness will be disabled\n");
2111
2112                 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2113                 m_cmng_port->rs_vars.rs_periodic_timeout =
2114                                                 RS_PERIODIC_TIMEOUT_USEC / 4;
2115
2116                 /* this is the threshold below which no timer arming will occur;
2117                    the 1.25 coefficient makes the threshold a little bigger
2118                    than the real time, to compensate for timer inaccuracy */
2119                 m_cmng_port->rs_vars.rs_threshold =
2120                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2121
2122                 /* resolution of fairness timer */
2123                 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2124                 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2125                 t_fair = T_FAIR_COEF / port_rate;
2126
2127                 /* this is the threshold below which we won't arm
2128                    the timer anymore */
2129                 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2130
2131                 /* we multiply by 1e3/8 to get bytes/msec.
2132                    We don't want the credits to exceed
2133                    T_FAIR*FAIR_MEM (the algorithm resolution) */
2134                 m_cmng_port->fair_vars.upper_bound =
2135                                                 r_param * t_fair * FAIR_MEM;
2136                 /* since each tick is 4 usec */
2137                 m_cmng_port->fair_vars.fairness_timeout =
2138                                                 fair_periodic_timeout_usec / 4;
2139
2140         } else {
2141                 /* Disable rate shaping and fairness */
2142                 m_cmng_port->flags.cmng_vn_enable = 0;
2143                 m_cmng_port->flags.fairness_enable = 0;
2144                 m_cmng_port->flags.rate_shaping_enable = 0;
2145
2146                 DP(NETIF_MSG_IFUP,
2147                    "Single function mode  minmax will be disabled\n");
2148         }
2149
2150         /* Store it to internal memory */
2151         for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2152                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2153                        XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2154                        ((u32 *)(m_cmng_port))[i]);
2155 }
2156
2157 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2158                                    u32 wsum, u16 port_rate,
2159                                  struct cmng_struct_per_port *m_cmng_port)
2160 {
2161         struct rate_shaping_vars_per_vn m_rs_vn;
2162         struct fairness_vars_per_vn m_fair_vn;
2163         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2164         u16 vn_min_rate, vn_max_rate;
2165         int i;
2166
2167         /* If function is hidden - set min and max to zeroes */
2168         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2169                 vn_min_rate = 0;
2170                 vn_max_rate = 0;
2171
2172         } else {
2173                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2174                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2175                 /* If FAIRNESS is enabled (not all min rates are zeroes) and
2176                    if current min rate is zero - set it to 1.
2177                    This is a requirement of the algorithm. */
2178                 if ((vn_min_rate == 0) && wsum)
2179                         vn_min_rate = DEF_MIN_RATE;
2180                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2181                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2182         }
2183
2184         DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d  vn_max_rate=%d  "
2185            "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2186
2187         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2188         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2189
2190         /* global vn counter - maximal Mbps for this vn */
2191         m_rs_vn.vn_counter.rate = vn_max_rate;
2192
2193         /* quota - number of bytes transmitted in this period */
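        /* vn_max_rate is in Mbps (bits per usec), so rate * period_usec / 8
         * yields bytes per rate-shaping period.
         */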
2194         m_rs_vn.vn_counter.quota =
2195                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2196
2197 #ifdef BNX2X_PER_PROT_QOS
2198         /* per protocol counter */
2199         for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2200                 /* maximal Mbps for this protocol */
2201                 m_rs_vn.protocol_counters[protocol].rate =
2202                                                 protocol_max_rate[protocol];
2203                 /* the quota in each timer period -
2204                    number of bytes transmitted in this period */
2205                 m_rs_vn.protocol_counters[protocol].quota =
2206                         (u32)(rs_periodic_timeout_usec *
2207                           ((double)m_rs_vn.
2208                                    protocol_counters[protocol].rate/8));
2209         }
2210 #endif
2211
2212         if (wsum) {
2213                 /* credit for each period of the fairness algorithm:
2214                    number of bytes in T_FAIR (the vns share the port rate).
2215                    wsum should not be larger than 10000, thus
2216                    T_FAIR_COEF / (8 * wsum) will always be greater than zero */
2217                 m_fair_vn.vn_credit_delta =
2218                         max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2219                             (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2220                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2221                    m_fair_vn.vn_credit_delta);
2222         }
2223
2224 #ifdef BNX2X_PER_PROT_QOS
2225         do {
2226                 u32 protocolWeightSum = 0;
2227
2228                 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2229                         protocolWeightSum +=
2230                                         drvInit.protocol_min_rate[protocol];
2231                 /* per protocol counter -
2232                    NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2233                 if (protocolWeightSum > 0) {
2234                         for (protocol = 0;
2235                              protocol < NUM_OF_PROTOCOLS; protocol++)
2236                                 /* credit for each period of the
2237                                    fairness algorithm - number of bytes in
2238                                    T_FAIR (the protocols share the vn rate) */
2239                                 m_fair_vn.protocol_credit_delta[protocol] =
2240                                         (u32)((vn_min_rate / 8) * t_fair *
2241                                         protocol_min_rate / protocolWeightSum);
2242                 }
2243         } while (0);
2244 #endif
2245
2246         /* Store it to internal memory */
2247         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2248                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2249                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2250                        ((u32 *)(&m_rs_vn))[i]);
2251
2252         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2253                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2254                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2255                        ((u32 *)(&m_fair_vn))[i]);
2256 }
2257
2258 /* This function is called upon link interrupt */
2259 static void bnx2x_link_attn(struct bnx2x *bp)
2260 {
2261         int vn;
2262
2263         /* Make sure that we are synced with the current statistics */
2264         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2265
2266         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2267
2268         if (bp->link_vars.link_up) {
2269
2270                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2271                         struct host_port_stats *pstats;
2272
2273                         pstats = bnx2x_sp(bp, port_stats);
2274                         /* reset old bmac stats */
2275                         memset(&(pstats->mac_stx[0]), 0,
2276                                sizeof(struct mac_stx));
2277                 }
2278                 if ((bp->state == BNX2X_STATE_OPEN) ||
2279                     (bp->state == BNX2X_STATE_DISABLED))
2280                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2281         }
2282
2283         /* indicate link status */
2284         bnx2x_link_report(bp);
2285
2286         if (IS_E1HMF(bp)) {
2287                 int func;
2288
2289                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2290                         if (vn == BP_E1HVN(bp))
2291                                 continue;
2292
2293                         func = ((vn << 1) | BP_PORT(bp));
2294
2295                         /* Set the attention towards other drivers
2296                            on the same port */
2297                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2298                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2299                 }
2300         }
2301
2302         if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2303                 struct cmng_struct_per_port m_cmng_port;
2304                 u32 wsum;
2305                 int port = BP_PORT(bp);
2306
2307                 /* Init RATE SHAPING and FAIRNESS contexts */
2308                 wsum = bnx2x_calc_vn_wsum(bp);
2309                 bnx2x_init_port_minmax(bp, (int)wsum,
2310                                         bp->link_vars.line_speed,
2311                                         &m_cmng_port);
2312                 if (IS_E1HMF(bp))
2313                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2314                                 bnx2x_init_vn_minmax(bp, 2*vn + port,
2315                                         wsum, bp->link_vars.line_speed,
2316                                                      &m_cmng_port);
2317         }
2318 }
2319
2320 static void bnx2x__link_status_update(struct bnx2x *bp)
2321 {
2322         if (bp->state != BNX2X_STATE_OPEN)
2323                 return;
2324
2325         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2326
2327         if (bp->link_vars.link_up)
2328                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2329         else
2330                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2331
2332         /* indicate link status */
2333         bnx2x_link_report(bp);
2334 }
2335
2336 static void bnx2x_pmf_update(struct bnx2x *bp)
2337 {
2338         int port = BP_PORT(bp);
2339         u32 val;
2340
2341         bp->port.pmf = 1;
2342         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2343
2344         /* enable nig attention */
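        /* 0xff0f leaves bits 4-7 clear; only the bit for this function's vn
         * (bit 4 + vn) is then set on top of it.
         */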
2345         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2346         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2347         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2348
2349         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2350 }
2351
2352 /* end of Link */
2353
2354 /* slow path */
2355
2356 /*
2357  * General service functions
2358  */
2359
2360 /* the slow path queue is odd since completions arrive on the fastpath ring */
2361 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2362                          u32 data_hi, u32 data_lo, int common)
2363 {
2364         int func = BP_FUNC(bp);
2365
2366         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2367            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2368            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2369            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2370            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2371
2372 #ifdef BNX2X_STOP_ON_ERROR
2373         if (unlikely(bp->panic))
2374                 return -EIO;
2375 #endif
2376
2377         spin_lock_bh(&bp->spq_lock);
2378
2379         if (!bp->spq_left) {
2380                 BNX2X_ERR("BUG! SPQ ring full!\n");
2381                 spin_unlock_bh(&bp->spq_lock);
2382                 bnx2x_panic();
2383                 return -EBUSY;
2384         }
2385
2386         /* CID needs the port number to be encoded in it */
2387         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2388                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2389                                      HW_CID(bp, cid)));
2390         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2391         if (common)
2392                 bp->spq_prod_bd->hdr.type |=
2393                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2394
2395         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2396         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2397
2398         bp->spq_left--;
2399
2400         if (bp->spq_prod_bd == bp->spq_last_bd) {
2401                 bp->spq_prod_bd = bp->spq;
2402                 bp->spq_prod_idx = 0;
2403                 DP(NETIF_MSG_TIMER, "end of spq\n");
2404
2405         } else {
2406                 bp->spq_prod_bd++;
2407                 bp->spq_prod_idx++;
2408         }
2409
2410         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2411                bp->spq_prod_idx);
2412
2413         spin_unlock_bh(&bp->spq_lock);
2414         return 0;
2415 }
2416
2417 /* acquire split MCP access lock register */
2418 static int bnx2x_acquire_alr(struct bnx2x *bp)
2419 {
2420         u32 i, j, val;
2421         int rc = 0;
2422
2423         might_sleep();
2424         i = 100;
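        /* Poll for up to ~5 seconds (1000 iterations x 5ms): request the lock
         * by setting bit 31, then read it back to see whether it was granted.
         */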
2425         for (j = 0; j < i*10; j++) {
2426                 val = (1UL << 31);
2427                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2428                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2429                 if (val & (1L << 31))
2430                         break;
2431
2432                 msleep(5);
2433         }
2434         if (!(val & (1L << 31))) {
2435                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2436                 rc = -EBUSY;
2437         }
2438
2439         return rc;
2440 }
2441
2442 /* release split MCP access lock register */
2443 static void bnx2x_release_alr(struct bnx2x *bp)
2444 {
2445         u32 val = 0;
2446
2447         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2448 }
2449
2450 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2451 {
2452         struct host_def_status_block *def_sb = bp->def_status_blk;
2453         u16 rc = 0;
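        /* rc is a bitmask of which default SB indices have changed:
         * 1 - attention bits, 2 - CSTORM, 4 - USTORM, 8 - XSTORM, 16 - TSTORM
         */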
2454
2455         barrier(); /* status block is written to by the chip */
2456         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2457                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2458                 rc |= 1;
2459         }
2460         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2461                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2462                 rc |= 2;
2463         }
2464         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2465                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2466                 rc |= 4;
2467         }
2468         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2469                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2470                 rc |= 8;
2471         }
2472         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2473                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2474                 rc |= 16;
2475         }
2476         return rc;
2477 }
2478
2479 /*
2480  * slow path service functions
2481  */
2482
2483 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2484 {
2485         int port = BP_PORT(bp);
2486         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2487                        COMMAND_REG_ATTN_BITS_SET);
2488         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2489                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2490         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2491                                        NIG_REG_MASK_INTERRUPT_PORT0;
2492         u32 aeu_mask;
2493
2494         if (bp->attn_state & asserted)
2495                 BNX2X_ERR("IGU ERROR\n");
2496
2497         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2498         aeu_mask = REG_RD(bp, aeu_addr);
2499
2500         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2501            aeu_mask, asserted);
2502         aeu_mask &= ~(asserted & 0xff);
2503         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2504
2505         REG_WR(bp, aeu_addr, aeu_mask);
2506         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2507
2508         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2509         bp->attn_state |= asserted;
2510         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2511
2512         if (asserted & ATTN_HARD_WIRED_MASK) {
2513                 if (asserted & ATTN_NIG_FOR_FUNC) {
2514
2515                         bnx2x_acquire_phy_lock(bp);
2516
2517                         /* save nig interrupt mask */
2518                         bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2519                         REG_WR(bp, nig_int_mask_addr, 0);
2520
2521                         bnx2x_link_attn(bp);
2522
2523                         /* handle unicore attn? */
2524                 }
2525                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2526                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2527
2528                 if (asserted & GPIO_2_FUNC)
2529                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2530
2531                 if (asserted & GPIO_3_FUNC)
2532                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2533
2534                 if (asserted & GPIO_4_FUNC)
2535                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2536
2537                 if (port == 0) {
2538                         if (asserted & ATTN_GENERAL_ATTN_1) {
2539                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2540                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2541                         }
2542                         if (asserted & ATTN_GENERAL_ATTN_2) {
2543                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2544                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2545                         }
2546                         if (asserted & ATTN_GENERAL_ATTN_3) {
2547                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2548                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2549                         }
2550                 } else {
2551                         if (asserted & ATTN_GENERAL_ATTN_4) {
2552                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2553                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2554                         }
2555                         if (asserted & ATTN_GENERAL_ATTN_5) {
2556                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2557                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2558                         }
2559                         if (asserted & ATTN_GENERAL_ATTN_6) {
2560                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2561                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2562                         }
2563                 }
2564
2565         } /* if hardwired */
2566
2567         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2568            asserted, hc_addr);
2569         REG_WR(bp, hc_addr, asserted);
2570
2571         /* now set back the mask */
2572         if (asserted & ATTN_NIG_FOR_FUNC) {
2573                 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2574                 bnx2x_release_phy_lock(bp);
2575         }
2576 }
2577
2578 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2579 {
2580         int port = BP_PORT(bp);
2581         int reg_offset;
2582         u32 val;
2583
2584         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2585                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2586
2587         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2588
2589                 val = REG_RD(bp, reg_offset);
2590                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2591                 REG_WR(bp, reg_offset, val);
2592
2593                 BNX2X_ERR("SPIO5 hw attention\n");
2594
2595                 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2596                 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
2597                 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2598                         /* Fan failure attention */
2599
2600                         /* The PHY reset is controlled by GPIO 1 */
2601                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2602                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2603                         /* Low power mode is controlled by GPIO 2 */
2604                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2605                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2606                         /* mark the failure */
2607                         bp->link_params.ext_phy_config &=
2608                                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2609                         bp->link_params.ext_phy_config |=
2610                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2611                         SHMEM_WR(bp,
2612                                  dev_info.port_hw_config[port].
2613                                                         external_phy_config,
2614                                  bp->link_params.ext_phy_config);
2615                         /* log the failure */
2616                         printk(KERN_ERR PFX "Fan Failure on Network"
2617                                " Controller %s has caused the driver to"
2618                                " shutdown the card to prevent permanent"
2619                                " damage.  Please contact Dell Support for"
2620                                " assistance\n", bp->dev->name);
2621                         break;
2622
2623                 default:
2624                         break;
2625                 }
2626         }
2627
2628         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2629
2630                 val = REG_RD(bp, reg_offset);
2631                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2632                 REG_WR(bp, reg_offset, val);
2633
2634                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2635                           (attn & HW_INTERRUT_ASSERT_SET_0));
2636                 bnx2x_panic();
2637         }
2638 }
2639
2640 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2641 {
2642         u32 val;
2643
2644         if (attn & BNX2X_DOORQ_ASSERT) {
2645
2646                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2647                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2648                 /* DORQ discard attention */
2649                 if (val & 0x2)
2650                         BNX2X_ERR("FATAL error from DORQ\n");
2651         }
2652
2653         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2654
2655                 int port = BP_PORT(bp);
2656                 int reg_offset;
2657
2658                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2659                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2660
2661                 val = REG_RD(bp, reg_offset);
2662                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2663                 REG_WR(bp, reg_offset, val);
2664
2665                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2666                           (attn & HW_INTERRUT_ASSERT_SET_1));
2667                 bnx2x_panic();
2668         }
2669 }
2670
2671 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2672 {
2673         u32 val;
2674
2675         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2676
2677                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2678                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2679                 /* CFC error attention */
2680                 if (val & 0x2)
2681                         BNX2X_ERR("FATAL error from CFC\n");
2682         }
2683
2684         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2685
2686                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2687                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2688                 /* RQ_USDMDP_FIFO_OVERFLOW */
2689                 if (val & 0x18000)
2690                         BNX2X_ERR("FATAL error from PXP\n");
2691         }
2692
2693         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2694
2695                 int port = BP_PORT(bp);
2696                 int reg_offset;
2697
2698                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2699                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2700
2701                 val = REG_RD(bp, reg_offset);
2702                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2703                 REG_WR(bp, reg_offset, val);
2704
2705                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2706                           (attn & HW_INTERRUT_ASSERT_SET_2));
2707                 bnx2x_panic();
2708         }
2709 }
2710
2711 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2712 {
2713         u32 val;
2714
2715         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2716
2717                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2718                         int func = BP_FUNC(bp);
2719
2720                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2721                         bnx2x__link_status_update(bp);
2722                         if (SHMEM_RD(bp, func_mb[func].drv_status) &
2723                                                         DRV_STATUS_PMF)
2724                                 bnx2x_pmf_update(bp);
2725
2726                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2727
2728                         BNX2X_ERR("MC assert!\n");
2729                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2730                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2731                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2732                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2733                         bnx2x_panic();
2734
2735                 } else if (attn & BNX2X_MCP_ASSERT) {
2736
2737                         BNX2X_ERR("MCP assert!\n");
2738                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2739                         bnx2x_fw_dump(bp);
2740
2741                 } else
2742                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2743         }
2744
2745         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2746                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2747                 if (attn & BNX2X_GRC_TIMEOUT) {
2748                         val = CHIP_IS_E1H(bp) ?
2749                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2750                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2751                 }
2752                 if (attn & BNX2X_GRC_RSV) {
2753                         val = CHIP_IS_E1H(bp) ?
2754                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2755                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2756                 }
2757                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2758         }
2759 }
2760
2761 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2762 {
2763         struct attn_route attn;
2764         struct attn_route group_mask;
2765         int port = BP_PORT(bp);
2766         int index;
2767         u32 reg_addr;
2768         u32 val;
2769         u32 aeu_mask;
2770
2771         /* need to take HW lock because MCP or other port might also
2772            try to handle this event */
2773         bnx2x_acquire_alr(bp);
2774
2775         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2776         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2777         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2778         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2779         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2780            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2781
2782         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2783                 if (deasserted & (1 << index)) {
2784                         group_mask = bp->attn_group[index];
2785
2786                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2787                            index, group_mask.sig[0], group_mask.sig[1],
2788                            group_mask.sig[2], group_mask.sig[3]);
2789
2790                         bnx2x_attn_int_deasserted3(bp,
2791                                         attn.sig[3] & group_mask.sig[3]);
2792                         bnx2x_attn_int_deasserted1(bp,
2793                                         attn.sig[1] & group_mask.sig[1]);
2794                         bnx2x_attn_int_deasserted2(bp,
2795                                         attn.sig[2] & group_mask.sig[2]);
2796                         bnx2x_attn_int_deasserted0(bp,
2797                                         attn.sig[0] & group_mask.sig[0]);
2798
2799                         if ((attn.sig[0] & group_mask.sig[0] &
2800                                                 HW_PRTY_ASSERT_SET_0) ||
2801                             (attn.sig[1] & group_mask.sig[1] &
2802                                                 HW_PRTY_ASSERT_SET_1) ||
2803                             (attn.sig[2] & group_mask.sig[2] &
2804                                                 HW_PRTY_ASSERT_SET_2))
2805                                 BNX2X_ERR("FATAL HW block parity attention\n");
2806                 }
2807         }
2808
2809         bnx2x_release_alr(bp);
2810
2811         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2812
2813         val = ~deasserted;
2814         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2815            val, reg_addr);
2816         REG_WR(bp, reg_addr, val);
2817
2818         if (~bp->attn_state & deasserted)
2819                 BNX2X_ERR("IGU ERROR\n");
2820
2821         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2822                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2823
2824         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2825         aeu_mask = REG_RD(bp, reg_addr);
2826
2827         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2828            aeu_mask, deasserted);
2829         aeu_mask |= (deasserted & 0xff);
2830         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2831
2832         REG_WR(bp, reg_addr, aeu_mask);
2833         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2834
2835         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2836         bp->attn_state &= ~deasserted;
2837         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2838 }
2839
2840 static void bnx2x_attn_int(struct bnx2x *bp)
2841 {
2842         /* read local copy of bits */
2843         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2844                                                                 attn_bits);
2845         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2846                                                                 attn_bits_ack);
2847         u32 attn_state = bp->attn_state;
2848
2849         /* look for changed bits */
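        /* a bit is newly asserted when it is set in attn_bits but not yet
         * acknowledged and not tracked in attn_state; it is deasserted when
         * it is clear in attn_bits while still acknowledged and still tracked
         */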
2850         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2851         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
2852
2853         DP(NETIF_MSG_HW,
2854            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2855            attn_bits, attn_ack, asserted, deasserted);
2856
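        /* a settled bit (equal in attn_bits and attn_ack) that still differs
         * from the driver's attn_state means the bookkeeping is out of sync
         * with the hardware
         */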
2857         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2858                 BNX2X_ERR("BAD attention state\n");
2859
2860         /* handle bits that were raised */
2861         if (asserted)
2862                 bnx2x_attn_int_asserted(bp, asserted);
2863
2864         if (deasserted)
2865                 bnx2x_attn_int_deasserted(bp, deasserted);
2866 }
2867
2868 static void bnx2x_sp_task(struct work_struct *work)
2869 {
2870         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2871         u16 status;
2872
2874         /* Return here if interrupt is disabled */
2875         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2876                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2877                 return;
2878         }
2879
2880         status = bnx2x_update_dsb_idx(bp);
2881 /*      if (status == 0)                                     */
2882 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2883
2884         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2885
2886         /* HW attentions */
2887         if (status & 0x1)
2888                 bnx2x_attn_int(bp);
2889
2890         /* CStorm events: query_stats, port delete ramrod */
2891         if (status & 0x2)
2892                 bp->stats_pending = 0;
2893
2894         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2895                      IGU_INT_NOP, 1);
2896         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2897                      IGU_INT_NOP, 1);
2898         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2899                      IGU_INT_NOP, 1);
2900         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2901                      IGU_INT_NOP, 1);
2902         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2903                      IGU_INT_ENABLE, 1);
2904
2905 }
2906
2907 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2908 {
2909         struct net_device *dev = dev_instance;
2910         struct bnx2x *bp = netdev_priv(dev);
2911
2912         /* Return here if interrupt is disabled */
2913         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2914                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2915                 return IRQ_HANDLED;
2916         }
2917
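        /* ack the default status block with further interrupts disabled;
         * the final ack at the end of bnx2x_sp_task() re-enables them
         */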
2918         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2919
2920 #ifdef BNX2X_STOP_ON_ERROR
2921         if (unlikely(bp->panic))
2922                 return IRQ_HANDLED;
2923 #endif
2924
2925         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2926
2927         return IRQ_HANDLED;
2928 }
2929
2930 /* end of slow path */
2931
2932 /* Statistics */
2933
2934 /****************************************************************************
2935 * Macros
2936 ****************************************************************************/
2937
2938 /* sum[hi:lo] += add[hi:lo] */
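/* the unsigned wrap-around test (s_lo < a_lo after the addition) detects a
 * carry out of the low 32 bits and propagates it into the high word
 */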
2939 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2940         do { \
2941                 s_lo += a_lo; \
2942                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2943         } while (0)
2944
2945 /* difference = minuend - subtrahend */
2946 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2947         do { \
2948                 if (m_lo < s_lo) { \
2949                         /* underflow */ \
2950                         d_hi = m_hi - s_hi; \
2951                         if (d_hi > 0) { \
2952                                 /* we can 'loan' 1 */ \
2953                                 d_hi--; \
2954                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2955                         } else { \
2956                                 /* m_hi <= s_hi */ \
2957                                 d_hi = 0; \
2958                                 d_lo = 0; \
2959                         } \
2960                 } else { \
2961                         /* m_lo >= s_lo */ \
2962                         if (m_hi < s_hi) { \
2963                                 d_hi = 0; \
2964                                 d_lo = 0; \
2965                         } else { \
2966                                 /* m_hi >= s_hi */ \
2967                                 d_hi = m_hi - s_hi; \
2968                                 d_lo = m_lo - s_lo; \
2969                         } \
2970                 } \
2971         } while (0)
2972
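/* mac_stx[0] holds the previous raw MAC counter snapshot; the delta since
 * that snapshot is accumulated into the running 64-bit total in mac_stx[1]
 */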
2973 #define UPDATE_STAT64(s, t) \
2974         do { \
2975                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2976                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2977                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2978                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2979                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2980                        pstats->mac_stx[1].t##_lo, diff.lo); \
2981         } while (0)
2982
2983 #define UPDATE_STAT64_NIG(s, t) \
2984         do { \
2985                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2986                         diff.lo, new->s##_lo, old->s##_lo); \
2987                 ADD_64(estats->t##_hi, diff.hi, \
2988                        estats->t##_lo, diff.lo); \
2989         } while (0)
2990
2991 /* sum[hi:lo] += add */
2992 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2993         do { \
2994                 s_lo += a; \
2995                 s_hi += (s_lo < a) ? 1 : 0; \
2996         } while (0)
2997
2998 #define UPDATE_EXTEND_STAT(s) \
2999         do { \
3000                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3001                               pstats->mac_stx[1].s##_lo, \
3002                               new->s); \
3003         } while (0)
3004
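/* the per-client storm counters are only 32 bits wide; extend them to
 * 64 bits by accumulating the delta from the previously saved value
 */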
3005 #define UPDATE_EXTEND_TSTAT(s, t) \
3006         do { \
3007                 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
3008                 old_tclient->s = le32_to_cpu(tclient->s); \
3009                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
3010         } while (0)
3011
3012 #define UPDATE_EXTEND_XSTAT(s, t) \
3013         do { \
3014                 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
3015                 old_xclient->s = le32_to_cpu(xclient->s); \
3016                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
3017         } while (0)
3018
3019 /*
3020  * General service functions
3021  */
3022
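/* fold a {hi, lo} 64-bit statistics pair into a long: the full value on
 * 64-bit platforms, only the low 32 bits where long is 32 bits wide
 */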
3023 static inline long bnx2x_hilo(u32 *hiref)
3024 {
3025         u32 lo = *(hiref + 1);
3026 #if (BITS_PER_LONG == 64)
3027         u32 hi = *hiref;
3028
3029         return HILO_U64(hi, lo);
3030 #else
3031         return lo;
3032 #endif
3033 }
3034
3035 /*
3036  * Init service functions
3037  */
3038
3039 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3040 {
3041         if (!bp->stats_pending) {
3042                 struct eth_query_ramrod_data ramrod_data = {0};
3043                 int rc;
3044
3045                 ramrod_data.drv_counter = bp->stats_counter++;
3046                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3047                 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3048
3049                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3050                                    ((u32 *)&ramrod_data)[1],
3051                                    ((u32 *)&ramrod_data)[0], 0);
3052                 if (rc == 0) {
3053                         /* stats ramrod has its own slot on the spq */
3054                         bp->spq_left++;
3055                         bp->stats_pending = 1;
3056                 }
3057         }
3058 }
3059
3060 static void bnx2x_stats_init(struct bnx2x *bp)
3061 {
3062         int port = BP_PORT(bp);
3063
3064         bp->executer_idx = 0;
3065         bp->stats_counter = 0;
3066
3067         /* port stats */
3068         if (!BP_NOMCP(bp))
3069                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3070         else
3071                 bp->port.port_stx = 0;
3072         DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3073
3074         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3075         bp->port.old_nig_stats.brb_discard =
3076                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3077         bp->port.old_nig_stats.brb_truncate =
3078                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3079         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3080                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3081         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3082                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3083
3084         /* function stats */
3085         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3086         memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3087         memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3088         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3089
3090         bp->stats_state = STATS_STATE_DISABLED;
3091         if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3092                 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3093 }
3094
3095 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3096 {
3097         struct dmae_command *dmae = &bp->stats_dmae;
3098         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3099
3100         *stats_comp = DMAE_COMP_VAL;
3101
3102         /* loader */
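        /* when sub-commands were queued, post a single "loader" command that
         * copies the first queued command from the slowpath buffer into the
         * on-chip DMAE command memory (slot loader_idx + 1) and, on
         * completion, writes that slot's GO register to start it
         */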
3103         if (bp->executer_idx) {
3104                 int loader_idx = PMF_DMAE_C(bp);
3105
3106                 memset(dmae, 0, sizeof(struct dmae_command));
3107
3108                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3109                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3110                                 DMAE_CMD_DST_RESET |
3111 #ifdef __BIG_ENDIAN
3112                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3113 #else
3114                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3115 #endif
3116                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3117                                                DMAE_CMD_PORT_0) |
3118                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3119                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3120                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3121                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3122                                      sizeof(struct dmae_command) *
3123                                      (loader_idx + 1)) >> 2;
3124                 dmae->dst_addr_hi = 0;
3125                 dmae->len = sizeof(struct dmae_command) >> 2;
3126                 if (CHIP_IS_E1(bp))
3127                         dmae->len--;
3128                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3129                 dmae->comp_addr_hi = 0;
3130                 dmae->comp_val = 1;
3131
3132                 *stats_comp = 0;
3133                 bnx2x_post_dmae(bp, dmae, loader_idx);
3134
3135         } else if (bp->func_stx) {
3136                 *stats_comp = 0;
3137                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3138         }
3139 }
3140
3141 static int bnx2x_stats_comp(struct bnx2x *bp)
3142 {
3143         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3144         int cnt = 10;
3145
3146         might_sleep();
3147         while (*stats_comp != DMAE_COMP_VAL) {
3148                 if (!cnt) {
3149                         BNX2X_ERR("timed out waiting for stats to finish\n");
3150                         break;
3151                 }
3152                 cnt--;
3153                 msleep(1);
3154         }
3155         return 1;
3156 }
3157
3158 /*
3159  * Statistics service functions
3160  */
3161
3162 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3163 {
3164         struct dmae_command *dmae;
3165         u32 opcode;
3166         int loader_idx = PMF_DMAE_C(bp);
3167         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3168
3169         /* sanity */
3170         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3171                 BNX2X_ERR("BUG!\n");
3172                 return;
3173         }
3174
3175         bp->executer_idx = 0;
3176
3177         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3178                   DMAE_CMD_C_ENABLE |
3179                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3180 #ifdef __BIG_ENDIAN
3181                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3182 #else
3183                   DMAE_CMD_ENDIANITY_DW_SWAP |
3184 #endif
3185                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3186                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3187
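        /* the port stats block is read back in two DMAE transfers since a
         * single read is limited to DMAE_LEN32_RD_MAX dwords
         */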
3188         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3189         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3190         dmae->src_addr_lo = bp->port.port_stx >> 2;
3191         dmae->src_addr_hi = 0;
3192         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3193         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3194         dmae->len = DMAE_LEN32_RD_MAX;
3195         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3196         dmae->comp_addr_hi = 0;
3197         dmae->comp_val = 1;
3198
3199         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3200         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3201         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3202         dmae->src_addr_hi = 0;
3203         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3204                                    DMAE_LEN32_RD_MAX * 4);
3205         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3206                                    DMAE_LEN32_RD_MAX * 4);
3207         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3208         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3209         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3210         dmae->comp_val = DMAE_COMP_VAL;
3211
3212         *stats_comp = 0;
3213         bnx2x_hw_stats_post(bp);
3214         bnx2x_stats_comp(bp);
3215 }
3216
3217 static void bnx2x_port_stats_init(struct bnx2x *bp)
3218 {
3219         struct dmae_command *dmae;
3220         int port = BP_PORT(bp);
3221         int vn = BP_E1HVN(bp);
3222         u32 opcode;
3223         int loader_idx = PMF_DMAE_C(bp);
3224         u32 mac_addr;
3225         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3226
3227         /* sanity */
3228         if (!bp->link_vars.link_up || !bp->port.pmf) {
3229                 BNX2X_ERR("BUG!\n");
3230                 return;
3231         }
3232
3233         bp->executer_idx = 0;
3234
3235         /* MCP */
3236         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3237                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3238                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3239 #ifdef __BIG_ENDIAN
3240                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3241 #else
3242                   DMAE_CMD_ENDIANITY_DW_SWAP |
3243 #endif
3244                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3245                   (vn << DMAE_CMD_E1HVN_SHIFT));
3246
3247         if (bp->port.port_stx) {
3248
3249                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3250                 dmae->opcode = opcode;
3251                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3252                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3253                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3254                 dmae->dst_addr_hi = 0;
3255                 dmae->len = sizeof(struct host_port_stats) >> 2;
3256                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3257                 dmae->comp_addr_hi = 0;
3258                 dmae->comp_val = 1;
3259         }
3260
3261         if (bp->func_stx) {
3262
3263                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3264                 dmae->opcode = opcode;
3265                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3266                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3267                 dmae->dst_addr_lo = bp->func_stx >> 2;
3268                 dmae->dst_addr_hi = 0;
3269                 dmae->len = sizeof(struct host_func_stats) >> 2;
3270                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3271                 dmae->comp_addr_hi = 0;
3272                 dmae->comp_val = 1;
3273         }
3274
3275         /* MAC */
3276         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3277                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3278                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3279 #ifdef __BIG_ENDIAN
3280                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3281 #else
3282                   DMAE_CMD_ENDIANITY_DW_SWAP |
3283 #endif
3284                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3285                   (vn << DMAE_CMD_E1HVN_SHIFT));
3286
3287         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3288
3289                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3290                                    NIG_REG_INGRESS_BMAC0_MEM);
3291
3292                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3293                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3294                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3295                 dmae->opcode = opcode;
3296                 dmae->src_addr_lo = (mac_addr +
3297                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3298                 dmae->src_addr_hi = 0;
3299                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3300                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3301                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3302                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3303                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3304                 dmae->comp_addr_hi = 0;
3305                 dmae->comp_val = 1;
3306
3307                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3308                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3309                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3310                 dmae->opcode = opcode;
3311                 dmae->src_addr_lo = (mac_addr +
3312                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3313                 dmae->src_addr_hi = 0;
3314                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3315                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3316                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3317                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3318                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3319                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3320                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3321                 dmae->comp_addr_hi = 0;
3322                 dmae->comp_val = 1;
3323
3324         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3325
3326                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3327
3328                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3329                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3330                 dmae->opcode = opcode;
3331                 dmae->src_addr_lo = (mac_addr +
3332                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3333                 dmae->src_addr_hi = 0;
3334                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3335                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3336                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3337                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3338                 dmae->comp_addr_hi = 0;
3339                 dmae->comp_val = 1;
3340
3341                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3342                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3343                 dmae->opcode = opcode;
3344                 dmae->src_addr_lo = (mac_addr +
3345                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3346                 dmae->src_addr_hi = 0;
3347                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3348                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3349                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3350                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3351                 dmae->len = 1;
3352                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3353                 dmae->comp_addr_hi = 0;
3354                 dmae->comp_val = 1;
3355
3356                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3357                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3358                 dmae->opcode = opcode;
3359                 dmae->src_addr_lo = (mac_addr +
3360                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3361                 dmae->src_addr_hi = 0;
3362                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3363                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3364                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3365                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3366                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3367                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3368                 dmae->comp_addr_hi = 0;
3369                 dmae->comp_val = 1;
3370         }
3371
3372         /* NIG */
3373         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3374         dmae->opcode = opcode;
3375         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3376                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3377         dmae->src_addr_hi = 0;
3378         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3379         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3380         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3381         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3382         dmae->comp_addr_hi = 0;
3383         dmae->comp_val = 1;
3384
3385         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3386         dmae->opcode = opcode;
3387         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3388                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3389         dmae->src_addr_hi = 0;
3390         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3391                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3392         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3393                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3394         dmae->len = (2*sizeof(u32)) >> 2;
3395         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3396         dmae->comp_addr_hi = 0;
3397         dmae->comp_val = 1;
3398
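        /* the last command in the chain completes to the stats_comp word in
         * host memory, which bnx2x_stats_comp() polls to detect that the
         * whole sequence has finished
         */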
3399         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3400         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3401                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3402                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3403 #ifdef __BIG_ENDIAN
3404                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3405 #else
3406                         DMAE_CMD_ENDIANITY_DW_SWAP |
3407 #endif
3408                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3409                         (vn << DMAE_CMD_E1HVN_SHIFT));
3410         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3411                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3412         dmae->src_addr_hi = 0;
3413         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3414                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3415         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3416                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3417         dmae->len = (2*sizeof(u32)) >> 2;
3418         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3419         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3420         dmae->comp_val = DMAE_COMP_VAL;
3421
3422         *stats_comp = 0;
3423 }
3424
3425 static void bnx2x_func_stats_init(struct bnx2x *bp)
3426 {
3427         struct dmae_command *dmae = &bp->stats_dmae;
3428         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3429
3430         /* sanity */
3431         if (!bp->func_stx) {
3432                 BNX2X_ERR("BUG!\n");
3433                 return;
3434         }
3435
3436         bp->executer_idx = 0;
3437         memset(dmae, 0, sizeof(struct dmae_command));
3438
3439         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3440                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3441                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3442 #ifdef __BIG_ENDIAN
3443                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3444 #else
3445                         DMAE_CMD_ENDIANITY_DW_SWAP |
3446 #endif
3447                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3448                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3449         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3450         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3451         dmae->dst_addr_lo = bp->func_stx >> 2;
3452         dmae->dst_addr_hi = 0;
3453         dmae->len = sizeof(struct host_func_stats) >> 2;
3454         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3455         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3456         dmae->comp_val = DMAE_COMP_VAL;
3457
3458         *stats_comp = 0;
3459 }
3460
3461 static void bnx2x_stats_start(struct bnx2x *bp)
3462 {
3463         if (bp->port.pmf)
3464                 bnx2x_port_stats_init(bp);
3465
3466         else if (bp->func_stx)
3467                 bnx2x_func_stats_init(bp);
3468
3469         bnx2x_hw_stats_post(bp);
3470         bnx2x_storm_stats_post(bp);
3471 }
3472
3473 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3474 {
3475         bnx2x_stats_comp(bp);
3476         bnx2x_stats_pmf_update(bp);
3477         bnx2x_stats_start(bp);
3478 }
3479
3480 static void bnx2x_stats_restart(struct bnx2x *bp)
3481 {
3482         bnx2x_stats_comp(bp);
3483         bnx2x_stats_start(bp);
3484 }
3485
3486 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3487 {
3488         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3489         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3490         struct regpair diff;
3491
3492         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3493         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3494         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3495         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3496         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3497         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3498         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3499         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3500         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3501         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3502         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3503         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3504         UPDATE_STAT64(tx_stat_gt127,
3505                                 tx_stat_etherstatspkts65octetsto127octets);
3506         UPDATE_STAT64(tx_stat_gt255,
3507                                 tx_stat_etherstatspkts128octetsto255octets);
3508         UPDATE_STAT64(tx_stat_gt511,
3509                                 tx_stat_etherstatspkts256octetsto511octets);
3510         UPDATE_STAT64(tx_stat_gt1023,
3511                                 tx_stat_etherstatspkts512octetsto1023octets);
3512         UPDATE_STAT64(tx_stat_gt1518,
3513                                 tx_stat_etherstatspkts1024octetsto1522octets);
3514         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3515         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3516         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3517         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3518         UPDATE_STAT64(tx_stat_gterr,
3519                                 tx_stat_dot3statsinternalmactransmiterrors);
3520         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3521 }
3522
3523 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3524 {
3525         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3526         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3527
3528         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3529         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3530         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3531         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3532         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3533         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3534         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3535         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3536         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3537         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3538         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3539         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3540         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3541         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3542         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3543         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3544         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3545         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3546         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3547         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3548         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3549         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3550         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3551         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3552         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3553         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3554         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3555         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3556         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3557         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3558         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3559 }
3560
3561 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3562 {
3563         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3564         struct nig_stats *old = &(bp->port.old_nig_stats);
3565         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3566         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3567         struct regpair diff;
3568
3569         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3570                 bnx2x_bmac_stats_update(bp);
3571
3572         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3573                 bnx2x_emac_stats_update(bp);
3574
3575         else { /* unreached */
3576                 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3577                 return -1;
3578         }
3579
3580         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3581                       new->brb_discard - old->brb_discard);
3582         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3583                       new->brb_truncate - old->brb_truncate);
3584
3585         UPDATE_STAT64_NIG(egress_mac_pkt0,
3586                                         etherstatspkts1024octetsto1522octets);
3587         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3588
3589         memcpy(old, new, sizeof(struct nig_stats));
3590
3591         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3592                sizeof(struct mac_stx));
3593         estats->brb_drop_hi = pstats->brb_drop_hi;
3594         estats->brb_drop_lo = pstats->brb_drop_lo;
3595
3596         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3597
3598         return 0;
3599 }
3600
3601 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3602 {
3603         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3604         int cl_id = BP_CL_ID(bp);
3605         struct tstorm_per_port_stats *tport =
3606                                 &stats->tstorm_common.port_statistics;
3607         struct tstorm_per_client_stats *tclient =
3608                         &stats->tstorm_common.client_statistics[cl_id];
3609         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3610         struct xstorm_per_client_stats *xclient =
3611                         &stats->xstorm_common.client_statistics[cl_id];
3612         struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3613         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3614         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3615         u32 diff;
3616
3617         /* are storm stats valid? */
3618         if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3619                                                         bp->stats_counter) {
3620                 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3621                    "  tstorm counter (%d) != stats_counter (%d)\n",
3622                    tclient->stats_counter, bp->stats_counter);
3623                 return -1;
3624         }
3625         if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3626                                                         bp->stats_counter) {
3627                 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3628                    "  xstorm counter (%d) != stats_counter (%d)\n",
3629                    xclient->stats_counter, bp->stats_counter);
3630                 return -2;
3631         }
3632
3633         fstats->total_bytes_received_hi =
3634         fstats->valid_bytes_received_hi =
3635                                 le32_to_cpu(tclient->total_rcv_bytes.hi);
3636         fstats->total_bytes_received_lo =
3637         fstats->valid_bytes_received_lo =
3638                                 le32_to_cpu(tclient->total_rcv_bytes.lo);
3639
3640         estats->error_bytes_received_hi =
3641                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
3642         estats->error_bytes_received_lo =
3643                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
3644         ADD_64(estats->error_bytes_received_hi,
3645                estats->rx_stat_ifhcinbadoctets_hi,
3646                estats->error_bytes_received_lo,
3647                estats->rx_stat_ifhcinbadoctets_lo);
3648
3649         ADD_64(fstats->total_bytes_received_hi,
3650                estats->error_bytes_received_hi,
3651                fstats->total_bytes_received_lo,
3652                estats->error_bytes_received_lo);
3653
3654         UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
3655         UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3656                                 total_multicast_packets_received);
3657         UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3658                                 total_broadcast_packets_received);
3659
3660         fstats->total_bytes_transmitted_hi =
3661                                 le32_to_cpu(xclient->total_sent_bytes.hi);
3662         fstats->total_bytes_transmitted_lo =
3663                                 le32_to_cpu(xclient->total_sent_bytes.lo);
3664
3665         UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3666                                 total_unicast_packets_transmitted);
3667         UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3668                                 total_multicast_packets_transmitted);
3669         UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3670                                 total_broadcast_packets_transmitted);
3671
3672         memcpy(estats, &(fstats->total_bytes_received_hi),
3673                sizeof(struct host_func_stats) - 2*sizeof(u32));
3674
3675         estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3676         estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3677         estats->brb_truncate_discard =
3678                                 le32_to_cpu(tport->brb_truncate_discard);
3679         estats->mac_discard = le32_to_cpu(tport->mac_discard);
3680
3681         old_tclient->rcv_unicast_bytes.hi =
3682                                 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
3683         old_tclient->rcv_unicast_bytes.lo =
3684                                 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
3685         old_tclient->rcv_broadcast_bytes.hi =
3686                                 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3687         old_tclient->rcv_broadcast_bytes.lo =
3688                                 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3689         old_tclient->rcv_multicast_bytes.hi =
3690                                 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
3691         old_tclient->rcv_multicast_bytes.lo =
3692                                 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
3693         old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
3694
3695         old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3696         old_tclient->packets_too_big_discard =
3697                                 le32_to_cpu(tclient->packets_too_big_discard);
3698         estats->no_buff_discard =
3699         old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3700         old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3701
3702         old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3703         old_xclient->unicast_bytes_sent.hi =
3704                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3705         old_xclient->unicast_bytes_sent.lo =
3706                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3707         old_xclient->multicast_bytes_sent.hi =
3708                                 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3709         old_xclient->multicast_bytes_sent.lo =
3710                                 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3711         old_xclient->broadcast_bytes_sent.hi =
3712                                 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3713         old_xclient->broadcast_bytes_sent.lo =
3714                                 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3715
3716         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3717
3718         return 0;
3719 }
3720
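/* translate the accumulated MAC and storm statistics into the generic
 * struct net_device_stats counters reported to the network stack
 */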
3721 static void bnx2x_net_stats_update(struct bnx2x *bp)
3722 {
3723         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3724         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3725         struct net_device_stats *nstats = &bp->dev->stats;
3726
3727         nstats->rx_packets =
3728                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3729                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3730                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3731
3732         nstats->tx_packets =
3733                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3734                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3735                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3736
3737         nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
3738
3739         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3740
3741         nstats->rx_dropped = old_tclient->checksum_discard +
3742                              estats->mac_discard;
3743         nstats->tx_dropped = 0;
3744
3745         nstats->multicast =
3746                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
3747
3748         nstats->collisions =
3749                         estats->tx_stat_dot3statssinglecollisionframes_lo +
3750                         estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3751                         estats->tx_stat_dot3statslatecollisions_lo +
3752                         estats->tx_stat_dot3statsexcessivecollisions_lo;
3753
3754         estats->jabber_packets_received =
3755                                 old_tclient->packets_too_big_discard +
3756                                 estats->rx_stat_dot3statsframestoolong_lo;
3757
3758         nstats->rx_length_errors =
3759                                 estats->rx_stat_etherstatsundersizepkts_lo +
3760                                 estats->jabber_packets_received;
3761         nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3762         nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3763         nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3764         nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3765         nstats->rx_missed_errors = estats->xxoverflow_discard;
3766
3767         nstats->rx_errors = nstats->rx_length_errors +
3768                             nstats->rx_over_errors +
3769                             nstats->rx_crc_errors +
3770                             nstats->rx_frame_errors +
3771                             nstats->rx_fifo_errors +
3772                             nstats->rx_missed_errors;
3773
3774         nstats->tx_aborted_errors =
3775                         estats->tx_stat_dot3statslatecollisions_lo +
3776                         estats->tx_stat_dot3statsexcessivecollisions_lo;
3777         nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3778         nstats->tx_fifo_errors = 0;
3779         nstats->tx_heartbeat_errors = 0;
3780         nstats->tx_window_errors = 0;
3781
3782         nstats->tx_errors = nstats->tx_aborted_errors +
3783                             nstats->tx_carrier_errors;
3784 }
3785
3786 static void bnx2x_stats_update(struct bnx2x *bp)
3787 {
3788         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3789         int update = 0;
3790
3791         if (*stats_comp != DMAE_COMP_VAL)
3792                 return;
3793
3794         if (bp->port.pmf)
3795                 update = (bnx2x_hw_stats_update(bp) == 0);
3796
3797         update |= (bnx2x_storm_stats_update(bp) == 0);
3798
3799         if (update)
3800                 bnx2x_net_stats_update(bp);
3801
3802         else {
3803                 if (bp->stats_pending) {
3804                         bp->stats_pending++;
3805                         if (bp->stats_pending == 3) {
3806                                 BNX2X_ERR("stats not updated 3 times in a row\n");
3807                                 bnx2x_panic();
3808                                 return;
3809                         }
3810                 }
3811         }
3812
3813         if (bp->msglevel & NETIF_MSG_TIMER) {
3814                 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3815                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3816                 struct net_device_stats *nstats = &bp->dev->stats;
3817                 int i;
3818
3819                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3820                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
3821                                   "  tx pkt (%lx)\n",
3822                        bnx2x_tx_avail(bp->fp),
3823                        le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3824                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
3825                                   "  rx pkt (%lx)\n",
3826                        (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3827                              bp->fp->rx_comp_cons),
3828                        le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3829                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u\n",
3830                        netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3831                        estats->driver_xoff, estats->brb_drop_lo);
3832                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
3833                         "packets_too_big_discard %u  no_buff_discard %u  "
3834                         "mac_discard %u  mac_filter_discard %u  "
3835                         "xxoverflow_discard %u  brb_truncate_discard %u  "
3836                         "ttl0_discard %u\n",
3837                        old_tclient->checksum_discard,
3838                        old_tclient->packets_too_big_discard,
3839                        old_tclient->no_buff_discard, estats->mac_discard,
3840                        estats->mac_filter_discard, estats->xxoverflow_discard,
3841                        estats->brb_truncate_discard,
3842                        old_tclient->ttl0_discard);
3843
3844                 for_each_queue(bp, i) {
3845                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3846                                bnx2x_fp(bp, i, tx_pkt),
3847                                bnx2x_fp(bp, i, rx_pkt),
3848                                bnx2x_fp(bp, i, rx_calls));
3849                 }
3850         }
3851
3852         bnx2x_hw_stats_post(bp);
3853         bnx2x_storm_stats_post(bp);
3854 }
3855
3856 static void bnx2x_port_stats_stop(struct bnx2x *bp)
3857 {
3858         struct dmae_command *dmae;
3859         u32 opcode;
3860         int loader_idx = PMF_DMAE_C(bp);
3861         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3862
3863         bp->executer_idx = 0;
3864
3865         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3866                   DMAE_CMD_C_ENABLE |
3867                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3868 #ifdef __BIG_ENDIAN
3869                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3870 #else
3871                   DMAE_CMD_ENDIANITY_DW_SWAP |
3872 #endif
3873                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3874                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3875
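        /* if a function-stats command follows, complete through the DMAE GO
         * register; otherwise complete directly to the stats_comp word so
         * bnx2x_stats_comp() can poll for it
         */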
3876         if (bp->port.port_stx) {
3877
3878                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3879                 if (bp->func_stx)
3880                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3881                 else
3882                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3883                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3884                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3885                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3886                 dmae->dst_addr_hi = 0;
3887                 dmae->len = sizeof(struct host_port_stats) >> 2;
3888                 if (bp->func_stx) {
3889                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3890                         dmae->comp_addr_hi = 0;
3891                         dmae->comp_val = 1;
3892                 } else {
3893                         dmae->comp_addr_lo =
3894                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3895                         dmae->comp_addr_hi =
3896                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3897                         dmae->comp_val = DMAE_COMP_VAL;
3898
3899                         *stats_comp = 0;
3900                 }
3901         }
3902
3903         if (bp->func_stx) {
3904
3905                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3906                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3907                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3908                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3909                 dmae->dst_addr_lo = bp->func_stx >> 2;
3910                 dmae->dst_addr_hi = 0;
3911                 dmae->len = sizeof(struct host_func_stats) >> 2;
3912                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3913                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3914                 dmae->comp_val = DMAE_COMP_VAL;
3915
3916                 *stats_comp = 0;
3917         }
3918 }
3919
3920 static void bnx2x_stats_stop(struct bnx2x *bp)
3921 {
3922         int update = 0;
3923
3924         bnx2x_stats_comp(bp);
3925
3926         if (bp->port.pmf)
3927                 update = (bnx2x_hw_stats_update(bp) == 0);
3928
3929         update |= (bnx2x_storm_stats_update(bp) == 0);
3930
3931         if (update) {
3932                 bnx2x_net_stats_update(bp);
3933
3934                 if (bp->port.pmf)
3935                         bnx2x_port_stats_stop(bp);
3936
3937                 bnx2x_hw_stats_post(bp);
3938                 bnx2x_stats_comp(bp);
3939         }
3940 }
3941
3942 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3943 {
3944 }
3945
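/* statistics state machine: indexed by current state and event, each entry
 * names the handler to run and the state to move to (see bnx2x_stats_handle)
 */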
3946 static const struct {
3947         void (*action)(struct bnx2x *bp);
3948         enum bnx2x_stats_state next_state;
3949 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3950 /* state        event   */
3951 {
3952 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3953 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
3954 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3955 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3956 },
3957 {
3958 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
3959 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
3960 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
3961 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
3962 }
3963 };
3964
3965 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3966 {
3967         enum bnx2x_stats_state state = bp->stats_state;
3968
3969         bnx2x_stats_stm[state][event].action(bp);
3970         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3971
3972         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3973                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3974                    state, event, bp->stats_state);
3975 }
3976
3977 static void bnx2x_timer(unsigned long data)
3978 {
3979         struct bnx2x *bp = (struct bnx2x *) data;
3980
3981         if (!netif_running(bp->dev))
3982                 return;
3983
3984         if (atomic_read(&bp->intr_sem) != 0)
3985                 goto timer_restart;
3986
3987         if (poll) {
3988                 struct bnx2x_fastpath *fp = &bp->fp[0];
3989                 int rc;
3990
3991                 bnx2x_tx_int(fp, 1000);
3992                 rc = bnx2x_rx_int(fp, 1000);
3993         }
3994
3995         if (!BP_NOMCP(bp)) {
3996                 int func = BP_FUNC(bp);
3997                 u32 drv_pulse;
3998                 u32 mcp_pulse;
3999
4000                 ++bp->fw_drv_pulse_wr_seq;
4001                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4002                 /* TBD - add SYSTEM_TIME */
4003                 drv_pulse = bp->fw_drv_pulse_wr_seq;
4004                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4005
4006                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4007                              MCP_PULSE_SEQ_MASK);
4008                 /* The delta between driver pulse and mcp response
4009                  * should be 1 (before mcp response) or 0 (after mcp response)
4010                  */
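                /* Worked example (illustrative values): if drv_pulse is
                 * 0x005, the MCP may still show 0x004 (not yet responded)
                 * or 0x005 (responded); anything else means a missed
                 * heartbeat and is reported below.
                 */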
4011                 if ((drv_pulse != mcp_pulse) &&
4012                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4013                         /* someone lost a heartbeat... */
4014                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4015                                   drv_pulse, mcp_pulse);
4016                 }
4017         }
4018
4019         if ((bp->state == BNX2X_STATE_OPEN) ||
4020             (bp->state == BNX2X_STATE_DISABLED))
4021                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4022
4023 timer_restart:
4024         mod_timer(&bp->timer, jiffies + bp->current_interval);
4025 }
4026
4027 /* end of Statistics */
4028
4029 /* nic init */
4030
4031 /*
4032  * nic init service functions
4033  */
4034
4035 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4036 {
4037         int port = BP_PORT(bp);
4038
4039         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4040                         USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4041                         sizeof(struct ustorm_status_block)/4);
4042         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4043                         CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4044                         sizeof(struct cstorm_status_block)/4);
4045 }
4046
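/* Point the USTORM and CSTORM halves of a fastpath status block at the host
 * buffer and disable host coalescing on every index; the per-index timeouts
 * are programmed later by bnx2x_update_coalesce().
 */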
4047 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4048                           dma_addr_t mapping, int sb_id)
4049 {
4050         int port = BP_PORT(bp);
4051         int func = BP_FUNC(bp);
4052         int index;
4053         u64 section;
4054
4055         /* USTORM */
4056         section = ((u64)mapping) + offsetof(struct host_status_block,
4057                                             u_status_block);
4058         sb->u_status_block.status_block_id = sb_id;
4059
4060         REG_WR(bp, BAR_USTRORM_INTMEM +
4061                USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4062         REG_WR(bp, BAR_USTRORM_INTMEM +
4063                ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4064                U64_HI(section));
4065         REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4066                 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4067
4068         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4069                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4070                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4071
4072         /* CSTORM */
4073         section = ((u64)mapping) + offsetof(struct host_status_block,
4074                                             c_status_block);
4075         sb->c_status_block.status_block_id = sb_id;
4076
4077         REG_WR(bp, BAR_CSTRORM_INTMEM +
4078                CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4079         REG_WR(bp, BAR_CSTRORM_INTMEM +
4080                ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4081                U64_HI(section));
4082         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4083                 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4084
4085         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4086                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4087                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4088
4089         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4090 }
4091
4092 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4093 {
4094         int func = BP_FUNC(bp);
4095
4096         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4097                         USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4098                         sizeof(struct ustorm_def_status_block)/4);
4099         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4100                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4101                         sizeof(struct cstorm_def_status_block)/4);
4102         bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4103                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4104                         sizeof(struct xstorm_def_status_block)/4);
4105         bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4106                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4107                         sizeof(struct tstorm_def_status_block)/4);
4108 }
4109
4110 static void bnx2x_init_def_sb(struct bnx2x *bp,
4111                               struct host_def_status_block *def_sb,
4112                               dma_addr_t mapping, int sb_id)
4113 {
4114         int port = BP_PORT(bp);
4115         int func = BP_FUNC(bp);
4116         int index, val, reg_offset;
4117         u64 section;
4118
4119         /* ATTN */
4120         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4121                                             atten_status_block);
4122         def_sb->atten_status_block.status_block_id = sb_id;
4123
4124         bp->attn_state = 0;
4125
4126         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4127                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4128
4129         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4130                 bp->attn_group[index].sig[0] = REG_RD(bp,
4131                                                      reg_offset + 0x10*index);
4132                 bp->attn_group[index].sig[1] = REG_RD(bp,
4133                                                reg_offset + 0x4 + 0x10*index);
4134                 bp->attn_group[index].sig[2] = REG_RD(bp,
4135                                                reg_offset + 0x8 + 0x10*index);
4136                 bp->attn_group[index].sig[3] = REG_RD(bp,
4137                                                reg_offset + 0xc + 0x10*index);
4138         }
4139
4140         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4141                              HC_REG_ATTN_MSG0_ADDR_L);
4142
4143         REG_WR(bp, reg_offset, U64_LO(section));
4144         REG_WR(bp, reg_offset + 4, U64_HI(section));
4145
4146         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4147
4148         val = REG_RD(bp, reg_offset);
4149         val |= sb_id;
4150         REG_WR(bp, reg_offset, val);
4151
4152         /* USTORM */
4153         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4154                                             u_def_status_block);
4155         def_sb->u_def_status_block.status_block_id = sb_id;
4156
4157         REG_WR(bp, BAR_USTRORM_INTMEM +
4158                USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4159         REG_WR(bp, BAR_USTRORM_INTMEM +
4160                ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4161                U64_HI(section));
4162         REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4163                 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4164
4165         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4166                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4167                          USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4168
4169         /* CSTORM */
4170         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4171                                             c_def_status_block);
4172         def_sb->c_def_status_block.status_block_id = sb_id;
4173
4174         REG_WR(bp, BAR_CSTRORM_INTMEM +
4175                CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4176         REG_WR(bp, BAR_CSTRORM_INTMEM +
4177                ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4178                U64_HI(section));
4179         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4180                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4181
4182         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4183                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4184                          CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4185
4186         /* TSTORM */
4187         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4188                                             t_def_status_block);
4189         def_sb->t_def_status_block.status_block_id = sb_id;
4190
4191         REG_WR(bp, BAR_TSTRORM_INTMEM +
4192                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4193         REG_WR(bp, BAR_TSTRORM_INTMEM +
4194                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4195                U64_HI(section));
4196         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4197                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4198
4199         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4200                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4201                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4202
4203         /* XSTORM */
4204         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4205                                             x_def_status_block);
4206         def_sb->x_def_status_block.status_block_id = sb_id;
4207
4208         REG_WR(bp, BAR_XSTRORM_INTMEM +
4209                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4210         REG_WR(bp, BAR_XSTRORM_INTMEM +
4211                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4212                U64_HI(section));
4213         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4214                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4215
4216         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4217                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4218                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4219
4220         bp->stats_pending = 0;
4221         bp->set_mac_pending = 0;
4222
4223         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4224 }
4225
4226 static void bnx2x_update_coalesce(struct bnx2x *bp)
4227 {
4228         int port = BP_PORT(bp);
4229         int i;
4230
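        /* rx_ticks/tx_ticks come from the coalescing settings (microseconds);
         * the hardware timeout field appears to be in 12us units, hence the
         * division by 12.  A value of 0 disables coalescing for that index by
         * setting the HC_DISABLE flag instead.
         */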
4231         for_each_queue(bp, i) {
4232                 int sb_id = bp->fp[i].sb_id;
4233
4234                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4235                 REG_WR8(bp, BAR_USTRORM_INTMEM +
4236                         USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4237                                                     U_SB_ETH_RX_CQ_INDEX),
4238                         bp->rx_ticks/12);
4239                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4240                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4241                                                      U_SB_ETH_RX_CQ_INDEX),
4242                          bp->rx_ticks ? 0 : 1);
4243
4244                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4245                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4246                         CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4247                                                     C_SB_ETH_TX_CQ_INDEX),
4248                         bp->tx_ticks/12);
4249                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4250                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4251                                                      C_SB_ETH_TX_CQ_INDEX),
4252                          bp->tx_ticks ? 0 : 1);
4253         }
4254 }
4255
4256 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4257                                        struct bnx2x_fastpath *fp, int last)
4258 {
4259         int i;
4260
4261         for (i = 0; i < last; i++) {
4262                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4263                 struct sk_buff *skb = rx_buf->skb;
4264
4265                 if (skb == NULL) {
4266                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4267                         continue;
4268                 }
4269
4270                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4271                         pci_unmap_single(bp->pdev,
4272                                          pci_unmap_addr(rx_buf, mapping),
4273                                          bp->rx_buf_size,
4274                                          PCI_DMA_FROMDEVICE);
4275
4276                 dev_kfree_skb(skb);
4277                 rx_buf->skb = NULL;
4278         }
4279 }
4280
4281 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4282 {
4283         int func = BP_FUNC(bp);
4284         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4285                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4286         u16 ring_prod, cqe_ring_prod;
4287         int i, j;
4288
4289         bp->rx_buf_size = bp->dev->mtu;
4290         bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD +
4291                 BCM_RX_ETH_PAYLOAD_ALIGN;
4292
4293         if (bp->flags & TPA_ENABLE_FLAG) {
4294                 DP(NETIF_MSG_IFUP,
4295                    "rx_buf_size %d  effective_mtu %d\n",
4296                    bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);
4297
4298                 for_each_queue(bp, j) {
4299                         struct bnx2x_fastpath *fp = &bp->fp[j];
4300
4301                         for (i = 0; i < max_agg_queues; i++) {
4302                                 fp->tpa_pool[i].skb =
4303                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4304                                 if (!fp->tpa_pool[i].skb) {
4305                                         BNX2X_ERR("Failed to allocate TPA "
4306                                                   "skb pool for queue[%d] - "
4307                                                   "disabling TPA on this "
4308                                                   "queue!\n", j);
4309                                         bnx2x_free_tpa_pool(bp, fp, i);
4310                                         fp->disable_tpa = 1;
4311                                         break;
4312                                 }
4313                                 /* use this queue's pool, not fp[0]'s */
4314                                 pci_unmap_addr_set(&fp->tpa_pool[i],
4315                                                    mapping, 0);
4316                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4317                         }
4318                 }
4319         }
4320
4321         for_each_queue(bp, j) {
4322                 struct bnx2x_fastpath *fp = &bp->fp[j];
4323
4324                 fp->rx_bd_cons = 0;
4325                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4326                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4327
4328                 /* "next page" elements initialization */
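                /* The last elements of every page are not used for buffers;
                 * they hold the DMA address of the following page, linking
                 * the SGE, RX BD and RCQ pages below into circular rings
                 * (note the "i % NUM_..." wrap-around for the last page).
                 */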
4329                 /* SGE ring */
4330                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4331                         struct eth_rx_sge *sge;
4332
4333                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4334                         sge->addr_hi =
4335                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4336                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4337                         sge->addr_lo =
4338                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4339                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4340                 }
4341
4342                 bnx2x_init_sge_ring_bit_mask(fp);
4343
4344                 /* RX BD ring */
4345                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4346                         struct eth_rx_bd *rx_bd;
4347
4348                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4349                         rx_bd->addr_hi =
4350                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4351                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4352                         rx_bd->addr_lo =
4353                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4354                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4355                 }
4356
4357                 /* CQ ring */
4358                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4359                         struct eth_rx_cqe_next_page *nextpg;
4360
4361                         nextpg = (struct eth_rx_cqe_next_page *)
4362                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4363                         nextpg->addr_hi =
4364                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4365                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4366                         nextpg->addr_lo =
4367                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4368                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4369                 }
4370
4371                 /* Allocate SGEs and initialize the ring elements */
4372                 for (i = 0, ring_prod = 0;
4373                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4374
4375                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4376                                 BNX2X_ERR("was only able to allocate "
4377                                           "%d rx sges\n", i);
4378                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4379                                 /* Cleanup already allocated elements */
4380                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4381                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4382                                 fp->disable_tpa = 1;
4383                                 ring_prod = 0;
4384                                 break;
4385                         }
4386                         ring_prod = NEXT_SGE_IDX(ring_prod);
4387                 }
4388                 fp->rx_sge_prod = ring_prod;
4389
4390                 /* Allocate BDs and initialize BD ring */
4391                 fp->rx_comp_cons = 0;
4392                 cqe_ring_prod = ring_prod = 0;
4393                 for (i = 0; i < bp->rx_ring_size; i++) {
4394                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4395                                 BNX2X_ERR("was only able to allocate "
4396                                           "%d rx skbs\n", i);
4397                                 bp->eth_stats.rx_skb_alloc_failed++;
4398                                 break;
4399                         }
4400                         ring_prod = NEXT_RX_IDX(ring_prod);
4401                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4402                         WARN_ON(ring_prod <= i);
4403                 }
4404
4405                 fp->rx_bd_prod = ring_prod;
4406                 /* must not have more available CQEs than BDs */
4407                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4408                                        cqe_ring_prod);
4409                 fp->rx_pkt = fp->rx_calls = 0;
4410
4411                 /* Warning!
4412                  * This will generate an interrupt (to the TSTORM);
4413                  * it must only be done after the chip is initialized.
4414                  */
4415                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4416                                      fp->rx_sge_prod);
4417                 if (j != 0)
4418                         continue;
4419
4420                 REG_WR(bp, BAR_USTRORM_INTMEM +
4421                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4422                        U64_LO(fp->rx_comp_mapping));
4423                 REG_WR(bp, BAR_USTRORM_INTMEM +
4424                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4425                        U64_HI(fp->rx_comp_mapping));
4426         }
4427 }
4428
4429 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4430 {
4431         int i, j;
4432
4433         for_each_queue(bp, j) {
4434                 struct bnx2x_fastpath *fp = &bp->fp[j];
4435
4436                 for (i = 1; i <= NUM_TX_RINGS; i++) {
4437                         struct eth_tx_bd *tx_bd =
4438                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4439
4440                         tx_bd->addr_hi =
4441                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4442                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4443                         tx_bd->addr_lo =
4444                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4445                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4446                 }
4447
4448                 fp->tx_pkt_prod = 0;
4449                 fp->tx_pkt_cons = 0;
4450                 fp->tx_bd_prod = 0;
4451                 fp->tx_bd_cons = 0;
4452                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4453                 fp->tx_pkt = 0;
4454         }
4455 }
4456
4457 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4458 {
4459         int func = BP_FUNC(bp);
4460
4461         spin_lock_init(&bp->spq_lock);
4462
4463         bp->spq_left = MAX_SPQ_PENDING;
4464         bp->spq_prod_idx = 0;
4465         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4466         bp->spq_prod_bd = bp->spq;
4467         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4468
4469         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4470                U64_LO(bp->spq_mapping));
4471         REG_WR(bp,
4472                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4473                U64_HI(bp->spq_mapping));
4474
4475         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4476                bp->spq_prod_idx);
4477 }
4478
4479 static void bnx2x_init_context(struct bnx2x *bp)
4480 {
4481         int i;
4482
4483         for_each_queue(bp, i) {
4484                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4485                 struct bnx2x_fastpath *fp = &bp->fp[i];
4486                 u8 sb_id = FP_SB_ID(fp);
4487
4488                 context->ustorm_st_context.common.sb_index_numbers =
4489                                                 BNX2X_RX_SB_INDEX_NUM;
4490                 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4491                 context->ustorm_st_context.common.status_block_id = sb_id;
4492                 context->ustorm_st_context.common.flags =
4493                         USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4494                 context->ustorm_st_context.common.mc_alignment_log_size =
4495                         6 /*BCM_RX_ETH_PAYLOAD_ALIGN*/;
4496                 context->ustorm_st_context.common.bd_buff_size =
4497                                                 bp->rx_buf_size;
4498                 context->ustorm_st_context.common.bd_page_base_hi =
4499                                                 U64_HI(fp->rx_desc_mapping);
4500                 context->ustorm_st_context.common.bd_page_base_lo =
4501                                                 U64_LO(fp->rx_desc_mapping);
4502                 if (!fp->disable_tpa) {
4503                         context->ustorm_st_context.common.flags |=
4504                                 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4505                                  USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4506                         context->ustorm_st_context.common.sge_buff_size =
4507                                 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4508                                          (u32)0xffff);
4509                         context->ustorm_st_context.common.sge_page_base_hi =
4510                                                 U64_HI(fp->rx_sge_mapping);
4511                         context->ustorm_st_context.common.sge_page_base_lo =
4512                                                 U64_LO(fp->rx_sge_mapping);
4513                 }
4514
4515                 context->ustorm_ag_context.cdu_usage =
4516                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4517                                                CDU_REGION_NUMBER_UCM_AG,
4518                                                ETH_CONNECTION_TYPE);
4519
4520                 context->xstorm_st_context.tx_bd_page_base_hi =
4521                                                 U64_HI(fp->tx_desc_mapping);
4522                 context->xstorm_st_context.tx_bd_page_base_lo =
4523                                                 U64_LO(fp->tx_desc_mapping);
4524                 context->xstorm_st_context.db_data_addr_hi =
4525                                                 U64_HI(fp->tx_prods_mapping);
4526                 context->xstorm_st_context.db_data_addr_lo =
4527                                                 U64_LO(fp->tx_prods_mapping);
4528                 context->xstorm_st_context.statistics_data = (fp->cl_id |
4529                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4530                 context->cstorm_st_context.sb_index_number =
4531                                                 C_SB_ETH_TX_CQ_INDEX;
4532                 context->cstorm_st_context.status_block_id = sb_id;
4533
4534                 context->xstorm_ag_context.cdu_reserved =
4535                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4536                                                CDU_REGION_NUMBER_XCM_AG,
4537                                                ETH_CONNECTION_TYPE);
4538         }
4539 }
4540
4541 static void bnx2x_init_ind_table(struct bnx2x *bp)
4542 {
4543         int func = BP_FUNC(bp);
4544         int i;
4545
4546         if (!is_multi(bp))
4547                 return;
4548
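        /* RSS indirection: spread the table entries round-robin over the
         * client IDs of this function's queues (BP_CL_ID + queue index).
         */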
4549         DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
4550         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4551                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4552                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4553                         BP_CL_ID(bp) + (i % bp->num_queues));
4554 }
4555
4556 static void bnx2x_set_client_config(struct bnx2x *bp)
4557 {
4558         struct tstorm_eth_client_config tstorm_client = {0};
4559         int port = BP_PORT(bp);
4560         int i;
4561
4562         tstorm_client.mtu = bp->dev->mtu;
4563         tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4564         tstorm_client.config_flags =
4565                                 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4566 #ifdef BCM_VLAN
4567         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4568                 tstorm_client.config_flags |=
4569                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
4570                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4571         }
4572 #endif
4573
4574         if (bp->flags & TPA_ENABLE_FLAG) {
4575                 tstorm_client.max_sges_for_packet =
4576                         SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4577                 tstorm_client.max_sges_for_packet =
4578                         ((tstorm_client.max_sges_for_packet +
4579                           PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4580                         PAGES_PER_SGE_SHIFT;
4581
4582                 tstorm_client.config_flags |=
4583                                 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4584         }
4585
4586         for_each_queue(bp, i) {
4587                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4588                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4589                        ((u32 *)&tstorm_client)[0]);
4590                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4591                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4592                        ((u32 *)&tstorm_client)[1]);
4593         }
4594
4595         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4596            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4597 }
4598
4599 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4600 {
4601         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4602         int mode = bp->rx_mode;
4603         int mask = (1 << BP_L_ID(bp));
4604         int func = BP_FUNC(bp);
4605         int i;
4606
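        /* mask selects this function's client bit inside each accept/drop
         * bitmap of the TSTORM MAC filter configuration written below.
         */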
4607         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
4608
4609         switch (mode) {
4610         case BNX2X_RX_MODE_NONE: /* no Rx */
4611                 tstorm_mac_filter.ucast_drop_all = mask;
4612                 tstorm_mac_filter.mcast_drop_all = mask;
4613                 tstorm_mac_filter.bcast_drop_all = mask;
4614                 break;
4615         case BNX2X_RX_MODE_NORMAL:
4616                 tstorm_mac_filter.bcast_accept_all = mask;
4617                 break;
4618         case BNX2X_RX_MODE_ALLMULTI:
4619                 tstorm_mac_filter.mcast_accept_all = mask;
4620                 tstorm_mac_filter.bcast_accept_all = mask;
4621                 break;
4622         case BNX2X_RX_MODE_PROMISC:
4623                 tstorm_mac_filter.ucast_accept_all = mask;
4624                 tstorm_mac_filter.mcast_accept_all = mask;
4625                 tstorm_mac_filter.bcast_accept_all = mask;
4626                 break;
4627         default:
4628                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4629                 break;
4630         }
4631
4632         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4633                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4634                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4635                        ((u32 *)&tstorm_mac_filter)[i]);
4636
4637 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4638                    ((u32 *)&tstorm_mac_filter)[i]); */
4639         }
4640
4641         if (mode != BNX2X_RX_MODE_NONE)
4642                 bnx2x_set_client_config(bp);
4643 }
4644
4645 static void bnx2x_init_internal_common(struct bnx2x *bp)
4646 {
4647         int i;
4648
4649         if (bp->flags & TPA_ENABLE_FLAG) {
4650                 struct tstorm_eth_tpa_exist tpa = {0};
4651
4652                 tpa.tpa_exist = 1;
4653
4654                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4655                        ((u32 *)&tpa)[0]);
4656                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4657                        ((u32 *)&tpa)[1]);
4658         }
4659
4660         /* Zero this manually as its initialization is
4661            currently missing in the initTool */
4662         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4663                 REG_WR(bp, BAR_USTRORM_INTMEM +
4664                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4665 }
4666
4667 static void bnx2x_init_internal_port(struct bnx2x *bp)
4668 {
4669         int port = BP_PORT(bp);
4670
4671         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4672         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4673         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4674         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4675 }
4676
4677 static void bnx2x_init_internal_func(struct bnx2x *bp)
4678 {
4679         struct tstorm_eth_function_common_config tstorm_config = {0};
4680         struct stats_indication_flags stats_flags = {0};
4681         int port = BP_PORT(bp);
4682         int func = BP_FUNC(bp);
4683         int i;
4684         u16 max_agg_size;
4685
4686         if (is_multi(bp)) {
4687                 tstorm_config.config_flags = MULTI_FLAGS;
4688                 tstorm_config.rss_result_mask = MULTI_MASK;
4689         }
4690         if (IS_E1HMF(bp))
4691                 tstorm_config.config_flags |=
4692                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
4693
4694         tstorm_config.leading_client_id = BP_L_ID(bp);
4695
4696         REG_WR(bp, BAR_TSTRORM_INTMEM +
4697                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4698                (*(u32 *)&tstorm_config));
4699
4700         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4701         bnx2x_set_storm_rx_mode(bp);
4702
4703         /* reset xstorm per client statistics */
4704         for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4705                 REG_WR(bp, BAR_XSTRORM_INTMEM +
4706                        XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4707                        i*4, 0);
4708         }
4709         /* reset tstorm per client statistics */
4710         for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4711                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4712                        TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4713                        i*4, 0);
4714         }
4715
4716         /* Init statistics related context */
4717         stats_flags.collect_eth = 1;
4718
4719         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4720                ((u32 *)&stats_flags)[0]);
4721         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4722                ((u32 *)&stats_flags)[1]);
4723
4724         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4725                ((u32 *)&stats_flags)[0]);
4726         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4727                ((u32 *)&stats_flags)[1]);
4728
4729         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4730                ((u32 *)&stats_flags)[0]);
4731         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4732                ((u32 *)&stats_flags)[1]);
4733
4734         REG_WR(bp, BAR_XSTRORM_INTMEM +
4735                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4736                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4737         REG_WR(bp, BAR_XSTRORM_INTMEM +
4738                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4739                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4740
4741         REG_WR(bp, BAR_TSTRORM_INTMEM +
4742                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4743                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4744         REG_WR(bp, BAR_TSTRORM_INTMEM +
4745                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4746                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4747
4748         if (CHIP_IS_E1H(bp)) {
4749                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4750                         IS_E1HMF(bp));
4751                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4752                         IS_E1HMF(bp));
4753                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4754                         IS_E1HMF(bp));
4755                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4756                         IS_E1HMF(bp));
4757
4758                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4759                          bp->e1hov);
4760         }
4761
4762         /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
4763         max_agg_size =
4764                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
4765                           SGE_PAGE_SIZE * PAGES_PER_SGE),
4766                     (u32)0xffff);
4767         for_each_queue(bp, i) {
4768                 struct bnx2x_fastpath *fp = &bp->fp[i];
4769
4770                 REG_WR(bp, BAR_USTRORM_INTMEM +
4771                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4772                        U64_LO(fp->rx_comp_mapping));
4773                 REG_WR(bp, BAR_USTRORM_INTMEM +
4774                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4775                        U64_HI(fp->rx_comp_mapping));
4776
4777                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4778                          USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4779                          max_agg_size);
4780         }
4781 }
4782
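/* The load_code from the MCP tells how much of the internal memory to set up:
 * COMMON falls through to PORT and FUNCTION, PORT falls through to FUNCTION,
 * and FUNCTION initializes only the per-function part (hence the deliberate
 * "no break" fall-throughs below).
 */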
4783 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4784 {
4785         switch (load_code) {
4786         case FW_MSG_CODE_DRV_LOAD_COMMON:
4787                 bnx2x_init_internal_common(bp);
4788                 /* no break */
4789
4790         case FW_MSG_CODE_DRV_LOAD_PORT:
4791                 bnx2x_init_internal_port(bp);
4792                 /* no break */
4793
4794         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4795                 bnx2x_init_internal_func(bp);
4796                 break;
4797
4798         default:
4799                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4800                 break;
4801         }
4802 }
4803
4804 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4805 {
4806         int i;
4807
4808         for_each_queue(bp, i) {
4809                 struct bnx2x_fastpath *fp = &bp->fp[i];
4810
4811                 fp->bp = bp;
4812                 fp->state = BNX2X_FP_STATE_CLOSED;
4813                 fp->index = i;
4814                 fp->cl_id = BP_L_ID(bp) + i;
4815                 fp->sb_id = fp->cl_id;
4816                 DP(NETIF_MSG_IFUP,
4817                    "bnx2x_init_sb(%p,%p) index %d  cl_id %d  sb %d\n",
4818                    bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4819                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4820                               FP_SB_ID(fp));
4821                 bnx2x_update_fpsb_idx(fp);
4822         }
4823
4824         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4825                           DEF_SB_ID);
4826         bnx2x_update_dsb_idx(bp);
4827         bnx2x_update_coalesce(bp);
4828         bnx2x_init_rx_rings(bp);
4829         bnx2x_init_tx_ring(bp);
4830         bnx2x_init_sp_ring(bp);
4831         bnx2x_init_context(bp);
4832         bnx2x_init_internal(bp, load_code);
4833         bnx2x_init_ind_table(bp);
4834         bnx2x_stats_init(bp);
4835
4836         /* At this point, we are ready for interrupts */
4837         atomic_set(&bp->intr_sem, 0);
4838
4839         /* flush all before enabling interrupts */
4840         mb();
4841         mmiowb();
4842
4843         bnx2x_int_enable(bp);
4844 }
4845
4846 /* end of nic init */
4847
4848 /*
4849  * gzip service functions
4850  */
4851
4852 static int bnx2x_gunzip_init(struct bnx2x *bp)
4853 {
4854         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4855                                               &bp->gunzip_mapping);
4856         if (bp->gunzip_buf  == NULL)
4857                 goto gunzip_nomem1;
4858
4859         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4860         if (bp->strm  == NULL)
4861                 goto gunzip_nomem2;
4862
4863         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4864                                       GFP_KERNEL);
4865         if (bp->strm->workspace == NULL)
4866                 goto gunzip_nomem3;
4867
4868         return 0;
4869
4870 gunzip_nomem3:
4871         kfree(bp->strm);
4872         bp->strm = NULL;
4873
4874 gunzip_nomem2:
4875         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4876                             bp->gunzip_mapping);
4877         bp->gunzip_buf = NULL;
4878
4879 gunzip_nomem1:
4880         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
4881                " decompression\n", bp->dev->name);
4882         return -ENOMEM;
4883 }
4884
4885 static void bnx2x_gunzip_end(struct bnx2x *bp)
4886 {
4887         kfree(bp->strm->workspace);
4888
4889         kfree(bp->strm);
4890         bp->strm = NULL;
4891
4892         if (bp->gunzip_buf) {
4893                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4894                                     bp->gunzip_mapping);
4895                 bp->gunzip_buf = NULL;
4896         }
4897 }
4898
4899 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4900 {
4901         int n, rc;
4902
4903         /* check gzip header */
4904         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4905                 return -EINVAL;
4906
4907         n = 10;
4908
4909 #define FNAME                           0x8
4910
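        /* zbuf[3] is the gzip FLG byte (RFC 1952); if FNAME is set, skip the
         * NUL-terminated original file name that follows the fixed 10-byte
         * header before handing the raw deflate data to zlib.
         */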
4911         if (zbuf[3] & FNAME)
4912                 while ((zbuf[n++] != 0) && (n < len));
4913
4914         bp->strm->next_in = zbuf + n;
4915         bp->strm->avail_in = len - n;
4916         bp->strm->next_out = bp->gunzip_buf;
4917         bp->strm->avail_out = FW_BUF_SIZE;
4918
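        /* Negative windowBits tells zlib to expect a raw deflate stream with
         * no zlib/gzip wrapper - the gzip header was already consumed above.
         */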
4919         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4920         if (rc != Z_OK)
4921                 return rc;
4922
4923         rc = zlib_inflate(bp->strm, Z_FINISH);
4924         if ((rc != Z_OK) && (rc != Z_STREAM_END))
4925                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4926                        bp->dev->name, bp->strm->msg);
4927
4928         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4929         if (bp->gunzip_outlen & 0x3)
4930                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4931                                     " gunzip_outlen (%d) not aligned\n",
4932                        bp->dev->name, bp->gunzip_outlen);
4933         bp->gunzip_outlen >>= 2;
4934
4935         zlib_inflateEnd(bp->strm);
4936
4937         if (rc == Z_STREAM_END)
4938                 return 0;
4939
4940         return rc;
4941 }
4942
4943 /* nic load/unload */
4944
4945 /*
4946  * General service functions
4947  */
4948
4949 /* send a NIG loopback debug packet */
4950 static void bnx2x_lb_pckt(struct bnx2x *bp)
4951 {
4952         u32 wb_write[3];
4953
4954         /* Ethernet source and destination addresses */
4955         wb_write[0] = 0x55555555;
4956         wb_write[1] = 0x55555555;
4957         wb_write[2] = 0x20;             /* SOP */
4958         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4959
4960         /* NON-IP protocol */
4961         wb_write[0] = 0x09000000;
4962         wb_write[1] = 0x55555555;
4963         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
4964         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4965 }
4966
4967 /* Some of the internal memories are not directly readable from the
4968  * driver; to test them we send debug loopback packets and check that
4969  * the NIG and parser counters advance as expected.
4970  */
4971 static int bnx2x_int_mem_test(struct bnx2x *bp)
4972 {
4973         int factor;
4974         int count, i;
4975         u32 val = 0;
4976
4977         if (CHIP_REV_IS_FPGA(bp))
4978                 factor = 120;
4979         else if (CHIP_REV_IS_EMUL(bp))
4980                 factor = 200;
4981         else
4982                 factor = 1;
4983
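        /* FPGA and emulation platforms are much slower than real silicon, so
         * the polling loops below are stretched by this factor.
         */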
4984         DP(NETIF_MSG_HW, "start part1\n");
4985
4986         /* Disable inputs of parser neighbor blocks */
4987         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4988         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4989         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4990         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4991
4992         /*  Write 0 to parser credits for CFC search request */
4993         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4994
4995         /* send Ethernet packet */
4996         bnx2x_lb_pckt(bp);
4997
4998         /* TODO: do we need to reset the NIG statistics here? */
4999         /* Wait until NIG register shows 1 packet of size 0x10 */
5000         count = 1000 * factor;
5001         while (count) {
5002
5003                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5004                 val = *bnx2x_sp(bp, wb_data[0]);
5005                 if (val == 0x10)
5006                         break;
5007
5008                 msleep(10);
5009                 count--;
5010         }
5011         if (val != 0x10) {
5012                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5013                 return -1;
5014         }
5015
5016         /* Wait until PRS register shows 1 packet */
5017         count = 1000 * factor;
5018         while (count) {
5019                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5020                 if (val == 1)
5021                         break;
5022
5023                 msleep(10);
5024                 count--;
5025         }
5026         if (val != 0x1) {
5027                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5028                 return -2;
5029         }
5030
5031         /* Reset and init BRB, PRS */
5032         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5033         msleep(50);
5034         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5035         msleep(50);
5036         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5037         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5038
5039         DP(NETIF_MSG_HW, "part2\n");
5040
5041         /* Disable inputs of parser neighbor blocks */
5042         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5043         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5044         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5045         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5046
5047         /* Write 0 to parser credits for CFC search request */
5048         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5049
5050         /* send 10 Ethernet packets */
5051         for (i = 0; i < 10; i++)
5052                 bnx2x_lb_pckt(bp);
5053
5054         /* Wait until NIG register shows 10 + 1
5055            packets of size 11*0x10 = 0xb0 */
5056         count = 1000 * factor;
5057         while (count) {
5058
5059                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5060                 val = *bnx2x_sp(bp, wb_data[0]);
5061                 if (val == 0xb0)
5062                         break;
5063
5064                 msleep(10);
5065                 count--;
5066         }
5067         if (val != 0xb0) {
5068                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5069                 return -3;
5070         }
5071
5072         /* Wait until PRS register shows 2 packets */
5073         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5074         if (val != 2)
5075                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5076
5077         /* Write 1 to parser credits for CFC search request */
5078         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5079
5080         /* Wait until PRS register shows 3 packets */
5081         msleep(10 * factor);
5082         /* Check that the PRS register now shows 3 packets */
5083         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5084         if (val != 3)
5085                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5086
5087         /* clear NIG EOP FIFO */
5088         for (i = 0; i < 11; i++)
5089                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5090         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5091         if (val != 1) {
5092                 BNX2X_ERR("clear of NIG failed\n");
5093                 return -4;
5094         }
5095
5096         /* Reset and init BRB, PRS, NIG */
5097         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5098         msleep(50);
5099         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5100         msleep(50);
5101         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5102         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5103 #ifndef BCM_ISCSI
5104         /* set NIC mode */
5105         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5106 #endif
5107
5108         /* Enable inputs of parser neighbor blocks */
5109         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5110         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5111         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5112         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5113
5114         DP(NETIF_MSG_HW, "done\n");
5115
5116         return 0; /* OK */
5117 }
5118
5119 static void enable_blocks_attention(struct bnx2x *bp)
5120 {
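        /* Writing 0 to a block's INT_MASK register leaves all of its
         * attention bits enabled; the non-zero values further down mask
         * only specific noisy bits (see the PBF comment at the end).
         */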
5121         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5122         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5123         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5124         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5125         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5126         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5127         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5128         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5129         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5130 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5131 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5132         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5133         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5134         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5135 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5136 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5137         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5138         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5139         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5140         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5141 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5142 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5143         if (CHIP_REV_IS_FPGA(bp))
5144                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5145         else
5146                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5147         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5148         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5149         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5150 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5151 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5152         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5153         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5154 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5155         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);         /* bit 3,4 masked */
5156 }
5157
5158
5159 static void bnx2x_reset_common(struct bnx2x *bp)
5160 {
5161         /* reset_common */
5162         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5163                0xd3ffff7f);
5164         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5165 }
5166
5167 static int bnx2x_init_common(struct bnx2x *bp)
5168 {
5169         u32 val, i;
5170
5171         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
5172
5173         bnx2x_reset_common(bp);
5174         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5175         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5176
5177         bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5178         if (CHIP_IS_E1H(bp))
5179                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5180
5181         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5182         msleep(30);
5183         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5184
5185         bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5186         if (CHIP_IS_E1(bp)) {
5187                 /* enable HW interrupt from PXP on USDM overflow
5188                    bit 16 on INT_MASK_0 */
5189                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5190         }
5191
5192         bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5193         bnx2x_init_pxp(bp);
5194
5195 #ifdef __BIG_ENDIAN
5196         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5197         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5198         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5199         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5200         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5201
5202 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5203         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5204         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5205         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5206         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5207 #endif
5208
5209         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5210 #ifdef BCM_ISCSI
5211         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5212         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5213         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5214 #endif
5215
5216         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5217                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5218
5219         /* let the HW do its magic ... */
5220         msleep(100);
5221         /* finish PXP init */
5222         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5223         if (val != 1) {
5224                 BNX2X_ERR("PXP2 CFG failed\n");
5225                 return -EBUSY;
5226         }
5227         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5228         if (val != 1) {
5229                 BNX2X_ERR("PXP2 RD_INIT failed\n");
5230                 return -EBUSY;
5231         }
5232
5233         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5234         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5235
5236         bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5237
5238         /* clean the DMAE memory */
5239         bp->dmae_ready = 1;
5240         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5241
5242         bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5243         bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5244         bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5245         bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5246
5247         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5248         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5249         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5250         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5251
5252         bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5253         /* soft reset pulse */
5254         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5255         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5256
5257 #ifdef BCM_ISCSI
5258         bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5259 #endif
5260
5261         bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5262         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5263         if (!CHIP_REV_IS_SLOW(bp)) {
5264                 /* enable hw interrupt from doorbell Q */
5265                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5266         }
5267
5268         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5269         if (CHIP_REV_IS_SLOW(bp)) {
5270                 /* fix for emulation and FPGA: run with no pause */
5271                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5272                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5273                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5274                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5275         }
5276
5277         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5278         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5279         /* set NIC mode */
5280         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5281         if (CHIP_IS_E1H(bp))
5282                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5283
5284         bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5285         bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5286         bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5287         bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5288
5289         if (CHIP_IS_E1H(bp)) {
5290                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5291                                 STORM_INTMEM_SIZE_E1H/2);
5292                 bnx2x_init_fill(bp,
5293                                 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5294                                 0, STORM_INTMEM_SIZE_E1H/2);
5295                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5296                                 STORM_INTMEM_SIZE_E1H/2);
5297                 bnx2x_init_fill(bp,
5298                                 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5299                                 0, STORM_INTMEM_SIZE_E1H/2);
5300                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5301                                 STORM_INTMEM_SIZE_E1H/2);
5302                 bnx2x_init_fill(bp,
5303                                 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5304                                 0, STORM_INTMEM_SIZE_E1H/2);
5305                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5306                                 STORM_INTMEM_SIZE_E1H/2);
5307                 bnx2x_init_fill(bp,
5308                                 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5309                                 0, STORM_INTMEM_SIZE_E1H/2);
5310         } else { /* E1 */
5311                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5312                                 STORM_INTMEM_SIZE_E1);
5313                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5314                                 STORM_INTMEM_SIZE_E1);
5315                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5316                                 STORM_INTMEM_SIZE_E1);
5317                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5318                                 STORM_INTMEM_SIZE_E1);
5319         }
5320
5321         bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5322         bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5323         bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5324         bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5325
5326         /* sync semi rtc */
5327         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5328                0x80000000);
5329         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5330                0x80000000);
5331
5332         bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5333         bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5334         bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5335
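	/* searcher (SRC) block: program the KEYRSS key registers while
	   the block is held in soft reset */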
5336         REG_WR(bp, SRC_REG_SOFT_RST, 1);
5337         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5338                 REG_WR(bp, i, 0xc0cac01a);
5339                 /* TODO: replace with something meaningful */
5340         }
5341         bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5342         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5343
5344         if (sizeof(union cdu_context) != 1024)
5345                 /* we currently assume that a context is 1024 bytes */
5346                 printk(KERN_ALERT PFX "please adjust the size of"
5347                        " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5348
5349         bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5350         val = (4 << 24) + (0 << 12) + 1024;
5351         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5352         if (CHIP_IS_E1(bp)) {
5353                 /* !!! fix pxp client credit until excel update */
5354                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5355                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5356         }
5357
5358         bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5359         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5360         /* enable context validation interrupt from CFC */
5361         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5362
5363         /* set the thresholds to prevent CFC/CDU race */
5364         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5365
5366         bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5367         bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5368
5369         /* PXPCS COMMON comes here */
5370         /* Reset PCIE errors for debug */
5371         REG_WR(bp, 0x2814, 0xffffffff);
5372         REG_WR(bp, 0x3820, 0xffffffff);
5373
5374         /* EMAC0 COMMON comes here */
5375         /* EMAC1 COMMON comes here */
5376         /* DBU COMMON comes here */
5377         /* DBG COMMON comes here */
5378
5379         bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5380         if (CHIP_IS_E1H(bp)) {
5381                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5382                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5383         }
5384
5385         if (CHIP_REV_IS_SLOW(bp))
5386                 msleep(200);
5387
5388         /* finish CFC init */
5389         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5390         if (val != 1) {
5391                 BNX2X_ERR("CFC LL_INIT failed\n");
5392                 return -EBUSY;
5393         }
5394         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5395         if (val != 1) {
5396                 BNX2X_ERR("CFC AC_INIT failed\n");
5397                 return -EBUSY;
5398         }
5399         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5400         if (val != 1) {
5401                 BNX2X_ERR("CFC CAM_INIT failed\n");
5402                 return -EBUSY;
5403         }
5404         REG_WR(bp, CFC_REG_DEBUG0, 0);
5405
5406         /* read NIG statistic
5407            to see if this is the first bring-up since power-up */
5408         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5409         val = *bnx2x_sp(bp, wb_data[0]);
5410
5411         /* do internal memory self test */
5412         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5413                 BNX2X_ERR("internal mem self test failed\n");
5414                 return -EBUSY;
5415         }
5416
5417         switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5418         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5419         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5420                 /* Fan failure is indicated by SPIO 5 */
5421                 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5422                                MISC_REGISTERS_SPIO_INPUT_HI_Z);
5423
5424                 /* set to active low mode */
5425                 val = REG_RD(bp, MISC_REG_SPIO_INT);
5426                 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5427                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5428                 REG_WR(bp, MISC_REG_SPIO_INT, val);
5429
5430                 /* enable interrupt to signal the IGU */
5431                 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5432                 val |= (1 << MISC_REGISTERS_SPIO_5);
5433                 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5434                 break;
5435
5436         default:
5437                 break;
5438         }
5439
5440         /* clear PXP2 attentions */
5441         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5442
5443         enable_blocks_attention(bp);
5444
5445         if (!BP_NOMCP(bp)) {
5446                 bnx2x_acquire_phy_lock(bp);
5447                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5448                 bnx2x_release_phy_lock(bp);
5449         } else
5450                 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5451
5452         return 0;
5453 }
5454
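/* per-port HW init: programs the port-scoped CM, SEM, PBF, HC, AEU and NIG
   blocks; called for the DRV_LOAD_COMMON and DRV_LOAD_PORT load codes
   (see bnx2x_init_hw below) */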
5455 static int bnx2x_init_port(struct bnx2x *bp)
5456 {
5457         int port = BP_PORT(bp);
5458         u32 val;
5459
5460         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
5461
5462         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5463
5464         /* Port PXP comes here */
5465         /* Port PXP2 comes here */
5466 #ifdef BCM_ISCSI
5467         /* Port0  1
5468          * Port1  385 */
5469         i++;
5470         wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5471         wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5472         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5473         REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5474
5475         /* Port0  2
5476          * Port1  386 */
5477         i++;
5478         wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5479         wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5480         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5481         REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5482
5483         /* Port0  3
5484          * Port1  387 */
5485         i++;
5486         wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5487         wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5488         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5489         REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5490 #endif
5491         /* Port CMs come here */
5492         bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
5493                              (port ? XCM_PORT1_END : XCM_PORT0_END));
5494
5495         /* Port QM comes here */
5496 #ifdef BCM_ISCSI
5497         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5498         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5499
5500         bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5501                              func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5502 #endif
5503         /* Port DQ comes here */
5504         /* Port BRB1 comes here */
5505         /* Port PRS comes here */
5506         /* Port TSDM comes here */
5507         /* Port CSDM comes here */
5508         /* Port USDM comes here */
5509         /* Port XSDM comes here */
5510         bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5511                              port ? TSEM_PORT1_END : TSEM_PORT0_END);
5512         bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5513                              port ? USEM_PORT1_END : USEM_PORT0_END);
5514         bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5515                              port ? CSEM_PORT1_END : CSEM_PORT0_END);
5516         bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5517                              port ? XSEM_PORT1_END : XSEM_PORT0_END);
5518         /* Port UPB comes here */
5519         /* Port XPB comes here */
5520
5521         bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5522                              port ? PBF_PORT1_END : PBF_PORT0_END);
5523
5524         /* configure PBF to work without PAUSE mtu 9000 */
5525         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5526
5527         /* update threshold */
5528         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5529         /* update init credit */
5530         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5531
5532         /* probe changes */
5533         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5534         msleep(5);
5535         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5536
5537 #ifdef BCM_ISCSI
5538         /* tell the searcher where the T2 table is */
5539         REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5540
5541         wb_write[0] = U64_LO(bp->t2_mapping);
5542         wb_write[1] = U64_HI(bp->t2_mapping);
5543         REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5544         wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5545         wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5546         REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5547
5548         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5549         /* Port SRCH comes here */
5550 #endif
5551         /* Port CDU comes here */
5552         /* Port CFC comes here */
5553
5554         if (CHIP_IS_E1(bp)) {
5555                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5556                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5557         }
5558         bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5559                              port ? HC_PORT1_END : HC_PORT0_END);
5560
5561         bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5562                                     MISC_AEU_PORT0_START,
5563                              port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5564         /* init aeu_mask_attn_func_0/1:
5565          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5566          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5567          *             bits 4-7 are used for "per vn group attention" */
5568         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5569                (IS_E1HMF(bp) ? 0xF7 : 0x7));
5570
5571         /* Port PXPCS comes here */
5572         /* Port EMAC0 comes here */
5573         /* Port EMAC1 comes here */
5574         /* Port DBU comes here */
5575         /* Port DBG comes here */
5576         bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5577                              port ? NIG_PORT1_END : NIG_PORT0_END);
5578
5579         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5580
5581         if (CHIP_IS_E1H(bp)) {
5582                 u32 wsum;
5583                 struct cmng_struct_per_port m_cmng_port;
5584                 int vn;
5585
5586                 /* 0x2 disable e1hov, 0x1 enable */
5587                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5588                        (IS_E1HMF(bp) ? 0x1 : 0x2));
5589
5590                 /* Init RATE SHAPING and FAIRNESS contexts.
5591                    Initialize as if there is a 10G link. */
5592                 wsum = bnx2x_calc_vn_wsum(bp);
5593                 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5594                 if (IS_E1HMF(bp))
5595                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
5596                                 bnx2x_init_vn_minmax(bp, 2*vn + port,
5597                                         wsum, 10000, &m_cmng_port);
5598         }
5599
5600         /* Port MCP comes here */
5601         /* Port DMAE comes here */
5602
5603         switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5604         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5605         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5606                 /* add SPIO 5 to group 0 */
5607                 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5608                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5609                 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5610                 break;
5611
5612         default:
5613                 break;
5614         }
5615
5616         bnx2x__link_reset(bp);
5617
5618         return 0;
5619 }
5620
5621 #define ILT_PER_FUNC            (768/2)
5622 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
5623 /* the phys address is shifted right 12 bits and has a
5624    1 (valid) bit added as the 53rd bit
5625    then since this is a wide register(TM)
5626    we split it into two 32 bit writes
5627  */
5628 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5629 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
5630 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
5631 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
5632
5633 #define CNIC_ILT_LINES          0
5634
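/* write a single ILT line: the on-chip address is split into the two
   32 bit halves built by ONCHIP_ADDR1/ONCHIP_ADDR2 above */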
5635 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5636 {
5637         int reg;
5638
5639         if (CHIP_IS_E1H(bp))
5640                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5641         else /* E1 */
5642                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5643
5644         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5645 }
5646
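/* per-function HW init: maps the function's slowpath context into the ILT
   and initializes the function's HC block (plus the per-function CM and NIG
   settings on E1H) */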
5647 static int bnx2x_init_func(struct bnx2x *bp)
5648 {
5649         int port = BP_PORT(bp);
5650         int func = BP_FUNC(bp);
5651         int i;
5652
5653         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
5654
5655         i = FUNC_ILT_BASE(func);
5656
5657         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5658         if (CHIP_IS_E1H(bp)) {
5659                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5660                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5661         } else /* E1 */
5662                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5663                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5664
5665
5666         if (CHIP_IS_E1H(bp)) {
5667                 for (i = 0; i < 9; i++)
5668                         bnx2x_init_block(bp,
5669                                          cm_start[func][i], cm_end[func][i]);
5670
5671                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5672                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5673         }
5674
5675         /* HC init per function */
5676         if (CHIP_IS_E1H(bp)) {
5677                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5678
5679                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5680                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5681         }
5682         bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5683
5684         /* Reset PCIE errors for debug */
5685         REG_WR(bp, 0x2114, 0xffffffff);
5686         REG_WR(bp, 0x2120, 0xffffffff);
5687
5688         return 0;
5689 }
5690
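/* top level HW init: the MCP load_code tells us how much of the chip this
   function must bring up - the whole chip (COMMON), its port (PORT) or only
   itself (FUNCTION); the switch below falls through on purpose so a COMMON
   load also performs the PORT and FUNCTION stages */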
5691 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5692 {
5693         int i, rc = 0;
5694
5695         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
5696            BP_FUNC(bp), load_code);
5697
5698         bp->dmae_ready = 0;
5699         mutex_init(&bp->dmae_mutex);
5700         bnx2x_gunzip_init(bp);
5701
5702         switch (load_code) {
5703         case FW_MSG_CODE_DRV_LOAD_COMMON:
5704                 rc = bnx2x_init_common(bp);
5705                 if (rc)
5706                         goto init_hw_err;
5707                 /* no break */
5708
5709         case FW_MSG_CODE_DRV_LOAD_PORT:
5710                 bp->dmae_ready = 1;
5711                 rc = bnx2x_init_port(bp);
5712                 if (rc)
5713                         goto init_hw_err;
5714                 /* no break */
5715
5716         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5717                 bp->dmae_ready = 1;
5718                 rc = bnx2x_init_func(bp);
5719                 if (rc)
5720                         goto init_hw_err;
5721                 break;
5722
5723         default:
5724                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5725                 break;
5726         }
5727
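	/* with an MCP present, latch the initial driver pulse sequence and
	   the per-function fw_mb_param handed back by the bootcode */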
5728         if (!BP_NOMCP(bp)) {
5729                 int func = BP_FUNC(bp);
5730
5731                 bp->fw_drv_pulse_wr_seq =
5732                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
5733                                  DRV_PULSE_SEQ_MASK);
5734                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5735                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
5736                    bp->fw_drv_pulse_wr_seq, bp->func_stx);
5737         } else
5738                 bp->func_stx = 0;
5739
5740         /* this needs to be done before gunzip end */
5741         bnx2x_zero_def_sb(bp);
5742         for_each_queue(bp, i)
5743                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5744
5745 init_hw_err:
5746         bnx2x_gunzip_end(bp);
5747
5748         return rc;
5749 }
5750
5751 /* send the MCP a request, block until there is a reply */
5752 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5753 {
5754         int func = BP_FUNC(bp);
5755         u32 seq = ++bp->fw_seq;
5756         u32 rc = 0;
5757         u32 cnt = 1;
5758         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
5759
5760         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
5761         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
5762
5763         do {
5764                 /* let the FW do its magic ... */
5765                 msleep(delay);
5766
5767                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
5768
5769                 /* Give the FW up to 2 seconds (200*10ms) */
5770         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5771
5772         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5773            cnt*delay, rc, seq);
5774
5775         /* is this a reply to our command? */
5776         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5777                 rc &= FW_MSG_CODE_MASK;
5778
5779         } else {
5780                 /* FW BUG! */
5781                 BNX2X_ERR("FW failed to respond!\n");
5782                 bnx2x_fw_dump(bp);
5783                 rc = 0;
5784         }
5785
5786         return rc;
5787 }
5788
5789 static void bnx2x_free_mem(struct bnx2x *bp)
5790 {
5791