bnx2x: Indirection table initialization index
drivers/net/bnx2x_main.c
/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION      "1.45.23"
#define DRV_MODULE_RELDATE      "2008/11/03"
#define BNX2X_BC_VER            0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
static int use_multi;

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif
static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}
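
/* A sketch of the indirect access protocol used by the two helpers
 * above: the GRC window in PCI config space is pointed at the target
 * register, the data register is then read or written through config
 * space, and the window is parked back at PCICFG_VENDOR_ID_OFFSET so
 * that a stray config cycle cannot hit a device register.  E.g. an
 * indirect read of GRC offset 0x2000 amounts to:
 *
 *      pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, 0x2000);
 *      pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
 *      pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
 *                             PCICFG_VENDOR_ID_OFFSET);
 */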

static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}
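
/* The command memory at DMAE_REG_CMD_MEM is laid out as an array of
 * struct dmae_command slots, so slot idx starts at
 * DMAE_REG_CMD_MEM + idx * sizeof(struct dmae_command); writing 1 to
 * the matching dmae_reg_go_c[idx] register is what actually kicks
 * DMAE channel idx.
 */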

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = U64_LO(dma_addr);
        dmae->src_addr_hi = U64_HI(dma_addr);
        dmae->dst_addr_lo = dst_addr >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}
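
/* Typical usage sketch (this is how the REG_WR_DMAE() wrapper is
 * expected to drive it): stage the dwords in the slowpath wb_data
 * scratch area, then hand its DMA address to bnx2x_write_dmae(),
 * which polls the write-back completion word for DMAE_COMP_VAL:
 *
 *      memcpy(bnx2x_sp(bp, wb_data[0]), src, len32 * 4);
 *      bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), reg, len32);
 */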

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = src_addr >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif
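
/* Wide-bus (64-bit) registers are accessed through the DMAE engine as
 * a {hi, lo} dword pair rather than with plain 32-bit register
 * accesses; e.g. bnx2x_wb_wr(bp, reg, 0, val) writes a 64-bit value
 * whose upper half is zero.
 */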

static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}
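
/* Each STORM keeps its assert list in internal memory as entries of
 * four consecutive dwords (row0..row3 above); an entry whose first
 * dword still reads COMMON_ASM_INVALID_ASSERT_OPCODE terminates the
 * list, so each scan stops at the first unused slot and rc counts the
 * asserts actually found.
 */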

static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        u32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        printk("\n" KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

                BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("          rx_bd_prod(%x)  rx_bd_cons(%x)"
                          "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
                          fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
                          "  fp_c_idx(%x)  *sb_c_idx(%x)  fp_u_idx(%x)"
                          "  *sb_u_idx(%x)  bd data(%x,%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
                          fp->status_blk->c_status_block.status_block_index,
                          fp->fp_u_idx,
                          fp->status_blk->u_status_block.status_block_index,
                          hw_prods->packets_prod, hw_prods->bds_prod);

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j < end; j++) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
                                  sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j < end; j++) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j < end; j++) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j < end; j++) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j < end; j++) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
                                  j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
                  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
                   val, port, addr, msix);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
           val, port, addr, msix);

        REG_WR(bp, addr, val);

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig attention */
                                val |= 0x0100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i].vector);

                /* one more for the Slow Path IRQ */
                synchronize_irq(bp->msix_table[i].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}
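
/* Each port owns a 32-byte window of HC command registers, hence the
 * BP_PORT(bp)*32 stride above.  The ack itself is a single dword that
 * packs the new index together with the status block id, storm id and
 * the update/interrupt-mode flags, so one register write both updates
 * the consumer and re-enables (or leaves masked) the interrupt.
 */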

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;
        u16 rc = 0;

        barrier(); /* status block is written to by the chip */
        if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
                fp->fp_c_idx = fpsb->c_status_block.status_block_index;
                rc |= 1;
        }
        if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
                fp->fp_u_idx = fpsb->u_status_block.status_block_index;
                rc |= 2;
        }
        return rc;
}
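
/* The return value is a bitmask of what moved: bit 0 - the CSTORM
 * (Tx completion) index, bit 1 - the USTORM (Rx completion) index;
 * 0 means the status block holds no new work for this fastpath.
 */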

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}


/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_bd *tx_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_bd = &fp->tx_desc_ring[bd_idx];
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                         BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_bd->nbd) - 1;
        new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
        if (nbd > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif

        /* Skip a parse bd and the TSO split header bd
           since they have no mapping */
        if (nbd)
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
                                           ETH_TX_BD_FLAGS_TCP_CSUM |
                                           ETH_TX_BD_FLAGS_SW_LSO)) {
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                tx_bd = &fp->tx_desc_ring[bd_idx];
                /* is this a TSO split header bd? */
                if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
                        if (--nbd)
                                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                }
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_bd = &fp->tx_desc_ring[bd_idx];
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                               BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}
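
/* BD chain layout of a transmitted packet, as walked above (sketch):
 *
 *      [start bd] -> [parse bd] -> [optional TSO split header bd] ->
 *      [frag bd] -> ... -> [frag bd]
 *
 * Only the start bd and the frag bds carry DMA mappings, which is why
 * the parse bd and the split header bd are skipped without an unmap.
 */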

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        barrier(); /* Tell compiler that prod and cons can change */
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries
           It will be used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}
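
/* Worked example, assuming tx_ring_size = 4096 with NUM_TX_RINGS = 16
 * "next-page" bds: for prod = 100 and cons = 50,
 * used = (100 - 50) + 16 = 66 and 4096 - 66 = 4030 bds are reported
 * free.  The NUM_TX_RINGS term reserves the next-page entries, which
 * can never hold real descriptors.
 */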

static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
        struct bnx2x *bp = fp->bp;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
        int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
                done++;

                if (done == work)
                        break;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* Need to make the tx_cons update visible to start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        /* TBD need a thresh? */
        if (unlikely(netif_queue_stopped(bp->dev))) {

                netif_tx_lock(bp->dev);

                if (netif_queue_stopped(bp->dev) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_wake_queue(bp->dev);

                netif_tx_unlock(bp->dev);
        }
}


static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           FP_IDX(fp), cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (FP_IDX(fp)) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp->state is %x\n", command, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;


        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending = 0;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        pci_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       bp->rx_offset + RX_COPY_THRESH,
                                       PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      SGE_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}
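
/* The sge_mask is an array of 64-bit elements with one bit per SGE,
 * so RX_SGE_MASK_ELEM_SHIFT converts an SGE index to its mask element
 * (with 64-bit elements, e.g. SGE 130 lives in element 130 >> 6 = 2).
 * An element that reads all-ones means every SGE it covers has been
 * recycled, and the producer may jump over it in whole
 * RX_SGE_MASK_ELEM_SZ steps, which is what the delta loop above does.
 */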

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the two last indices in the page to 1:
           these are the indices that correspond to the "next" element,
           hence will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
        pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
                                               max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages >
            min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        bp->eth_stats.rx_skb_alloc_failed++;
                        return err;
                }

                /* Unmap the page as we are going to pass it to the stack */
                pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
                              SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

                /* Add one frag and update the appropriate fields in the skb */
                skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

                skb->data_len += frag_len;
                skb->truesize += frag_len;
                skb->len += frag_len;

                frag_size -= frag_len;
        }

        return 0;
}
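
/* Example of the SGL math above, assuming 4K pages and
 * PAGES_PER_SGE = 1: for pkt_len = 9000 and len_on_bd = 1500,
 * frag_size = 7500 and pages = SGE_PAGE_ALIGN(7500) >> SGE_PAGE_SHIFT
 * = 2, so the skb picks up two page frags of 4096 and 3404 bytes from
 * the SGL.
 */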
1265
1266 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1267                            u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1268                            u16 cqe_idx)
1269 {
1270         struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1271         struct sk_buff *skb = rx_buf->skb;
1272         /* alloc new skb */
1273         struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1274
1275         /* Unmap skb in the pool anyway, as we are going to change
1276            pool entry status to BNX2X_TPA_STOP even if new skb allocation
1277            fails. */
1278         pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1279                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1280
1281         if (likely(new_skb)) {
1282                 /* fix ip xsum and give it to the stack */
1283                 /* (no need to map the new skb) */
1284 #ifdef BCM_VLAN
1285                 int is_vlan_cqe =
1286                         (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1287                          PARSING_FLAGS_VLAN);
1288                 int is_not_hwaccel_vlan_cqe =
1289                         (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1290 #endif
1291
1292                 prefetch(skb);
1293                 prefetch(((char *)(skb)) + 128);
1294
1295 #ifdef BNX2X_STOP_ON_ERROR
1296                 if (pad + len > bp->rx_buf_size) {
1297                         BNX2X_ERR("skb_put is about to fail...  "
1298                                   "pad %d  len %d  rx_buf_size %d\n",
1299                                   pad, len, bp->rx_buf_size);
1300                         bnx2x_panic();
1301                         return;
1302                 }
1303 #endif
1304
1305                 skb_reserve(skb, pad);
1306                 skb_put(skb, len);
1307
1308                 skb->protocol = eth_type_trans(skb, bp->dev);
1309                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1310
1311                 {
1312                         struct iphdr *iph;
1313
1314                         iph = (struct iphdr *)skb->data;
1315 #ifdef BCM_VLAN
1316                         /* If there is no Rx VLAN offloading -
1317                            take VLAN tag into an account */
1318                         if (unlikely(is_not_hwaccel_vlan_cqe))
1319                                 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1320 #endif
1321                         iph->check = 0;
1322                         iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1323                 }
1324
1325                 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1326                                          &cqe->fast_path_cqe, cqe_idx)) {
1327 #ifdef BCM_VLAN
1328                         if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1329                             (!is_not_hwaccel_vlan_cqe))
1330                                 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1331                                                 le16_to_cpu(cqe->fast_path_cqe.
1332                                                             vlan_tag));
1333                         else
1334 #endif
1335                                 netif_receive_skb(skb);
1336                 } else {
1337                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1338                            " - dropping packet!\n");
1339                         dev_kfree_skb(skb);
1340                 }
1341
1342
1343                 /* put new skb in bin */
1344                 fp->tpa_pool[queue].skb = new_skb;
1345
1346         } else {
1347                 /* else drop the packet and keep the buffer in the bin */
1348                 DP(NETIF_MSG_RX_STATUS,
1349                    "Failed to allocate new skb - dropping packet!\n");
1350                 bp->eth_stats.rx_skb_alloc_failed++;
1351         }
1352
1353         fp->tpa_state[queue] = BNX2X_TPA_STOP;
1354 }
1355
1356 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1357                                         struct bnx2x_fastpath *fp,
1358                                         u16 bd_prod, u16 rx_comp_prod,
1359                                         u16 rx_sge_prod)
1360 {
1361         struct tstorm_eth_rx_producers rx_prods = {0};
1362         int i;
1363
1364         /* Update producers */
1365         rx_prods.bd_prod = bd_prod;
1366         rx_prods.cqe_prod = rx_comp_prod;
1367         rx_prods.sge_prod = rx_sge_prod;
1368
1369         /*
1370          * Make sure that the BD and SGE data is updated before updating the
1371          * producers since FW might read the BD/SGE right after the producer
1372          * is updated.
1373          * This is only applicable for weak-ordered memory model archs such
1374          * as IA-64. The following barrier is also mandatory since FW will
1375          * assumes BDs must have buffers.
1376          */
1377         wmb();
1378
1379         for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
1380                 REG_WR(bp, BAR_TSTRORM_INTMEM +
1381                        TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
1382                        ((u32 *)&rx_prods)[i]);
1383
1384         mmiowb(); /* keep prod updates ordered */
1385
1386         DP(NETIF_MSG_RX_STATUS,
1387            "Wrote: bd_prod %u  cqe_prod %u  sge_prod %u\n",
1388            bd_prod, rx_comp_prod, rx_sge_prod);
1389 }
1390
1391 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1392 {
1393         struct bnx2x *bp = fp->bp;
1394         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1395         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1396         int rx_pkt = 0;
1397
1398 #ifdef BNX2X_STOP_ON_ERROR
1399         if (unlikely(bp->panic))
1400                 return 0;
1401 #endif
1402
1403         /* CQ "next element" is of the size of the regular element,
1404            that's why it's ok here */
1405         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1406         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1407                 hw_comp_cons++;
1408
1409         bd_cons = fp->rx_bd_cons;
1410         bd_prod = fp->rx_bd_prod;
1411         bd_prod_fw = bd_prod;
1412         sw_comp_cons = fp->rx_comp_cons;
1413         sw_comp_prod = fp->rx_comp_prod;
1414
1415         /* Memory barrier necessary as speculative reads of the rx
1416          * buffer can be ahead of the index in the status block
1417          */
1418         rmb();
1419
1420         DP(NETIF_MSG_RX_STATUS,
1421            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1422            FP_IDX(fp), hw_comp_cons, sw_comp_cons);
1423
1424         while (sw_comp_cons != hw_comp_cons) {
1425                 struct sw_rx_bd *rx_buf = NULL;
1426                 struct sk_buff *skb;
1427                 union eth_rx_cqe *cqe;
1428                 u8 cqe_fp_flags;
1429                 u16 len, pad;
1430
1431                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1432                 bd_prod = RX_BD(bd_prod);
1433                 bd_cons = RX_BD(bd_cons);
1434
1435                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1436                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1437
1438                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1439                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1440                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1441                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1442                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1443                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1444
1445                 /* is this a slowpath msg? */
1446                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1447                         bnx2x_sp_event(fp, cqe);
1448                         goto next_cqe;
1449
1450                 /* this is an rx packet */
1451                 } else {
1452                         rx_buf = &fp->rx_buf_ring[bd_cons];
1453                         skb = rx_buf->skb;
1454                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1455                         pad = cqe->fast_path_cqe.placement_offset;
1456
1457                         /* If CQE is marked both TPA_START and TPA_END
1458                            it is a non-TPA CQE */
1459                         if ((!fp->disable_tpa) &&
1460                             (TPA_TYPE(cqe_fp_flags) !=
1461                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1462                                 u16 queue = cqe->fast_path_cqe.queue_index;
1463
1464                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1465                                         DP(NETIF_MSG_RX_STATUS,
1466                                            "calling tpa_start on queue %d\n",
1467                                            queue);
1468
1469                                         bnx2x_tpa_start(fp, queue, skb,
1470                                                         bd_cons, bd_prod);
1471                                         goto next_rx;
1472                                 }
1473
1474                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1475                                         DP(NETIF_MSG_RX_STATUS,
1476                                            "calling tpa_stop on queue %d\n",
1477                                            queue);
1478
1479                                         if (!BNX2X_RX_SUM_FIX(cqe))
1480                                                 BNX2X_ERR("STOP on non-TCP "
1481                                                           "data\n");
1482
1483                                         /* This is the size of the linear data
1484                                            on this skb */
1485                                         len = le16_to_cpu(cqe->fast_path_cqe.
1486                                                                 len_on_bd);
1487                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1488                                                     len, cqe, comp_ring_cons);
1489 #ifdef BNX2X_STOP_ON_ERROR
1490                                         if (bp->panic)
1491                                                 return -EINVAL;
1492 #endif
1493
1494                                         bnx2x_update_sge_prod(fp,
1495                                                         &cqe->fast_path_cqe);
1496                                         goto next_cqe;
1497                                 }
1498                         }
1499
1500                         pci_dma_sync_single_for_device(bp->pdev,
1501                                         pci_unmap_addr(rx_buf, mapping),
1502                                                        pad + RX_COPY_THRESH,
1503                                                        PCI_DMA_FROMDEVICE);
1504                         prefetch(skb);
1505                         prefetch(((char *)(skb)) + 128);
1506
1507                         /* is this an error packet? */
1508                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1509                                 DP(NETIF_MSG_RX_ERR,
1510                                    "ERROR  flags %x  rx packet %u\n",
1511                                    cqe_fp_flags, sw_comp_cons);
1512                                 bp->eth_stats.rx_err_discard_pkt++;
1513                                 goto reuse_rx;
1514                         }
1515
1516                         /* Since we don't have a jumbo ring,
1517                          * copy small packets if mtu > 1500
1518                          */
1519                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1520                             (len <= RX_COPY_THRESH)) {
1521                                 struct sk_buff *new_skb;
1522
1523                                 new_skb = netdev_alloc_skb(bp->dev,
1524                                                            len + pad);
1525                                 if (new_skb == NULL) {
1526                                         DP(NETIF_MSG_RX_ERR,
1527                                            "ERROR  packet dropped "
1528                                            "because of alloc failure\n");
1529                                         bp->eth_stats.rx_skb_alloc_failed++;
1530                                         goto reuse_rx;
1531                                 }
1532
1533                                 /* aligned copy */
1534                                 skb_copy_from_linear_data_offset(skb, pad,
1535                                                     new_skb->data + pad, len);
1536                                 skb_reserve(new_skb, pad);
1537                                 skb_put(new_skb, len);
1538
1539                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1540
1541                                 skb = new_skb;
1542
1543                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1544                                 pci_unmap_single(bp->pdev,
1545                                         pci_unmap_addr(rx_buf, mapping),
1546                                                  bp->rx_buf_size,
1547                                                  PCI_DMA_FROMDEVICE);
1548                                 skb_reserve(skb, pad);
1549                                 skb_put(skb, len);
1550
1551                         } else {
1552                                 DP(NETIF_MSG_RX_ERR,
1553                                    "ERROR  packet dropped because "
1554                                    "of alloc failure\n");
1555                                 bp->eth_stats.rx_skb_alloc_failed++;
1556 reuse_rx:
1557                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1558                                 goto next_rx;
1559                         }
1560
1561                         skb->protocol = eth_type_trans(skb, bp->dev);
1562
1563                         skb->ip_summed = CHECKSUM_NONE;
1564                         if (bp->rx_csum) {
1565                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1566                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1567                                 else
1568                                         bp->eth_stats.hw_csum_err++;
1569                         }
1570                 }
1571
1572 #ifdef BCM_VLAN
1573                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1574                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1575                      PARSING_FLAGS_VLAN))
1576                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1577                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1578                 else
1579 #endif
1580                         netif_receive_skb(skb);
1581
1583 next_rx:
1584                 rx_buf->skb = NULL;
1585
1586                 bd_cons = NEXT_RX_IDX(bd_cons);
1587                 bd_prod = NEXT_RX_IDX(bd_prod);
1588                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1589                 rx_pkt++;
1590 next_cqe:
1591                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1592                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1593
1594                 if (rx_pkt == budget)
1595                         break;
1596         } /* while */
1597
1598         fp->rx_bd_cons = bd_cons;
1599         fp->rx_bd_prod = bd_prod_fw;
1600         fp->rx_comp_cons = sw_comp_cons;
1601         fp->rx_comp_prod = sw_comp_prod;
1602
1603         /* Update producers */
1604         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1605                              fp->rx_sge_prod);
1606
1607         fp->rx_pkt += rx_pkt;
1608         fp->rx_calls++;
1609
1610         return rx_pkt;
1611 }
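/*
 * Illustrative sketch (not part of the original driver): bnx2x_rx_int() is
 * budget-driven so it can run from a NAPI poll handler.  A hypothetical
 * poll routine, assuming the fastpath structure embeds its napi_struct as
 * the bnx2x_fp() accesses elsewhere in this file suggest, might look like:
 */
#if 0
static int example_napi_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	int work_done;

	work_done = bnx2x_rx_int(fp, budget);

	/* only when the ring is drained may the IRQ be re-armed */
	if (work_done < budget) {
		netif_rx_complete(napi);
		/* the real driver re-arms the status block IGU here */
	}
	return work_done;
}
#endif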
1612
1613 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1614 {
1615         struct bnx2x_fastpath *fp = fp_cookie;
1616         struct bnx2x *bp = fp->bp;
1617         int index = FP_IDX(fp);
1618
1619         /* Return here if interrupt is disabled */
1620         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1621                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1622                 return IRQ_HANDLED;
1623         }
1624
1625         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1626            index, FP_SB_ID(fp));
1627         bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
1628
1629 #ifdef BNX2X_STOP_ON_ERROR
1630         if (unlikely(bp->panic))
1631                 return IRQ_HANDLED;
1632 #endif
1633
1634         prefetch(fp->rx_cons_sb);
1635         prefetch(fp->tx_cons_sb);
1636         prefetch(&fp->status_blk->c_status_block.status_block_index);
1637         prefetch(&fp->status_blk->u_status_block.status_block_index);
1638
1639         netif_rx_schedule(&bnx2x_fp(bp, index, napi));
1640
1641         return IRQ_HANDLED;
1642 }
1643
1644 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1645 {
1646         struct net_device *dev = dev_instance;
1647         struct bnx2x *bp = netdev_priv(dev);
1648         u16 status = bnx2x_ack_int(bp);
1649         u16 mask;
1650
1651         /* Return here if interrupt is shared and it's not for us */
1652         if (unlikely(status == 0)) {
1653                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1654                 return IRQ_NONE;
1655         }
1656         DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);
1657
1658         /* Return here if interrupt is disabled */
1659         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1660                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1661                 return IRQ_HANDLED;
1662         }
1663
1664 #ifdef BNX2X_STOP_ON_ERROR
1665         if (unlikely(bp->panic))
1666                 return IRQ_HANDLED;
1667 #endif
1668
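        /* the IGU status word reports the default (slowpath) status block
         * in bit 0 and fastpath status block 'sb_id' in bit (sb_id + 1),
         * hence the "0x2 << sb_id" mask below */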
1669         mask = 0x2 << bp->fp[0].sb_id;
1670         if (status & mask) {
1671                 struct bnx2x_fastpath *fp = &bp->fp[0];
1672
1673                 prefetch(fp->rx_cons_sb);
1674                 prefetch(fp->tx_cons_sb);
1675                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1676                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1677
1678                 netif_rx_schedule(&bnx2x_fp(bp, 0, napi));
1679
1680                 status &= ~mask;
1681         }
1682
1684         if (unlikely(status & 0x1)) {
1685                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1686
1687                 status &= ~0x1;
1688                 if (!status)
1689                         return IRQ_HANDLED;
1690         }
1691
1692         if (status)
1693                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1694                    status);
1695
1696         return IRQ_HANDLED;
1697 }
1698
1699 /* end of fast path */
1700
1701 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1702
1703 /* Link */
1704
1705 /*
1706  * General service functions
1707  */
1708
1709 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1710 {
1711         u32 lock_status;
1712         u32 resource_bit = (1 << resource);
1713         int func = BP_FUNC(bp);
1714         u32 hw_lock_control_reg;
1715         int cnt;
1716
1717         /* Validating that the resource is within range */
1718         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1719                 DP(NETIF_MSG_HW,
1720                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1721                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1722                 return -EINVAL;
1723         }
1724
1725         if (func <= 5) {
1726                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1727         } else {
1728                 hw_lock_control_reg =
1729                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1730         }
1731
1732         /* Validating that the resource is not already taken */
1733         lock_status = REG_RD(bp, hw_lock_control_reg);
1734         if (lock_status & resource_bit) {
1735                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1736                    lock_status, resource_bit);
1737                 return -EEXIST;
1738         }
1739
1740         /* Try for 5 seconds, polling every 5ms */
1741         for (cnt = 0; cnt < 1000; cnt++) {
1742                 /* Try to acquire the lock */
1743                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1744                 lock_status = REG_RD(bp, hw_lock_control_reg);
1745                 if (lock_status & resource_bit)
1746                         return 0;
1747
1748                 msleep(5);
1749         }
1750         DP(NETIF_MSG_HW, "Timeout\n");
1751         return -EAGAIN;
1752 }
1753
1754 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1755 {
1756         u32 lock_status;
1757         u32 resource_bit = (1 << resource);
1758         int func = BP_FUNC(bp);
1759         u32 hw_lock_control_reg;
1760
1761         /* Validating that the resource is within range */
1762         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1763                 DP(NETIF_MSG_HW,
1764                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1765                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1766                 return -EINVAL;
1767         }
1768
1769         if (func <= 5) {
1770                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1771         } else {
1772                 hw_lock_control_reg =
1773                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1774         }
1775
1776         /* Validating that the resource is currently taken */
1777         lock_status = REG_RD(bp, hw_lock_control_reg);
1778         if (!(lock_status & resource_bit)) {
1779                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1780                    lock_status, resource_bit);
1781                 return -EFAULT;
1782         }
1783
1784         REG_WR(bp, hw_lock_control_reg, resource_bit);
1785         return 0;
1786 }
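/*
 * Illustrative sketch (not part of the original driver): callers bracket
 * access to a shared resource with the acquire/release pair above and must
 * check the acquire result, since it can time out.  The resource id here
 * is only an example.
 */
#if 0
static int example_locked_access(struct bnx2x *bp)
{
	int rc;

	rc = bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	if (rc)
		return rc;

	/* ... access the GPIO registers here ... */

	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	return 0;
}
#endif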
1787
1788 /* HW Lock for shared dual port PHYs */
1789 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1790 {
1791         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1792
1793         mutex_lock(&bp->port.phy_mutex);
1794
1795         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1796             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1797                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1798 }
1799
1800 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1801 {
1802         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1803
1804         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1805             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1806                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1807
1808         mutex_unlock(&bp->port.phy_mutex);
1809 }
1810
1811 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1812 {
1813         /* The GPIO should be swapped if swap register is set and active */
1814         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1815                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1816         int gpio_shift = gpio_num +
1817                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1818         u32 gpio_mask = (1 << gpio_shift);
1819         u32 gpio_reg;
1820
1821         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1822                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1823                 return -EINVAL;
1824         }
1825
1826         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1827         /* read GPIO and mask all bits except the float bits */
1828         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1829
1830         switch (mode) {
1831         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1832                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1833                    gpio_num, gpio_shift);
1834                 /* clear FLOAT and set CLR */
1835                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1836                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1837                 break;
1838
1839         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1840                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1841                    gpio_num, gpio_shift);
1842                 /* clear FLOAT and set SET */
1843                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1844                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1845                 break;
1846
1847         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1848                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1849                    gpio_num, gpio_shift);
1850                 /* set FLOAT */
1851                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1852                 break;
1853
1854         default:
1855                 break;
1856         }
1857
1858         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1859         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1860
1861         return 0;
1862 }
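/*
 * Example usage (illustrative): drive GPIO 1 low on the current port, e.g.
 * to hold an external PHY in reset, as the fan-failure handler later in
 * this file does:
 *
 *	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 *		       MISC_REGISTERS_GPIO_OUTPUT_LOW, BP_PORT(bp));
 */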
1863
1864 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1865 {
1866         u32 spio_mask = (1 << spio_num);
1867         u32 spio_reg;
1868
1869         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1870             (spio_num > MISC_REGISTERS_SPIO_7)) {
1871                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1872                 return -EINVAL;
1873         }
1874
1875         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1876         /* read SPIO and mask all bits except the float bits */
1877         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1878
1879         switch (mode) {
1880         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1881                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1882                 /* clear FLOAT and set CLR */
1883                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1884                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1885                 break;
1886
1887         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1888                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1889                 /* clear FLOAT and set SET */
1890                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1891                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1892                 break;
1893
1894         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1895                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1896                 /* set FLOAT */
1897                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1898                 break;
1899
1900         default:
1901                 break;
1902         }
1903
1904         REG_WR(bp, MISC_REG_SPIO, spio_reg);
1905         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1906
1907         return 0;
1908 }
1909
1910 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
1911 {
1912         switch (bp->link_vars.ieee_fc &
1913                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
1914         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1915                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1916                                           ADVERTISED_Pause);
1917                 break;
1918         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1919                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
1920                                          ADVERTISED_Pause);
1921                 break;
1922         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1923                 bp->port.advertising |= ADVERTISED_Asym_Pause;
1924                 break;
1925         default:
1926                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1927                                           ADVERTISED_Pause);
1928                 break;
1929         }
1930 }
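/*
 * Summary of the mapping above (IEEE 802.3 pause resolution):
 *
 *	ieee_fc advertisement	-> ethtool advertising bits
 *	PAUSE_NONE		-> neither Pause nor Asym_Pause
 *	PAUSE_BOTH		-> Pause | Asym_Pause
 *	PAUSE_ASYMMETRIC	-> Asym_Pause only
 *	anything else		-> neither (default case)
 */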
1931
1932 static void bnx2x_link_report(struct bnx2x *bp)
1933 {
1934         if (bp->link_vars.link_up) {
1935                 if (bp->state == BNX2X_STATE_OPEN)
1936                         netif_carrier_on(bp->dev);
1937                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
1938
1939                 printk("%d Mbps ", bp->link_vars.line_speed);
1940
1941                 if (bp->link_vars.duplex == DUPLEX_FULL)
1942                         printk("full duplex");
1943                 else
1944                         printk("half duplex");
1945
1946                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
1947                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
1948                                 printk(", receive ");
1949                                 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1950                                         printk("& transmit ");
1951                         } else {
1952                                 printk(", transmit ");
1953                         }
1954                         printk("flow control ON");
1955                 }
1956                 printk("\n");
1957
1958         } else { /* link_down */
1959                 netif_carrier_off(bp->dev);
1960                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
1961         }
1962 }
1963
1964 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1965 {
1966         if (!BP_NOMCP(bp)) {
1967                 u8 rc;
1968
1969                 /* Initialize link parameters structure variables */
1970                 /* It is recommended to turn off RX FC for jumbo frames
1971                    for better performance */
1972                 if (IS_E1HMF(bp))
1973                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1974                 else if (bp->dev->mtu > 5000)
1975                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
1976                 else
1977                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1978
1979                 bnx2x_acquire_phy_lock(bp);
1980                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1981                 bnx2x_release_phy_lock(bp);
1982
1983                 bnx2x_calc_fc_adv(bp);
1984
1985                 if (bp->link_vars.link_up)
1986                         bnx2x_link_report(bp);
1987
1989                 return rc;
1990         }
1991         BNX2X_ERR("Bootcode is missing - not initializing link\n");
1992         return -EINVAL;
1993 }
1994
1995 static void bnx2x_link_set(struct bnx2x *bp)
1996 {
1997         if (!BP_NOMCP(bp)) {
1998                 bnx2x_acquire_phy_lock(bp);
1999                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2000                 bnx2x_release_phy_lock(bp);
2001
2002                 bnx2x_calc_fc_adv(bp);
2003         } else
2004                 BNX2X_ERR("Bootcode is missing - not setting link\n");
2005 }
2006
2007 static void bnx2x__link_reset(struct bnx2x *bp)
2008 {
2009         if (!BP_NOMCP(bp)) {
2010                 bnx2x_acquire_phy_lock(bp);
2011                 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
2012                 bnx2x_release_phy_lock(bp);
2013         } else
2014                 BNX2X_ERR("Bootcode is missing - not resetting link\n");
2015 }
2016
2017 static u8 bnx2x_link_test(struct bnx2x *bp)
2018 {
2019         u8 rc;
2020
2021         bnx2x_acquire_phy_lock(bp);
2022         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2023         bnx2x_release_phy_lock(bp);
2024
2025         return rc;
2026 }
2027
2028 /* Calculates the sum of vn_min_rates.
2029    It's needed for further normalization of the min_rates.
2030
2031    Returns:
2032      sum of vn_min_rates
2033        or
2034      0 - if all the min_rates are 0.
2035      In the later case fairness algorithm should be deactivated.
2036      In the latter case the fairness algorithm should be deactivated.
2037      If not all min_rates are zero, then those that are zero will
2038      be set to 1.
2039 static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2040 {
2041         int i, port = BP_PORT(bp);
2042         u32 wsum = 0;
2043         int all_zero = 1;
2044
2045         for (i = 0; i < E1HVN_MAX; i++) {
2046                 u32 vn_cfg =
2047                         SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2048                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2049                                      FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2050                 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2051                         /* If min rate is zero - set it to 1 */
2052                         if (!vn_min_rate)
2053                                 vn_min_rate = DEF_MIN_RATE;
2054                         else
2055                                 all_zero = 0;
2056
2057                         wsum += vn_min_rate;
2058                 }
2059         }
2060
2061         /* ... only if all min rates are zeros - disable FAIRNESS */
2062         if (all_zero)
2063                 return 0;
2064
2065         return wsum;
2066 }
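/*
 * Worked example (illustrative): with four non-hidden VNs whose MIN_BW
 * fields are 0, 25, 0 and 75 (the field is in units of 100 Mbps), the
 * per-VN rates become 0, 2500, 0 and 7500; the zero entries are raised to
 * DEF_MIN_RATE, so the function returns 2500 + 7500 + 2 * DEF_MIN_RATE and
 * fairness stays enabled.  Only when every VN is configured to 0 does it
 * return 0 and fairness gets disabled.
 */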
2067
2068 static void bnx2x_init_port_minmax(struct bnx2x *bp,
2069                                    int en_fness,
2070                                    u16 port_rate,
2071                                    struct cmng_struct_per_port *m_cmng_port)
2072 {
2073         u32 r_param = port_rate / 8;
2074         int port = BP_PORT(bp);
2075         int i;
2076
2077         memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2078
2079         /* Enable minmax only if we are in e1hmf mode */
2080         if (IS_E1HMF(bp)) {
2081                 u32 fair_periodic_timeout_usec;
2082                 u32 t_fair;
2083
2084                 /* Enable rate shaping and fairness */
2085                 m_cmng_port->flags.cmng_vn_enable = 1;
2086                 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2087                 m_cmng_port->flags.rate_shaping_enable = 1;
2088
2089                 if (!en_fness)
2090                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2091                            "  fairness will be disabled\n");
2092
2093                 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2094                 m_cmng_port->rs_vars.rs_periodic_timeout =
2095                                                 RS_PERIODIC_TIMEOUT_USEC / 4;
2096
2097                 /* this is the threshold below which no timer arming will occur;
2098                    the 1.25 coefficient makes the threshold a little bigger
2099                    than the real time, to compensate for timer inaccuracy */
2100                 m_cmng_port->rs_vars.rs_threshold =
2101                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2102
2103                 /* resolution of fairness timer */
2104                 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2105                 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2106                 t_fair = T_FAIR_COEF / port_rate;
2107
2108                 /* this is the threshold below which we won't arm
2109                    the timer anymore */
2110                 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2111
2112                 /* we multiply by 1e3/8 to get bytes/msec.
2113                    We don't want the credits to exceed
2114                    T_FAIR*FAIR_MEM (the algorithm resolution) */
2115                 m_cmng_port->fair_vars.upper_bound =
2116                                                 r_param * t_fair * FAIR_MEM;
2117                 /* since each tick is 4 usec */
2118                 m_cmng_port->fair_vars.fairness_timeout =
2119                                                 fair_periodic_timeout_usec / 4;
2120
2121         } else {
2122                 /* Disable rate shaping and fairness */
2123                 m_cmng_port->flags.cmng_vn_enable = 0;
2124                 m_cmng_port->flags.fairness_enable = 0;
2125                 m_cmng_port->flags.rate_shaping_enable = 0;
2126
2127                 DP(NETIF_MSG_IFUP,
2128                    "Single function mode  minmax will be disabled\n");
2129         }
2130
2131         /* Store it to internal memory */
2132         for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2133                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2134                        XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2135                        ((u32 *)(m_cmng_port))[i]);
2136 }
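/*
 * Worked example (illustrative) for a 10G port: r_param = 10000 / 8 = 1250
 * bytes/usec, so rs_periodic_timeout = 100 / 4 = 25 SDM ticks and
 * rs_threshold = (100 * 1250 * 5) / 4 = 156250 bytes.  Per the comment
 * above, t_fair = T_FAIR_COEF / 10000 = 1000 usec at 10G, which makes
 * upper_bound = 1250 * 1000 * FAIR_MEM credit bytes.
 */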
2137
2138 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2139                                    u32 wsum, u16 port_rate,
2140                                  struct cmng_struct_per_port *m_cmng_port)
2141 {
2142         struct rate_shaping_vars_per_vn m_rs_vn;
2143         struct fairness_vars_per_vn m_fair_vn;
2144         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2145         u16 vn_min_rate, vn_max_rate;
2146         int i;
2147
2148         /* If function is hidden - set min and max to zeroes */
2149         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2150                 vn_min_rate = 0;
2151                 vn_max_rate = 0;
2152
2153         } else {
2154                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2155                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2156                 /* If FAIRNESS is enabled (not all min rates are zeroes) and
2157                    if current min rate is zero - set it to 1.
2158                    This is a requirement of the algorithm. */
2159                 if ((vn_min_rate == 0) && wsum)
2160                         vn_min_rate = DEF_MIN_RATE;
2161                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2162                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2163         }
2164
2165         DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d  vn_max_rate=%d  "
2166            "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2167
2168         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2169         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2170
2171         /* global vn counter - maximal Mbps for this vn */
2172         m_rs_vn.vn_counter.rate = vn_max_rate;
2173
2174         /* quota - number of bytes transmitted in this period */
2175         m_rs_vn.vn_counter.quota =
2176                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2177
2178 #ifdef BNX2X_PER_PROT_QOS
2179         /* per protocol counter */
2180         for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2181                 /* maximal Mbps for this protocol */
2182                 m_rs_vn.protocol_counters[protocol].rate =
2183                                                 protocol_max_rate[protocol];
2184                 /* the quota in each timer period -
2185                    number of bytes transmitted in this period */
2186                 m_rs_vn.protocol_counters[protocol].quota =
2187                         (u32)(rs_periodic_timeout_usec *
2188                           ((double)m_rs_vn.
2189                                    protocol_counters[protocol].rate/8));
2190         }
2191 #endif
2192
2193         if (wsum) {
2194                 /* credit for each period of the fairness algorithm:
2195                    number of bytes in T_FAIR (the VNs share the port rate).
2196                    wsum should not be larger than 10000, thus
2197                    T_FAIR_COEF / (8 * wsum) will always be greater than zero */
2198                 m_fair_vn.vn_credit_delta =
2199                         max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2200                             (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2201                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2202                    m_fair_vn.vn_credit_delta);
2203         }
2204
2205 #ifdef BNX2X_PER_PROT_QOS
2206         do {
2207                 u32 protocolWeightSum = 0;
2208
2209                 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2210                         protocolWeightSum +=
2211                                         drvInit.protocol_min_rate[protocol];
2212                 /* per protocol counter -
2213                    NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2214                 if (protocolWeightSum > 0) {
2215                         for (protocol = 0;
2216                              protocol < NUM_OF_PROTOCOLS; protocol++)
2217                                 /* credit for each period of the
2218                                    fairness algorithm - number of bytes in
2219                                    T_FAIR (the protocols share the vn rate) */
2220                                 m_fair_vn.protocol_credit_delta[protocol] =
2221                                         (u32)((vn_min_rate / 8) * t_fair *
2222                                         protocol_min_rate / protocolWeightSum);
2223                 }
2224         } while (0);
2225 #endif
2226
2227         /* Store it to internal memory */
2228         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2229                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2230                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2231                        ((u32 *)(&m_rs_vn))[i]);
2232
2233         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2234                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2235                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2236                        ((u32 *)(&m_fair_vn))[i]);
2237 }
2238
2239 /* This function is called upon link interrupt */
2240 static void bnx2x_link_attn(struct bnx2x *bp)
2241 {
2242         int vn;
2243
2244         /* Make sure that we are synced with the current statistics */
2245         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2246
2247         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2248
2249         if (bp->link_vars.link_up) {
2250
2251                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2252                         struct host_port_stats *pstats;
2253
2254                         pstats = bnx2x_sp(bp, port_stats);
2255                         /* reset old bmac stats */
2256                         memset(&(pstats->mac_stx[0]), 0,
2257                                sizeof(struct mac_stx));
2258                 }
2259                 if ((bp->state == BNX2X_STATE_OPEN) ||
2260                     (bp->state == BNX2X_STATE_DISABLED))
2261                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2262         }
2263
2264         /* indicate link status */
2265         bnx2x_link_report(bp);
2266
2267         if (IS_E1HMF(bp)) {
2268                 int func;
2269
2270                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2271                         if (vn == BP_E1HVN(bp))
2272                                 continue;
2273
2274                         func = ((vn << 1) | BP_PORT(bp));
2275
2276                         /* Set the attention towards other drivers
2277                            on the same port */
2278                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2279                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2280                 }
2281         }
2282
2283         if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2284                 struct cmng_struct_per_port m_cmng_port;
2285                 u32 wsum;
2286                 int port = BP_PORT(bp);
2287
2288                 /* Init RATE SHAPING and FAIRNESS contexts */
2289                 wsum = bnx2x_calc_vn_wsum(bp);
2290                 bnx2x_init_port_minmax(bp, (int)wsum,
2291                                         bp->link_vars.line_speed,
2292                                         &m_cmng_port);
2293                 if (IS_E1HMF(bp))
2294                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2295                                 bnx2x_init_vn_minmax(bp, 2*vn + port,
2296                                         wsum, bp->link_vars.line_speed,
2297                                                      &m_cmng_port);
2298         }
2299 }
2300
2301 static void bnx2x__link_status_update(struct bnx2x *bp)
2302 {
2303         if (bp->state != BNX2X_STATE_OPEN)
2304                 return;
2305
2306         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2307
2308         if (bp->link_vars.link_up)
2309                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2310         else
2311                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2312
2313         /* indicate link status */
2314         bnx2x_link_report(bp);
2315 }
2316
2317 static void bnx2x_pmf_update(struct bnx2x *bp)
2318 {
2319         int port = BP_PORT(bp);
2320         u32 val;
2321
2322         bp->port.pmf = 1;
2323         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2324
2325         /* enable nig attention */
2326         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2327         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2328         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2329
2330         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2331 }
2332
2333 /* end of Link */
2334
2335 /* slow path */
2336
2337 /*
2338  * General service functions
2339  */
2340
2341 /* the slow path queue is odd since completions arrive on the fastpath ring */
2342 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2343                          u32 data_hi, u32 data_lo, int common)
2344 {
2345         int func = BP_FUNC(bp);
2346
2347         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2348            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2349            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2350            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2351            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2352
2353 #ifdef BNX2X_STOP_ON_ERROR
2354         if (unlikely(bp->panic))
2355                 return -EIO;
2356 #endif
2357
2358         spin_lock_bh(&bp->spq_lock);
2359
2360         if (!bp->spq_left) {
2361                 BNX2X_ERR("BUG! SPQ ring full!\n");
2362                 spin_unlock_bh(&bp->spq_lock);
2363                 bnx2x_panic();
2364                 return -EBUSY;
2365         }
2366
2367         /* CID needs the port number to be encoded in it */
2368         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2369                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2370                                      HW_CID(bp, cid)));
2371         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2372         if (common)
2373                 bp->spq_prod_bd->hdr.type |=
2374                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2375
2376         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2377         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2378
2379         bp->spq_left--;
2380
2381         if (bp->spq_prod_bd == bp->spq_last_bd) {
2382                 bp->spq_prod_bd = bp->spq;
2383                 bp->spq_prod_idx = 0;
2384                 DP(NETIF_MSG_TIMER, "end of spq\n");
2385
2386         } else {
2387                 bp->spq_prod_bd++;
2388                 bp->spq_prod_idx++;
2389         }
2390
2391         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2392                bp->spq_prod_idx);
2393
2394         spin_unlock_bh(&bp->spq_lock);
2395         return 0;
2396 }
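/*
 * Illustrative sketch (not part of the original driver): ramrods are
 * posted through bnx2x_sp_post() and complete asynchronously on the
 * fastpath completion ring (handled by bnx2x_sp_event()).  A hypothetical
 * per-connection caller would look like:
 */
#if 0
static int example_post_ramrod(struct bnx2x *bp, int command, int cid,
			       dma_addr_t data_mapping)
{
	/* common = 0: a per-connection ramrod, not a common one */
	return bnx2x_sp_post(bp, command, cid,
			     U64_HI(data_mapping), U64_LO(data_mapping), 0);
}
#endif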
2397
2398 /* acquire split MCP access lock register */
2399 static int bnx2x_acquire_alr(struct bnx2x *bp)
2400 {
2401         u32 i, j, val;
2402         int rc = 0;
2403
2404         might_sleep();
2405         i = 100;
2406         for (j = 0; j < i*10; j++) {
2407                 val = (1UL << 31);
2408                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2409                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2410                 if (val & (1L << 31))
2411                         break;
2412
2413                 msleep(5);
2414         }
2415         if (!(val & (1L << 31))) {
2416                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2417                 rc = -EBUSY;
2418         }
2419
2420         return rc;
2421 }
2422
2423 /* release split MCP access lock register */
2424 static void bnx2x_release_alr(struct bnx2x *bp)
2425 {
2426         u32 val = 0;
2427
2428         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2429 }
2430
2431 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2432 {
2433         struct host_def_status_block *def_sb = bp->def_status_blk;
2434         u16 rc = 0;
2435
2436         barrier(); /* status block is written to by the chip */
2437         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2438                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2439                 rc |= 1;
2440         }
2441         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2442                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2443                 rc |= 2;
2444         }
2445         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2446                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2447                 rc |= 4;
2448         }
2449         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2450                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2451                 rc |= 8;
2452         }
2453         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2454                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2455                 rc |= 16;
2456         }
2457         return rc;
2458 }
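/*
 * The return value above is a bitmask of which default status block
 * indices changed: bit 0 - attention bits, bit 1 - CSTORM, bit 2 - USTORM,
 * bit 3 - XSTORM, bit 4 - TSTORM.  A caller can then test, for example,
 * (rc & 0x1) to decide whether the attention bits need servicing.
 */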
2459
2460 /*
2461  * slow path service functions
2462  */
2463
2464 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2465 {
2466         int port = BP_PORT(bp);
2467         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2468                        COMMAND_REG_ATTN_BITS_SET);
2469         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2470                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2471         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2472                                        NIG_REG_MASK_INTERRUPT_PORT0;
2473         u32 aeu_mask;
2474
2475         if (bp->attn_state & asserted)
2476                 BNX2X_ERR("IGU ERROR\n");
2477
2478         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2479         aeu_mask = REG_RD(bp, aeu_addr);
2480
2481         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2482            aeu_mask, asserted);
2483         aeu_mask &= ~(asserted & 0xff);
2484         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2485
2486         REG_WR(bp, aeu_addr, aeu_mask);
2487         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2488
2489         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2490         bp->attn_state |= asserted;
2491         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2492
2493         if (asserted & ATTN_HARD_WIRED_MASK) {
2494                 if (asserted & ATTN_NIG_FOR_FUNC) {
2495
2496                         bnx2x_acquire_phy_lock(bp);
2497
2498                         /* save nig interrupt mask */
2499                         bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2500                         REG_WR(bp, nig_int_mask_addr, 0);
2501
2502                         bnx2x_link_attn(bp);
2503
2504                         /* handle unicore attn? */
2505                 }
2506                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2507                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2508
2509                 if (asserted & GPIO_2_FUNC)
2510                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2511
2512                 if (asserted & GPIO_3_FUNC)
2513                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2514
2515                 if (asserted & GPIO_4_FUNC)
2516                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2517
2518                 if (port == 0) {
2519                         if (asserted & ATTN_GENERAL_ATTN_1) {
2520                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2521                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2522                         }
2523                         if (asserted & ATTN_GENERAL_ATTN_2) {
2524                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2525                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2526                         }
2527                         if (asserted & ATTN_GENERAL_ATTN_3) {
2528                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2529                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2530                         }
2531                 } else {
2532                         if (asserted & ATTN_GENERAL_ATTN_4) {
2533                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2534                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2535                         }
2536                         if (asserted & ATTN_GENERAL_ATTN_5) {
2537                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2538                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2539                         }
2540                         if (asserted & ATTN_GENERAL_ATTN_6) {
2541                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2542                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2543                         }
2544                 }
2545
2546         } /* if hardwired */
2547
2548         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2549            asserted, hc_addr);
2550         REG_WR(bp, hc_addr, asserted);
2551
2552         /* now set back the mask */
2553         if (asserted & ATTN_NIG_FOR_FUNC) {
2554                 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2555                 bnx2x_release_phy_lock(bp);
2556         }
2557 }
2558
2559 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2560 {
2561         int port = BP_PORT(bp);
2562         int reg_offset;
2563         u32 val;
2564
2565         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2566                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2567
2568         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2569
2570                 val = REG_RD(bp, reg_offset);
2571                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2572                 REG_WR(bp, reg_offset, val);
2573
2574                 BNX2X_ERR("SPIO5 hw attention\n");
2575
2576                 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2577                 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
2578                 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2579                         /* Fan failure attention */
2580
2581                         /* The PHY reset is controlled by GPIO 1 */
2582                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2583                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2584                         /* Low power mode is controlled by GPIO 2 */
2585                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2586                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2587                         /* mark the failure */
2588                         bp->link_params.ext_phy_config &=
2589                                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2590                         bp->link_params.ext_phy_config |=
2591                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2592                         SHMEM_WR(bp,
2593                                  dev_info.port_hw_config[port].
2594                                                         external_phy_config,
2595                                  bp->link_params.ext_phy_config);
2596                         /* log the failure */
2597                         printk(KERN_ERR PFX "Fan Failure on Network"
2598                                " Controller %s has caused the driver to"
2599                                " shut down the card to prevent permanent"
2600                                " damage.  Please contact Dell Support for"
2601                                " assistance\n", bp->dev->name);
2602                         break;
2603
2604                 default:
2605                         break;
2606                 }
2607         }
2608
2609         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2610
2611                 val = REG_RD(bp, reg_offset);
2612                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2613                 REG_WR(bp, reg_offset, val);
2614
2615                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2616                           (attn & HW_INTERRUT_ASSERT_SET_0));
2617                 bnx2x_panic();
2618         }
2619 }
2620
2621 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2622 {
2623         u32 val;
2624
2625         if (attn & BNX2X_DOORQ_ASSERT) {
2626
2627                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2628                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2629                 /* DORQ discard attention */
2630                 if (val & 0x2)
2631                         BNX2X_ERR("FATAL error from DORQ\n");
2632         }
2633
2634         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2635
2636                 int port = BP_PORT(bp);
2637                 int reg_offset;
2638
2639                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2640                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2641
2642                 val = REG_RD(bp, reg_offset);
2643                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2644                 REG_WR(bp, reg_offset, val);
2645
2646                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2647                           (attn & HW_INTERRUT_ASSERT_SET_1));
2648                 bnx2x_panic();
2649         }
2650 }
2651
2652 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2653 {
2654         u32 val;
2655
2656         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2657
2658                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2659                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2660                 /* CFC error attention */
2661                 if (val & 0x2)
2662                         BNX2X_ERR("FATAL error from CFC\n");
2663         }
2664
2665         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2666
2667                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2668                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2669                 /* RQ_USDMDP_FIFO_OVERFLOW */
2670                 if (val & 0x18000)
2671                         BNX2X_ERR("FATAL error from PXP\n");
2672         }
2673
2674         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2675
2676                 int port = BP_PORT(bp);
2677                 int reg_offset;
2678
2679                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2680                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2681
2682                 val = REG_RD(bp, reg_offset);
2683                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2684                 REG_WR(bp, reg_offset, val);
2685
2686                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2687                           (attn & HW_INTERRUT_ASSERT_SET_2));
2688                 bnx2x_panic();
2689         }
2690 }
2691
2692 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2693 {
2694         u32 val;
2695
2696         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2697
2698                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2699                         int func = BP_FUNC(bp);
2700
2701                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2702                         bnx2x__link_status_update(bp);
2703                         if (SHMEM_RD(bp, func_mb[func].drv_status) &
2704                                                         DRV_STATUS_PMF)
2705                                 bnx2x_pmf_update(bp);
2706
2707                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2708
2709                         BNX2X_ERR("MC assert!\n");
2710                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2711                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2712                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2713                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2714                         bnx2x_panic();
2715
2716                 } else if (attn & BNX2X_MCP_ASSERT) {
2717
2718                         BNX2X_ERR("MCP assert!\n");
2719                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2720                         bnx2x_fw_dump(bp);
2721
2722                 } else
2723                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2724         }
2725
2726         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2727                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2728                 if (attn & BNX2X_GRC_TIMEOUT) {
2729                         val = CHIP_IS_E1H(bp) ?
2730                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2731                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2732                 }
2733                 if (attn & BNX2X_GRC_RSV) {
2734                         val = CHIP_IS_E1H(bp) ?
2735                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2736                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2737                 }
2738                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2739         }
2740 }
2741
2742 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2743 {
2744         struct attn_route attn;
2745         struct attn_route group_mask;
2746         int port = BP_PORT(bp);
2747         int index;
2748         u32 reg_addr;
2749         u32 val;
2750         u32 aeu_mask;
2751
2752         /* need to take HW lock because MCP or other port might also
2753            try to handle this event */
2754         bnx2x_acquire_alr(bp);
2755
2756         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2757         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2758         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2759         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2760         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2761            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2762
2763         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2764                 if (deasserted & (1 << index)) {
2765                         group_mask = bp->attn_group[index];
2766
2767                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2768                            index, group_mask.sig[0], group_mask.sig[1],
2769                            group_mask.sig[2], group_mask.sig[3]);
2770
2771                         bnx2x_attn_int_deasserted3(bp,
2772                                         attn.sig[3] & group_mask.sig[3]);
2773                         bnx2x_attn_int_deasserted1(bp,
2774                                         attn.sig[1] & group_mask.sig[1]);
2775                         bnx2x_attn_int_deasserted2(bp,
2776                                         attn.sig[2] & group_mask.sig[2]);
2777                         bnx2x_attn_int_deasserted0(bp,
2778                                         attn.sig[0] & group_mask.sig[0]);
2779
2780                         if ((attn.sig[0] & group_mask.sig[0] &
2781                                                 HW_PRTY_ASSERT_SET_0) ||
2782                             (attn.sig[1] & group_mask.sig[1] &
2783                                                 HW_PRTY_ASSERT_SET_1) ||
2784                             (attn.sig[2] & group_mask.sig[2] &
2785                                                 HW_PRTY_ASSERT_SET_2))
2786                                 BNX2X_ERR("FATAL HW block parity attention\n");
2787                 }
2788         }
2789
2790         bnx2x_release_alr(bp);
2791
2792         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2793
2794         val = ~deasserted;
2795         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2796            val, reg_addr);
2797         REG_WR(bp, reg_addr, val);
2798
2799         if (~bp->attn_state & deasserted)
2800                 BNX2X_ERR("IGU ERROR\n");
2801
2802         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2803                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2804
2805         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2806         aeu_mask = REG_RD(bp, reg_addr);
2807
2808         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2809            aeu_mask, deasserted);
2810         aeu_mask |= (deasserted & 0xff);
2811         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2812
2813         REG_WR(bp, reg_addr, aeu_mask);
2814         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2815
2816         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2817         bp->attn_state &= ~deasserted;
2818         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2819 }
2820
2821 static void bnx2x_attn_int(struct bnx2x *bp)
2822 {
2823         /* read local copy of bits */
2824         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2825                                                                 attn_bits);
2826         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2827                                                                 attn_bits_ack);
2828         u32 attn_state = bp->attn_state;
2829
2830         /* look for changed bits */
2831         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2832         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
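
        /*
         * Example (hypothetical values): with attn_bits = 0x1, attn_ack = 0x0
         * and attn_state = 0x0, bit 0 was newly raised, so asserted = 0x1 and
         * deasserted = 0x0; once it clears with attn_ack = attn_state = 0x1,
         * the same masks yield deasserted = 0x1.
         */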
2833
2834         DP(NETIF_MSG_HW,
2835            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2836            attn_bits, attn_ack, asserted, deasserted);
2837
2838         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2839                 BNX2X_ERR("BAD attention state\n");
2840
2841         /* handle bits that were raised */
2842         if (asserted)
2843                 bnx2x_attn_int_asserted(bp, asserted);
2844
2845         if (deasserted)
2846                 bnx2x_attn_int_deasserted(bp, deasserted);
2847 }
2848
2849 static void bnx2x_sp_task(struct work_struct *work)
2850 {
2851         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2852         u16 status;
2853
2854
2855         /* Return here if interrupt is disabled */
2856         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2857                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2858                 return;
2859         }
2860
2861         status = bnx2x_update_dsb_idx(bp);
2862 /*      if (status == 0)                                     */
2863 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2864
2865         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2866
2867         /* HW attentions */
2868         if (status & 0x1)
2869                 bnx2x_attn_int(bp);
2870
2871         /* CStorm events: query_stats, port delete ramrod */
2872         if (status & 0x2)
2873                 bp->stats_pending = 0;
2874
2875         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2876                      IGU_INT_NOP, 1);
2877         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2878                      IGU_INT_NOP, 1);
2879         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2880                      IGU_INT_NOP, 1);
2881         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2882                      IGU_INT_NOP, 1);
2883         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2884                      IGU_INT_ENABLE, 1);
2885
2886 }
2887
2888 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2889 {
2890         struct net_device *dev = dev_instance;
2891         struct bnx2x *bp = netdev_priv(dev);
2892
2893         /* Return here if interrupt is disabled */
2894         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2895                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2896                 return IRQ_HANDLED;
2897         }
2898
2899         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
2900
2901 #ifdef BNX2X_STOP_ON_ERROR
2902         if (unlikely(bp->panic))
2903                 return IRQ_HANDLED;
2904 #endif
2905
2906         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2907
2908         return IRQ_HANDLED;
2909 }
2910
2911 /* end of slow path */
2912
2913 /* Statistics */
2914
2915 /****************************************************************************
2916 * Macros
2917 ****************************************************************************/
2918
2919 /* sum[hi:lo] += add[hi:lo] */
2920 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2921         do { \
2922                 s_lo += a_lo; \
2923                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2924         } while (0)
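
/*
 * Example (hypothetical values): the carry out of the low word is detected
 * by unsigned wraparound:
 *
 *	u32 hi = 0, lo = 0xffffffff;
 *	ADD_64(hi, 0, lo, 1);		-> hi == 1, lo == 0
 *
 * s_lo ends up below a_lo exactly when the 32-bit add overflowed.
 */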
2925
2926 /* difference = minuend - subtrahend */
2927 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2928         do { \
2929                 if (m_lo < s_lo) { \
2930                         /* underflow */ \
2931                         d_hi = m_hi - s_hi; \
2932                         if (d_hi > 0) { \
2933                                 /* we can borrow 1 */ \
2934                                 d_hi--; \
2935                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2936                         } else { \
2937                                 /* m_hi <= s_hi */ \
2938                                 d_hi = 0; \
2939                                 d_lo = 0; \
2940                         } \
2941                 } else { \
2942                         /* m_lo >= s_lo */ \
2943                         if (m_hi < s_hi) { \
2944                                 d_hi = 0; \
2945                                 d_lo = 0; \
2946                         } else { \
2947                                 /* m_hi >= s_hi */ \
2948                                 d_hi = m_hi - s_hi; \
2949                                 d_lo = m_lo - s_lo; \
2950                         } \
2951                 } \
2952         } while (0)
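
/*
 * Example (hypothetical values): borrowing one from the high word when the
 * low subtraction underflows:
 *
 *	u32 d_hi, d_lo;
 *	DIFF_64(d_hi, 2, 1, d_lo, 0, 1);	-> d_hi == 0, d_lo == 0xffffffff
 *
 * If the minuend is smaller overall, the result clamps to 0:0 rather than
 * wrapping, so a hardware counter reset cannot drive a statistic backwards.
 */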
2953
2954 #define UPDATE_STAT64(s, t) \
2955         do { \
2956                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2957                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2958                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2959                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2960                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2961                        pstats->mac_stx[1].t##_lo, diff.lo); \
2962         } while (0)
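
/*
 * mac_stx[0] holds the last raw MAC snapshot and mac_stx[1] accumulates the
 * deltas, so the exported counter never moves backwards even if the MAC
 * block restarts from zero (DIFF_64 clamps that negative delta to 0).
 */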
2963
2964 #define UPDATE_STAT64_NIG(s, t) \
2965         do { \
2966                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2967                         diff.lo, new->s##_lo, old->s##_lo); \
2968                 ADD_64(estats->t##_hi, diff.hi, \
2969                        estats->t##_lo, diff.lo); \
2970         } while (0)
2971
2972 /* sum[hi:lo] += add */
2973 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2974         do { \
2975                 s_lo += a; \
2976                 s_hi += (s_lo < a) ? 1 : 0; \
2977         } while (0)
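
/*
 * Same wraparound-carry trick as ADD_64, only the addend is a single u32
 * (e.g. a 32-bit firmware counter delta folded into a 64-bit accumulator).
 */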
2978
2979 #define UPDATE_EXTEND_STAT(s) \
2980         do { \
2981                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2982                               pstats->mac_stx[1].s##_lo, \
2983                               new->s); \
2984         } while (0)
2985
2986 #define UPDATE_EXTEND_TSTAT(s, t) \
2987         do { \
2988                 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2989                 old_tclient->s = le32_to_cpu(tclient->s); \
2990                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2991         } while (0)
2992
2993 #define UPDATE_EXTEND_XSTAT(s, t) \
2994         do { \
2995                 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2996                 old_xclient->s = le32_to_cpu(xclient->s); \
2997                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2998         } while (0)
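
/*
 * Note on the two macros above: the (new - old) subtraction is done in u32,
 * so even if the storm counter wrapped since the last snapshot the result
 * modulo 2^32 is still the number of events in between, assuming fewer than
 * 2^32 events per sampling interval.
 */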
2999
3000 /*
3001  * General service functions
3002  */
3003
3004 static inline long bnx2x_hilo(u32 *hiref)
3005 {
3006         u32 lo = *(hiref + 1);
3007 #if (BITS_PER_LONG == 64)
3008         u32 hi = *hiref;
3009
3010         return HILO_U64(hi, lo);
3011 #else
3012         return lo;
3013 #endif
3014 }
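
/*
 * Example (hypothetical values): hiref points at the _hi word of a {hi, lo}
 * pair, so with *hiref = 0x1 and *(hiref + 1) = 0x2 a 64-bit kernel returns
 * 0x100000002; a 32-bit kernel can only return the low word and truncates
 * by design.
 */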
3015
3016 /*
3017  * Init service functions
3018  */
3019
3020 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3021 {
3022         if (!bp->stats_pending) {
3023                 struct eth_query_ramrod_data ramrod_data = {0};
3024                 int rc;
3025
3026                 ramrod_data.drv_counter = bp->stats_counter++;
3027                 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
3028                 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3029
3030                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3031                                    ((u32 *)&ramrod_data)[1],
3032                                    ((u32 *)&ramrod_data)[0], 0);
3033                 if (rc == 0) {
3034                         /* stats ramrod has its own slot on the spq */
3035                         bp->spq_left++;
3036                         bp->stats_pending = 1;
3037                 }
3038         }
3039 }
3040
3041 static void bnx2x_stats_init(struct bnx2x *bp)
3042 {
3043         int port = BP_PORT(bp);
3044
3045         bp->executer_idx = 0;
3046         bp->stats_counter = 0;
3047
3048         /* port stats */
3049         if (!BP_NOMCP(bp))
3050                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3051         else
3052                 bp->port.port_stx = 0;
3053         DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3054
3055         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3056         bp->port.old_nig_stats.brb_discard =
3057                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3058         bp->port.old_nig_stats.brb_truncate =
3059                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3060         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3061                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3062         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3063                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3064
3065         /* function stats */
3066         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3067         memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3068         memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3069         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3070
3071         bp->stats_state = STATS_STATE_DISABLED;
3072         if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3073                 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3074 }
3075
3076 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3077 {
3078         struct dmae_command *dmae = &bp->stats_dmae;
3079         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3080
3081         *stats_comp = DMAE_COMP_VAL;
3082
3083         /* loader */
3084         if (bp->executer_idx) {
3085                 int loader_idx = PMF_DMAE_C(bp);
3086
3087                 memset(dmae, 0, sizeof(struct dmae_command));
3088
3089                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3090                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3091                                 DMAE_CMD_DST_RESET |
3092 #ifdef __BIG_ENDIAN
3093                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3094 #else
3095                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3096 #endif
3097                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3098                                                DMAE_CMD_PORT_0) |
3099                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3100                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3101                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3102                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3103                                      sizeof(struct dmae_command) *
3104                                      (loader_idx + 1)) >> 2;
3105                 dmae->dst_addr_hi = 0;
3106                 dmae->len = sizeof(struct dmae_command) >> 2;
3107                 if (CHIP_IS_E1(bp))
3108                         dmae->len--;
3109                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3110                 dmae->comp_addr_hi = 0;
3111                 dmae->comp_val = 1;
3112
3113                 *stats_comp = 0;
3114                 bnx2x_post_dmae(bp, dmae, loader_idx);
3115
3116         } else if (bp->func_stx) {
3117                 *stats_comp = 0;
3118                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3119         }
3120 }
3121
3122 static int bnx2x_stats_comp(struct bnx2x *bp)
3123 {
3124         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3125         int cnt = 10;
3126
3127         might_sleep();
3128         while (*stats_comp != DMAE_COMP_VAL) {
3129                 if (!cnt) {
3130                         BNX2X_ERR("timeout waiting for stats finished\n");
3131                         break;
3132                 }
3133                 cnt--;
3134                 msleep(1);
3135         }
3136         return 1;
3137 }
3138
3139 /*
3140  * Statistics service functions
3141  */
3142
3143 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3144 {
3145         struct dmae_command *dmae;
3146         u32 opcode;
3147         int loader_idx = PMF_DMAE_C(bp);
3148         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3149
3150         /* sanity */
3151         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3152                 BNX2X_ERR("BUG!\n");
3153                 return;
3154         }
3155
3156         bp->executer_idx = 0;
3157
3158         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3159                   DMAE_CMD_C_ENABLE |
3160                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3161 #ifdef __BIG_ENDIAN
3162                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3163 #else
3164                   DMAE_CMD_ENDIANITY_DW_SWAP |
3165 #endif
3166                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3167                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3168
3169         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3170         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3171         dmae->src_addr_lo = bp->port.port_stx >> 2;
3172         dmae->src_addr_hi = 0;
3173         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3174         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3175         dmae->len = DMAE_LEN32_RD_MAX;
3176         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3177         dmae->comp_addr_hi = 0;
3178         dmae->comp_val = 1;
3179
3180         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3181         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3182         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3183         dmae->src_addr_hi = 0;
3184         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3185                                    DMAE_LEN32_RD_MAX * 4);
3186         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3187                                    DMAE_LEN32_RD_MAX * 4);
3188         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3189         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3190         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3191         dmae->comp_val = DMAE_COMP_VAL;
3192
3193         *stats_comp = 0;
3194         bnx2x_hw_stats_post(bp);
3195         bnx2x_stats_comp(bp);
3196 }
3197
3198 static void bnx2x_port_stats_init(struct bnx2x *bp)
3199 {
3200         struct dmae_command *dmae;
3201         int port = BP_PORT(bp);
3202         int vn = BP_E1HVN(bp);
3203         u32 opcode;
3204         int loader_idx = PMF_DMAE_C(bp);
3205         u32 mac_addr;
3206         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3207
3208         /* sanity */
3209         if (!bp->link_vars.link_up || !bp->port.pmf) {
3210                 BNX2X_ERR("BUG!\n");
3211                 return;
3212         }
3213
3214         bp->executer_idx = 0;
3215
3216         /* MCP */
3217         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3218                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3219                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3220 #ifdef __BIG_ENDIAN
3221                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3222 #else
3223                   DMAE_CMD_ENDIANITY_DW_SWAP |
3224 #endif
3225                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3226                   (vn << DMAE_CMD_E1HVN_SHIFT));
3227
3228         if (bp->port.port_stx) {
3229
3230                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3231                 dmae->opcode = opcode;
3232                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3233                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3234                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3235                 dmae->dst_addr_hi = 0;
3236                 dmae->len = sizeof(struct host_port_stats) >> 2;
3237                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3238                 dmae->comp_addr_hi = 0;
3239                 dmae->comp_val = 1;
3240         }
3241
3242         if (bp->func_stx) {
3243
3244                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3245                 dmae->opcode = opcode;
3246                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3247                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3248                 dmae->dst_addr_lo = bp->func_stx >> 2;
3249                 dmae->dst_addr_hi = 0;
3250                 dmae->len = sizeof(struct host_func_stats) >> 2;
3251                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3252                 dmae->comp_addr_hi = 0;
3253                 dmae->comp_val = 1;
3254         }
3255
3256         /* MAC */
3257         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3258                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3259                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3260 #ifdef __BIG_ENDIAN
3261                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3262 #else
3263                   DMAE_CMD_ENDIANITY_DW_SWAP |
3264 #endif
3265                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3266                   (vn << DMAE_CMD_E1HVN_SHIFT));
3267
3268         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3269
3270                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3271                                    NIG_REG_INGRESS_BMAC0_MEM);
3272
3273                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3274                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3275                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3276                 dmae->opcode = opcode;
3277                 dmae->src_addr_lo = (mac_addr +
3278                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3279                 dmae->src_addr_hi = 0;
3280                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3281                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3282                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3283                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3284                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3285                 dmae->comp_addr_hi = 0;
3286                 dmae->comp_val = 1;
3287
3288                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3289                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3290                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3291                 dmae->opcode = opcode;
3292                 dmae->src_addr_lo = (mac_addr +
3293                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3294                 dmae->src_addr_hi = 0;
3295                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3296                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3297                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3298                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3299                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3300                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3301                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3302                 dmae->comp_addr_hi = 0;
3303                 dmae->comp_val = 1;
3304
3305         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3306
3307                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3308
3309                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3310                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3311                 dmae->opcode = opcode;
3312                 dmae->src_addr_lo = (mac_addr +
3313                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3314                 dmae->src_addr_hi = 0;
3315                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3316                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3317                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3318                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3319                 dmae->comp_addr_hi = 0;
3320                 dmae->comp_val = 1;
3321
3322                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3323                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3324                 dmae->opcode = opcode;
3325                 dmae->src_addr_lo = (mac_addr +
3326                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3327                 dmae->src_addr_hi = 0;
3328                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3329                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3330                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3331                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3332                 dmae->len = 1;
3333                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3334                 dmae->comp_addr_hi = 0;
3335                 dmae->comp_val = 1;
3336
3337                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3338                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3339                 dmae->opcode = opcode;
3340                 dmae->src_addr_lo = (mac_addr +
3341                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3342                 dmae->src_addr_hi = 0;
3343                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3344                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3345                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3346                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3347                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3348                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3349                 dmae->comp_addr_hi = 0;
3350                 dmae->comp_val = 1;
3351         }
3352
3353         /* NIG */
3354         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3355         dmae->opcode = opcode;
3356         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3357                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3358         dmae->src_addr_hi = 0;
3359         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3360         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3361         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3362         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3363         dmae->comp_addr_hi = 0;
3364         dmae->comp_val = 1;
3365
3366         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3367         dmae->opcode = opcode;
3368         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3369                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3370         dmae->src_addr_hi = 0;
3371         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3372                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3373         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3374                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3375         dmae->len = (2*sizeof(u32)) >> 2;
3376         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3377         dmae->comp_addr_hi = 0;
3378         dmae->comp_val = 1;
3379
3380         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3381         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3382                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3383                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3384 #ifdef __BIG_ENDIAN
3385                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3386 #else
3387                         DMAE_CMD_ENDIANITY_DW_SWAP |
3388 #endif
3389                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3390                         (vn << DMAE_CMD_E1HVN_SHIFT));
3391         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3392                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3393         dmae->src_addr_hi = 0;
3394         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3395                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3396         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3397                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3398         dmae->len = (2*sizeof(u32)) >> 2;
3399         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3400         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3401         dmae->comp_val = DMAE_COMP_VAL;
3402
3403         *stats_comp = 0;
3404 }
3405
3406 static void bnx2x_func_stats_init(struct bnx2x *bp)
3407 {
3408         struct dmae_command *dmae = &bp->stats_dmae;
3409         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3410
3411         /* sanity */
3412         if (!bp->func_stx) {
3413                 BNX2X_ERR("BUG!\n");
3414                 return;
3415         }
3416
3417         bp->executer_idx = 0;
3418         memset(dmae, 0, sizeof(struct dmae_command));
3419
3420         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3421                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3422                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3423 #ifdef __BIG_ENDIAN
3424                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3425 #else
3426                         DMAE_CMD_ENDIANITY_DW_SWAP |
3427 #endif
3428                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3429                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3430         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3431         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3432         dmae->dst_addr_lo = bp->func_stx >> 2;
3433         dmae->dst_addr_hi = 0;
3434         dmae->len = sizeof(struct host_func_stats) >> 2;
3435         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3436         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3437         dmae->comp_val = DMAE_COMP_VAL;
3438
3439         *stats_comp = 0;
3440 }
3441
3442 static void bnx2x_stats_start(struct bnx2x *bp)
3443 {
3444         if (bp->port.pmf)
3445                 bnx2x_port_stats_init(bp);
3446
3447         else if (bp->func_stx)
3448                 bnx2x_func_stats_init(bp);
3449
3450         bnx2x_hw_stats_post(bp);
3451         bnx2x_storm_stats_post(bp);
3452 }
3453
3454 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3455 {
3456         bnx2x_stats_comp(bp);
3457         bnx2x_stats_pmf_update(bp);
3458         bnx2x_stats_start(bp);
3459 }
3460
3461 static void bnx2x_stats_restart(struct bnx2x *bp)
3462 {
3463         bnx2x_stats_comp(bp);
3464         bnx2x_stats_start(bp);
3465 }
3466
3467 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3468 {
3469         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3470         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3471         struct regpair diff;
3472
3473         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3474         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3475         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3476         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3477         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3478         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3479         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3480         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3481         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3482         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3483         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3484         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3485         UPDATE_STAT64(tx_stat_gt127,
3486                                 tx_stat_etherstatspkts65octetsto127octets);
3487         UPDATE_STAT64(tx_stat_gt255,
3488                                 tx_stat_etherstatspkts128octetsto255octets);
3489         UPDATE_STAT64(tx_stat_gt511,
3490                                 tx_stat_etherstatspkts256octetsto511octets);
3491         UPDATE_STAT64(tx_stat_gt1023,
3492                                 tx_stat_etherstatspkts512octetsto1023octets);
3493         UPDATE_STAT64(tx_stat_gt1518,
3494                                 tx_stat_etherstatspkts1024octetsto1522octets);
3495         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3496         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3497         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3498         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3499         UPDATE_STAT64(tx_stat_gterr,
3500                                 tx_stat_dot3statsinternalmactransmiterrors);
3501         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3502 }
3503
3504 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3505 {
3506         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3507         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3508
3509         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3510         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3511         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3512         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3513         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3514         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3515         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3516         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3517         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3518         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3519         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3520         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3521         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3522         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3523         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3524         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3525         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3526         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3527         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3528         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3529         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3530         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3531         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3532         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3533         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3534         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3535         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3536         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3537         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3538         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3539         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3540 }
3541
3542 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3543 {
3544         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3545         struct nig_stats *old = &(bp->port.old_nig_stats);
3546         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3547         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3548         struct regpair diff;
3549
3550         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3551                 bnx2x_bmac_stats_update(bp);
3552
3553         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3554                 bnx2x_emac_stats_update(bp);
3555
3556         else { /* no MAC active - should not happen */
3557                 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3558                 return -1;
3559         }
3560
3561         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3562                       new->brb_discard - old->brb_discard);
3563         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3564                       new->brb_truncate - old->brb_truncate);
3565
3566         UPDATE_STAT64_NIG(egress_mac_pkt0,
3567                                         etherstatspkts1024octetsto1522octets);
3568         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3569
3570         memcpy(old, new, sizeof(struct nig_stats));
3571
3572         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3573                sizeof(struct mac_stx));
3574         estats->brb_drop_hi = pstats->brb_drop_hi;
3575         estats->brb_drop_lo = pstats->brb_drop_lo;
3576
3577         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3578
3579         return 0;
3580 }
3581
3582 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3583 {
3584         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3585         int cl_id = BP_CL_ID(bp);
3586         struct tstorm_per_port_stats *tport =
3587                                 &stats->tstorm_common.port_statistics;
3588         struct tstorm_per_client_stats *tclient =
3589                         &stats->tstorm_common.client_statistics[cl_id];
3590         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3591         struct xstorm_per_client_stats *xclient =
3592                         &stats->xstorm_common.client_statistics[cl_id];
3593         struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3594         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3595         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3596         u32 diff;
3597
3598         /* are storm stats valid? */
3599         if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3600                                                         bp->stats_counter) {
3601                 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3602                    "  tstorm counter (%d) != stats_counter (%d)\n",
3603                    tclient->stats_counter, bp->stats_counter);
3604                 return -1;
3605         }
3606         if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3607                                                         bp->stats_counter) {
3608                 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3609                    "  xstorm counter (%d) != stats_counter (%d)\n",
3610                    xclient->stats_counter, bp->stats_counter);
3611                 return -2;
3612         }
3613
3614         fstats->total_bytes_received_hi =
3615         fstats->valid_bytes_received_hi =
3616                                 le32_to_cpu(tclient->total_rcv_bytes.hi);
3617         fstats->total_bytes_received_lo =
3618         fstats->valid_bytes_received_lo =
3619                                 le32_to_cpu(tclient->total_rcv_bytes.lo);
3620
3621         estats->error_bytes_received_hi =
3622                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
3623         estats->error_bytes_received_lo =
3624                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
3625         ADD_64(estats->error_bytes_received_hi,
3626                estats->rx_stat_ifhcinbadoctets_hi,
3627                estats->error_bytes_received_lo,
3628                estats->rx_stat_ifhcinbadoctets_lo);
3629
3630         ADD_64(fstats->total_bytes_received_hi,
3631                estats->error_bytes_received_hi,
3632                fstats->total_bytes_received_lo,
3633                estats->error_bytes_received_lo);
3634
3635         UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
3636         UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3637                                 total_multicast_packets_received);
3638         UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3639                                 total_broadcast_packets_received);
3640
3641         fstats->total_bytes_transmitted_hi =
3642                                 le32_to_cpu(xclient->total_sent_bytes.hi);
3643         fstats->total_bytes_transmitted_lo =
3644                                 le32_to_cpu(xclient->total_sent_bytes.lo);
3645
3646         UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3647                                 total_unicast_packets_transmitted);
3648         UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3649                                 total_multicast_packets_transmitted);
3650         UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3651                                 total_broadcast_packets_transmitted);
3652
3653         memcpy(estats, &(fstats->total_bytes_received_hi),
3654                sizeof(struct host_func_stats) - 2*sizeof(u32));
3655
3656         estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3657         estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3658         estats->brb_truncate_discard =
3659                                 le32_to_cpu(tport->brb_truncate_discard);
3660         estats->mac_discard = le32_to_cpu(tport->mac_discard);
3661
3662         old_tclient->rcv_unicast_bytes.hi =
3663                                 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
3664         old_tclient->rcv_unicast_bytes.lo =
3665                                 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
3666         old_tclient->rcv_broadcast_bytes.hi =
3667                                 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3668         old_tclient->rcv_broadcast_bytes.lo =
3669                                 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3670         old_tclient->rcv_multicast_bytes.hi =
3671                                 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
3672         old_tclient->rcv_multicast_bytes.lo =
3673                                 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
3674         old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
3675
3676         old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3677         old_tclient->packets_too_big_discard =
3678                                 le32_to_cpu(tclient->packets_too_big_discard);
3679         estats->no_buff_discard =
3680         old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3681         old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3682
3683         old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3684         old_xclient->unicast_bytes_sent.hi =
3685                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3686         old_xclient->unicast_bytes_sent.lo =
3687                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3688         old_xclient->multicast_bytes_sent.hi =
3689                                 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3690         old_xclient->multicast_bytes_sent.lo =
3691                                 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3692         old_xclient->broadcast_bytes_sent.hi =
3693                                 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3694         old_xclient->broadcast_bytes_sent.lo =
3695                                 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3696
3697         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3698
3699         return 0;
3700 }
3701
3702 static void bnx2x_net_stats_update(struct bnx2x *bp)
3703 {
3704         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3705         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3706         struct net_device_stats *nstats = &bp->dev->stats;
3707
3708         nstats->rx_packets =
3709                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3710                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3711                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3712
3713         nstats->tx_packets =
3714                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3715                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3716                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3717
3718         nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
3719
3720         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3721
3722         nstats->rx_dropped = old_tclient->checksum_discard +
3723                              estats->mac_discard;
3724         nstats->tx_dropped = 0;
3725
3726         nstats->multicast =
3727                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3728
3729         nstats->collisions =
3730                         estats->tx_stat_dot3statssinglecollisionframes_lo +
3731                         estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3732                         estats->tx_stat_dot3statslatecollisions_lo +
3733                         estats->tx_stat_dot3statsexcessivecollisions_lo;
3734
3735         estats->jabber_packets_received =
3736                                 old_tclient->packets_too_big_discard +
3737                                 estats->rx_stat_dot3statsframestoolong_lo;
3738
3739         nstats->rx_length_errors =
3740                                 estats->rx_stat_etherstatsundersizepkts_lo +
3741                                 estats->jabber_packets_received;
3742         nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3743         nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3744         nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3745         nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3746         nstats->rx_missed_errors = estats->xxoverflow_discard;
3747
3748         nstats->rx_errors = nstats->rx_length_errors +
3749                             nstats->rx_over_errors +
3750                             nstats->rx_crc_errors +
3751                             nstats->rx_frame_errors +
3752                             nstats->rx_fifo_errors +
3753                             nstats->rx_missed_errors;
3754
3755         nstats->tx_aborted_errors =
3756                         estats->tx_stat_dot3statslatecollisions_lo +
3757                         estats->tx_stat_dot3statsexcessivecollisions_lo;
3758         nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3759         nstats->tx_fifo_errors = 0;
3760         nstats->tx_heartbeat_errors = 0;
3761         nstats->tx_window_errors = 0;
3762
3763         nstats->tx_errors = nstats->tx_aborted_errors +
3764                             nstats->tx_carrier_errors;
3765 }
3766
3767 static void bnx2x_stats_update(struct bnx2x *bp)
3768 {
3769         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3770         int update = 0;
3771
3772         if (*stats_comp != DMAE_COMP_VAL)
3773                 return;
3774
3775         if (bp->port.pmf)
3776                 update = (bnx2x_hw_stats_update(bp) == 0);
3777
3778         update |= (bnx2x_storm_stats_update(bp) == 0);
3779
3780         if (update)
3781                 bnx2x_net_stats_update(bp);
3782
3783         else {
3784                 if (bp->stats_pending) {
3785                         bp->stats_pending++;
3786                         if (bp->stats_pending == 3) {
3787                                 BNX2X_ERR("stats not updated for 3 times\n");
3788                                 bnx2x_panic();
3789                                 return;
3790                         }
3791                 }
3792         }
3793
3794         if (bp->msglevel & NETIF_MSG_TIMER) {
3795                 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3796                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3797                 struct net_device_stats *nstats = &bp->dev->stats;
3798                 int i;
3799
3800                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3801                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
3802                                   "  tx pkt (%lx)\n",
3803                        bnx2x_tx_avail(bp->fp),
3804                        le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3805                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
3806                                   "  rx pkt (%lx)\n",
3807                        (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3808                              bp->fp->rx_comp_cons),
3809                        le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3810                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u\n",
3811                        netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3812                        estats->driver_xoff, estats->brb_drop_lo);
3813                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
3814                         "packets_too_big_discard %u  no_buff_discard %u  "
3815                         "mac_discard %u  mac_filter_discard %u  "
3816                         "xxovrflow_discard %u  brb_truncate_discard %u  "
3817                         "ttl0_discard %u\n",
3818                        old_tclient->checksum_discard,
3819                        old_tclient->packets_too_big_discard,
3820                        old_tclient->no_buff_discard, estats->mac_discard,
3821                        estats->mac_filter_discard, estats->xxoverflow_discard,
3822                        estats->brb_truncate_discard,
3823                        old_tclient->ttl0_discard);
3824
3825                 for_each_queue(bp, i) {
3826                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3827                                bnx2x_fp(bp, i, tx_pkt),
3828                                bnx2x_fp(bp, i, rx_pkt),
3829                                bnx2x_fp(bp, i, rx_calls));
3830                 }
3831         }
3832
3833         bnx2x_hw_stats_post(bp);
3834         bnx2x_storm_stats_post(bp);
3835 }
3836
3837 static void bnx2x_port_stats_stop(struct bnx2x *bp)
3838 {
3839         struct dmae_command *dmae;
3840         u32 opcode;
3841         int loader_idx = PMF_DMAE_C(bp);
3842         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3843
3844         bp->executer_idx = 0;
3845
3846         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3847                   DMAE_CMD_C_ENABLE |
3848                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3849 #ifdef __BIG_ENDIAN
3850                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3851 #else
3852                   DMAE_CMD_ENDIANITY_DW_SWAP |
3853 #endif
3854                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3855                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3856
3857         if (bp->port.port_stx) {
3858
3859                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3860                 if (bp->func_stx)
3861                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3862                 else
3863                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3864                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3865                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3866                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3867                 dmae->dst_addr_hi = 0;
3868                 dmae->len = sizeof(struct host_port_stats) >> 2;
3869                 if (bp->func_stx) {
3870                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3871                         dmae->comp_addr_hi = 0;
3872                         dmae->comp_val = 1;
3873                 } else {
3874                         dmae->comp_addr_lo =
3875                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3876                         dmae->comp_addr_hi =
3877                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3878                         dmae->comp_val = DMAE_COMP_VAL;
3879
3880                         *stats_comp = 0;
3881                 }
3882         }
3883
3884         if (bp->func_stx) {
3885
3886                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3887                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3888                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3889                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3890                 dmae->dst_addr_lo = bp->func_stx >> 2;
3891                 dmae->dst_addr_hi = 0;
3892                 dmae->len = sizeof(struct host_func_stats) >> 2;
3893                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3894                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3895                 dmae->comp_val = DMAE_COMP_VAL;
3896
3897                 *stats_comp = 0;
3898         }
3899 }
3900
3901 static void bnx2x_stats_stop(struct bnx2x *bp)
3902 {
3903         int update = 0;
3904
3905         bnx2x_stats_comp(bp);
3906
3907         if (bp->port.pmf)
3908                 update = (bnx2x_hw_stats_update(bp) == 0);
3909
3910         update |= (bnx2x_storm_stats_update(bp) == 0);
3911
3912         if (update) {
3913                 bnx2x_net_stats_update(bp);
3914
3915                 if (bp->port.pmf)
3916                         bnx2x_port_stats_stop(bp);
3917
3918                 bnx2x_hw_stats_post(bp);
3919                 bnx2x_stats_comp(bp);
3920         }
3921 }
3922
3923 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3924 {
3925 }
3926
3927 static const struct {
3928         void (*action)(struct bnx2x *bp);
3929         enum bnx2x_stats_state next_state;
3930 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3931 /* state        event   */
3932 {
3933 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3934 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
3935 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3936 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3937 },
3938 {
3939 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
3940 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
3941 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
3942 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
3943 }
3944 };
3945
3946 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3947 {
3948         enum bnx2x_stats_state state = bp->stats_state;
3949
3950         bnx2x_stats_stm[state][event].action(bp);
3951         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3952
3953         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3954                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3955                    state, event, bp->stats_state);
3956 }
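
/*
 * Walk-through of the table above: in STATS_STATE_ENABLED a STATS_EVENT_STOP
 * runs bnx2x_stats_stop() and moves the machine to STATS_STATE_DISABLED;
 * subsequent UPDATE events then hit bnx2x_stats_do_nothing() until a LINK_UP
 * event restarts collection via bnx2x_stats_start().
 */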
3957
3958 static void bnx2x_timer(unsigned long data)
3959 {
3960         struct bnx2x *bp = (struct bnx2x *) data;
3961
3962         if (!netif_running(bp->dev))
3963                 return;
3964
3965         if (atomic_read(&bp->intr_sem) != 0)
3966                 goto timer_restart;
3967
3968         if (poll) {
3969                 struct bnx2x_fastpath *fp = &bp->fp[0];
3970                 int rc;
3971
3972                 bnx2x_tx_int(fp, 1000);
3973                 rc = bnx2x_rx_int(fp, 1000);
3974         }
3975
3976         if (!BP_NOMCP(bp)) {
3977                 int func = BP_FUNC(bp);
3978                 u32 drv_pulse;
3979                 u32 mcp_pulse;
3980
3981                 ++bp->fw_drv_pulse_wr_seq;
3982                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3983                 /* TBD - add SYSTEM_TIME */
3984                 drv_pulse = bp->fw_drv_pulse_wr_seq;
3985                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
3986
3987                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
3988                              MCP_PULSE_SEQ_MASK);
3989                 /* The delta between driver pulse and mcp response
3990                  * should be 1 (before mcp response) or 0 (after mcp response)
3991                  */
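                /* E.g. drv_pulse 5 with mcp_pulse 5 or 4 is healthy;
                 * anything else (say mcp_pulse 3) means a missed beat
                 * and is reported below.
                 */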
3992                 if ((drv_pulse != mcp_pulse) &&
3993                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3994                         /* someone lost a heartbeat... */
3995                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3996                                   drv_pulse, mcp_pulse);
3997                 }
3998         }
3999
4000         if ((bp->state == BNX2X_STATE_OPEN) ||
4001             (bp->state == BNX2X_STATE_DISABLED))
4002                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4003
4004 timer_restart:
4005         mod_timer(&bp->timer, jiffies + bp->current_interval);
4006 }
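/* Illustrative sketch, not driver code: the heartbeat rule checked
 * above.  Both sequence counters are confined to the bits of the SEQ
 * masks, so "the driver may be at most one tick ahead of the MCP" has
 * to be evaluated modulo the mask to survive wraparound.
 */
#if 0
static int bnx2x_pulse_in_sync(u32 drv_pulse, u32 mcp_pulse)
{
	/* healthy: equal, or driver exactly one step ahead (mod mask) */
	return (drv_pulse == mcp_pulse) ||
	       (drv_pulse == ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK));
}
#endif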
4007
4008 /* end of Statistics */
4009
4010 /* nic init */
4011
4012 /*
4013  * nic init service functions
4014  */
4015
4016 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4017 {
4018         int port = BP_PORT(bp);
4019
4020         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4021                         USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4022                         sizeof(struct ustorm_status_block)/4);
4023         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4024                         CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4025                         sizeof(struct cstorm_status_block)/4);
4026 }
4027
4028 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4029                           dma_addr_t mapping, int sb_id)
4030 {
4031         int port = BP_PORT(bp);
4032         int func = BP_FUNC(bp);
4033         int index;
4034         u64 section;
4035
4036         /* USTORM */
4037         section = ((u64)mapping) + offsetof(struct host_status_block,
4038                                             u_status_block);
4039         sb->u_status_block.status_block_id = sb_id;
4040
4041         REG_WR(bp, BAR_USTRORM_INTMEM +
4042                USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4043         REG_WR(bp, BAR_USTRORM_INTMEM +
4044                ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4045                U64_HI(section));
4046         REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4047                 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4048
4049         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4050                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4051                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4052
4053         /* CSTORM */
4054         section = ((u64)mapping) + offsetof(struct host_status_block,
4055                                             c_status_block);
4056         sb->c_status_block.status_block_id = sb_id;
4057
4058         REG_WR(bp, BAR_CSTRORM_INTMEM +
4059                CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4060         REG_WR(bp, BAR_CSTRORM_INTMEM +
4061                ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4062                U64_HI(section));
4063         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4064                 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4065
4066         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4067                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4068                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4069
4070         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4071 }
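/* Illustrative sketch, not driver code: the address programming above
 * follows a pattern used throughout this file - a 64-bit DMA address
 * is split with U64_LO()/U64_HI() and written as two 32-bit halves at
 * consecutive 4-byte offsets.
 */
#if 0
static void write_dma_addr64(struct bnx2x *bp, u32 reg, u64 addr)
{
	REG_WR(bp, reg, U64_LO(addr));		/* low 32 bits at reg */
	REG_WR(bp, reg + 4, U64_HI(addr));	/* high 32 bits at reg + 4 */
}
#endif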
4072
4073 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4074 {
4075         int func = BP_FUNC(bp);
4076
4077         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4078                         USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4079                         sizeof(struct ustorm_def_status_block)/4);
4080         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4081                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4082                         sizeof(struct cstorm_def_status_block)/4);
4083         bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4084                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4085                         sizeof(struct xstorm_def_status_block)/4);
4086         bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4087                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4088                         sizeof(struct tstorm_def_status_block)/4);
4089 }
4090
4091 static void bnx2x_init_def_sb(struct bnx2x *bp,
4092                               struct host_def_status_block *def_sb,
4093                               dma_addr_t mapping, int sb_id)
4094 {
4095         int port = BP_PORT(bp);
4096         int func = BP_FUNC(bp);
4097         int index, val, reg_offset;
4098         u64 section;
4099
4100         /* ATTN */
4101         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4102                                             atten_status_block);
4103         def_sb->atten_status_block.status_block_id = sb_id;
4104
4105         bp->attn_state = 0;
4106
4107         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4108                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4109
4110         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4111                 bp->attn_group[index].sig[0] = REG_RD(bp,
4112                                                      reg_offset + 0x10*index);
4113                 bp->attn_group[index].sig[1] = REG_RD(bp,
4114                                                reg_offset + 0x4 + 0x10*index);
4115                 bp->attn_group[index].sig[2] = REG_RD(bp,
4116                                                reg_offset + 0x8 + 0x10*index);
4117                 bp->attn_group[index].sig[3] = REG_RD(bp,
4118                                                reg_offset + 0xc + 0x10*index);
4119         }
4120
4121         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4122                              HC_REG_ATTN_MSG0_ADDR_L);
4123
4124         REG_WR(bp, reg_offset, U64_LO(section));
4125         REG_WR(bp, reg_offset + 4, U64_HI(section));
4126
4127         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4128
4129         val = REG_RD(bp, reg_offset);
4130         val |= sb_id;
4131         REG_WR(bp, reg_offset, val);
4132
4133         /* USTORM */
4134         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4135                                             u_def_status_block);
4136         def_sb->u_def_status_block.status_block_id = sb_id;
4137
4138         REG_WR(bp, BAR_USTRORM_INTMEM +
4139                USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4140         REG_WR(bp, BAR_USTRORM_INTMEM +
4141                ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4142                U64_HI(section));
4143         REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4144                 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4145
4146         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4147                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4148                          USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4149
4150         /* CSTORM */
4151         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4152                                             c_def_status_block);
4153         def_sb->c_def_status_block.status_block_id = sb_id;
4154
4155         REG_WR(bp, BAR_CSTRORM_INTMEM +
4156                CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4157         REG_WR(bp, BAR_CSTRORM_INTMEM +
4158                ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4159                U64_HI(section));
4160         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4161                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4162
4163         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4164                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4165                          CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4166
4167         /* TSTORM */
4168         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4169                                             t_def_status_block);
4170         def_sb->t_def_status_block.status_block_id = sb_id;
4171
4172         REG_WR(bp, BAR_TSTRORM_INTMEM +
4173                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4174         REG_WR(bp, BAR_TSTRORM_INTMEM +
4175                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4176                U64_HI(section));
4177         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4178                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4179
4180         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4181                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4182                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4183
4184         /* XSTORM */
4185         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4186                                             x_def_status_block);
4187         def_sb->x_def_status_block.status_block_id = sb_id;
4188
4189         REG_WR(bp, BAR_XSTRORM_INTMEM +
4190                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4191         REG_WR(bp, BAR_XSTRORM_INTMEM +
4192                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4193                U64_HI(section));
4194         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4195                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4196
4197         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4198                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4199                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4200
4201         bp->stats_pending = 0;
4202         bp->set_mac_pending = 0;
4203
4204         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4205 }
4206
4207 static void bnx2x_update_coalesce(struct bnx2x *bp)
4208 {
4209         int port = BP_PORT(bp);
4210         int i;
4211
4212         for_each_queue(bp, i) {
4213                 int sb_id = bp->fp[i].sb_id;
4214
4215                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4216                 REG_WR8(bp, BAR_USTRORM_INTMEM +
4217                         USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4218                                                     U_SB_ETH_RX_CQ_INDEX),
4219                         bp->rx_ticks/12);
4220                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4221                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4222                                                      U_SB_ETH_RX_CQ_INDEX),
4223                          bp->rx_ticks ? 0 : 1);
4224                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4225                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4226                                                      U_SB_ETH_RX_BD_INDEX),
4227                          bp->rx_ticks ? 0 : 1);
4228
4229                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4230                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4231                         CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4232                                                     C_SB_ETH_TX_CQ_INDEX),
4233                         bp->tx_ticks/12);
4234                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4235                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4236                                                      C_SB_ETH_TX_CQ_INDEX),
4237                          bp->tx_ticks ? 0 : 1);
4238         }
4239 }
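/* Illustrative sketch, not driver code: the per-index pattern applied
 * above.  Each host coalescing index gets a scaled timeout (the /12
 * rescales the configured ticks into the HC's coarser units), and its
 * disable flag is set exactly when the tick value is zero, so a zero
 * setting turns coalescing off for that index.
 */
#if 0
static void hc_index_coalesce(struct bnx2x *bp, u32 timeout_reg,
			      u32 disable_reg, u32 ticks)
{
	REG_WR8(bp, timeout_reg, ticks/12);
	REG_WR16(bp, disable_reg, ticks ? 0 : 1);
}
#endif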
4240
4241 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4242                                        struct bnx2x_fastpath *fp, int last)
4243 {
4244         int i;
4245
4246         for (i = 0; i < last; i++) {
4247                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4248                 struct sk_buff *skb = rx_buf->skb;
4249
4250                 if (skb == NULL) {
4251                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4252                         continue;
4253                 }
4254
4255                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4256                         pci_unmap_single(bp->pdev,
4257                                          pci_unmap_addr(rx_buf, mapping),
4258                                          bp->rx_buf_size,
4259                                          PCI_DMA_FROMDEVICE);
4260
4261                 dev_kfree_skb(skb);
4262                 rx_buf->skb = NULL;
4263         }
4264 }
4265
4266 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4267 {
4268         int func = BP_FUNC(bp);
4269         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4270                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4271         u16 ring_prod, cqe_ring_prod;
4272         int i, j;
4273
4274         bp->rx_buf_size = bp->dev->mtu;
4275         bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD +
4276                 BCM_RX_ETH_PAYLOAD_ALIGN;
4277
4278         if (bp->flags & TPA_ENABLE_FLAG) {
4279                 DP(NETIF_MSG_IFUP,
4280                    "rx_buf_size %d  effective_mtu %d\n",
4281                    bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);
4282
4283                 for_each_queue(bp, j) {
4284                         struct bnx2x_fastpath *fp = &bp->fp[j];
4285
4286                         for (i = 0; i < max_agg_queues; i++) {
4287                                 fp->tpa_pool[i].skb =
4288                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4289                                 if (!fp->tpa_pool[i].skb) {
4290                                         BNX2X_ERR("Failed to allocate TPA "
4291                                                   "skb pool for queue[%d] - "
4292                                                   "disabling TPA on this "
4293                                                   "queue!\n", j);
4294                                         bnx2x_free_tpa_pool(bp, fp, i);
4295                                         fp->disable_tpa = 1;
4296                                         break;
4297                                 }
4298                                 pci_unmap_addr_set((struct sw_rx_bd *)
4299                                                         &fp->tpa_pool[i],
4300                                                    mapping, 0);
4301                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4302                         }
4303                 }
4304         }
4305
4306         for_each_queue(bp, j) {
4307                 struct bnx2x_fastpath *fp = &bp->fp[j];
4308
4309                 fp->rx_bd_cons = 0;
4310                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4311                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4312
4313                 /* "next page" elements initialization */
4314                 /* SGE ring */
4315                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4316                         struct eth_rx_sge *sge;
4317
4318                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4319                         sge->addr_hi =
4320                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4321                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4322                         sge->addr_lo =
4323                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4324                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4325                 }
4326
4327                 bnx2x_init_sge_ring_bit_mask(fp);
4328
4329                 /* RX BD ring */
4330                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4331                         struct eth_rx_bd *rx_bd;
4332
4333                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4334                         rx_bd->addr_hi =
4335                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4336                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4337                         rx_bd->addr_lo =
4338                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4339                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4340                 }
4341
4342                 /* CQ ring */
4343                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4344                         struct eth_rx_cqe_next_page *nextpg;
4345
4346                         nextpg = (struct eth_rx_cqe_next_page *)
4347                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4348                         nextpg->addr_hi =
4349                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4350                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4351                         nextpg->addr_lo =
4352                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4353                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4354                 }
4355
4356                 /* Allocate SGEs and initialize the ring elements */
4357                 for (i = 0, ring_prod = 0;
4358                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4359
4360                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4361                                 BNX2X_ERR("was only able to allocate "
4362                                           "%d rx sges\n", i);
4363                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4364                                 /* Cleanup already allocated elements */
4365                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4366                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4367                                 fp->disable_tpa = 1;
4368                                 ring_prod = 0;
4369                                 break;
4370                         }
4371                         ring_prod = NEXT_SGE_IDX(ring_prod);
4372                 }
4373                 fp->rx_sge_prod = ring_prod;
4374
4375                 /* Allocate BDs and initialize BD ring */
4376                 fp->rx_comp_cons = 0;
4377                 cqe_ring_prod = ring_prod = 0;
4378                 for (i = 0; i < bp->rx_ring_size; i++) {
4379                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4380                                 BNX2X_ERR("was only able to allocate "
4381                                           "%d rx skbs\n", i);
4382                                 bp->eth_stats.rx_skb_alloc_failed++;
4383                                 break;
4384                         }
4385                         ring_prod = NEXT_RX_IDX(ring_prod);
4386                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4387                         WARN_ON(ring_prod <= i);
4388                 }
4389
4390                 fp->rx_bd_prod = ring_prod;
4391                 /* must not have more available CQEs than BDs */
4392                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4393                                        cqe_ring_prod);
4394                 fp->rx_pkt = fp->rx_calls = 0;
4395
4396                 /* Warning!
4397                  * this will generate an interrupt (to the TSTORM);
4398                  * it must only be done after the chip is initialized
4399                  */
4400                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4401                                      fp->rx_sge_prod);
4402                 if (j != 0)
4403                         continue;
4404
4405                 REG_WR(bp, BAR_USTRORM_INTMEM +
4406                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4407                        U64_LO(fp->rx_comp_mapping));
4408                 REG_WR(bp, BAR_USTRORM_INTMEM +
4409                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4410                        U64_HI(fp->rx_comp_mapping));
4411         }
4412 }
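/* Illustrative sketch, not driver code: the index arithmetic behind
 * the "next page" loops above.  The tail of every descriptor page is
 * reserved for a link element holding the 64-bit address of the next
 * page, and the "% NUM_..." makes the last page point back at the
 * first so the ring wraps.
 */
#if 0
static int rx_bd_link_index(int page)	/* page counts from 1 */
{
	/* e.g. 510 for the first page, assuming RX_DESC_CNT == 512 */
	return RX_DESC_CNT * page - 2;
}

static int rx_bd_link_target(int page)
{
	return page % NUM_RX_RINGS;	/* last page links back to 0 */
}
#endif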
4413
4414 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4415 {
4416         int i, j;
4417
4418         for_each_queue(bp, j) {
4419                 struct bnx2x_fastpath *fp = &bp->fp[j];
4420
4421                 for (i = 1; i <= NUM_TX_RINGS; i++) {
4422                         struct eth_tx_bd *tx_bd =
4423                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4424
4425                         tx_bd->addr_hi =
4426                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4427                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4428                         tx_bd->addr_lo =
4429                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4430                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4431                 }
4432
4433                 fp->tx_pkt_prod = 0;
4434                 fp->tx_pkt_cons = 0;
4435                 fp->tx_bd_prod = 0;
4436                 fp->tx_bd_cons = 0;
4437                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4438                 fp->tx_pkt = 0;
4439         }
4440 }
4441
4442 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4443 {
4444         int func = BP_FUNC(bp);
4445
4446         spin_lock_init(&bp->spq_lock);
4447
4448         bp->spq_left = MAX_SPQ_PENDING;
4449         bp->spq_prod_idx = 0;
4450         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4451         bp->spq_prod_bd = bp->spq;
4452         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4453
4454         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4455                U64_LO(bp->spq_mapping));
4456         REG_WR(bp,
4457                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4458                U64_HI(bp->spq_mapping));
4459
4460         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4461                bp->spq_prod_idx);
4462 }
4463
4464 static void bnx2x_init_context(struct bnx2x *bp)
4465 {
4466         int i;
4467
4468         for_each_queue(bp, i) {
4469                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4470                 struct bnx2x_fastpath *fp = &bp->fp[i];
4471                 u8 sb_id = FP_SB_ID(fp);
4472
4473                 context->xstorm_st_context.tx_bd_page_base_hi =
4474                                                 U64_HI(fp->tx_desc_mapping);
4475                 context->xstorm_st_context.tx_bd_page_base_lo =
4476                                                 U64_LO(fp->tx_desc_mapping);
4477                 context->xstorm_st_context.db_data_addr_hi =
4478                                                 U64_HI(fp->tx_prods_mapping);
4479                 context->xstorm_st_context.db_data_addr_lo =
4480                                                 U64_LO(fp->tx_prods_mapping);
4481                 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4482                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4483
4484                 context->ustorm_st_context.common.sb_index_numbers =
4485                                                 BNX2X_RX_SB_INDEX_NUM;
4486                 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4487                 context->ustorm_st_context.common.status_block_id = sb_id;
4488                 context->ustorm_st_context.common.flags =
4489                         USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4490                 context->ustorm_st_context.common.mc_alignment_size =
4491                         BCM_RX_ETH_PAYLOAD_ALIGN;
4492                 context->ustorm_st_context.common.bd_buff_size =
4493                                                 bp->rx_buf_size;
4494                 context->ustorm_st_context.common.bd_page_base_hi =
4495                                                 U64_HI(fp->rx_desc_mapping);
4496                 context->ustorm_st_context.common.bd_page_base_lo =
4497                                                 U64_LO(fp->rx_desc_mapping);
4498                 if (!fp->disable_tpa) {
4499                         context->ustorm_st_context.common.flags |=
4500                                 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4501                                  USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4502                         context->ustorm_st_context.common.sge_buff_size =
4503                                         (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4504                         context->ustorm_st_context.common.sge_page_base_hi =
4505                                                 U64_HI(fp->rx_sge_mapping);
4506                         context->ustorm_st_context.common.sge_page_base_lo =
4507                                                 U64_LO(fp->rx_sge_mapping);
4508                 }
4509
4510                 context->cstorm_st_context.sb_index_number =
4511                                                 C_SB_ETH_TX_CQ_INDEX;
4512                 context->cstorm_st_context.status_block_id = sb_id;
4513
4514                 context->xstorm_ag_context.cdu_reserved =
4515                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4516                                                CDU_REGION_NUMBER_XCM_AG,
4517                                                ETH_CONNECTION_TYPE);
4518                 context->ustorm_ag_context.cdu_usage =
4519                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4520                                                CDU_REGION_NUMBER_UCM_AG,
4521                                                ETH_CONNECTION_TYPE);
4522         }
4523 }
4524
4525 static void bnx2x_init_ind_table(struct bnx2x *bp)
4526 {
4527         int func = BP_FUNC(bp);
4528         int i;
4529
4530         if (!is_multi(bp))
4531                 return;
4532
4533         DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
4534         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4535                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4536                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4537                         BP_CL_ID(bp) + (i % bp->num_queues));
4538 }
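/* Illustrative sketch, not driver code: what the loop above produces.
 * Each of the TSTORM_INDIRECTION_TABLE_SIZE bytes maps one RSS hash
 * bucket to a client id, spreading buckets round-robin across the
 * active queues from the leading client id; e.g. with BP_CL_ID(bp) == 0
 * and num_queues == 4 the table reads 0, 1, 2, 3, 0, 1, 2, 3, ...
 */
#if 0
static u8 ind_table_entry(struct bnx2x *bp, int bucket)
{
	return BP_CL_ID(bp) + (bucket % bp->num_queues);
}
#endif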
4539
4540 static void bnx2x_set_client_config(struct bnx2x *bp)
4541 {
4542         struct tstorm_eth_client_config tstorm_client = {0};
4543         int port = BP_PORT(bp);
4544         int i;
4545
4546         tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
4547         tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4548         tstorm_client.config_flags =
4549                                 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4550 #ifdef BCM_VLAN
4551         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4552                 tstorm_client.config_flags |=
4553                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4554                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4555         }
4556 #endif
4557
4558         if (bp->flags & TPA_ENABLE_FLAG) {
4559                 tstorm_client.max_sges_for_packet =
4560                         SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4561                 tstorm_client.max_sges_for_packet =
4562                         ((tstorm_client.max_sges_for_packet +
4563                           PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4564                         PAGES_PER_SGE_SHIFT;
4565
4566                 tstorm_client.config_flags |=
4567                                 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4568         }
4569
4570         for_each_queue(bp, i) {
4571                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4572                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4573                        ((u32 *)&tstorm_client)[0]);
4574                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4575                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4576                        ((u32 *)&tstorm_client)[1]);
4577         }
4578
4579         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4580            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4581 }
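/* Worked example, not driver code, of the max_sges_for_packet math
 * above, assuming (for illustration only) 4K SGE pages and
 * PAGES_PER_SGE == 2.  For mtu == 9000:
 *   SGE_PAGE_ALIGN(9000) >> SGE_PAGE_SHIFT  = 12288 / 4096 = 3 pages
 *   rounded up to a PAGES_PER_SGE multiple  = 4 pages
 *   4 >> PAGES_PER_SGE_SHIFT                = 2 SGEs per packet
 */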
4582
4583 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4584 {
4585         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4586         int mode = bp->rx_mode;
4587         int mask = (1 << BP_L_ID(bp));
4588         int func = BP_FUNC(bp);
4589         int i;
4590
4591         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
4592
4593         switch (mode) {
4594         case BNX2X_RX_MODE_NONE: /* no Rx */
4595                 tstorm_mac_filter.ucast_drop_all = mask;
4596                 tstorm_mac_filter.mcast_drop_all = mask;
4597                 tstorm_mac_filter.bcast_drop_all = mask;
4598                 break;
4599         case BNX2X_RX_MODE_NORMAL:
4600                 tstorm_mac_filter.bcast_accept_all = mask;
4601                 break;
4602         case BNX2X_RX_MODE_ALLMULTI:
4603                 tstorm_mac_filter.mcast_accept_all = mask;
4604                 tstorm_mac_filter.bcast_accept_all = mask;
4605                 break;
4606         case BNX2X_RX_MODE_PROMISC:
4607                 tstorm_mac_filter.ucast_accept_all = mask;
4608                 tstorm_mac_filter.mcast_accept_all = mask;
4609                 tstorm_mac_filter.bcast_accept_all = mask;
4610                 break;
4611         default:
4612                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4613                 break;
4614         }
4615
4616         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4617                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4618                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4619                        ((u32 *)&tstorm_mac_filter)[i]);
4620
4621 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4622                    ((u32 *)&tstorm_mac_filter)[i]); */
4623         }
4624
4625         if (mode != BNX2X_RX_MODE_NONE)
4626                 bnx2x_set_client_config(bp);
4627 }
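/* Illustrative sketch, not driver code: each filter field above is a
 * bit mask over client ids, and mask == (1 << BP_L_ID(bp)) is this
 * function's leading client bit, so the switch only ever flips its own
 * bit and leaves other functions' filtering untouched.
 */
#if 0
static void rx_mode_own_bit_example(struct bnx2x *bp,
				    struct tstorm_eth_mac_filter_config *f)
{
	int mask = (1 << BP_L_ID(bp));

	f->ucast_accept_all |= mask;	/* affect only our client... */
	f->ucast_drop_all &= ~mask;	/* ...other functions keep theirs */
}
#endif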
4628
4629 static void bnx2x_init_internal_common(struct bnx2x *bp)
4630 {
4631         int i;
4632
4633         if (bp->flags & TPA_ENABLE_FLAG) {
4634                 struct tstorm_eth_tpa_exist tpa = {0};
4635
4636                 tpa.tpa_exist = 1;
4637
4638                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4639                        ((u32 *)&tpa)[0]);
4640                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4641                        ((u32 *)&tpa)[1]);
4642         }
4643
4644         /* Zero this manually as its initialization is
4645            currently missing in the initTool */
4646         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4647                 REG_WR(bp, BAR_USTRORM_INTMEM +
4648                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4649 }
4650
4651 static void bnx2x_init_internal_port(struct bnx2x *bp)
4652 {
4653         int port = BP_PORT(bp);
4654
4655         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4656         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4657         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4658         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4659 }
4660
4661 static void bnx2x_init_internal_func(struct bnx2x *bp)
4662 {
4663         struct tstorm_eth_function_common_config tstorm_config = {0};
4664         struct stats_indication_flags stats_flags = {0};
4665         int port = BP_PORT(bp);
4666         int func = BP_FUNC(bp);
4667         int i;
4668         u16 max_agg_size;
4669
4670         if (is_multi(bp)) {
4671                 tstorm_config.config_flags = MULTI_FLAGS;
4672                 tstorm_config.rss_result_mask = MULTI_MASK;
4673         }
4674
4675         tstorm_config.leading_client_id = BP_L_ID(bp);
4676
4677         REG_WR(bp, BAR_TSTRORM_INTMEM +
4678                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4679                (*(u32 *)&tstorm_config));
4680
4681         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4682         bnx2x_set_storm_rx_mode(bp);
4683
4684         /* reset xstorm per client statistics */
4685         for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4686                 REG_WR(bp, BAR_XSTRORM_INTMEM +
4687                        XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4688                        i*4, 0);
4689         }
4690         /* reset tstorm per client statistics */
4691         for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4692                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4693                        TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4694                        i*4, 0);
4695         }
4696
4697         /* Init statistics related context */
4698         stats_flags.collect_eth = 1;
4699
4700         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4701                ((u32 *)&stats_flags)[0]);
4702         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4703                ((u32 *)&stats_flags)[1]);
4704
4705         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4706                ((u32 *)&stats_flags)[0]);
4707         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4708                ((u32 *)&stats_flags)[1]);
4709
4710         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4711                ((u32 *)&stats_flags)[0]);
4712         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4713                ((u32 *)&stats_flags)[1]);
4714
4715         REG_WR(bp, BAR_XSTRORM_INTMEM +
4716                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4717                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4718         REG_WR(bp, BAR_XSTRORM_INTMEM +
4719                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4720                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4721
4722         REG_WR(bp, BAR_TSTRORM_INTMEM +
4723                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4724                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4725         REG_WR(bp, BAR_TSTRORM_INTMEM +
4726                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4727                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4728
4729         if (CHIP_IS_E1H(bp)) {
4730                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4731                         IS_E1HMF(bp));
4732                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4733                         IS_E1HMF(bp));
4734                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4735                         IS_E1HMF(bp));
4736                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4737                         IS_E1HMF(bp));
4738
4739                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4740                          bp->e1hov);
4741         }
4742
4743         /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
4744         max_agg_size =
4745                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
4746                           SGE_PAGE_SIZE * PAGES_PER_SGE),
4747                     (u32)0xffff);
4748         for_each_queue(bp, i) {
4749                 struct bnx2x_fastpath *fp = &bp->fp[i];
4750
4751                 REG_WR(bp, BAR_USTRORM_INTMEM +
4752                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4753                        U64_LO(fp->rx_comp_mapping));
4754                 REG_WR(bp, BAR_USTRORM_INTMEM +
4755                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4756                        U64_HI(fp->rx_comp_mapping));
4757
4758                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4759                          USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4760                          max_agg_size);
4761         }
4762 }
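/* Worked example, not driver code, of the max_agg_size clamp above,
 * assuming (for illustration only) 4K SGE pages and PAGES_PER_SGE == 2:
 * min(8, MAX_SKB_FRAGS) == 8 frags, 8 * 4096 * 2 = 65536 bytes, which
 * the outer min() then clamps to the 16-bit register limit 0xffff.
 */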
4763
4764 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4765 {
4766         switch (load_code) {
4767         case FW_MSG_CODE_DRV_LOAD_COMMON:
4768                 bnx2x_init_internal_common(bp);
4769                 /* no break */
4770
4771         case FW_MSG_CODE_DRV_LOAD_PORT:
4772                 bnx2x_init_internal_port(bp);
4773                 /* no break */
4774
4775         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4776                 bnx2x_init_internal_func(bp);
4777                 break;
4778
4779         default:
4780                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4781                 break;
4782         }
4783 }
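/* Illustrative sketch, not driver code: the missing breaks above are
 * deliberate.  Unrolled, the cascade reads as follows - each load code
 * performs its own level of init plus every narrower one.
 */
#if 0
static void bnx2x_init_internal_unrolled(struct bnx2x *bp, u32 load_code)
{
	if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON)
		bnx2x_init_internal_common(bp);
	if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON ||
	    load_code == FW_MSG_CODE_DRV_LOAD_PORT)
		bnx2x_init_internal_port(bp);
	if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON ||
	    load_code == FW_MSG_CODE_DRV_LOAD_PORT ||
	    load_code == FW_MSG_CODE_DRV_LOAD_FUNCTION)
		bnx2x_init_internal_func(bp);
}
#endif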
4784
4785 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4786 {
4787         int i;
4788
4789         for_each_queue(bp, i) {
4790                 struct bnx2x_fastpath *fp = &bp->fp[i];
4791
4792                 fp->bp = bp;
4793                 fp->state = BNX2X_FP_STATE_CLOSED;
4794                 fp->index = i;
4795                 fp->cl_id = BP_L_ID(bp) + i;
4796                 fp->sb_id = fp->cl_id;
4797                 DP(NETIF_MSG_IFUP,
4798                    "bnx2x_init_sb(%p,%p) index %d  cl_id %d  sb %d\n",
4799                    bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4800                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4801                               FP_SB_ID(fp));
4802                 bnx2x_update_fpsb_idx(fp);
4803         }
4804
4805         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4806                           DEF_SB_ID);
4807         bnx2x_update_dsb_idx(bp);
4808         bnx2x_update_coalesce(bp);
4809         bnx2x_init_rx_rings(bp);
4810         bnx2x_init_tx_ring(bp);
4811         bnx2x_init_sp_ring(bp);
4812         bnx2x_init_context(bp);
4813         bnx2x_init_internal(bp, load_code);
4814         bnx2x_init_ind_table(bp);
4815         bnx2x_int_enable(bp);
4816 }
4817
4818 /* end of nic init */
4819
4820 /*
4821  * gzip service functions
4822  */
4823
4824 static int bnx2x_gunzip_init(struct bnx2x *bp)
4825 {
4826         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4827                                               &bp->gunzip_mapping);
4828         if (bp->gunzip_buf  == NULL)
4829                 goto gunzip_nomem1;
4830
4831         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4832         if (bp->strm  == NULL)
4833                 goto gunzip_nomem2;
4834
4835         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4836                                       GFP_KERNEL);
4837         if (bp->strm->workspace == NULL)
4838                 goto gunzip_nomem3;
4839
4840         return 0;
4841
4842 gunzip_nomem3:
4843         kfree(bp->strm);
4844         bp->strm = NULL;
4845
4846 gunzip_nomem2:
4847         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4848                             bp->gunzip_mapping);
4849         bp->gunzip_buf = NULL;
4850
4851 gunzip_nomem1:
4852         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
4853                " decompression\n", bp->dev->name);
4854         return -ENOMEM;
4855 }
4856
4857 static void bnx2x_gunzip_end(struct bnx2x *bp)
4858 {
4859         kfree(bp->strm->workspace);
4860
4861         kfree(bp->strm);
4862         bp->strm = NULL;
4863
4864         if (bp->gunzip_buf) {
4865                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4866                                     bp->gunzip_mapping);
4867                 bp->gunzip_buf = NULL;
4868         }
4869 }
4870
4871 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4872 {
4873         int n, rc;
4874
4875         /* check gzip header */
4876         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4877                 return -EINVAL;
4878
4879         n = 10;
4880
4881 #define FNAME                           0x8
4882
4883         if (zbuf[3] & FNAME)
4884                 while ((zbuf[n++] != 0) && (n < len));
4885
4886         bp->strm->next_in = zbuf + n;
4887         bp->strm->avail_in = len - n;
4888         bp->strm->next_out = bp->gunzip_buf;
4889         bp->strm->avail_out = FW_BUF_SIZE;
4890
4891         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4892         if (rc != Z_OK)
4893                 return rc;
4894
4895         rc = zlib_inflate(bp->strm, Z_FINISH);
4896         if ((rc != Z_OK) && (rc != Z_STREAM_END))
4897                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4898                        bp->dev->name, bp->strm->msg);
4899
4900         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4901         if (bp->gunzip_outlen & 0x3)
4902                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4903                                     " gunzip_outlen (%d) not aligned\n",
4904                        bp->dev->name, bp->gunzip_outlen);
4905         bp->gunzip_outlen >>= 2;
4906
4907         zlib_inflateEnd(bp->strm);
4908
4909         if (rc == Z_STREAM_END)
4910                 return 0;
4911
4912         return rc;
4913 }
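/* Illustrative sketch, not driver code: the header walk performed
 * above.  A gzip stream starts with a fixed 10-byte header (magic
 * 0x1f 0x8b, then the method byte, Z_DEFLATED == 8); if the FNAME flag
 * is set, a NUL-terminated file name follows before the raw deflate
 * data, which is why inflateInit2() is called with -MAX_WBITS.
 */
#if 0
static int gzip_payload_offset(u8 *zbuf, int len)
{
	int n = 10;				/* fixed header size */

	if ((len < n) || (zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) ||
	    (zbuf[2] != Z_DEFLATED))
		return -EINVAL;

	if (zbuf[3] & 0x8)			/* FNAME */
		while ((n < len) && (zbuf[n++] != 0))
			;
	return n;				/* deflate data starts here */
}
#endif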
4914
4915 /* nic load/unload */
4916
4917 /*
4918  * General service functions
4919  */
4920
4921 /* send a NIG loopback debug packet */
4922 static void bnx2x_lb_pckt(struct bnx2x *bp)
4923 {
4924         u32 wb_write[3];
4925
4926         /* Ethernet source and destination addresses */
4927         wb_write[0] = 0x55555555;
4928         wb_write[1] = 0x55555555;
4929         wb_write[2] = 0x20;             /* SOP */
4930         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4931
4932         /* NON-IP protocol */
4933         wb_write[0] = 0x09000000;
4934         wb_write[1] = 0x55555555;
4935         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
4936         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4937 }
4938
4939 /* some of the internal memories
4940  * are not directly readable from the driver;
4941  * to test them we send debug packets
4942  */
4943 static int bnx2x_int_mem_test(struct bnx2x *bp)
4944 {
4945         int factor;
4946         int count, i;
4947         u32 val = 0;
4948
4949         if (CHIP_REV_IS_FPGA(bp))
4950                 factor = 120;
4951         else if (CHIP_REV_IS_EMUL(bp))
4952                 factor = 200;
4953         else
4954                 factor = 1;
4955
4956         DP(NETIF_MSG_HW, "start part1\n");
4957
4958         /* Disable inputs of parser neighbor blocks */
4959         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4960         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4961         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4962         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4963
4964         /*  Write 0 to parser credits for CFC search request */
4965         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4966
4967         /* send Ethernet packet */
4968         bnx2x_lb_pckt(bp);
4969
4970         /* TODO: should the NIG statistics be reset here? */
4971         /* Wait until NIG register shows 1 packet of size 0x10 */
4972         count = 1000 * factor;
4973         while (count) {
4974
4975                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4976                 val = *bnx2x_sp(bp, wb_data[0]);
4977                 if (val == 0x10)
4978                         break;
4979
4980                 msleep(10);
4981                 count--;
4982         }
4983         if (val != 0x10) {
4984                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
4985                 return -1;
4986         }
4987
4988         /* Wait until PRS register shows 1 packet */
4989         count = 1000 * factor;
4990         while (count) {
4991                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4992                 if (val == 1)
4993                         break;
4994
4995                 msleep(10);
4996                 count--;
4997         }
4998         if (val != 0x1) {
4999                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5000                 return -2;
5001         }
5002
5003         /* Reset and init BRB, PRS */
5004         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5005         msleep(50);
5006         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5007         msleep(50);
5008         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5009         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5010
5011         DP(NETIF_MSG_HW, "part2\n");
5012
5013         /* Disable inputs of parser neighbor blocks */
5014         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5015         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5016         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5017         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5018
5019         /* Write 0 to parser credits for CFC search request */
5020         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5021
5022         /* send 10 Ethernet packets */
5023         for (i = 0; i < 10; i++)
5024                 bnx2x_lb_pckt(bp);
5025
5026         /* Wait until NIG register shows 10 + 1
5027            packets of size 11*0x10 = 0xb0 */
5028         count = 1000 * factor;
5029         while (count) {
5030
5031                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5032                 val = *bnx2x_sp(bp, wb_data[0]);
5033                 if (val == 0xb0)
5034                         break;
5035
5036                 msleep(10);
5037                 count--;
5038         }
5039         if (val != 0xb0) {
5040                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5041                 return -3;
5042         }
5043
5044         /* Wait until PRS register shows 2 packets */
5045         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5046         if (val != 2)
5047                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5048
5049         /* Write 1 to parser credits for CFC search request */
5050         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5051
5052         /* Wait until PRS register shows 3 packets */
5053         msleep(10 * factor);
5054         /* then check that the PRS packet counter has reached 3 */
5055         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5056         if (val != 3)
5057                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5058
5059         /* clear NIG EOP FIFO */
5060         for (i = 0; i < 11; i++)
5061                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5062         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5063         if (val != 1) {
5064                 BNX2X_ERR("clear of NIG failed\n");
5065                 return -4;
5066         }
5067
5068         /* Reset and init BRB, PRS, NIG */
5069         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5070         msleep(50);
5071         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5072         msleep(50);
5073         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5074         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5075 #ifndef BCM_ISCSI
5076         /* set NIC mode */
5077         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5078 #endif
5079
5080         /* Enable inputs of parser neighbor blocks */
5081         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5082         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5083         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5084         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5085
5086         DP(NETIF_MSG_HW, "done\n");
5087
5088         return 0; /* OK */
5089 }
5090
5091 static void enable_blocks_attention(struct bnx2x *bp)
5092 {
5093         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5094         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5095         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5096         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5097         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5098         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5099         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5100         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5101         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5102 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5103 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5104         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5105         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5106         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5107 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5108 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5109         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5110         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5111         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5112         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5113 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5114 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5115         if (CHIP_REV_IS_FPGA(bp))
5116                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5117         else
5118                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5119         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5120         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5121         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5122 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5123 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5124         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5125         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5126 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5127         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3 and 4 masked */
5128 }
5129
5130
5131 static int bnx2x_init_common(struct bnx2x *bp)
5132 {
5133         u32 val, i;
5134
5135         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
5136
5137         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5138         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5139
5140         bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5141         if (CHIP_IS_E1H(bp))
5142                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5143
5144         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5145         msleep(30);
5146         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5147
5148         bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5149         if (CHIP_IS_E1(bp)) {
5150                 /* enable HW interrupt from PXP on USDM overflow,
5151                    bit 16 on INT_MASK_0 */
5152                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5153         }
5154
5155         bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5156         bnx2x_init_pxp(bp);
5157
5158 #ifdef __BIG_ENDIAN
5159         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5160         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5161         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5162         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5163         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5164
5165 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5166         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5167         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5168         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5169         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5170 #endif
5171
5172         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5173 #ifdef BCM_ISCSI
5174         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5175         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5176         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5177 #endif
5178
5179         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5180                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5181
5182         /* let the HW do its magic ... */
5183         msleep(100);
5184         /* finish PXP init */
5185         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5186         if (val != 1) {
5187                 BNX2X_ERR("PXP2 CFG failed\n");
5188                 return -EBUSY;
5189         }
5190         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5191         if (val != 1) {
5192                 BNX2X_ERR("PXP2 RD_INIT failed\n");
5193                 return -EBUSY;
5194         }
5195
5196         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5197         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5198
5199         bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5200
5201         /* clean the DMAE memory */
5202         bp->dmae_ready = 1;
5203         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5204
5205         bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5206         bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5207         bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5208         bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5209
5210         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5211         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5212         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5213         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5214
5215         bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5216         /* soft reset pulse */
5217         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5218         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5219
5220 #ifdef BCM_ISCSI
5221         bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5222 #endif
5223
5224         bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5225         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5226         if (!CHIP_REV_IS_SLOW(bp)) {
5227                 /* enable hw interrupt from doorbell Q */
5228                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5229         }
5230
5231         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5232         if (CHIP_REV_IS_SLOW(bp)) {
5233                 /* fix for emulation and FPGA: configure for no pause */
5234                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5235                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5236                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5237                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5238         }
5239
5240         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5241         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5242         /* set NIC mode */
5243         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5244         if (CHIP_IS_E1H(bp))
5245                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5246
5247         bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5248         bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5249         bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5250         bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5251
5252         if (CHIP_IS_E1H(bp)) {
5253                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5254                                 STORM_INTMEM_SIZE_E1H/2);
5255                 bnx2x_init_fill(bp,
5256                                 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5257                                 0, STORM_INTMEM_SIZE_E1H/2);
5258                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5259                                 STORM_INTMEM_SIZE_E1H/2);
5260                 bnx2x_init_fill(bp,
5261                                 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5262                                 0, STORM_INTMEM_SIZE_E1H/2);
5263                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5264                                 STORM_INTMEM_SIZE_E1H/2);
5265                 bnx2x_init_fill(bp,
5266                                 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5267                                 0, STORM_INTMEM_SIZE_E1H/2);
5268                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5269                                 STORM_INTMEM_SIZE_E1H/2);
5270                 bnx2x_init_fill(bp,
5271                                 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5272                                 0, STORM_INTMEM_SIZE_E1H/2);
5273         } else { /* E1 */
5274                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5275                                 STORM_INTMEM_SIZE_E1);
5276                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5277                                 STORM_INTMEM_SIZE_E1);
5278                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5279                                 STORM_INTMEM_SIZE_E1);
5280                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5281                                 STORM_INTMEM_SIZE_E1);
5282         }
5283
5284         bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5285         bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5286         bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5287         bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5288
5289         /* sync semi rtc */
5290         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5291                0x80000000);
5292         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5293                0x80000000);
5294
5295         bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5296         bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5297         bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5298
5299         REG_WR(bp, SRC_REG_SOFT_RST, 1);
5300         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5301                 REG_WR(bp, i, 0xc0cac01a);
5302                 /* TODO: replace with something meaningful */
5303         }
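        /* A minimal sketch of what the TODO above might look like: seed
         * the searcher RSS keys from the kernel RNG instead of a constant.
         * Illustrative only (would also need linux/random.h), and it
         * assumes the KEYRSS registers are contiguous, as the loop above
         * already does:
         *
         *      u32 rss_key[20];        // KEYRSS0_0..KEYRSS1_9
         *      get_random_bytes(rss_key, sizeof(rss_key));
         *      for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
         *              REG_WR(bp, i, rss_key[(i - SRC_REG_KEYRSS0_0) / 4]);
         */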
5304         if (CHIP_IS_E1H(bp))
5305                 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5306         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5307
5308         if (sizeof(union cdu_context) != 1024)
5309                 /* we currently assume that a context is 1024 bytes */
5310                 printk(KERN_ALERT PFX "please adjust the size of"
5311                        " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5312
5313         bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5314         val = (4 << 24) + (0 << 12) + 1024;
5315         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5316         if (CHIP_IS_E1(bp)) {
5317                 /* !!! fix pxp client credit until excel update */
5318                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5319                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5320         }
5321
5322         bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5323         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5324
5325         bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5326         bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5327
5328         /* PXPCS COMMON comes here */
5329         /* Reset PCIE errors for debug */
5330         REG_WR(bp, 0x2814, 0xffffffff);
5331         REG_WR(bp, 0x3820, 0xffffffff);
5332
5333         /* EMAC0 COMMON comes here */
5334         /* EMAC1 COMMON comes here */
5335         /* DBU COMMON comes here */
5336         /* DBG COMMON comes here */
5337
5338         bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5339         if (CHIP_IS_E1H(bp)) {
5340                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5341                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5342         }
5343
5344         if (CHIP_REV_IS_SLOW(bp))
5345                 msleep(200);
5346
5347         /* finish CFC init */
5348         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5349         if (val != 1) {
5350                 BNX2X_ERR("CFC LL_INIT failed\n");
5351                 return -EBUSY;
5352         }
5353         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5354         if (val != 1) {
5355                 BNX2X_ERR("CFC AC_INIT failed\n");
5356                 return -EBUSY;
5357         }
5358         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5359         if (val != 1) {
5360                 BNX2X_ERR("CFC CAM_INIT failed\n");
5361                 return -EBUSY;
5362         }
5363         REG_WR(bp, CFC_REG_DEBUG0, 0);
5364
5365         /* read the NIG BRB octet statistic
5366            to see if this is the first load since power-up */
5367         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5368         val = *bnx2x_sp(bp, wb_data[0]);
5369
5370         /* do internal memory self test */
5371         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5372                 BNX2X_ERR("internal mem self test failed\n");
5373                 return -EBUSY;
5374         }
5375
5376         switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5377         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5378         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5379                 /* Fan failure is indicated by SPIO 5 */
5380                 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5381                                MISC_REGISTERS_SPIO_INPUT_HI_Z);
5382
5383                 /* set to active low mode */
5384                 val = REG_RD(bp, MISC_REG_SPIO_INT);
5385                 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5386                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5387                 REG_WR(bp, MISC_REG_SPIO_INT, val);
5388
5389                 /* enable interrupt to signal the IGU */
5390                 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5391                 val |= (1 << MISC_REGISTERS_SPIO_5);
5392                 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5393                 break;
5394
5395         default:
5396                 break;
5397         }
5398
5399         /* clear PXP2 attentions */
5400         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5401
5402         enable_blocks_attention(bp);
5403
5404         if (!BP_NOMCP(bp)) {
5405                 bnx2x_acquire_phy_lock(bp);
5406                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5407                 bnx2x_release_phy_lock(bp);
5408         } else
5409                 BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
5410
5411         return 0;
5412 }
5413
5414 static int bnx2x_init_port(struct bnx2x *bp)
5415 {
5416         int port = BP_PORT(bp);
5417         u32 val;
5418
5419         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
5420
5421         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5422
5423         /* Port PXP comes here */
5424         /* Port PXP2 comes here */
5425 #ifdef BCM_ISCSI
5426         /* Port0  1
5427          * Port1  385 */
5428         i++;
5429         wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5430         wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5431         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5432         REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5433
5434         /* Port0  2
5435          * Port1  386 */
5436         i++;
5437         wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5438         wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5439         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5440         REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5441
5442         /* Port0  3
5443          * Port1  387 */
5444         i++;
5445         wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5446         wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5447         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5448         REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5449 #endif
5450         /* Port CMs come here */
5451
5452         /* Port QM comes here */
5453 #ifdef BCM_ISCSI
5454         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5455         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5456
5457         bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5458                              func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5459 #endif
5460         /* Port DQ comes here */
5461         /* Port BRB1 comes here */
5462         /* Port PRS comes here */
5463         /* Port TSDM comes here */
5464         /* Port CSDM comes here */
5465         /* Port USDM comes here */
5466         /* Port XSDM comes here */
5467         bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5468                              port ? TSEM_PORT1_END : TSEM_PORT0_END);
5469         bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5470                              port ? USEM_PORT1_END : USEM_PORT0_END);
5471         bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5472                              port ? CSEM_PORT1_END : CSEM_PORT0_END);
5473         bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5474                              port ? XSEM_PORT1_END : XSEM_PORT0_END);
5475         /* Port UPB comes here */
5476         /* Port XPB comes here */
5477
5478         bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5479                              port ? PBF_PORT1_END : PBF_PORT0_END);
5480
5481         /* configure PBF to work without PAUSE for MTU 9000 */
5482         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5483
5484         /* update threshold */
5485         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5486         /* update init credit */
5487         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
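        /* On the arithmetic above (kept from the vendor init values): the
         * PBF counts in 16-byte units, so 9040/16 = 565 units presumably
         * covers the 9000-byte MTU plus header overhead; the extra
         * 553 - 22 units of initial credit look like internal-buffer
         * headroom, but that reading is an assumption, not documented here.
         */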
5488
5489         /* pulse PBF init so the new settings take effect */
5490         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5491         msleep(5);
5492         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5493
5494 #ifdef BCM_ISCSI
5495         /* tell the searcher where the T2 table is */
5496         REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5497
5498         wb_write[0] = U64_LO(bp->t2_mapping);
5499         wb_write[1] = U64_HI(bp->t2_mapping);
5500         REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5501         wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5502         wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5503         REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5504
5505         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5506         /* Port SRCH comes here */
5507 #endif
5508         /* Port CDU comes here */
5509         /* Port CFC comes here */
5510
5511         if (CHIP_IS_E1(bp)) {
5512                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5513                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5514         }
5515         bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5516                              port ? HC_PORT1_END : HC_PORT0_END);
5517
5518         bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5519                                     MISC_AEU_PORT0_START,
5520                              port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5521         /* init aeu_mask_attn_func_0/1:
5522          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5523          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5524          *             bits 4-7 are used for "per vn group attention" */
5525         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5526                (IS_E1HMF(bp) ? 0xF7 : 0x7));
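        /* Bit view of the value written above, for reference:
         *      SF mode: 0x07 = 00000111b -> attn bits 0-2 enabled
         *      MF mode: 0xF7 = 11110111b -> bits 0-2 plus the per-vn
         *               group-attention bits 4-7; bit 3 stays masked
         */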
5527
5528         /* Port PXPCS comes here */
5529         /* Port EMAC0 comes here */
5530         /* Port EMAC1 comes here */
5531         /* Port DBU comes here */
5532         /* Port DBG comes here */
5533         bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5534                              port ? NIG_PORT1_END : NIG_PORT0_END);
5535
5536         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5537
5538         if (CHIP_IS_E1H(bp)) {
5539                 u32 wsum;
5540                 struct cmng_struct_per_port m_cmng_port;
5541                 int vn;
5542
5543                 /* 0x2 disables e1hov, 0x1 enables it */
5544                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5545                        (IS_E1HMF(bp) ? 0x1 : 0x2));
5546
5547                 /* Init RATE SHAPING and FAIRNESS contexts.
5548                    Initialize as if there is a 10G link. */
5549                 wsum = bnx2x_calc_vn_wsum(bp);
5550                 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5551                 if (IS_E1HMF(bp))
5552                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
5553                                 bnx2x_init_vn_minmax(bp, 2*vn + port,
5554                                         wsum, 10000, &m_cmng_port);
5555         }
5556
5557         /* Port MCP comes here */
5558         /* Port DMAE comes here */
5559
5560         switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5561         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5562         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5563                 /* add SPIO 5 to group 0 */
5564                 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5565                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5566                 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5567                 break;
5568
5569         default:
5570                 break;
5571         }
5572
5573         bnx2x__link_reset(bp);
5574
5575         return 0;
5576 }
5577
5578 #define ILT_PER_FUNC            (768/2)
5579 #define FUNC_ILT_BASE(func)     ((func) * ILT_PER_FUNC)
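/* Worked example: 768 ILT lines split between two functions gives
 * ILT_PER_FUNC = 384, so FUNC_ILT_BASE(0) = 0 and FUNC_ILT_BASE(1) = 384;
 * this is why the iSCSI block above numbers its entries 1..3 for port 0
 * and 385..387 for port 1.
 */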
5580 /* the physical address is shifted right by 12 bits, and a
5581    1=valid bit is added as the 53rd bit;
5582    then, since this is a wide register(TM),
5583    we split it into two 32-bit writes
5584  */
5585 #define ONCHIP_ADDR1(x)         ((u32)(((u64)(x) >> 12) & 0xFFFFFFFF))
5586 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)(x) >> 44)))
5587 #define PXP_ONE_ILT(x)          (((x) << 10) | (x))
5588 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | (f))
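/* Illustrative example of the address split: for a DMA address of
 * 0x0012345678901000,
 *      ONCHIP_ADDR1 = (addr >> 12) & 0xFFFFFFFF = 0x45678901
 *      ONCHIP_ADDR2 = (1 << 20) | (addr >> 44)  = 0x00100123
 * so the page-aligned address occupies bits 0..51 of the 64-bit ILT
 * entry and the valid bit is bit 52 (the 53rd bit). PXP_ONE_ILT(i)
 * simply encodes a one-line range whose first and last line are both i.
 */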
5589
5590 #define CNIC_ILT_LINES          0
5591
5592 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5593 {
5594         int reg;
5595
5596         if (CHIP_IS_E1H(bp))
5597                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5598         else /* E1 */
5599                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5600
5601         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5602 }
5603
5604 static int bnx2x_init_func(struct bnx2x *bp)
5605 {
5606         int port = BP_PORT(bp);
5607         int func = BP_FUNC(bp);
5608         int i;
5609
5610         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
5611
5612         i = FUNC_ILT_BASE(func);
5613
5614         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5615         if (CHIP_IS_E1H(bp)) {
5616                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5617                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5618         } else /* E1 */
5619                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5620                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5621
5622
5623         if (CHIP_IS_E1H(bp)) {
5624                 for (i = 0; i < 9; i++)
5625                         bnx2x_init_block(bp,
5626                                          cm_start[func][i], cm_end[func][i]);
5627
5628                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5629                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5630         }
5631
5632         /* HC init per function */
5633         if (CHIP_IS_E1H(bp)) {
5634                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5635
5636                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5637                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5638         }
5639         bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5640
5641         if (CHIP_IS_E1H(bp))
5642                 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5643
5644         /* Reset PCIE errors for debug */
5645         REG_WR(bp, 0x2114, 0xffffffff);
5646         REG_WR(bp, 0x2120, 0xffffffff);
5647
5648         return 0;
5649 }
5650
5651 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5652 {
5653         int i, rc = 0;
5654
5655         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
5656            BP_FUNC(bp), load_code);
5657
5658         bp->dmae_ready = 0;
5659         mutex_init(&bp->dmae_mutex);
5660         bnx2x_gunzip_init(bp);
5661
5662         switch (load_code) {
5663         case FW_MSG_CODE_DRV_LOAD_COMMON:
5664                 rc = bnx2x_init_common(bp);
5665                 if (rc)
5666                         goto init_hw_err;
5667                 /* no break */
5668
5669         case FW_MSG_CODE_DRV_LOAD_PORT:
5670                 bp->dmae_ready = 1;
5671                 rc = bnx2x_init_port(bp);
5672                 if (rc)
5673                         goto init_hw_err;
5674                 /* no break */
5675
5676         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5677                 bp->dmae_ready = 1;
5678                 rc = bnx2x_init_func(bp);
5679                 if (rc)
5680                         goto init_hw_err;
5681                 break;
5682
5683         default:
5684                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5685                 break;
5686         }
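        /* Note that the fall-through above is deliberate: LOAD_COMMON runs
         * common + port + function init, LOAD_PORT runs port + function,
         * and LOAD_FUNCTION only the per-function init, matching how much
         * of the chip the MCP reports as already initialized.
         */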
5687
5688         if (!BP_NOMCP(bp)) {
5689                 int func = BP_FUNC(bp);
5690
5691                 bp->fw_drv_pulse_wr_seq =
5692                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
5693                                  DRV_PULSE_SEQ_MASK);
5694                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5695                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
5696                    bp->fw_drv_pulse_wr_seq, bp->func_stx);
5697         } else
5698                 bp->func_stx = 0;
5699
5700         /* this needs to be done before gunzip end */
5701         bnx2x_zero_def_sb(bp);
5702         for_each_queue(bp, i)
5703                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5704
5705 init_hw_err:
5706         bnx2x_gunzip_end(bp);
5707
5708         return rc;
5709 }
5710
5711 /* send the MCP a request, block until there is a reply */
5712 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5713 {
5714         int func = BP_FUNC(bp);
5715         u32 seq = ++bp->fw_seq;
5716         u32 rc = 0;
5717         u32 cnt = 1;
5718         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
5719
5720         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
5721         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
5722
5723         do {
5724                 /* let the FW do its magic ... */
5725                 msleep(delay);
5726
5727                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
5728
5729                 /* Give the FW up to 2 seconds (200 * 10ms, or 200 * 100ms on slow chips) */
5730         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5731
5732         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5733            cnt*delay, rc, seq);
5734
5735         /* is this a reply to our command? */
5736         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5737                 rc &= FW_MSG_CODE_MASK;
5738
5739         } else {
5740                 /* FW BUG! */
5741                 BNX2X_ERR("FW failed to respond!\n");
5742                 bnx2x_fw_dump(bp);
5743                 rc = 0;
5744         }
5745
5746         return rc;
5747 }
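/* A typical call site for the helper above (a sketch based on how this
 * driver uses it elsewhere; a zero return means the MCP did not reply):
 *
 *      u32 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
 *      if (!load_code) {
 *              BNX2X_ERR("MCP response failure, aborting\n");
 *              return -EBUSY;
 *      }
 */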
5748
5749 static void bnx2x_free_mem(struct bnx2x *bp)
5750 {
5751
5752 #define BNX2X_PCI_FREE(x, y, size) \
5753         do { \
5754                 if (x) { \
5755                         pci_free_consistent(bp->pdev, size, x, y); \
5756                         x = NULL; \
5757                         y = 0; \
5758                 } \
5759         } while (0)
5760
5761 #define BNX2X_FREE(x) \
5762         do { \
5763                 if (x) { \
5764                         vfree(x); \
5765                         x = NULL; \
5766                 } \
5767         } while (0)
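/* Both helpers above NULL the pointer (and BNX2X_PCI_FREE also zeroes the
 * DMA handle) after freeing, so a second invocation on the same slot is a
 * harmless no-op.
 */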
5768
5769         int i;
5770
5771         /* fastpath */
5772         for_each_queue(bp, i) {
5773
5774                 /* Status blocks */
5775                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5776                                bnx2x_fp(bp, i, status_blk_mapping),
5777                                sizeof(struct host_status_block) +
5778                                sizeof(struct eth_tx_db_data));
5779
5780                 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5781                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));