/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION      "1.45.26"
#define DRV_MODULE_RELDATE      "2009/01/26"
#define BNX2X_BC_VER            0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);

static int disable_tpa;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

module_param(disable_tpa, int, 0);

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}

static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}
176
177 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
178                       u32 len32)
179 {
180         struct dmae_command *dmae = &bp->init_dmae;
181         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
182         int cnt = 200;
183
184         if (!bp->dmae_ready) {
185                 u32 *data = bnx2x_sp(bp, wb_data[0]);
186
187                 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
188                    "  using indirect\n", dst_addr, len32);
189                 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
190                 return;
191         }
192
193         mutex_lock(&bp->dmae_mutex);
194
195         memset(dmae, 0, sizeof(struct dmae_command));
196
197         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
198                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
199                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
200 #ifdef __BIG_ENDIAN
201                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
202 #else
203                         DMAE_CMD_ENDIANITY_DW_SWAP |
204 #endif
205                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
206                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
207         dmae->src_addr_lo = U64_LO(dma_addr);
208         dmae->src_addr_hi = U64_HI(dma_addr);
209         dmae->dst_addr_lo = dst_addr >> 2;
210         dmae->dst_addr_hi = 0;
211         dmae->len = len32;
212         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
213         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
214         dmae->comp_val = DMAE_COMP_VAL;
215
216         DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
217            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
218                     "dst_addr [%x:%08x (%08x)]\n"
219            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
220            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
221            dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
222            dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
223         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
224            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
225            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
226
227         *wb_comp = 0;
228
229         bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
230
231         udelay(5);
232
233         while (*wb_comp != DMAE_COMP_VAL) {
234                 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
235
236                 if (!cnt) {
237                         BNX2X_ERR("dmae timeout!\n");
238                         break;
239                 }
240                 cnt--;
241                 /* adjust delay for emulation/FPGA */
242                 if (CHIP_REV_IS_SLOW(bp))
243                         msleep(100);
244                 else
245                         udelay(5);
246         }
247
248         mutex_unlock(&bp->dmae_mutex);
249 }
250
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = src_addr >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        u32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        printk("\n" KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

                BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("          rx_bd_prod(%x)  rx_bd_cons(%x)"
                          "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
                          fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
                          "  fp_c_idx(%x)  *sb_c_idx(%x)  fp_u_idx(%x)"
                          "  *sb_u_idx(%x)  bd data(%x,%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
                          fp->status_blk->c_status_block.status_block_index,
                          fp->fp_u_idx,
                          fp->status_blk->u_status_block.status_block_index,
                          hw_prods->packets_prod, hw_prods->bds_prod);

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j < end; j++) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
                                  sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j < end; j++) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j < end; j++) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j < end; j++) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j < end; j++) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
                                  j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
                  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0);
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else if (msi) {
                val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
                   val, port, addr);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
           val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

        REG_WR(bp, addr, val);

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig attention */
                                val |= 0x0100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i, offset;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                synchronize_irq(bp->msix_table[0].vector);
                offset = 1;
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i + offset].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

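/* Acknowledge a status block index to the IGU by writing an igu_ack_register
 * word to this port's HC interrupt-ack command register.
 */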
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

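/* Pick up new CSTORM/USTORM indices from the fastpath status block;
 * returns a non-zero bitmask if any of the indices changed.
 */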
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;
        u16 rc = 0;

        barrier(); /* status block is written to by the chip */
        if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
                fp->fp_c_idx = fpsb->c_status_block.status_block_index;
                rc |= 1;
        }
        if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
                fp->fp_u_idx = fpsb->u_status_block.status_block_index;
                rc |= 2;
        }
        return rc;
}

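/* Read the interrupt status word from this port's HC SIMD mask
 * command register.
 */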
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
        u16 tx_cons_sb;

        /* Tell compiler that status block fields can change */
        barrier();
        tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
        return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
        /* Tell compiler that consumer and producer can change */
        barrier();
        return (fp->tx_pkt_prod != fp->tx_pkt_cons);

}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_bd *tx_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_bd = &fp->tx_desc_ring[bd_idx];
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                         BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_bd->nbd) - 1;
        new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
        if (nbd > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif

        /* Skip a parse bd and the TSO split header bd
           since they have no mapping */
        if (nbd)
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
                                           ETH_TX_BD_FLAGS_TCP_CSUM |
                                           ETH_TX_BD_FLAGS_SW_LSO)) {
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                tx_bd = &fp->tx_desc_ring[bd_idx];
                /* is this a TSO split header bd? */
                if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
                        if (--nbd)
                                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                }
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_bd = &fp->tx_desc_ring[bd_idx];
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                               BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}

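/* Number of Tx BDs still available on the ring, counting the "next-page"
 * BDs as already used.
 */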
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        barrier(); /* Tell compiler that prod and cons can change */
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries
           It will be used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}

static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
        struct bnx2x *bp = fp->bp;
        struct netdev_queue *txq;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
        int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        txq = netdev_get_tx_queue(bp->dev, fp->index);
        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
                done++;

                if (done == work)
                        break;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* Need to make the tx_bd_cons update visible to start_xmit()
         * before checking for netif_tx_queue_stopped().  Without the
         * memory barrier, there is a small possibility that start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        /* TBD need a thresh? */
        if (unlikely(netif_tx_queue_stopped(txq))) {

                __netif_tx_lock(txq, smp_processor_id());

                if ((netif_tx_queue_stopped(txq)) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_tx_wake_queue(txq);

                __netif_tx_unlock(txq);
        }
}


static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           FP_IDX(fp), cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (FP_IDX(fp)) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp->state is %x\n", command, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;


        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending = 0;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        pci_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod;
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       bp->rx_offset + RX_COPY_THRESH,
                                       PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      SGE_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the two last indices in the page to 1:
           these are the indices that correspond to the "next" element,
           hence will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
        pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
                                               max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages >
            min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        fp->eth_q_stats.rx_skb_alloc_failed++;
                        return err;
                }

                /* Unmap the page as we are going to pass it to the stack */
                pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
                              SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

                /* Add one frag and update the appropriate fields in the skb */
                skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

                skb->data_len += frag_len;
                skb->truesize += frag_len;
                skb->len += frag_len;

                frag_size -= frag_len;
        }

        return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                           u16 queue, int pad, int len, union eth_rx_cqe *cqe,
                           u16 cqe_idx)
{
        struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
        struct sk_buff *skb = rx_buf->skb;
        /* alloc new skb */
        struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

        /* Unmap skb in the pool anyway, as we are going to change
           pool entry status to BNX2X_TPA_STOP even if new skb allocation
           fails. */
        pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
                         bp->rx_buf_size, PCI_DMA_FROMDEVICE);

        if (likely(new_skb)) {
                /* fix ip xsum and give it to the stack */
                /* (no need to map the new skb) */
#ifdef BCM_VLAN
                int is_vlan_cqe =
                        (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
                         PARSING_FLAGS_VLAN);
                int is_not_hwaccel_vlan_cqe =
                        (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

                prefetch(skb);
                prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
                if (pad + len > bp->rx_buf_size) {
                        BNX2X_ERR("skb_put is about to fail...  "
                                  "pad %d  len %d  rx_buf_size %d\n",
                                  pad, len, bp->rx_buf_size);
                        bnx2x_panic();
                        return;
                }
#endif

                skb_reserve(skb, pad);
                skb_put(skb, len);

                skb->protocol = eth_type_trans(skb, bp->dev);
                skb->ip_summed = CHECKSUM_UNNECESSARY;

                {
                        struct iphdr *iph;

                        iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
                        /* If there is no Rx VLAN offloading -
                           take the VLAN tag into account */
                        if (unlikely(is_not_hwaccel_vlan_cqe))
                                iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
                        iph->check = 0;
                        iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
                }

                if (!bnx2x_fill_frag_skb(bp, fp, skb,
                                         &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
                        if ((bp->vlgrp != NULL) && is_vlan_cqe &&
                            (!is_not_hwaccel_vlan_cqe))
                                vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                                le16_to_cpu(cqe->fast_path_cqe.
                                                            vlan_tag));
                        else
#endif
                                netif_receive_skb(skb);
                } else {
                        DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
                           " - dropping packet!\n");
                        dev_kfree_skb(skb);
                }


                /* put new skb in bin */
                fp->tpa_pool[queue].skb = new_skb;

        } else {
                /* else drop the packet and keep the buffer in the bin */
                DP(NETIF_MSG_RX_STATUS,
                   "Failed to allocate new skb - dropping packet!\n");
                fp->eth_q_stats.rx_skb_alloc_failed++;
        }

        fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
                                        struct bnx2x_fastpath *fp,
                                        u16 bd_prod, u16 rx_comp_prod,
                                        u16 rx_sge_prod)
{
        struct ustorm_eth_rx_producers rx_prods = {0};
        int i;

        /* Update producers */
        rx_prods.bd_prod = bd_prod;
        rx_prods.cqe_prod = rx_comp_prod;
        rx_prods.sge_prod = rx_sge_prod;

        /*
         * Make sure that the BD and SGE data is updated before updating the
         * producers since FW might read the BD/SGE right after the producer
         * is updated.
         * This is only applicable for weak-ordered memory model archs such
         * as IA-64. The following barrier is also mandatory since FW
         * assumes BDs must have buffers.
1405          */
1406         wmb();
1407
1408         for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1409                 REG_WR(bp, BAR_USTRORM_INTMEM +
1410                        USTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
1411                        ((u32 *)&rx_prods)[i]);
1412
1413         mmiowb(); /* keep prod updates ordered */
1414
1415         DP(NETIF_MSG_RX_STATUS,
1416            "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
1417            fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1418 }
1419
1420 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1421 {
1422         struct bnx2x *bp = fp->bp;
1423         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1424         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1425         int rx_pkt = 0;
1426
1427 #ifdef BNX2X_STOP_ON_ERROR
1428         if (unlikely(bp->panic))
1429                 return 0;
1430 #endif
1431
1432         /* The CQ "next element" is the same size as a regular element,
1433            that's why it's ok here */
1434         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1435         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1436                 hw_comp_cons++;
1437
1438         bd_cons = fp->rx_bd_cons;
1439         bd_prod = fp->rx_bd_prod;
1440         bd_prod_fw = bd_prod;
1441         sw_comp_cons = fp->rx_comp_cons;
1442         sw_comp_prod = fp->rx_comp_prod;
1443
1444         /* Memory barrier necessary as speculative reads of the rx
1445          * buffer can be ahead of the index in the status block
1446          */
1447         rmb();
1448
1449         DP(NETIF_MSG_RX_STATUS,
1450            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1451            FP_IDX(fp), hw_comp_cons, sw_comp_cons);
1452
1453         while (sw_comp_cons != hw_comp_cons) {
1454                 struct sw_rx_bd *rx_buf = NULL;
1455                 struct sk_buff *skb;
1456                 union eth_rx_cqe *cqe;
1457                 u8 cqe_fp_flags;
1458                 u16 len, pad;
1459
1460                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1461                 bd_prod = RX_BD(bd_prod);
1462                 bd_cons = RX_BD(bd_cons);
1463
1464                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1465                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1466
1467                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1468                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1469                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1470                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1471                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1472                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1473
1474                 /* is this a slowpath msg? */
1475                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1476                         bnx2x_sp_event(fp, cqe);
1477                         goto next_cqe;
1478
1479                 /* this is an rx packet */
1480                 } else {
1481                         rx_buf = &fp->rx_buf_ring[bd_cons];
1482                         skb = rx_buf->skb;
1483                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1484                         pad = cqe->fast_path_cqe.placement_offset;
1485
1486                         /* If CQE is marked both TPA_START and TPA_END
1487                            it is a non-TPA CQE */
1488                         if ((!fp->disable_tpa) &&
1489                             (TPA_TYPE(cqe_fp_flags) !=
1490                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1491                                 u16 queue = cqe->fast_path_cqe.queue_index;
1492
1493                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1494                                         DP(NETIF_MSG_RX_STATUS,
1495                                            "calling tpa_start on queue %d\n",
1496                                            queue);
1497
1498                                         bnx2x_tpa_start(fp, queue, skb,
1499                                                         bd_cons, bd_prod);
1500                                         goto next_rx;
1501                                 }
1502
1503                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1504                                         DP(NETIF_MSG_RX_STATUS,
1505                                            "calling tpa_stop on queue %d\n",
1506                                            queue);
1507
1508                                         if (!BNX2X_RX_SUM_FIX(cqe))
1509                                                 BNX2X_ERR("STOP on non-TCP "
1510                                                           "data\n");
1511
1512                                         /* This is the size of the linear data
1513                                            on this skb */
1514                                         len = le16_to_cpu(cqe->fast_path_cqe.
1515                                                                 len_on_bd);
1516                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1517                                                     len, cqe, comp_ring_cons);
1518 #ifdef BNX2X_STOP_ON_ERROR
1519                                         if (bp->panic)
1520                                                 return -EINVAL;
1521 #endif
1522
1523                                         bnx2x_update_sge_prod(fp,
1524                                                         &cqe->fast_path_cqe);
1525                                         goto next_cqe;
1526                                 }
1527                         }
1528
1529                         pci_dma_sync_single_for_device(bp->pdev,
1530                                         pci_unmap_addr(rx_buf, mapping),
1531                                                        pad + RX_COPY_THRESH,
1532                                                        PCI_DMA_FROMDEVICE);
1533                         prefetch(skb);
1534                         prefetch(((char *)(skb)) + 128);
1535
1536                         /* is this an error packet? */
1537                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1538                                 DP(NETIF_MSG_RX_ERR,
1539                                    "ERROR  flags %x  rx packet %u\n",
1540                                    cqe_fp_flags, sw_comp_cons);
1541                                 fp->eth_q_stats.rx_err_discard_pkt++;
1542                                 goto reuse_rx;
1543                         }
1544
1545                         /* Since we don't have a jumbo ring
1546                          * copy small packets if mtu > 1500
1547                          */
1548                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1549                             (len <= RX_COPY_THRESH)) {
1550                                 struct sk_buff *new_skb;
1551
1552                                 new_skb = netdev_alloc_skb(bp->dev,
1553                                                            len + pad);
1554                                 if (new_skb == NULL) {
1555                                         DP(NETIF_MSG_RX_ERR,
1556                                            "ERROR  packet dropped "
1557                                            "because of alloc failure\n");
1558                                         fp->eth_q_stats.rx_skb_alloc_failed++;
1559                                         goto reuse_rx;
1560                                 }
1561
1562                                 /* aligned copy */
1563                                 skb_copy_from_linear_data_offset(skb, pad,
1564                                                     new_skb->data + pad, len);
1565                                 skb_reserve(new_skb, pad);
1566                                 skb_put(new_skb, len);
1567
1568                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1569
1570                                 skb = new_skb;
1571
1572                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1573                                 pci_unmap_single(bp->pdev,
1574                                         pci_unmap_addr(rx_buf, mapping),
1575                                                  bp->rx_buf_size,
1576                                                  PCI_DMA_FROMDEVICE);
1577                                 skb_reserve(skb, pad);
1578                                 skb_put(skb, len);
1579
1580                         } else {
1581                                 DP(NETIF_MSG_RX_ERR,
1582                                    "ERROR  packet dropped because "
1583                                    "of alloc failure\n");
1584                                 fp->eth_q_stats.rx_skb_alloc_failed++;
1585 reuse_rx:
1586                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1587                                 goto next_rx;
1588                         }
1589
1590                         skb->protocol = eth_type_trans(skb, bp->dev);
1591
1592                         skb->ip_summed = CHECKSUM_NONE;
1593                         if (bp->rx_csum) {
1594                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1595                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1596                                 else
1597                                         fp->eth_q_stats.hw_csum_err++;
1598                         }
1599                 }
1600
1601                 skb_record_rx_queue(skb, fp->index);
1602 #ifdef BCM_VLAN
1603                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1604                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1605                      PARSING_FLAGS_VLAN))
1606                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1607                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1608                 else
1609 #endif
1610                         netif_receive_skb(skb);
1611
1612
1613 next_rx:
1614                 rx_buf->skb = NULL;
1615
1616                 bd_cons = NEXT_RX_IDX(bd_cons);
1617                 bd_prod = NEXT_RX_IDX(bd_prod);
1618                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1619                 rx_pkt++;
1620 next_cqe:
1621                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1622                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1623
1624                 if (rx_pkt == budget)
1625                         break;
1626         } /* while */
1627
1628         fp->rx_bd_cons = bd_cons;
1629         fp->rx_bd_prod = bd_prod_fw;
1630         fp->rx_comp_cons = sw_comp_cons;
1631         fp->rx_comp_prod = sw_comp_prod;
1632
1633         /* Update producers */
1634         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1635                              fp->rx_sge_prod);
1636
1637         fp->rx_pkt += rx_pkt;
1638         fp->rx_calls++;
1639
1640         return rx_pkt;
1641 }
1642
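/* MSI-X handler for a single fastpath queue: ack the status block with
 * interrupts disabled and let NAPI do the actual RX/TX work.
 */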
1643 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1644 {
1645         struct bnx2x_fastpath *fp = fp_cookie;
1646         struct bnx2x *bp = fp->bp;
1647         int index = FP_IDX(fp);
1648
1649         /* Return here if interrupt is disabled */
1650         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1651                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1652                 return IRQ_HANDLED;
1653         }
1654
1655         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1656            index, FP_SB_ID(fp));
1657         bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
1658
1659 #ifdef BNX2X_STOP_ON_ERROR
1660         if (unlikely(bp->panic))
1661                 return IRQ_HANDLED;
1662 #endif
1663
1664         prefetch(fp->rx_cons_sb);
1665         prefetch(fp->tx_cons_sb);
1666         prefetch(&fp->status_blk->c_status_block.status_block_index);
1667         prefetch(&fp->status_blk->u_status_block.status_block_index);
1668
1669         napi_schedule(&bnx2x_fp(bp, index, napi));
1670
1671         return IRQ_HANDLED;
1672 }
1673
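/* INTx/MSI handler: ack the interrupt, schedule NAPI for fastpath RX/TX work
 * and queue the slowpath task for hardware attentions.
 */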
1674 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1675 {
1676         struct bnx2x *bp = netdev_priv(dev_instance);
1677         u16 status = bnx2x_ack_int(bp);
1678         u16 mask;
1679
1680         /* Return here if interrupt is shared and it's not for us */
1681         if (unlikely(status == 0)) {
1682                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1683                 return IRQ_NONE;
1684         }
1685         DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);
1686
1687         /* Return here if interrupt is disabled */
1688         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1689                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1690                 return IRQ_HANDLED;
1691         }
1692
1693 #ifdef BNX2X_STOP_ON_ERROR
1694         if (unlikely(bp->panic))
1695                 return IRQ_HANDLED;
1696 #endif
1697
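        /* Each status block asserts bit (sb_id + 1) in the interrupt status;
         * check the bit belonging to fastpath queue 0 */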
1698         mask = 0x2 << bp->fp[0].sb_id;
1699         if (status & mask) {
1700                 struct bnx2x_fastpath *fp = &bp->fp[0];
1701
1702                 prefetch(fp->rx_cons_sb);
1703                 prefetch(fp->tx_cons_sb);
1704                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1705                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1706
1707                 napi_schedule(&bnx2x_fp(bp, 0, napi));
1708
1709                 status &= ~mask;
1710         }
1711
1712
1713         if (unlikely(status & 0x1)) {
1714                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1715
1716                 status &= ~0x1;
1717                 if (!status)
1718                         return IRQ_HANDLED;
1719         }
1720
1721         if (status)
1722                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1723                    status);
1724
1725         return IRQ_HANDLED;
1726 }
1727
1728 /* end of fast path */
1729
1730 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1731
1732 /* Link */
1733
1734 /*
1735  * General service functions
1736  */
1737
1738 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1739 {
1740         u32 lock_status;
1741         u32 resource_bit = (1 << resource);
1742         int func = BP_FUNC(bp);
1743         u32 hw_lock_control_reg;
1744         int cnt;
1745
1746         /* Validating that the resource is within range */
1747         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1748                 DP(NETIF_MSG_HW,
1749                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1750                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1751                 return -EINVAL;
1752         }
1753
1754         if (func <= 5) {
1755                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1756         } else {
1757                 hw_lock_control_reg =
1758                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1759         }
1760
1761         /* Validating that the resource is not already taken */
1762         lock_status = REG_RD(bp, hw_lock_control_reg);
1763         if (lock_status & resource_bit) {
1764                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1765                    lock_status, resource_bit);
1766                 return -EEXIST;
1767         }
1768
1769         /* Try for 5 seconds every 5ms */
1770         for (cnt = 0; cnt < 1000; cnt++) {
1771                 /* Try to acquire the lock */
1772                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1773                 lock_status = REG_RD(bp, hw_lock_control_reg);
1774                 if (lock_status & resource_bit)
1775                         return 0;
1776
1777                 msleep(5);
1778         }
1779         DP(NETIF_MSG_HW, "Timeout\n");
1780         return -EAGAIN;
1781 }
1782
1783 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1784 {
1785         u32 lock_status;
1786         u32 resource_bit = (1 << resource);
1787         int func = BP_FUNC(bp);
1788         u32 hw_lock_control_reg;
1789
1790         /* Validating that the resource is within range */
1791         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1792                 DP(NETIF_MSG_HW,
1793                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1794                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1795                 return -EINVAL;
1796         }
1797
1798         if (func <= 5) {
1799                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1800         } else {
1801                 hw_lock_control_reg =
1802                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1803         }
1804
1805         /* Validating that the resource is currently taken */
1806         lock_status = REG_RD(bp, hw_lock_control_reg);
1807         if (!(lock_status & resource_bit)) {
1808                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1809                    lock_status, resource_bit);
1810                 return -EFAULT;
1811         }
1812
1813         REG_WR(bp, hw_lock_control_reg, resource_bit);
1814         return 0;
1815 }
1816
1817 /* HW Lock for shared dual port PHYs */
1818 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1819 {
1820         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1821
1822         mutex_lock(&bp->port.phy_mutex);
1823
1824         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1825             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1826                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1827 }
1828
1829 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1830 {
1831         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1832
1833         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1834             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1835                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1836
1837         mutex_unlock(&bp->port.phy_mutex);
1838 }
1839
1840 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1841 {
1842         /* The GPIO should be swapped if swap register is set and active */
1843         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1844                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1845         int gpio_shift = gpio_num +
1846                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1847         u32 gpio_mask = (1 << gpio_shift);
1848         u32 gpio_reg;
1849
1850         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1851                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1852                 return -EINVAL;
1853         }
1854
1855         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1856         /* read GPIO and mask out everything except the float bits */
1857         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1858
1859         switch (mode) {
1860         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1861                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1862                    gpio_num, gpio_shift);
1863                 /* clear FLOAT and set CLR */
1864                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1865                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1866                 break;
1867
1868         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1869                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1870                    gpio_num, gpio_shift);
1871                 /* clear FLOAT and set SET */
1872                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1873                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1874                 break;
1875
1876         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1877                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1878                    gpio_num, gpio_shift);
1879                 /* set FLOAT */
1880                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1881                 break;
1882
1883         default:
1884                 break;
1885         }
1886
1887         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1888         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1889
1890         return 0;
1891 }
1892
1893 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1894 {
1895         u32 spio_mask = (1 << spio_num);
1896         u32 spio_reg;
1897
1898         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1899             (spio_num > MISC_REGISTERS_SPIO_7)) {
1900                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1901                 return -EINVAL;
1902         }
1903
1904         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1905         /* read SPIO and mask out everything except the float bits */
1906         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1907
1908         switch (mode) {
1909         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1910                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1911                 /* clear FLOAT and set CLR */
1912                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1913                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1914                 break;
1915
1916         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1917                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1918                 /* clear FLOAT and set SET */
1919                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1920                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1921                 break;
1922
1923         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1924                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1925                 /* set FLOAT */
1926                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1927                 break;
1928
1929         default:
1930                 break;
1931         }
1932
1933         REG_WR(bp, MISC_REG_SPIO, spio_reg);
1934         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1935
1936         return 0;
1937 }
1938
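/* Translate the negotiated IEEE pause configuration into the
 * ADVERTISED_Pause/ADVERTISED_Asym_Pause bits in bp->port.advertising.
 */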
1939 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
1940 {
1941         switch (bp->link_vars.ieee_fc &
1942                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
1943         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1944                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1945                                           ADVERTISED_Pause);
1946                 break;
1947         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1948                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
1949                                          ADVERTISED_Pause);
1950                 break;
1951         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1952                 bp->port.advertising |= ADVERTISED_Asym_Pause;
1953                 break;
1954         default:
1955                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1956                                           ADVERTISED_Pause);
1957                 break;
1958         }
1959 }
1960
1961 static void bnx2x_link_report(struct bnx2x *bp)
1962 {
1963         if (bp->link_vars.link_up) {
1964                 if (bp->state == BNX2X_STATE_OPEN)
1965                         netif_carrier_on(bp->dev);
1966                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
1967
1968                 printk("%d Mbps ", bp->link_vars.line_speed);
1969
1970                 if (bp->link_vars.duplex == DUPLEX_FULL)
1971                         printk("full duplex");
1972                 else
1973                         printk("half duplex");
1974
1975                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
1976                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
1977                                 printk(", receive ");
1978                                 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1979                                         printk("& transmit ");
1980                         } else {
1981                                 printk(", transmit ");
1982                         }
1983                         printk("flow control ON");
1984                 }
1985                 printk("\n");
1986
1987         } else { /* link_down */
1988                 netif_carrier_off(bp->dev);
1989                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
1990         }
1991 }
1992
1993 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1994 {
1995         if (!BP_NOMCP(bp)) {
1996                 u8 rc;
1997
1998                 /* Initialize link parameters structure variables */
1999                 /* It is recommended to turn off RX FC for jumbo frames
2000                    for better performance */
2001                 if (IS_E1HMF(bp))
2002                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2003                 else if (bp->dev->mtu > 5000)
2004                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2005                 else
2006                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2007
2008                 bnx2x_acquire_phy_lock(bp);
2009                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2010                 bnx2x_release_phy_lock(bp);
2011
2012                 bnx2x_calc_fc_adv(bp);
2013
2014                 if (bp->link_vars.link_up)
2015                         bnx2x_link_report(bp);
2016
2017
2018                 return rc;
2019         }
2020         BNX2X_ERR("Bootcode is missing - not initializing link\n");
2021         return -EINVAL;
2022 }
2023
2024 static void bnx2x_link_set(struct bnx2x *bp)
2025 {
2026         if (!BP_NOMCP(bp)) {
2027                 bnx2x_acquire_phy_lock(bp);
2028                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2029                 bnx2x_release_phy_lock(bp);
2030
2031                 bnx2x_calc_fc_adv(bp);
2032         } else
2033                 BNX2X_ERR("Bootcode is missing - not setting link\n");
2034 }
2035
2036 static void bnx2x__link_reset(struct bnx2x *bp)
2037 {
2038         if (!BP_NOMCP(bp)) {
2039                 bnx2x_acquire_phy_lock(bp);
2040                 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
2041                 bnx2x_release_phy_lock(bp);
2042         } else
2043                 BNX2X_ERR("Bootcode is missing - not resetting link\n");
2044 }
2045
2046 static u8 bnx2x_link_test(struct bnx2x *bp)
2047 {
2048         u8 rc;
2049
2050         bnx2x_acquire_phy_lock(bp);
2051         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2052         bnx2x_release_phy_lock(bp);
2053
2054         return rc;
2055 }
2056
2057 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2058 {
2059         u32 r_param = bp->link_vars.line_speed / 8;
2060         u32 fair_periodic_timeout_usec;
2061         u32 t_fair;
2062
2063         memset(&(bp->cmng.rs_vars), 0,
2064                sizeof(struct rate_shaping_vars_per_port));
2065         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2066
2067         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2068         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2069
2070         /* this is the threshold below which no timer arming will occur;
2071            the 1.25 coefficient makes the threshold a little bigger
2072            than the real time, to compensate for timer inaccuracy */
2073         bp->cmng.rs_vars.rs_threshold =
2074                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2075
2076         /* resolution of fairness timer */
2077         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2078         /* for 10G it is 1000usec. for 1G it is 10000usec. */
2079         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2080
2081         /* this is the threshold below which we won't arm the timer anymore */
2082         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2083
2084         /* we multiply by 1e3/8 to get bytes/msec.
2085            We don't want the credits to exceed
2086            t_fair*FAIR_MEM (the algorithm resolution) */
2087         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2088         /* since each tick is 4 usec */
2089         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2090 }
2091
2092 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2093 {
2094         struct rate_shaping_vars_per_vn m_rs_vn;
2095         struct fairness_vars_per_vn m_fair_vn;
2096         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2097         u16 vn_min_rate, vn_max_rate;
2098         int i;
2099
2100         /* If function is hidden - set min and max to zeroes */
2101         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2102                 vn_min_rate = 0;
2103                 vn_max_rate = 0;
2104
2105         } else {
2106                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2107                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2108                 /* If fairness is enabled (not all min rates are zeroes) and
2109                    if current min rate is zero - set it to 1.
2110                    This is a requirement of the algorithm. */
2111                 if (bp->vn_weight_sum && (vn_min_rate == 0))
2112                         vn_min_rate = DEF_MIN_RATE;
2113                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2114                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2115         }
2116
2117         DP(NETIF_MSG_IFUP,
2118            "func %d: vn_min_rate=%d  vn_max_rate=%d  vn_weight_sum=%d\n",
2119            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2120
2121         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2122         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2123
2124         /* global vn counter - maximal Mbps for this vn */
2125         m_rs_vn.vn_counter.rate = vn_max_rate;
2126
2127         /* quota - number of bytes transmitted in this period */
2128         m_rs_vn.vn_counter.quota =
2129                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2130
2131         if (bp->vn_weight_sum) {
2132                 /* credit for each period of the fairness algorithm:
2133                    number of bytes in T_FAIR (the vn's share of the port rate).
2134                    vn_weight_sum should not be larger than 10000, thus
2135                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2136                    than zero */
2137                 m_fair_vn.vn_credit_delta =
2138                         max((u32)(vn_min_rate * (T_FAIR_COEF /
2139                                                  (8 * bp->vn_weight_sum))),
2140                             (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2141                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2142                    m_fair_vn.vn_credit_delta);
2143         }
2144
2145         /* Store it to internal memory */
2146         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2147                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2148                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2149                        ((u32 *)(&m_rs_vn))[i]);
2150
2151         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2152                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2153                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2154                        ((u32 *)(&m_fair_vn))[i]);
2155 }
2156
2157
2158 /* This function is called upon link interrupt */
2159 static void bnx2x_link_attn(struct bnx2x *bp)
2160 {
2161         /* Make sure that we are synced with the current statistics */
2162         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2163
2164         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2165
2166         if (bp->link_vars.link_up) {
2167
2168                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2169                         struct host_port_stats *pstats;
2170
2171                         pstats = bnx2x_sp(bp, port_stats);
2172                         /* reset old bmac stats */
2173                         memset(&(pstats->mac_stx[0]), 0,
2174                                sizeof(struct mac_stx));
2175                 }
2176                 if ((bp->state == BNX2X_STATE_OPEN) ||
2177                     (bp->state == BNX2X_STATE_DISABLED))
2178                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2179         }
2180
2181         /* indicate link status */
2182         bnx2x_link_report(bp);
2183
2184         if (IS_E1HMF(bp)) {
2185                 int port = BP_PORT(bp);
2186                 int func;
2187                 int vn;
2188
2189                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2190                         if (vn == BP_E1HVN(bp))
2191                                 continue;
2192
2193                         func = ((vn << 1) | port);
2194
2195                         /* Set the attention towards other drivers
2196                            on the same port */
2197                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2198                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2199                 }
2200
2201                 if (bp->link_vars.link_up) {
2202                         int i;
2203
2204                         /* Init rate shaping and fairness contexts */
2205                         bnx2x_init_port_minmax(bp);
2206
2207                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2208                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2209
2210                         /* Store it to internal memory */
2211                         for (i = 0;
2212                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
2213                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2214                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2215                                        ((u32 *)(&bp->cmng))[i]);
2216                 }
2217         }
2218 }
2219
2220 static void bnx2x__link_status_update(struct bnx2x *bp)
2221 {
2222         if (bp->state != BNX2X_STATE_OPEN)
2223                 return;
2224
2225         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2226
2227         if (bp->link_vars.link_up)
2228                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2229         else
2230                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2231
2232         /* indicate link status */
2233         bnx2x_link_report(bp);
2234 }
2235
2236 static void bnx2x_pmf_update(struct bnx2x *bp)
2237 {
2238         int port = BP_PORT(bp);
2239         u32 val;
2240
2241         bp->port.pmf = 1;
2242         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2243
2244         /* enable nig attention */
2245         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2246         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2247         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2248
2249         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2250 }
2251
2252 /* end of Link */
2253
2254 /* slow path */
2255
2256 /*
2257  * General service functions
2258  */
2259
2260 /* the slow path queue is odd since completions arrive on the fastpath ring */
2261 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2262                          u32 data_hi, u32 data_lo, int common)
2263 {
2264         int func = BP_FUNC(bp);
2265
2266         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2267            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2268            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2269            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2270            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2271
2272 #ifdef BNX2X_STOP_ON_ERROR
2273         if (unlikely(bp->panic))
2274                 return -EIO;
2275 #endif
2276
2277         spin_lock_bh(&bp->spq_lock);
2278
2279         if (!bp->spq_left) {
2280                 BNX2X_ERR("BUG! SPQ ring full!\n");
2281                 spin_unlock_bh(&bp->spq_lock);
2282                 bnx2x_panic();
2283                 return -EBUSY;
2284         }
2285
2286         /* CID needs port number to be encoded in it */
2287         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2288                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2289                                      HW_CID(bp, cid)));
2290         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2291         if (common)
2292                 bp->spq_prod_bd->hdr.type |=
2293                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2294
2295         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2296         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2297
2298         bp->spq_left--;
2299
2300         if (bp->spq_prod_bd == bp->spq_last_bd) {
2301                 bp->spq_prod_bd = bp->spq;
2302                 bp->spq_prod_idx = 0;
2303                 DP(NETIF_MSG_TIMER, "end of spq\n");
2304
2305         } else {
2306                 bp->spq_prod_bd++;
2307                 bp->spq_prod_idx++;
2308         }
2309
2310         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2311                bp->spq_prod_idx);
2312
2313         spin_unlock_bh(&bp->spq_lock);
2314         return 0;
2315 }
2316
2317 /* acquire split MCP access lock register */
2318 static int bnx2x_acquire_alr(struct bnx2x *bp)
2319 {
2320         u32 i, j, val;
2321         int rc = 0;
2322
2323         might_sleep();
2324         i = 100;
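        /* poll for up to ~5 seconds (1000 attempts, 5 ms apart) */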
2325         for (j = 0; j < i*10; j++) {
2326                 val = (1UL << 31);
2327                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2328                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2329                 if (val & (1L << 31))
2330                         break;
2331
2332                 msleep(5);
2333         }
2334         if (!(val & (1L << 31))) {
2335                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2336                 rc = -EBUSY;
2337         }
2338
2339         return rc;
2340 }
2341
2342 /* release split MCP access lock register */
2343 static void bnx2x_release_alr(struct bnx2x *bp)
2344 {
2345         u32 val = 0;
2346
2347         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2348 }
2349
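/* Compare the driver's cached default status block indices against the ones
 * written by the chip and return a bitmask of the sections that changed:
 * 1 - attention, 2 - cstorm, 4 - ustorm, 8 - xstorm, 16 - tstorm.
 */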
2350 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2351 {
2352         struct host_def_status_block *def_sb = bp->def_status_blk;
2353         u16 rc = 0;
2354
2355         barrier(); /* status block is written to by the chip */
2356         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2357                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2358                 rc |= 1;
2359         }
2360         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2361                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2362                 rc |= 2;
2363         }
2364         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2365                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2366                 rc |= 4;
2367         }
2368         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2369                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2370                 rc |= 8;
2371         }
2372         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2373                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2374                 rc |= 16;
2375         }
2376         return rc;
2377 }
2378
2379 /*
2380  * slow path service functions
2381  */
2382
2383 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2384 {
2385         int port = BP_PORT(bp);
2386         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2387                        COMMAND_REG_ATTN_BITS_SET);
2388         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2389                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2390         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2391                                        NIG_REG_MASK_INTERRUPT_PORT0;
2392         u32 aeu_mask;
2393
2394         if (bp->attn_state & asserted)
2395                 BNX2X_ERR("IGU ERROR\n");
2396
2397         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2398         aeu_mask = REG_RD(bp, aeu_addr);
2399
2400         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2401            aeu_mask, asserted);
2402         aeu_mask &= ~(asserted & 0xff);
2403         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2404
2405         REG_WR(bp, aeu_addr, aeu_mask);
2406         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2407
2408         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2409         bp->attn_state |= asserted;
2410         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2411
2412         if (asserted & ATTN_HARD_WIRED_MASK) {
2413                 if (asserted & ATTN_NIG_FOR_FUNC) {
2414
2415                         bnx2x_acquire_phy_lock(bp);
2416
2417                         /* save nig interrupt mask */
2418                         bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2419                         REG_WR(bp, nig_int_mask_addr, 0);
2420
2421                         bnx2x_link_attn(bp);
2422
2423                         /* handle unicore attn? */
2424                 }
2425                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2426                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2427
2428                 if (asserted & GPIO_2_FUNC)
2429                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2430
2431                 if (asserted & GPIO_3_FUNC)
2432                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2433
2434                 if (asserted & GPIO_4_FUNC)
2435                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2436
2437                 if (port == 0) {
2438                         if (asserted & ATTN_GENERAL_ATTN_1) {
2439                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2440                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2441                         }
2442                         if (asserted & ATTN_GENERAL_ATTN_2) {
2443                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2444                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2445                         }
2446                         if (asserted & ATTN_GENERAL_ATTN_3) {
2447                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2448                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2449                         }
2450                 } else {
2451                         if (asserted & ATTN_GENERAL_ATTN_4) {
2452                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2453                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2454                         }
2455                         if (asserted & ATTN_GENERAL_ATTN_5) {
2456                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2457                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2458                         }
2459                         if (asserted & ATTN_GENERAL_ATTN_6) {
2460                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2461                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2462                         }
2463                 }
2464
2465         } /* if hardwired */
2466
2467         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2468            asserted, hc_addr);
2469         REG_WR(bp, hc_addr, asserted);
2470
2471         /* now set back the mask */
2472         if (asserted & ATTN_NIG_FOR_FUNC) {
2473                 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2474                 bnx2x_release_phy_lock(bp);
2475         }
2476 }
2477
2478 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2479 {
2480         int port = BP_PORT(bp);
2481         int reg_offset;
2482         u32 val;
2483
2484         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2485                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2486
2487         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2488
2489                 val = REG_RD(bp, reg_offset);
2490                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2491                 REG_WR(bp, reg_offset, val);
2492
2493                 BNX2X_ERR("SPIO5 hw attention\n");
2494
2495                 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2496                 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
2497                 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2498                         /* Fan failure attention */
2499
2500                         /* The PHY reset is controlled by GPIO 1 */
2501                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2502                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2503                         /* Low power mode is controlled by GPIO 2 */
2504                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2505                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2506                         /* mark the failure */
2507                         bp->link_params.ext_phy_config &=
2508                                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2509                         bp->link_params.ext_phy_config |=
2510                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2511                         SHMEM_WR(bp,
2512                                  dev_info.port_hw_config[port].
2513                                                         external_phy_config,
2514                                  bp->link_params.ext_phy_config);
2515                         /* log the failure */
2516                         printk(KERN_ERR PFX "Fan Failure on Network"
2517                                " Controller %s has caused the driver to"
2518                                " shutdown the card to prevent permanent"
2519                                " damage.  Please contact Dell Support for"
2520                                " assistance\n", bp->dev->name);
2521                         break;
2522
2523                 default:
2524                         break;
2525                 }
2526         }
2527
2528         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2529
2530                 val = REG_RD(bp, reg_offset);
2531                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2532                 REG_WR(bp, reg_offset, val);
2533
2534                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2535                           (attn & HW_INTERRUT_ASSERT_SET_0));
2536                 bnx2x_panic();
2537         }
2538 }
2539
2540 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2541 {
2542         u32 val;
2543
2544         if (attn & BNX2X_DOORQ_ASSERT) {
2545
2546                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2547                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2548                 /* DORQ discard attention */
2549                 if (val & 0x2)
2550                         BNX2X_ERR("FATAL error from DORQ\n");
2551         }
2552
2553         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2554
2555                 int port = BP_PORT(bp);
2556                 int reg_offset;
2557
2558                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2559                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2560
2561                 val = REG_RD(bp, reg_offset);
2562                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2563                 REG_WR(bp, reg_offset, val);
2564
2565                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2566                           (attn & HW_INTERRUT_ASSERT_SET_1));
2567                 bnx2x_panic();
2568         }
2569 }
2570
2571 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2572 {
2573         u32 val;
2574
2575         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2576
2577                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2578                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2579                 /* CFC error attention */
2580                 if (val & 0x2)
2581                         BNX2X_ERR("FATAL error from CFC\n");
2582         }
2583
2584         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2585
2586                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2587                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2588                 /* RQ_USDMDP_FIFO_OVERFLOW */
2589                 if (val & 0x18000)
2590                         BNX2X_ERR("FATAL error from PXP\n");
2591         }
2592
2593         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2594
2595                 int port = BP_PORT(bp);
2596                 int reg_offset;
2597
2598                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2599                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2600
2601                 val = REG_RD(bp, reg_offset);
2602                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2603                 REG_WR(bp, reg_offset, val);
2604
2605                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2606                           (attn & HW_INTERRUT_ASSERT_SET_2));
2607                 bnx2x_panic();
2608         }
2609 }
2610
2611 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2612 {
2613         u32 val;
2614
2615         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2616
2617                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2618                         int func = BP_FUNC(bp);
2619
2620                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2621                         bnx2x__link_status_update(bp);
2622                         if (SHMEM_RD(bp, func_mb[func].drv_status) &
2623                                                         DRV_STATUS_PMF)
2624                                 bnx2x_pmf_update(bp);
2625
2626                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2627
2628                         BNX2X_ERR("MC assert!\n");
2629                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2630                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2631                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2632                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2633                         bnx2x_panic();
2634
2635                 } else if (attn & BNX2X_MCP_ASSERT) {
2636
2637                         BNX2X_ERR("MCP assert!\n");
2638                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2639                         bnx2x_fw_dump(bp);
2640
2641                 } else
2642                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2643         }
2644
2645         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2646                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2647                 if (attn & BNX2X_GRC_TIMEOUT) {
2648                         val = CHIP_IS_E1H(bp) ?
2649                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2650                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2651                 }
2652                 if (attn & BNX2X_GRC_RSV) {
2653                         val = CHIP_IS_E1H(bp) ?
2654                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2655                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2656                 }
2657                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2658         }
2659 }
2660
2661 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2662 {
2663         struct attn_route attn;
2664         struct attn_route group_mask;
2665         int port = BP_PORT(bp);
2666         int index;
2667         u32 reg_addr;
2668         u32 val;
2669         u32 aeu_mask;
2670
2671         /* need to take HW lock because MCP or other port might also
2672            try to handle this event */
2673         bnx2x_acquire_alr(bp);
2674
2675         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2676         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2677         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2678         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2679         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2680            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2681
2682         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2683                 if (deasserted & (1 << index)) {
2684                         group_mask = bp->attn_group[index];
2685
2686                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2687                            index, group_mask.sig[0], group_mask.sig[1],
2688                            group_mask.sig[2], group_mask.sig[3]);
2689
2690                         bnx2x_attn_int_deasserted3(bp,
2691                                         attn.sig[3] & group_mask.sig[3]);
2692                         bnx2x_attn_int_deasserted1(bp,
2693                                         attn.sig[1] & group_mask.sig[1]);
2694                         bnx2x_attn_int_deasserted2(bp,
2695                                         attn.sig[2] & group_mask.sig[2]);
2696                         bnx2x_attn_int_deasserted0(bp,
2697                                         attn.sig[0] & group_mask.sig[0]);
2698
2699                         if ((attn.sig[0] & group_mask.sig[0] &
2700                                                 HW_PRTY_ASSERT_SET_0) ||
2701                             (attn.sig[1] & group_mask.sig[1] &
2702                                                 HW_PRTY_ASSERT_SET_1) ||
2703                             (attn.sig[2] & group_mask.sig[2] &
2704                                                 HW_PRTY_ASSERT_SET_2))
2705                                 BNX2X_ERR("FATAL HW block parity attention\n");
2706                 }
2707         }
2708
2709         bnx2x_release_alr(bp);
2710
2711         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2712
2713         val = ~deasserted;
2714         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2715            val, reg_addr);
2716         REG_WR(bp, reg_addr, val);
2717
2718         if (~bp->attn_state & deasserted)
2719                 BNX2X_ERR("IGU ERROR\n");
2720
2721         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2722                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2723
2724         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2725         aeu_mask = REG_RD(bp, reg_addr);
2726
2727         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2728            aeu_mask, deasserted);
2729         aeu_mask |= (deasserted & 0xff);
2730         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2731
2732         REG_WR(bp, reg_addr, aeu_mask);
2733         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2734
2735         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2736         bp->attn_state &= ~deasserted;
2737         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2738 }
2739
2740 static void bnx2x_attn_int(struct bnx2x *bp)
2741 {
2742         /* read local copy of bits */
2743         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2744                                                                 attn_bits);
2745         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2746                                                                 attn_bits_ack);
2747         u32 attn_state = bp->attn_state;
2748
2749         /* look for changed bits */
2750         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2751         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
2752
2753         DP(NETIF_MSG_HW,
2754            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2755            attn_bits, attn_ack, asserted, deasserted);
2756
2757         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2758                 BNX2X_ERR("BAD attention state\n");
2759
2760         /* handle bits that were raised */
2761         if (asserted)
2762                 bnx2x_attn_int_asserted(bp, asserted);
2763
2764         if (deasserted)
2765                 bnx2x_attn_int_deasserted(bp, deasserted);
2766 }
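/*
 * A worked example of the asserted/deasserted bit arithmetic above, using
 * made-up values (not taken from real hardware):
 *
 *   attn_bits  = 0x4   (bit 2 currently raised by HW)
 *   attn_ack   = 0x1   (bit 0 was acknowledged earlier)
 *   attn_state = 0x1   (driver still considers bit 0 asserted)
 *
 *   asserted   =  attn_bits & ~attn_ack & ~attn_state = 0x4
 *   deasserted = ~attn_bits &  attn_ack &  attn_state = 0x1
 *
 * so bit 2 is handed to bnx2x_attn_int_asserted() and bit 0 to
 * bnx2x_attn_int_deasserted().
 */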
2767
2768 static void bnx2x_sp_task(struct work_struct *work)
2769 {
2770         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2771         u16 status;
2772
2773
2774         /* Return here if interrupt is disabled */
2775         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2776                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2777                 return;
2778         }
2779
2780         status = bnx2x_update_dsb_idx(bp);
2781 /*      if (status == 0)                                     */
2782 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2783
2784         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2785
2786         /* HW attentions */
2787         if (status & 0x1)
2788                 bnx2x_attn_int(bp);
2789
2790         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2791                      IGU_INT_NOP, 1);
2792         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2793                      IGU_INT_NOP, 1);
2794         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2795                      IGU_INT_NOP, 1);
2796         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2797                      IGU_INT_NOP, 1);
2798         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2799                      IGU_INT_ENABLE, 1);
2800
2801 }
2802
2803 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2804 {
2805         struct net_device *dev = dev_instance;
2806         struct bnx2x *bp = netdev_priv(dev);
2807
2808         /* Return here if interrupt is disabled */
2809         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2810                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2811                 return IRQ_HANDLED;
2812         }
2813
2814         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2815
2816 #ifdef BNX2X_STOP_ON_ERROR
2817         if (unlikely(bp->panic))
2818                 return IRQ_HANDLED;
2819 #endif
2820
2821         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2822
2823         return IRQ_HANDLED;
2824 }
2825
2826 /* end of slow path */
2827
2828 /* Statistics */
2829
2830 /****************************************************************************
2831 * Macros
2832 ****************************************************************************/
2833
2834 /* sum[hi:lo] += add[hi:lo] */
2835 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2836         do { \
2837                 s_lo += a_lo; \
2838                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2839         } while (0)
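/* A worked example of the carry handling in ADD_64, with made-up values:
 * the unsigned wrap-around test (s_lo < a_lo) detects the carry out of the
 * low word.  With s = 0x00000001:ffffffff and a = 0x00000000:00000001 the
 * low word wraps to 0x00000000 (which is < a_lo), so the high word is
 * incremented and the sum becomes 0x00000002:00000000.
 */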
2840
2841 /* difference = minuend - subtrahend */
2842 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2843         do { \
2844                 if (m_lo < s_lo) { \
2845                         /* underflow */ \
2846                         d_hi = m_hi - s_hi; \
2847                         if (d_hi > 0) { \
2848                                 /* borrow 1 from the high word */ \
2849                                 d_hi--; \
2850                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2851                         } else { \
2852                                 /* m_hi <= s_hi */ \
2853                                 d_hi = 0; \
2854                                 d_lo = 0; \
2855                         } \
2856                 } else { \
2857                         /* m_lo >= s_lo */ \
2858                         if (m_hi < s_hi) { \
2859                                 d_hi = 0; \
2860                                 d_lo = 0; \
2861                         } else { \
2862                                 /* m_hi >= s_hi */ \
2863                                 d_hi = m_hi - s_hi; \
2864                                 d_lo = m_lo - s_lo; \
2865                         } \
2866                 } \
2867         } while (0)
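/* A worked example of the borrow path in DIFF_64, with made-up values
 * (negative results are clamped to 0 rather than wrapped):
 *   m = 0x00000002:00000000, s = 0x00000001:00000001
 * m_lo < s_lo, so one is borrowed from the high word:
 *   d_hi = 2 - 1 - 1 = 0
 *   d_lo = 0 + (UINT_MAX - 1) + 1 = 0xffffffff
 * giving d = 0x00000000:ffffffff, i.e. 0x200000000 - 0x100000001.
 */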
2868
2869 #define UPDATE_STAT64(s, t) \
2870         do { \
2871                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2872                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2873                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2874                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2875                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2876                        pstats->mac_stx[1].t##_lo, diff.lo); \
2877         } while (0)
2878
2879 #define UPDATE_STAT64_NIG(s, t) \
2880         do { \
2881                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2882                         diff.lo, new->s##_lo, old->s##_lo); \
2883                 ADD_64(estats->t##_hi, diff.hi, \
2884                        estats->t##_lo, diff.lo); \
2885         } while (0)
2886
2887 /* sum[hi:lo] += add */
2888 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2889         do { \
2890                 s_lo += a; \
2891                 s_hi += (s_lo < a) ? 1 : 0; \
2892         } while (0)
2893
2894 #define UPDATE_EXTEND_STAT(s) \
2895         do { \
2896                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2897                               pstats->mac_stx[1].s##_lo, \
2898                               new->s); \
2899         } while (0)
2900
2901 #define UPDATE_EXTEND_TSTAT(s, t) \
2902         do { \
2903                 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2904                 old_tclient->s = le32_to_cpu(tclient->s); \
2905                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
2906         } while (0)
2907
2908 #define UPDATE_EXTEND_USTAT(s, t) \
2909         do { \
2910                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
2911                 old_uclient->s = uclient->s; \
2912                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
2913         } while (0)
2914
2915 #define UPDATE_EXTEND_XSTAT(s, t) \
2916         do { \
2917                 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2918                 old_xclient->s = le32_to_cpu(xclient->s); \
2919                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
2920         } while (0)
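/* The UPDATE_EXTEND_*STAT macros widen 32-bit firmware counters into the
 * 64-bit driver counters.  Since the callers keep diff in a u32, a counter
 * that wrapped in firmware still yields the correct delta; e.g. with
 * made-up values old = 0xfffffff0 and new = 0x00000010, diff = 0x20 and the
 * 32 extra units are accumulated via ADD_EXTEND_64().
 */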
2921
2922 /* minuend -= subtrahend */
2923 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
2924         do { \
2925                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
2926         } while (0)
2927
2928 /* minuend[hi:lo] -= subtrahend */
2929 #define SUB_EXTEND_64(m_hi, m_lo, s) \
2930         do { \
2931                 SUB_64(m_hi, 0, m_lo, s); \
2932         } while (0)
2933
2934 #define SUB_EXTEND_USTAT(s, t) \
2935         do { \
2936                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
2937                 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
2938         } while (0)
2939
2940 /*
2941  * General service functions
2942  */
2943
2944 static inline long bnx2x_hilo(u32 *hiref)
2945 {
2946         u32 lo = *(hiref + 1);
2947 #if (BITS_PER_LONG == 64)
2948         u32 hi = *hiref;
2949
2950         return HILO_U64(hi, lo);
2951 #else
2952         return lo;
2953 #endif
2954 }
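/* bnx2x_hilo() folds a {hi, lo} counter pair (stored hi first) into a long
 * for the net_device stats.  On a 64-bit kernel HILO_U64() returns the full
 * value; on a 32-bit kernel only the low word fits in a long.  For example,
 * a (made-up) counter of 0x00000001:00000010 is reported as 0x100000010 on
 * 64-bit builds but as 0x10 on 32-bit builds.
 */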
2955
2956 /*
2957  * Init service functions
2958  */
2959
2960 static void bnx2x_storm_stats_post(struct bnx2x *bp)
2961 {
2962         if (!bp->stats_pending) {
2963                 struct eth_query_ramrod_data ramrod_data = {0};
2964                 int i, rc;
2965
2966                 ramrod_data.drv_counter = bp->stats_counter++;
2967                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
2968                 for_each_queue(bp, i)
2969                         ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
2970
2971                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
2972                                    ((u32 *)&ramrod_data)[1],
2973                                    ((u32 *)&ramrod_data)[0], 0);
2974                 if (rc == 0) {
2975                         /* stats ramrod has its own slot on the spq */
2976                         bp->spq_left++;
2977                         bp->stats_pending = 1;
2978                 }
2979         }
2980 }
2981
2982 static void bnx2x_stats_init(struct bnx2x *bp)
2983 {
2984         int port = BP_PORT(bp);
2985         int i;
2986
2987         bp->stats_pending = 0;
2988         bp->executer_idx = 0;
2989         bp->stats_counter = 0;
2990
2991         /* port stats */
2992         if (!BP_NOMCP(bp))
2993                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
2994         else
2995                 bp->port.port_stx = 0;
2996         DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
2997
2998         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
2999         bp->port.old_nig_stats.brb_discard =
3000                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3001         bp->port.old_nig_stats.brb_truncate =
3002                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3003         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3004                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3005         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3006                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3007
3008         /* function stats */
3009         for_each_queue(bp, i) {
3010                 struct bnx2x_fastpath *fp = &bp->fp[i];
3011
3012                 memset(&fp->old_tclient, 0,
3013                        sizeof(struct tstorm_per_client_stats));
3014                 memset(&fp->old_uclient, 0,
3015                        sizeof(struct ustorm_per_client_stats));
3016                 memset(&fp->old_xclient, 0,
3017                        sizeof(struct xstorm_per_client_stats));
3018                 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3019         }
3020
3021         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3022         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3023
3024         bp->stats_state = STATS_STATE_DISABLED;
3025         if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3026                 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3027 }
3028
3029 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3030 {
3031         struct dmae_command *dmae = &bp->stats_dmae;
3032         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3033
3034         *stats_comp = DMAE_COMP_VAL;
3035         if (CHIP_REV_IS_SLOW(bp))
3036                 return;
3037
3038         /* loader */
3039         if (bp->executer_idx) {
3040                 int loader_idx = PMF_DMAE_C(bp);
3041
3042                 memset(dmae, 0, sizeof(struct dmae_command));
3043
3044                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3045                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3046                                 DMAE_CMD_DST_RESET |
3047 #ifdef __BIG_ENDIAN
3048                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3049 #else
3050                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3051 #endif
3052                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3053                                                DMAE_CMD_PORT_0) |
3054                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3055                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3056                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3057                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3058                                      sizeof(struct dmae_command) *
3059                                      (loader_idx + 1)) >> 2;
3060                 dmae->dst_addr_hi = 0;
3061                 dmae->len = sizeof(struct dmae_command) >> 2;
3062                 if (CHIP_IS_E1(bp))
3063                         dmae->len--;
3064                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3065                 dmae->comp_addr_hi = 0;
3066                 dmae->comp_val = 1;
3067
3068                 *stats_comp = 0;
3069                 bnx2x_post_dmae(bp, dmae, loader_idx);
3070
3071         } else if (bp->func_stx) {
3072                 *stats_comp = 0;
3073                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3074         }
3075 }
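/* In outline: when a DMAE chain was queued (executer_idx != 0), the command
 * built above does not move statistics itself.  It copies the first queued
 * dmae_command from the slowpath area in host memory into DMAE command
 * memory slot (loader_idx + 1) and, through its completion value, hits that
 * slot's GO register so the copied command starts executing.  The queued
 * sub-commands signal completion back through dmae_reg_go_c[loader_idx]
 * rather than through stats_comp; only the last one writes DMAE_COMP_VAL to
 * stats_comp, which bnx2x_stats_comp() polls for.
 */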
3076
3077 static int bnx2x_stats_comp(struct bnx2x *bp)
3078 {
3079         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3080         int cnt = 10;
3081
3082         might_sleep();
3083         while (*stats_comp != DMAE_COMP_VAL) {
3084                 if (!cnt) {
3085                         BNX2X_ERR("timeout waiting for stats to finish\n");
3086                         break;
3087                 }
3088                 cnt--;
3089                 msleep(1);
3090         }
3091         return 1;
3092 }
3093
3094 /*
3095  * Statistics service functions
3096  */
3097
3098 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3099 {
3100         struct dmae_command *dmae;
3101         u32 opcode;
3102         int loader_idx = PMF_DMAE_C(bp);
3103         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3104
3105         /* sanity */
3106         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3107                 BNX2X_ERR("BUG!\n");
3108                 return;
3109         }
3110
3111         bp->executer_idx = 0;
3112
3113         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3114                   DMAE_CMD_C_ENABLE |
3115                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3116 #ifdef __BIG_ENDIAN
3117                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3118 #else
3119                   DMAE_CMD_ENDIANITY_DW_SWAP |
3120 #endif
3121                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3122                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3123
3124         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3125         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3126         dmae->src_addr_lo = bp->port.port_stx >> 2;
3127         dmae->src_addr_hi = 0;
3128         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3129         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3130         dmae->len = DMAE_LEN32_RD_MAX;
3131         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3132         dmae->comp_addr_hi = 0;
3133         dmae->comp_val = 1;
3134
3135         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3136         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3137         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3138         dmae->src_addr_hi = 0;
3139         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3140                                    DMAE_LEN32_RD_MAX * 4);
3141         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3142                                    DMAE_LEN32_RD_MAX * 4);
3143         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3144         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3145         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3146         dmae->comp_val = DMAE_COMP_VAL;
3147
3148         *stats_comp = 0;
3149         bnx2x_hw_stats_post(bp);
3150         bnx2x_stats_comp(bp);
3151 }
3152
3153 static void bnx2x_port_stats_init(struct bnx2x *bp)
3154 {
3155         struct dmae_command *dmae;
3156         int port = BP_PORT(bp);
3157         int vn = BP_E1HVN(bp);
3158         u32 opcode;
3159         int loader_idx = PMF_DMAE_C(bp);
3160         u32 mac_addr;
3161         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3162
3163         /* sanity */
3164         if (!bp->link_vars.link_up || !bp->port.pmf) {
3165                 BNX2X_ERR("BUG!\n");
3166                 return;
3167         }
3168
3169         bp->executer_idx = 0;
3170
3171         /* MCP */
3172         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3173                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3174                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3175 #ifdef __BIG_ENDIAN
3176                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3177 #else
3178                   DMAE_CMD_ENDIANITY_DW_SWAP |
3179 #endif
3180                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3181                   (vn << DMAE_CMD_E1HVN_SHIFT));
3182
3183         if (bp->port.port_stx) {
3184
3185                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3186                 dmae->opcode = opcode;
3187                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3188                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3189                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3190                 dmae->dst_addr_hi = 0;
3191                 dmae->len = sizeof(struct host_port_stats) >> 2;
3192                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3193                 dmae->comp_addr_hi = 0;
3194                 dmae->comp_val = 1;
3195         }
3196
3197         if (bp->func_stx) {
3198
3199                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3200                 dmae->opcode = opcode;
3201                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3202                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3203                 dmae->dst_addr_lo = bp->func_stx >> 2;
3204                 dmae->dst_addr_hi = 0;
3205                 dmae->len = sizeof(struct host_func_stats) >> 2;
3206                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3207                 dmae->comp_addr_hi = 0;
3208                 dmae->comp_val = 1;
3209         }
3210
3211         /* MAC */
3212         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3213                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3214                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3215 #ifdef __BIG_ENDIAN
3216                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3217 #else
3218                   DMAE_CMD_ENDIANITY_DW_SWAP |
3219 #endif
3220                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3221                   (vn << DMAE_CMD_E1HVN_SHIFT));
3222
3223         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3224
3225                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3226                                    NIG_REG_INGRESS_BMAC0_MEM);
3227
3228                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3229                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3230                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3231                 dmae->opcode = opcode;
3232                 dmae->src_addr_lo = (mac_addr +
3233                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3234                 dmae->src_addr_hi = 0;
3235                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3236                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3237                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3238                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3239                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3240                 dmae->comp_addr_hi = 0;
3241                 dmae->comp_val = 1;
3242
3243                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3244                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3245                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3246                 dmae->opcode = opcode;
3247                 dmae->src_addr_lo = (mac_addr +
3248                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3249                 dmae->src_addr_hi = 0;
3250                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3251                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3252                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3253                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3254                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3255                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3256                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3257                 dmae->comp_addr_hi = 0;
3258                 dmae->comp_val = 1;
3259
3260         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3261
3262                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3263
3264                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3265                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3266                 dmae->opcode = opcode;
3267                 dmae->src_addr_lo = (mac_addr +
3268                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3269                 dmae->src_addr_hi = 0;
3270                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3271                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3272                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3273                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3274                 dmae->comp_addr_hi = 0;
3275                 dmae->comp_val = 1;
3276
3277                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3278                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3279                 dmae->opcode = opcode;
3280                 dmae->src_addr_lo = (mac_addr +
3281                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3282                 dmae->src_addr_hi = 0;
3283                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3284                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3285                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3286                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3287                 dmae->len = 1;
3288                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3289                 dmae->comp_addr_hi = 0;
3290                 dmae->comp_val = 1;
3291
3292                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3293                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3294                 dmae->opcode = opcode;
3295                 dmae->src_addr_lo = (mac_addr +
3296                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3297                 dmae->src_addr_hi = 0;
3298                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3299                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3300                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3301                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3302                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3303                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3304                 dmae->comp_addr_hi = 0;
3305                 dmae->comp_val = 1;
3306         }
3307
3308         /* NIG */
3309         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3310         dmae->opcode = opcode;
3311         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3312                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3313         dmae->src_addr_hi = 0;
3314         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3315         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3316         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3317         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3318         dmae->comp_addr_hi = 0;
3319         dmae->comp_val = 1;
3320
3321         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3322         dmae->opcode = opcode;
3323         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3324                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3325         dmae->src_addr_hi = 0;
3326         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3327                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3328         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3329                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3330         dmae->len = (2*sizeof(u32)) >> 2;
3331         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3332         dmae->comp_addr_hi = 0;
3333         dmae->comp_val = 1;
3334
3335         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3336         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3337                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3338                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3339 #ifdef __BIG_ENDIAN
3340                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3341 #else
3342                         DMAE_CMD_ENDIANITY_DW_SWAP |
3343 #endif
3344                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3345                         (vn << DMAE_CMD_E1HVN_SHIFT));
3346         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3347                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3348         dmae->src_addr_hi = 0;
3349         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3350                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3351         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3352                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3353         dmae->len = (2*sizeof(u32)) >> 2;
3354         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3355         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3356         dmae->comp_val = DMAE_COMP_VAL;
3357
3358         *stats_comp = 0;
3359 }
3360
3361 static void bnx2x_func_stats_init(struct bnx2x *bp)
3362 {
3363         struct dmae_command *dmae = &bp->stats_dmae;
3364         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3365
3366         /* sanity */
3367         if (!bp->func_stx) {
3368                 BNX2X_ERR("BUG!\n");
3369                 return;
3370         }
3371
3372         bp->executer_idx = 0;
3373         memset(dmae, 0, sizeof(struct dmae_command));
3374
3375         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3376                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3377                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3378 #ifdef __BIG_ENDIAN
3379                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3380 #else
3381                         DMAE_CMD_ENDIANITY_DW_SWAP |
3382 #endif
3383                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3384                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3385         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3386         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3387         dmae->dst_addr_lo = bp->func_stx >> 2;
3388         dmae->dst_addr_hi = 0;
3389         dmae->len = sizeof(struct host_func_stats) >> 2;
3390         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3391         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3392         dmae->comp_val = DMAE_COMP_VAL;
3393
3394         *stats_comp = 0;
3395 }
3396
3397 static void bnx2x_stats_start(struct bnx2x *bp)
3398 {
3399         if (bp->port.pmf)
3400                 bnx2x_port_stats_init(bp);
3401
3402         else if (bp->func_stx)
3403                 bnx2x_func_stats_init(bp);
3404
3405         bnx2x_hw_stats_post(bp);
3406         bnx2x_storm_stats_post(bp);
3407 }
3408
3409 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3410 {
3411         bnx2x_stats_comp(bp);
3412         bnx2x_stats_pmf_update(bp);
3413         bnx2x_stats_start(bp);
3414 }
3415
3416 static void bnx2x_stats_restart(struct bnx2x *bp)
3417 {
3418         bnx2x_stats_comp(bp);
3419         bnx2x_stats_start(bp);
3420 }
3421
3422 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3423 {
3424         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3425         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3426         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3427         struct regpair diff;
3428
3429         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3430         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3431         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3432         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3433         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3434         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3435         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3436         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3437         UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3438         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3439         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3440         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3441         UPDATE_STAT64(tx_stat_gt127,
3442                                 tx_stat_etherstatspkts65octetsto127octets);
3443         UPDATE_STAT64(tx_stat_gt255,
3444                                 tx_stat_etherstatspkts128octetsto255octets);
3445         UPDATE_STAT64(tx_stat_gt511,
3446                                 tx_stat_etherstatspkts256octetsto511octets);
3447         UPDATE_STAT64(tx_stat_gt1023,
3448                                 tx_stat_etherstatspkts512octetsto1023octets);
3449         UPDATE_STAT64(tx_stat_gt1518,
3450                                 tx_stat_etherstatspkts1024octetsto1522octets);
3451         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3452         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3453         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3454         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3455         UPDATE_STAT64(tx_stat_gterr,
3456                                 tx_stat_dot3statsinternalmactransmiterrors);
3457         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3458
3459         estats->pause_frames_received_hi =
3460                                 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3461         estats->pause_frames_received_lo =
3462                                 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3463
3464         estats->pause_frames_sent_hi =
3465                                 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3466         estats->pause_frames_sent_lo =
3467                                 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3468 }
3469
3470 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3471 {
3472         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3473         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3474         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3475
3476         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3477         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3478         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3479         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3480         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3481         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3482         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3483         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3484         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3485         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3486         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3487         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3488         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3489         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3490         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3491         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3492         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3493         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3494         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3495         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3496         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3497         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3498         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3499         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3500         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3501         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3502         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3503         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3504         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3505         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3506         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3507
3508         estats->pause_frames_received_hi =
3509                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3510         estats->pause_frames_received_lo =
3511                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3512         ADD_64(estats->pause_frames_received_hi,
3513                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3514                estats->pause_frames_received_lo,
3515                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3516
3517         estats->pause_frames_sent_hi =
3518                         pstats->mac_stx[1].tx_stat_outxonsent_hi;
3519         estats->pause_frames_sent_lo =
3520                         pstats->mac_stx[1].tx_stat_outxonsent_lo;
3521         ADD_64(estats->pause_frames_sent_hi,
3522                pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3523                estats->pause_frames_sent_lo,
3524                pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3525 }
3526
3527 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3528 {
3529         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3530         struct nig_stats *old = &(bp->port.old_nig_stats);
3531         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3532         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3533         struct regpair diff;
3534         u32 nig_timer_max;
3535
3536         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3537                 bnx2x_bmac_stats_update(bp);
3538
3539         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3540                 bnx2x_emac_stats_update(bp);
3541
3542         else { /* unreached */
3543                 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3544                 return -1;
3545         }
3546
3547         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3548                       new->brb_discard - old->brb_discard);
3549         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3550                       new->brb_truncate - old->brb_truncate);
3551
3552         UPDATE_STAT64_NIG(egress_mac_pkt0,
3553                                         etherstatspkts1024octetsto1522octets);
3554         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3555
3556         memcpy(old, new, sizeof(struct nig_stats));
3557
3558         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3559                sizeof(struct mac_stx));
3560         estats->brb_drop_hi = pstats->brb_drop_hi;
3561         estats->brb_drop_lo = pstats->brb_drop_lo;
3562
3563         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3564
3565         nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3566         if (nig_timer_max != estats->nig_timer_max) {
3567                 estats->nig_timer_max = nig_timer_max;
3568                 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3569         }
3570
3571         return 0;
3572 }
3573
3574 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3575 {
3576         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3577         struct tstorm_per_port_stats *tport =
3578                                         &stats->tstorm_common.port_statistics;
3579         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3580         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3581         int i;
3582
3583         memset(&(fstats->total_bytes_received_hi), 0,
3584                sizeof(struct host_func_stats) - 2*sizeof(u32));
3585         estats->error_bytes_received_hi = 0;
3586         estats->error_bytes_received_lo = 0;
3587         estats->etherstatsoverrsizepkts_hi = 0;
3588         estats->etherstatsoverrsizepkts_lo = 0;
3589         estats->no_buff_discard_hi = 0;
3590         estats->no_buff_discard_lo = 0;
3591
3592         for_each_queue(bp, i) {
3593                 struct bnx2x_fastpath *fp = &bp->fp[i];
3594                 int cl_id = fp->cl_id;
3595                 struct tstorm_per_client_stats *tclient =
3596                                 &stats->tstorm_common.client_statistics[cl_id];
3597                 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3598                 struct ustorm_per_client_stats *uclient =
3599                                 &stats->ustorm_common.client_statistics[cl_id];
3600                 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3601                 struct xstorm_per_client_stats *xclient =
3602                                 &stats->xstorm_common.client_statistics[cl_id];
3603                 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3604                 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3605                 u32 diff;
3606
3607                 /* are storm stats valid? */
3608                 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3609                                                         bp->stats_counter) {
3610                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3611                            "  xstorm counter (%d) != stats_counter (%d)\n",
3612                            i, xclient->stats_counter, bp->stats_counter);
3613                         return -1;
3614                 }
3615                 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3616                                                         bp->stats_counter) {
3617                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3618                            "  tstorm counter (%d) != stats_counter (%d)\n",
3619                            i, tclient->stats_counter, bp->stats_counter);
3620                         return -2;
3621                 }
3622                 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3623                                                         bp->stats_counter) {
3624                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3625                            "  ustorm counter (%d) != stats_counter (%d)\n",
3626                            i, uclient->stats_counter, bp->stats_counter);
3627                         return -4;
3628                 }
3629
3630                 qstats->total_bytes_received_hi =
3631                 qstats->valid_bytes_received_hi =
3632                                 le32_to_cpu(tclient->total_rcv_bytes.hi);
3633                 qstats->total_bytes_received_lo =
3634                 qstats->valid_bytes_received_lo =
3635                                 le32_to_cpu(tclient->total_rcv_bytes.lo);
3636
3637                 qstats->error_bytes_received_hi =
3638                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
3639                 qstats->error_bytes_received_lo =
3640                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
3641
3642                 ADD_64(qstats->total_bytes_received_hi,
3643                        qstats->error_bytes_received_hi,
3644                        qstats->total_bytes_received_lo,
3645                        qstats->error_bytes_received_lo);
3646
3647                 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3648                                         total_unicast_packets_received);
3649                 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3650                                         total_multicast_packets_received);
3651                 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3652                                         total_broadcast_packets_received);
3653                 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3654                                         etherstatsoverrsizepkts);
3655                 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3656
3657                 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3658                                         total_unicast_packets_received);
3659                 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3660                                         total_multicast_packets_received);
3661                 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3662                                         total_broadcast_packets_received);
3663                 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3664                 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3665                 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3666
3667                 qstats->total_bytes_transmitted_hi =
3668                                 le32_to_cpu(xclient->total_sent_bytes.hi);
3669                 qstats->total_bytes_transmitted_lo =
3670                                 le32_to_cpu(xclient->total_sent_bytes.lo);
3671
3672                 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3673                                         total_unicast_packets_transmitted);
3674                 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3675                                         total_multicast_packets_transmitted);
3676                 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3677                                         total_broadcast_packets_transmitted);
3678
3679                 old_tclient->checksum_discard = tclient->checksum_discard;
3680                 old_tclient->ttl0_discard = tclient->ttl0_discard;
3681
3682                 ADD_64(fstats->total_bytes_received_hi,
3683                        qstats->total_bytes_received_hi,
3684                        fstats->total_bytes_received_lo,
3685                        qstats->total_bytes_received_lo);
3686                 ADD_64(fstats->total_bytes_transmitted_hi,
3687                        qstats->total_bytes_transmitted_hi,
3688                        fstats->total_bytes_transmitted_lo,
3689                        qstats->total_bytes_transmitted_lo);
3690                 ADD_64(fstats->total_unicast_packets_received_hi,
3691                        qstats->total_unicast_packets_received_hi,
3692                        fstats->total_unicast_packets_received_lo,
3693                        qstats->total_unicast_packets_received_lo);
3694                 ADD_64(fstats->total_multicast_packets_received_hi,
3695                        qstats->total_multicast_packets_received_hi,
3696                        fstats->total_multicast_packets_received_lo,
3697                        qstats->total_multicast_packets_received_lo);
3698                 ADD_64(fstats->total_broadcast_packets_received_hi,
3699                        qstats->total_broadcast_packets_received_hi,
3700                        fstats->total_broadcast_packets_received_lo,
3701                        qstats->total_broadcast_packets_received_lo);
3702                 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3703                        qstats->total_unicast_packets_transmitted_hi,
3704                        fstats->total_unicast_packets_transmitted_lo,
3705                        qstats->total_unicast_packets_transmitted_lo);
3706                 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3707                        qstats->total_multicast_packets_transmitted_hi,
3708                        fstats->total_multicast_packets_transmitted_lo,
3709                        qstats->total_multicast_packets_transmitted_lo);
3710                 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3711                        qstats->total_broadcast_packets_transmitted_hi,
3712                        fstats->total_broadcast_packets_transmitted_lo,
3713                        qstats->total_broadcast_packets_transmitted_lo);
3714                 ADD_64(fstats->valid_bytes_received_hi,
3715                        qstats->valid_bytes_received_hi,
3716                        fstats->valid_bytes_received_lo,
3717                        qstats->valid_bytes_received_lo);
3718
3719                 ADD_64(estats->error_bytes_received_hi,
3720                        qstats->error_bytes_received_hi,
3721                        estats->error_bytes_received_lo,
3722                        qstats->error_bytes_received_lo);
3723                 ADD_64(estats->etherstatsoverrsizepkts_hi,
3724                        qstats->etherstatsoverrsizepkts_hi,
3725                        estats->etherstatsoverrsizepkts_lo,
3726                        qstats->etherstatsoverrsizepkts_lo);
3727                 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3728                        estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3729         }
3730
3731         ADD_64(fstats->total_bytes_received_hi,
3732                estats->rx_stat_ifhcinbadoctets_hi,
3733                fstats->total_bytes_received_lo,
3734                estats->rx_stat_ifhcinbadoctets_lo);
3735
3736         memcpy(estats, &(fstats->total_bytes_received_hi),
3737                sizeof(struct host_func_stats) - 2*sizeof(u32));
3738
3739         ADD_64(estats->etherstatsoverrsizepkts_hi,
3740                estats->rx_stat_dot3statsframestoolong_hi,
3741                estats->etherstatsoverrsizepkts_lo,
3742                estats->rx_stat_dot3statsframestoolong_lo);
3743         ADD_64(estats->error_bytes_received_hi,
3744                estats->rx_stat_ifhcinbadoctets_hi,
3745                estats->error_bytes_received_lo,
3746                estats->rx_stat_ifhcinbadoctets_lo);
3747
3748         if (bp->port.pmf) {
3749                 estats->mac_filter_discard =
3750                                 le32_to_cpu(tport->mac_filter_discard);
3751                 estats->xxoverflow_discard =
3752                                 le32_to_cpu(tport->xxoverflow_discard);
3753                 estats->brb_truncate_discard =
3754                                 le32_to_cpu(tport->brb_truncate_discard);
3755                 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3756         }
3757
3758         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3759
3760         bp->stats_pending = 0;
3761
3762         return 0;
3763 }
3764
3765 static void bnx2x_net_stats_update(struct bnx2x *bp)
3766 {
3767         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3768         struct net_device_stats *nstats = &bp->dev->stats;
3769         int i;
3770
3771         nstats->rx_packets =
3772                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3773                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3774                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3775
3776         nstats->tx_packets =
3777                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3778                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3779                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3780
3781         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
3782
3783         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3784
3785         nstats->rx_dropped = estats->mac_discard;
3786         for_each_queue(bp, i)
3787                 nstats->rx_dropped +=
3788                         le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3789
3790         nstats->tx_dropped = 0;
3791
3792         nstats->multicast =
3793                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
3794
3795         nstats->collisions =
3796                 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
3797
3798         nstats->rx_length_errors =
3799                 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3800                 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3801         nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3802                                  bnx2x_hilo(&estats->brb_truncate_hi);
3803         nstats->rx_crc_errors =
3804                 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3805         nstats->rx_frame_errors =
3806                 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3807         nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
3808         nstats->rx_missed_errors = estats->xxoverflow_discard;
3809
3810         nstats->rx_errors = nstats->rx_length_errors +
3811                             nstats->rx_over_errors +
3812                             nstats->rx_crc_errors +
3813                             nstats->rx_frame_errors +
3814                             nstats->rx_fifo_errors +
3815                             nstats->rx_missed_errors;
3816
3817         nstats->tx_aborted_errors =
3818                 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3819                 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3820         nstats->tx_carrier_errors =
3821                 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
3822         nstats->tx_fifo_errors = 0;
3823         nstats->tx_heartbeat_errors = 0;
3824         nstats->tx_window_errors = 0;
3825
3826         nstats->tx_errors = nstats->tx_aborted_errors +
3827                             nstats->tx_carrier_errors +
3828             bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3829 }
3830
3831 static void bnx2x_drv_stats_update(struct bnx2x *bp)
3832 {
3833         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3834         int i;
3835
3836         estats->driver_xoff = 0;
3837         estats->rx_err_discard_pkt = 0;
3838         estats->rx_skb_alloc_failed = 0;
3839         estats->hw_csum_err = 0;
3840         for_each_queue(bp, i) {
3841                 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
3842
3843                 estats->driver_xoff += qstats->driver_xoff;
3844                 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
3845                 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
3846                 estats->hw_csum_err += qstats->hw_csum_err;
3847         }
3848 }
3849
3850 static void bnx2x_stats_update(struct bnx2x *bp)
3851 {
3852         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3853
3854         if (*stats_comp != DMAE_COMP_VAL)
3855                 return;
3856
3857         if (bp->port.pmf)
3858                 bnx2x_hw_stats_update(bp);
3859
3860         if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
3861                 BNX2X_ERR("storm stats were not updated 3 times in a row\n");
3862                 bnx2x_panic();
3863                 return;
3864         }
3865
3866         bnx2x_net_stats_update(bp);
3867         bnx2x_drv_stats_update(bp);
3868
3869         if (bp->msglevel & NETIF_MSG_TIMER) {
3870                 struct tstorm_per_client_stats *old_tclient =
3871                                                         &bp->fp->old_tclient;
3872                 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
3873                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3874                 struct net_device_stats *nstats = &bp->dev->stats;
3875                 int i;
3876
3877                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3878                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
3879                                   "  tx pkt (%lx)\n",
3880                        bnx2x_tx_avail(bp->fp),
3881                        le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3882                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
3883                                   "  rx pkt (%lx)\n",
3884                        (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3885                              bp->fp->rx_comp_cons),
3886                        le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3887                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
3888                                   "brb truncate %u\n",
3889                        (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
3890                        qstats->driver_xoff,
3891                        estats->brb_drop_lo, estats->brb_truncate_lo);
3892                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
3893                         "packets_too_big_discard %lu  no_buff_discard %lu  "
3894                         "mac_discard %u  mac_filter_discard %u  "
3895                         "xxoverflow_discard %u  brb_truncate_discard %u  "
3896                         "ttl0_discard %u\n",
3897                        old_tclient->checksum_discard,
3898                        bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
3899                        bnx2x_hilo(&qstats->no_buff_discard_hi),
3900                        estats->mac_discard, estats->mac_filter_discard,
3901                        estats->xxoverflow_discard, estats->brb_truncate_discard,
3902                        old_tclient->ttl0_discard);
3903
3904                 for_each_queue(bp, i) {
3905                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3906                                bnx2x_fp(bp, i, tx_pkt),
3907                                bnx2x_fp(bp, i, rx_pkt),
3908                                bnx2x_fp(bp, i, rx_calls));
3909                 }
3910         }
3911
3912         bnx2x_hw_stats_post(bp);
3913         bnx2x_storm_stats_post(bp);
3914 }
3915
3916 static void bnx2x_port_stats_stop(struct bnx2x *bp)
3917 {
3918         struct dmae_command *dmae;
3919         u32 opcode;
3920         int loader_idx = PMF_DMAE_C(bp);
3921         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3922
3923         bp->executer_idx = 0;
3924
3925         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3926                   DMAE_CMD_C_ENABLE |
3927                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3928 #ifdef __BIG_ENDIAN
3929                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3930 #else
3931                   DMAE_CMD_ENDIANITY_DW_SWAP |
3932 #endif
3933                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3934                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3935
3936         if (bp->port.port_stx) {
3937
3938                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3939                 if (bp->func_stx)
3940                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3941                 else
3942                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3943                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3944                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3945                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3946                 dmae->dst_addr_hi = 0;
3947                 dmae->len = sizeof(struct host_port_stats) >> 2;
3948                 if (bp->func_stx) {
3949                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3950                         dmae->comp_addr_hi = 0;
3951                         dmae->comp_val = 1;
3952                 } else {
3953                         dmae->comp_addr_lo =
3954                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3955                         dmae->comp_addr_hi =
3956                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3957                         dmae->comp_val = DMAE_COMP_VAL;
3958
3959                         *stats_comp = 0;
3960                 }
3961         }
3962
3963         if (bp->func_stx) {
3964
3965                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3966                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3967                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3968                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3969                 dmae->dst_addr_lo = bp->func_stx >> 2;
3970                 dmae->dst_addr_hi = 0;
3971                 dmae->len = sizeof(struct host_func_stats) >> 2;
3972                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3973                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3974                 dmae->comp_val = DMAE_COMP_VAL;
3975
3976                 *stats_comp = 0;
3977         }
3978 }
3979
3980 static void bnx2x_stats_stop(struct bnx2x *bp)
3981 {
3982         int update = 0;
3983
3984         bnx2x_stats_comp(bp);
3985
3986         if (bp->port.pmf)
3987                 update = (bnx2x_hw_stats_update(bp) == 0);
3988
3989         update |= (bnx2x_storm_stats_update(bp) == 0);
3990
3991         if (update) {
3992                 bnx2x_net_stats_update(bp);
3993
3994                 if (bp->port.pmf)
3995                         bnx2x_port_stats_stop(bp);
3996
3997                 bnx2x_hw_stats_post(bp);
3998                 bnx2x_stats_comp(bp);
3999         }
4000 }
4001
4002 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4003 {
4004 }
4005
4006 static const struct {
4007         void (*action)(struct bnx2x *bp);
4008         enum bnx2x_stats_state next_state;
4009 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4010 /* state        event   */
4011 {
4012 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4013 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
4014 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4015 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4016 },
4017 {
4018 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
4019 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
4020 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
4021 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
4022 }
4023 };
4024
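/* Statistics state machine dispatcher: index the table above with the
 * current state and the incoming event, run the associated action and move
 * to the listed next state.  UPDATE events are only traced when the timer
 * debug message level is enabled.
 */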
4025 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4026 {
4027         enum bnx2x_stats_state state = bp->stats_state;
4028
4029         bnx2x_stats_stm[state][event].action(bp);
4030         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4031
4032         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4033                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4034                    state, event, bp->stats_state);
4035 }
4036
4037 static void bnx2x_timer(unsigned long data)
4038 {
4039         struct bnx2x *bp = (struct bnx2x *) data;
4040
4041         if (!netif_running(bp->dev))
4042                 return;
4043
4044         if (atomic_read(&bp->intr_sem) != 0)
4045                 goto timer_restart;
4046
4047         if (poll) {
4048                 struct bnx2x_fastpath *fp = &bp->fp[0];
4049                 int rc;
4050
4051                 bnx2x_tx_int(fp, 1000);
4052                 rc = bnx2x_rx_int(fp, 1000);
4053         }
4054
4055         if (!BP_NOMCP(bp)) {
4056                 int func = BP_FUNC(bp);
4057                 u32 drv_pulse;
4058                 u32 mcp_pulse;
4059
4060                 ++bp->fw_drv_pulse_wr_seq;
4061                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4062                 /* TBD - add SYSTEM_TIME */
4063                 drv_pulse = bp->fw_drv_pulse_wr_seq;
4064                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4065
4066                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4067                              MCP_PULSE_SEQ_MASK);
4068                 /* The delta between driver pulse and mcp response
4069                  * should be 1 (before mcp response) or 0 (after mcp response)
4070                  */
4071                 if ((drv_pulse != mcp_pulse) &&
4072                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4073                         /* someone lost a heartbeat... */
4074                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4075                                   drv_pulse, mcp_pulse);
4076                 }
4077         }
4078
4079         if ((bp->state == BNX2X_STATE_OPEN) ||
4080             (bp->state == BNX2X_STATE_DISABLED))
4081                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4082
4083 timer_restart:
4084         mod_timer(&bp->timer, jiffies + bp->current_interval);
4085 }
4086
4087 /* end of Statistics */
4088
4089 /* nic init */
4090
4091 /*
4092  * nic init service functions
4093  */
4094
4095 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4096 {
4097         int port = BP_PORT(bp);
4098
4099         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4100                         USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4101                         sizeof(struct ustorm_status_block)/4);
4102         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4103                         CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4104                         sizeof(struct cstorm_status_block)/4);
4105 }
4106
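/* Program a per-queue (non-default) status block: write its DMA address and
 * owning function into the USTORM and CSTORM internal memories, initially
 * disable host coalescing on every index (the ETH CQ indices are re-enabled
 * later by bnx2x_update_coalesce() once ticks are set), and ack the block to
 * enable IGU interrupts.
 */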
4107 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4108                           dma_addr_t mapping, int sb_id)
4109 {
4110         int port = BP_PORT(bp);
4111         int func = BP_FUNC(bp);
4112         int index;
4113         u64 section;
4114
4115         /* USTORM */
4116         section = ((u64)mapping) + offsetof(struct host_status_block,
4117                                             u_status_block);
4118         sb->u_status_block.status_block_id = sb_id;
4119
4120         REG_WR(bp, BAR_USTRORM_INTMEM +
4121                USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4122         REG_WR(bp, BAR_USTRORM_INTMEM +
4123                ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4124                U64_HI(section));
4125         REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4126                 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4127
4128         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4129                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4130                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4131
4132         /* CSTORM */
4133         section = ((u64)mapping) + offsetof(struct host_status_block,
4134                                             c_status_block);
4135         sb->c_status_block.status_block_id = sb_id;
4136
4137         REG_WR(bp, BAR_CSTRORM_INTMEM +
4138                CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4139         REG_WR(bp, BAR_CSTRORM_INTMEM +
4140                ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4141                U64_HI(section));
4142         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4143                 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4144
4145         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4146                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4147                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4148
4149         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4150 }
4151
4152 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4153 {
4154         int func = BP_FUNC(bp);
4155
4156         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4157                         USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4158                         sizeof(struct ustorm_def_status_block)/4);
4159         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4160                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4161                         sizeof(struct cstorm_def_status_block)/4);
4162         bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4163                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4164                         sizeof(struct xstorm_def_status_block)/4);
4165         bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4166                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4167                         sizeof(struct tstorm_def_status_block)/4);
4168 }
4169
4170 static void bnx2x_init_def_sb(struct bnx2x *bp,
4171                               struct host_def_status_block *def_sb,
4172                               dma_addr_t mapping, int sb_id)
4173 {
4174         int port = BP_PORT(bp);
4175         int func = BP_FUNC(bp);
4176         int index, val, reg_offset;
4177         u64 section;
4178
4179         /* ATTN */
4180         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4181                                             atten_status_block);
4182         def_sb->atten_status_block.status_block_id = sb_id;
4183
4184         bp->attn_state = 0;
4185
4186         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4187                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4188
4189         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4190                 bp->attn_group[index].sig[0] = REG_RD(bp,
4191                                                      reg_offset + 0x10*index);
4192                 bp->attn_group[index].sig[1] = REG_RD(bp,
4193                                                reg_offset + 0x4 + 0x10*index);
4194                 bp->attn_group[index].sig[2] = REG_RD(bp,
4195                                                reg_offset + 0x8 + 0x10*index);
4196                 bp->attn_group[index].sig[3] = REG_RD(bp,
4197                                                reg_offset + 0xc + 0x10*index);
4198         }
4199
4200         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4201                              HC_REG_ATTN_MSG0_ADDR_L);
4202
4203         REG_WR(bp, reg_offset, U64_LO(section));
4204         REG_WR(bp, reg_offset + 4, U64_HI(section));
4205
4206         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4207
4208         val = REG_RD(bp, reg_offset);
4209         val |= sb_id;
4210         REG_WR(bp, reg_offset, val);
4211
4212         /* USTORM */
4213         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4214                                             u_def_status_block);
4215         def_sb->u_def_status_block.status_block_id = sb_id;
4216
4217         REG_WR(bp, BAR_USTRORM_INTMEM +
4218                USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4219         REG_WR(bp, BAR_USTRORM_INTMEM +
4220                ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4221                U64_HI(section));
4222         REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4223                 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4224
4225         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4226                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4227                          USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4228
4229         /* CSTORM */
4230         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4231                                             c_def_status_block);
4232         def_sb->c_def_status_block.status_block_id = sb_id;
4233
4234         REG_WR(bp, BAR_CSTRORM_INTMEM +
4235                CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4236         REG_WR(bp, BAR_CSTRORM_INTMEM +
4237                ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4238                U64_HI(section));
4239         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4240                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4241
4242         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4243                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4244                          CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4245
4246         /* TSTORM */
4247         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4248                                             t_def_status_block);
4249         def_sb->t_def_status_block.status_block_id = sb_id;
4250
4251         REG_WR(bp, BAR_TSTRORM_INTMEM +
4252                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4253         REG_WR(bp, BAR_TSTRORM_INTMEM +
4254                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4255                U64_HI(section));
4256         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4257                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4258
4259         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4260                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4261                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4262
4263         /* XSTORM */
4264         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4265                                             x_def_status_block);
4266         def_sb->x_def_status_block.status_block_id = sb_id;
4267
4268         REG_WR(bp, BAR_XSTRORM_INTMEM +
4269                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4270         REG_WR(bp, BAR_XSTRORM_INTMEM +
4271                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4272                U64_HI(section));
4273         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4274                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4275
4276         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4277                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4278                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4279
4280         bp->stats_pending = 0;
4281         bp->set_mac_pending = 0;
4282
4283         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4284 }
4285
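/* Program host-coalescing timeouts for the ETH RX and TX completion-queue
 * indices of every queue's status block.  The divide by 12 suggests the HC
 * timeout field is in units of roughly 12us; a tick value of 0 leaves that
 * index disabled (no coalescing on it).
 */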
4286 static void bnx2x_update_coalesce(struct bnx2x *bp)
4287 {
4288         int port = BP_PORT(bp);
4289         int i;
4290
4291         for_each_queue(bp, i) {
4292                 int sb_id = bp->fp[i].sb_id;
4293
4294                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4295                 REG_WR8(bp, BAR_USTRORM_INTMEM +
4296                         USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4297                                                     U_SB_ETH_RX_CQ_INDEX),
4298                         bp->rx_ticks/12);
4299                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4300                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4301                                                      U_SB_ETH_RX_CQ_INDEX),
4302                          bp->rx_ticks ? 0 : 1);
4303
4304                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4305                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4306                         CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4307                                                     C_SB_ETH_TX_CQ_INDEX),
4308                         bp->tx_ticks/12);
4309                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4310                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4311                                                      C_SB_ETH_TX_CQ_INDEX),
4312                          bp->tx_ticks ? 0 : 1);
4313         }
4314 }
4315
4316 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4317                                        struct bnx2x_fastpath *fp, int last)
4318 {
4319         int i;
4320
4321         for (i = 0; i < last; i++) {
4322                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4323                 struct sk_buff *skb = rx_buf->skb;
4324
4325                 if (skb == NULL) {
4326                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4327                         continue;
4328                 }
4329
4330                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4331                         pci_unmap_single(bp->pdev,
4332                                          pci_unmap_addr(rx_buf, mapping),
4333                                          bp->rx_buf_size,
4334                                          PCI_DMA_FROMDEVICE);
4335
4336                 dev_kfree_skb(skb);
4337                 rx_buf->skb = NULL;
4338         }
4339 }
4340
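/* Initialize all RX rings.  If TPA is enabled, pre-allocate an skb per
 * aggregation queue first (falling back to non-TPA on allocation failure).
 * The last entries of each SGE/BD/CQE page are used as "next page" pointers
 * chaining the pages into a ring, after which the rings are filled with
 * buffers and the initial producer values are published to the chip.
 */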
4341 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4342 {
4343         int func = BP_FUNC(bp);
4344         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4345                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4346         u16 ring_prod, cqe_ring_prod;
4347         int i, j;
4348
4349         bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4350         DP(NETIF_MSG_IFUP,
4351            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4352
4353         if (bp->flags & TPA_ENABLE_FLAG) {
4354
4355                 for_each_rx_queue(bp, j) {
4356                         struct bnx2x_fastpath *fp = &bp->fp[j];
4357
4358                         for (i = 0; i < max_agg_queues; i++) {
4359                                 fp->tpa_pool[i].skb =
4360                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4361                                 if (!fp->tpa_pool[i].skb) {
4362                                         BNX2X_ERR("Failed to allocate TPA "
4363                                                   "skb pool for queue[%d] - "
4364                                                   "disabling TPA on this "
4365                                                   "queue!\n", j);
4366                                         bnx2x_free_tpa_pool(bp, fp, i);
4367                                         fp->disable_tpa = 1;
4368                                         break;
4369                                 }
4370                                 pci_unmap_addr_set((struct sw_rx_bd *)
4371                                                         &fp->tpa_pool[i],
4372                                                    mapping, 0);
4373                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4374                         }
4375                 }
4376         }
4377
4378         for_each_rx_queue(bp, j) {
4379                 struct bnx2x_fastpath *fp = &bp->fp[j];
4380
4381                 fp->rx_bd_cons = 0;
4382                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4383                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4384
4385                 /* "next page" elements initialization */
4386                 /* SGE ring */
4387                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4388                         struct eth_rx_sge *sge;
4389
4390                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4391                         sge->addr_hi =
4392                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4393                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4394                         sge->addr_lo =
4395                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4396                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4397                 }
4398
4399                 bnx2x_init_sge_ring_bit_mask(fp);
4400
4401                 /* RX BD ring */
4402                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4403                         struct eth_rx_bd *rx_bd;
4404
4405                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4406                         rx_bd->addr_hi =
4407                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4408                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4409                         rx_bd->addr_lo =
4410                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4411                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4412                 }
4413
4414                 /* CQ ring */
4415                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4416                         struct eth_rx_cqe_next_page *nextpg;
4417
4418                         nextpg = (struct eth_rx_cqe_next_page *)
4419                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4420                         nextpg->addr_hi =
4421                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4422                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4423                         nextpg->addr_lo =
4424                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4425                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4426                 }
4427
4428                 /* Allocate SGEs and initialize the ring elements */
4429                 for (i = 0, ring_prod = 0;
4430                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4431
4432                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4433                                 BNX2X_ERR("was only able to allocate "
4434                                           "%d rx sges\n", i);
4435                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4436                                 /* Cleanup already allocated elements */
4437                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4438                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4439                                 fp->disable_tpa = 1;
4440                                 ring_prod = 0;
4441                                 break;
4442                         }
4443                         ring_prod = NEXT_SGE_IDX(ring_prod);
4444                 }
4445                 fp->rx_sge_prod = ring_prod;
4446
4447                 /* Allocate BDs and initialize BD ring */
4448                 fp->rx_comp_cons = 0;
4449                 cqe_ring_prod = ring_prod = 0;
4450                 for (i = 0; i < bp->rx_ring_size; i++) {
4451                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4452                                 BNX2X_ERR("was only able to allocate "
4453                                           "%d rx skbs on queue[%d]\n", i, j);
4454                                 fp->eth_q_stats.rx_skb_alloc_failed++;
4455                                 break;
4456                         }
4457                         ring_prod = NEXT_RX_IDX(ring_prod);
4458                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4459                         WARN_ON(ring_prod <= i);
4460                 }
4461
4462                 fp->rx_bd_prod = ring_prod;
4463                 /* must not have more available CQEs than BDs */
4464                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4465                                        cqe_ring_prod);
4466                 fp->rx_pkt = fp->rx_calls = 0;
4467
4468                 /* Warning!
4469                  * this will generate an interrupt (to the TSTORM);
4470                  * it must only be done after the chip is initialized
4471                  */
4472                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4473                                      fp->rx_sge_prod);
4474                 if (j != 0)
4475                         continue;
4476
4477                 REG_WR(bp, BAR_USTRORM_INTMEM +
4478                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4479                        U64_LO(fp->rx_comp_mapping));
4480                 REG_WR(bp, BAR_USTRORM_INTMEM +
4481                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4482                        U64_HI(fp->rx_comp_mapping));
4483         }
4484 }
4485
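/* Initialize the TX rings: the last BD of each page is turned into a link
 * pointing at the next page (wrapping at the end), and the per-queue
 * producer/consumer indices start at zero.
 */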
4486 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4487 {
4488         int i, j;
4489
4490         for_each_tx_queue(bp, j) {
4491                 struct bnx2x_fastpath *fp = &bp->fp[j];
4492
4493                 for (i = 1; i <= NUM_TX_RINGS; i++) {
4494                         struct eth_tx_bd *tx_bd =
4495                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4496
4497                         tx_bd->addr_hi =
4498                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4499                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4500                         tx_bd->addr_lo =
4501                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4502                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4503                 }
4504
4505                 fp->tx_pkt_prod = 0;
4506                 fp->tx_pkt_cons = 0;
4507                 fp->tx_bd_prod = 0;
4508                 fp->tx_bd_cons = 0;
4509                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4510                 fp->tx_pkt = 0;
4511         }
4512 }
4513
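/* Set up the slow-path (SPQ) ring: reset the producer index and credit
 * count, and tell the XSTORM where the SPQ page lives and where the
 * producer currently points.
 */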
4514 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4515 {
4516         int func = BP_FUNC(bp);
4517
4518         spin_lock_init(&bp->spq_lock);
4519
4520         bp->spq_left = MAX_SPQ_PENDING;
4521         bp->spq_prod_idx = 0;
4522         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4523         bp->spq_prod_bd = bp->spq;
4524         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4525
4526         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4527                U64_LO(bp->spq_mapping));
4528         REG_WR(bp,
4529                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4530                U64_HI(bp->spq_mapping));
4531
4532         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4533                bp->spq_prod_idx);
4534 }
4535
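/* Fill the per-queue ETH connection context: the USTORM side gets the RX BD
 * (and, with TPA, SGE) page bases, buffer sizes and statistics client id;
 * the XSTORM side gets the TX BD page base and the doorbell data address;
 * the CSTORM side gets the TX completion status-block index.
 */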
4536 static void bnx2x_init_context(struct bnx2x *bp)
4537 {
4538         int i;
4539
4540         for_each_queue(bp, i) {
4541                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4542                 struct bnx2x_fastpath *fp = &bp->fp[i];
4543                 u8 cl_id = fp->cl_id;
4544                 u8 sb_id = FP_SB_ID(fp);
4545
4546                 context->ustorm_st_context.common.sb_index_numbers =
4547                                                 BNX2X_RX_SB_INDEX_NUM;
4548                 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4549                 context->ustorm_st_context.common.status_block_id = sb_id;
4550                 context->ustorm_st_context.common.flags =
4551                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4552                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4553                 context->ustorm_st_context.common.statistics_counter_id =
4554                                                 cl_id;
4555                 context->ustorm_st_context.common.mc_alignment_log_size =
4556                                                 BNX2X_RX_ALIGN_SHIFT;
4557                 context->ustorm_st_context.common.bd_buff_size =
4558                                                 bp->rx_buf_size;
4559                 context->ustorm_st_context.common.bd_page_base_hi =
4560                                                 U64_HI(fp->rx_desc_mapping);
4561                 context->ustorm_st_context.common.bd_page_base_lo =
4562                                                 U64_LO(fp->rx_desc_mapping);
4563                 if (!fp->disable_tpa) {
4564                         context->ustorm_st_context.common.flags |=
4565                                 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4566                                  USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4567                         context->ustorm_st_context.common.sge_buff_size =
4568                                 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4569                                          (u32)0xffff);
4570                         context->ustorm_st_context.common.sge_page_base_hi =
4571                                                 U64_HI(fp->rx_sge_mapping);
4572                         context->ustorm_st_context.common.sge_page_base_lo =
4573                                                 U64_LO(fp->rx_sge_mapping);
4574                 }
4575
4576                 context->ustorm_ag_context.cdu_usage =
4577                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4578                                                CDU_REGION_NUMBER_UCM_AG,
4579                                                ETH_CONNECTION_TYPE);
4580
4581                 context->xstorm_st_context.tx_bd_page_base_hi =
4582                                                 U64_HI(fp->tx_desc_mapping);
4583                 context->xstorm_st_context.tx_bd_page_base_lo =
4584                                                 U64_LO(fp->tx_desc_mapping);
4585                 context->xstorm_st_context.db_data_addr_hi =
4586                                                 U64_HI(fp->tx_prods_mapping);
4587                 context->xstorm_st_context.db_data_addr_lo =
4588                                                 U64_LO(fp->tx_prods_mapping);
4589                 context->xstorm_st_context.statistics_data = (fp->cl_id |
4590                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4591                 context->cstorm_st_context.sb_index_number =
4592                                                 C_SB_ETH_TX_CQ_INDEX;
4593                 context->cstorm_st_context.status_block_id = sb_id;
4594
4595                 context->xstorm_ag_context.cdu_reserved =
4596                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4597                                                CDU_REGION_NUMBER_XCM_AG,
4598                                                ETH_CONNECTION_TYPE);
4599         }
4600 }
4601
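/* Program the RSS indirection table (unless RSS is disabled): every entry
 * maps to one of the RX queues' client ids in round-robin order, so flows
 * hashed by the chip are spread across the enabled queues.
 */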
4602 static void bnx2x_init_ind_table(struct bnx2x *bp)
4603 {
4604         int func = BP_FUNC(bp);
4605         int i;
4606
4607         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4608                 return;
4609
4610         DP(NETIF_MSG_IFUP,
4611            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
4612         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4613                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4614                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4615                         BP_CL_ID(bp) + (i % bp->num_rx_queues));
4616 }
4617
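/* Push the per-client TSTORM configuration (MTU, VLAN/E1HOV stripping flags,
 * statistics counter id) for every queue.  With TPA enabled,
 * max_sges_for_packet is the MTU rounded up to whole SGE pages and then to
 * whole SGEs of PAGES_PER_SGE pages each - presumably the per-packet SGE
 * budget handed to the FW (interpretation, not stated in the code).
 */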
4618 static void bnx2x_set_client_config(struct bnx2x *bp)
4619 {
4620         struct tstorm_eth_client_config tstorm_client = {0};
4621         int port = BP_PORT(bp);
4622         int i;
4623
4624         tstorm_client.mtu = bp->dev->mtu;
4625         tstorm_client.config_flags =
4626                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4627                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
4628 #ifdef BCM_VLAN
4629         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4630                 tstorm_client.config_flags |=
4631                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
4632                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4633         }
4634 #endif
4635
4636         if (bp->flags & TPA_ENABLE_FLAG) {
4637                 tstorm_client.max_sges_for_packet =
4638                         SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4639                 tstorm_client.max_sges_for_packet =
4640                         ((tstorm_client.max_sges_for_packet +
4641                           PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4642                         PAGES_PER_SGE_SHIFT;
4643
4644                 tstorm_client.config_flags |=
4645                                 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4646         }
4647
4648         for_each_queue(bp, i) {
4649                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4650
4651                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4652                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4653                        ((u32 *)&tstorm_client)[0]);
4654                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4655                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4656                        ((u32 *)&tstorm_client)[1]);
4657         }
4658
4659         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4660            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4661 }
4662
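/* Translate the driver rx_mode into the TSTORM MAC filter configuration.
 * The mask is a single bit selecting this function's logical client, and the
 * drop-all/accept-all fields are set per unicast/multicast/broadcast class.
 * Client-level configuration is pushed afterwards unless RX is fully
 * disabled.
 */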
4663 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4664 {
4665         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4666         int mode = bp->rx_mode;
4667         int mask = (1 << BP_L_ID(bp));
4668         int func = BP_FUNC(bp);
4669         int i;
4670
4671         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
4672
4673         switch (mode) {
4674         case BNX2X_RX_MODE_NONE: /* no Rx */
4675                 tstorm_mac_filter.ucast_drop_all = mask;
4676                 tstorm_mac_filter.mcast_drop_all = mask;
4677                 tstorm_mac_filter.bcast_drop_all = mask;
4678                 break;
4679         case BNX2X_RX_MODE_NORMAL:
4680                 tstorm_mac_filter.bcast_accept_all = mask;
4681                 break;
4682         case BNX2X_RX_MODE_ALLMULTI:
4683                 tstorm_mac_filter.mcast_accept_all = mask;
4684                 tstorm_mac_filter.bcast_accept_all = mask;
4685                 break;
4686         case BNX2X_RX_MODE_PROMISC:
4687                 tstorm_mac_filter.ucast_accept_all = mask;
4688                 tstorm_mac_filter.mcast_accept_all = mask;
4689                 tstorm_mac_filter.bcast_accept_all = mask;
4690                 break;
4691         default:
4692                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4693                 break;
4694         }
4695
4696         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4697                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4698                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4699                        ((u32 *)&tstorm_mac_filter)[i]);
4700
4701 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4702                    ((u32 *)&tstorm_mac_filter)[i]); */
4703         }
4704
4705         if (mode != BNX2X_RX_MODE_NONE)
4706                 bnx2x_set_client_config(bp);
4707 }
4708
4709 static void bnx2x_init_internal_common(struct bnx2x *bp)
4710 {
4711         int i;
4712
4713         if (bp->flags & TPA_ENABLE_FLAG) {
4714                 struct tstorm_eth_tpa_exist tpa = {0};
4715
4716                 tpa.tpa_exist = 1;
4717
4718                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4719                        ((u32 *)&tpa)[0]);
4720                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4721                        ((u32 *)&tpa)[1]);
4722         }
4723
4724         /* Zero this manually as its initialization is
4725            currently missing in the initTool */
4726         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4727                 REG_WR(bp, BAR_USTRORM_INTMEM +
4728                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4729 }
4730
4731 static void bnx2x_init_internal_port(struct bnx2x *bp)
4732 {
4733         int port = BP_PORT(bp);
4734
4735         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4736         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4737         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4738         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4739 }
4740
4741 /* Calculates the sum of vn_min_rates.
4742    It's needed for further normalizing of the min_rates.
4743    Returns:
4744      sum of vn_min_rates.
4745        or
4746      0 - if all the min_rates are 0.
4747      In the latter case the fairness algorithm should be deactivated.
4748      If not all min_rates are zero then those that are zeroes will be set to 1.
4749  */
4750 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4751 {
4752         int all_zero = 1;
4753         int port = BP_PORT(bp);
4754         int vn;
4755
4756         bp->vn_weight_sum = 0;
4757         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4758                 int func = 2*vn + port;
4759                 u32 vn_cfg =
4760                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4761                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4762                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4763
4764                 /* Skip hidden vns */
4765                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4766                         continue;
4767
4768                 /* If min rate is zero - set it to 1 */
4769                 if (!vn_min_rate)
4770                         vn_min_rate = DEF_MIN_RATE;
4771                 else
4772                         all_zero = 0;
4773
4774                 bp->vn_weight_sum += vn_min_rate;
4775         }
4776
4777         /* ... only if all min rates are zeros - disable fairness */
4778         if (all_zero)
4779                 bp->vn_weight_sum = 0;
4780 }
4781
4782 static void bnx2x_init_internal_func(struct bnx2x *bp)
4783 {
4784         struct tstorm_eth_function_common_config tstorm_config = {0};
4785         struct stats_indication_flags stats_flags = {0};
4786         int port = BP_PORT(bp);
4787         int func = BP_FUNC(bp);
4788         int i, j;
4789         u32 offset;
4790         u16 max_agg_size;
4791
4792         if (is_multi(bp)) {
4793                 tstorm_config.config_flags = MULTI_FLAGS(bp);
4794                 tstorm_config.rss_result_mask = MULTI_MASK;
4795         }
4796         if (IS_E1HMF(bp))
4797                 tstorm_config.config_flags |=
4798                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
4799
4800         tstorm_config.leading_client_id = BP_L_ID(bp);
4801
4802         REG_WR(bp, BAR_TSTRORM_INTMEM +
4803                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4804                (*(u32 *)&tstorm_config));
4805
4806         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4807         bnx2x_set_storm_rx_mode(bp);
4808
4809         for_each_queue(bp, i) {
4810                 u8 cl_id = bp->fp[i].cl_id;
4811
4812                 /* reset xstorm per client statistics */
4813                 offset = BAR_XSTRORM_INTMEM +
4814                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4815                 for (j = 0;
4816                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4817                         REG_WR(bp, offset + j*4, 0);
4818
4819                 /* reset tstorm per client statistics */
4820                 offset = BAR_TSTRORM_INTMEM +
4821                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4822                 for (j = 0;
4823                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4824                         REG_WR(bp, offset + j*4, 0);
4825
4826                 /* reset ustorm per client statistics */
4827                 offset = BAR_USTRORM_INTMEM +
4828                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4829                 for (j = 0;
4830                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4831                         REG_WR(bp, offset + j*4, 0);
4832         }
4833
4834         /* Init statistics related context */
4835         stats_flags.collect_eth = 1;
4836
4837         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4838                ((u32 *)&stats_flags)[0]);
4839         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4840                ((u32 *)&stats_flags)[1]);
4841
4842         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4843                ((u32 *)&stats_flags)[0]);
4844         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4845                ((u32 *)&stats_flags)[1]);
4846
4847         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
4848                ((u32 *)&stats_flags)[0]);
4849         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
4850                ((u32 *)&stats_flags)[1]);
4851
4852         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4853                ((u32 *)&stats_flags)[0]);
4854         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4855                ((u32 *)&stats_flags)[1]);
4856
4857         REG_WR(bp, BAR_XSTRORM_INTMEM +
4858                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4859                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4860         REG_WR(bp, BAR_XSTRORM_INTMEM +
4861                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4862                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4863
4864         REG_WR(bp, BAR_TSTRORM_INTMEM +
4865                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4866                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4867         REG_WR(bp, BAR_TSTRORM_INTMEM +
4868                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4869                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4870
4871         REG_WR(bp, BAR_USTRORM_INTMEM +
4872                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4873                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4874         REG_WR(bp, BAR_USTRORM_INTMEM +
4875                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4876                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4877
4878         if (CHIP_IS_E1H(bp)) {
4879                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4880                         IS_E1HMF(bp));
4881                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4882                         IS_E1HMF(bp));
4883                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4884                         IS_E1HMF(bp));
4885                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4886                         IS_E1HMF(bp));
4887
4888                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4889                          bp->e1hov);
4890         }
4891
4892         /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
4893         max_agg_size =
4894                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
4895                           SGE_PAGE_SIZE * PAGES_PER_SGE),
4896                     (u32)0xffff);
4897         for_each_rx_queue(bp, i) {
4898                 struct bnx2x_fastpath *fp = &bp->fp[i];
4899
4900                 REG_WR(bp, BAR_USTRORM_INTMEM +
4901                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4902                        U64_LO(fp->rx_comp_mapping));
4903                 REG_WR(bp, BAR_USTRORM_INTMEM +
4904                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4905                        U64_HI(fp->rx_comp_mapping));
4906
4907                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4908                          USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4909                          max_agg_size);
4910         }
4911
4912         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
4913
4914         /* Init rate shaping and fairness contexts */
4915         if (IS_E1HMF(bp)) {
4916                 int vn;
4917
4918                 /* During init there is no active link;
4919                    until link is up, set the link rate to 10Gbps */
4920                 bp->link_vars.line_speed = SPEED_10000;
4921                 bnx2x_init_port_minmax(bp);
4922
4923                 bnx2x_calc_vn_weight_sum(bp);
4924
4925                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
4926                         bnx2x_init_vn_minmax(bp, 2*vn + port);
4927
4928                 /* Enable rate shaping and fairness */
4929                 bp->cmng.flags.cmng_enables =
4930                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
4931                 if (bp->vn_weight_sum)
4932                         bp->cmng.flags.cmng_enables |=
4933                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
4934                 else
4935                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes -"
4936                            " fairness will be disabled\n");
4937         } else {
4938                 /* rate shaping and fairness are disabled */
4939                 DP(NETIF_MSG_IFUP,
4940                    "single function mode  minmax will be disabled\n");
4941         }
4942
4943
4944         /* Store it to internal memory */
4945         if (bp->port.pmf)
4946                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
4947                         REG_WR(bp, BAR_XSTRORM_INTMEM +
4948                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
4949                                ((u32 *)(&bp->cmng))[i]);
4950 }
4951
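/* Internal (storm RAM) initialization, cascaded by load type: a COMMON load
 * falls through to the PORT and FUNCTION stages, a PORT load to the FUNCTION
 * stage, and a FUNCTION load runs only the last stage (hence the deliberate
 * missing breaks below).
 */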
4952 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4953 {
4954         switch (load_code) {
4955         case FW_MSG_CODE_DRV_LOAD_COMMON:
4956                 bnx2x_init_internal_common(bp);
4957                 /* no break */
4958
4959         case FW_MSG_CODE_DRV_LOAD_PORT:
4960                 bnx2x_init_internal_port(bp);
4961                 /* no break */
4962
4963         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4964                 bnx2x_init_internal_func(bp);
4965                 break;
4966
4967         default:
4968                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4969                 break;
4970         }
4971 }
4972
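/* Top-level NIC init: set up each fastpath's status block, then the default
 * status block, coalescing, RX/TX/SPQ rings, connection contexts, storm
 * internal memory and the RSS indirection table, and finally enable
 * interrupts once everything the ISR touches is ready.
 */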
4973 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4974 {
4975         int i;
4976
4977         for_each_queue(bp, i) {
4978                 struct bnx2x_fastpath *fp = &bp->fp[i];
4979
4980                 fp->bp = bp;
4981                 fp->state = BNX2X_FP_STATE_CLOSED;
4982                 fp->index = i;
4983                 fp->cl_id = BP_L_ID(bp) + i;
4984                 fp->sb_id = fp->cl_id;
4985                 DP(NETIF_MSG_IFUP,
4986                    "bnx2x_init_sb(%p,%p) index %d  cl_id %d  sb %d\n",
4987                    bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4988                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4989                               FP_SB_ID(fp));
4990                 bnx2x_update_fpsb_idx(fp);
4991         }
4992
4993         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4994                           DEF_SB_ID);
4995         bnx2x_update_dsb_idx(bp);
4996         bnx2x_update_coalesce(bp);
4997         bnx2x_init_rx_rings(bp);
4998         bnx2x_init_tx_ring(bp);
4999         bnx2x_init_sp_ring(bp);
5000         bnx2x_init_context(bp);
5001         bnx2x_init_internal(bp, load_code);
5002         bnx2x_init_ind_table(bp);
5003         bnx2x_stats_init(bp);
5004
5005         /* At this point, we are ready for interrupts */
5006         atomic_set(&bp->intr_sem, 0);
5007
5008         /* flush all before enabling interrupts */
5009         mb();
5010         mmiowb();
5011
5012         bnx2x_int_enable(bp);
5013 }
5014
5015 /* end of nic init */
5016
5017 /*
5018  * gzip service functions
5019  */
5020
5021 static int bnx2x_gunzip_init(struct bnx2x *bp)
5022 {
5023         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5024                                               &bp->gunzip_mapping);
5025         if (bp->gunzip_buf  == NULL)
5026                 goto gunzip_nomem1;
5027
5028         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5029         if (bp->strm  == NULL)
5030                 goto gunzip_nomem2;
5031
5032         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5033                                       GFP_KERNEL);
5034         if (bp->strm->workspace == NULL)
5035                 goto gunzip_nomem3;
5036
5037         return 0;
5038
5039 gunzip_nomem3:
5040         kfree(bp->strm);
5041         bp->strm = NULL;
5042
5043 gunzip_nomem2:
5044         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5045                             bp->gunzip_mapping);
5046         bp->gunzip_buf = NULL;
5047
5048 gunzip_nomem1:
5049         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5050                " decompression\n", bp->dev->name);
5051         return -ENOMEM;
5052 }
5053
5054 static void bnx2x_gunzip_end(struct bnx2x *bp)
5055 {
5056         kfree(bp->strm->workspace);
5057
5058         kfree(bp->strm);
5059         bp->strm = NULL;
5060
5061         if (bp->gunzip_buf) {
5062                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5063                                     bp->gunzip_mapping);
5064                 bp->gunzip_buf = NULL;
5065         }
5066 }
5067
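/* Decompress a gzip image into the pre-allocated gunzip buffer.  The two
 * magic bytes (0x1f 0x8b) and the deflate method are validated, the fixed
 * 10-byte header is skipped (plus the NUL-terminated original file name when
 * the FNAME flag is set), and zlib is run in raw-deflate mode (negative
 * window bits), which is why the gzip header is parsed by hand here.
 */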
5068 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
5069 {
5070         int n, rc;
5071
5072         /* check gzip header */
5073         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
5074                 return -EINVAL;
5075
5076         n = 10;
5077
5078 #define FNAME                           0x8
5079
5080         if (zbuf[3] & FNAME)
5081                 while ((zbuf[n++] != 0) && (n < len));
5082
5083         bp->strm->next_in = zbuf + n;
5084         bp->strm->avail_in = len - n;
5085         bp->strm->next_out = bp->gunzip_buf;
5086         bp->strm->avail_out = FW_BUF_SIZE;
5087
5088         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5089         if (rc != Z_OK)
5090                 return rc;
5091
5092         rc = zlib_inflate(bp->strm, Z_FINISH);
5093         if ((rc != Z_OK) && (rc != Z_STREAM_END))
5094                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5095                        bp->dev->name, bp->strm->msg);
5096
5097         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5098         if (bp->gunzip_outlen & 0x3)
5099                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5100                                     " gunzip_outlen (%d) not aligned\n",
5101                        bp->dev->name, bp->gunzip_outlen);
5102         bp->gunzip_outlen >>= 2;
5103
5104         zlib_inflateEnd(bp->strm);
5105
5106         if (rc == Z_STREAM_END)
5107                 return 0;
5108
5109         return rc;
5110 }
5111
5112 /* nic load/unload */
5113
5114 /*
5115  * General service functions
5116  */
5117
5118 /* send a NIG loopback debug packet */
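/* Each 3-word write below carries two words of frame data plus a control
 * word (0x20 = start of packet, 0x10 = end of packet, per the inline
 * comments), so together they inject one minimal dummy frame for the
 * internal-memory test.
 */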
5119 static void bnx2x_lb_pckt(struct bnx2x *bp)
5120 {
5121         u32 wb_write[3];
5122
5123         /* Ethernet source and destination addresses */
5124         wb_write[0] = 0x55555555;
5125         wb_write[1] = 0x55555555;
5126         wb_write[2] = 0x20;             /* SOP */
5127         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5128
5129         /* NON-IP protocol */
5130         wb_write[0] = 0x09000000;
5131         wb_write[1] = 0x55555555;
5132         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
5133         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5134 }
5135
5136 /* Some of the internal memories are not directly readable
5137  * from the driver, so to test them we send debug packets
5138  * and check the NIG and PRS counters
5139  */
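/* The test runs in two phases.  Phase 1: with the parser's CFC-search
 * credits held at zero, one packet is sent and must show up both in the NIG
 * BRB octet counter (0x10 bytes) and in the PRS packet counter.  Phase 2:
 * ten packets are queued, a credit is then granted, the NIG EOP FIFO is
 * drained, and the BRB/PRS blocks are reset and re-initialized with their
 * inputs re-enabled.  Timeouts are scaled up for FPGA/emulation targets.
 */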
5140 static int bnx2x_int_mem_test(struct bnx2x *bp)
5141 {
5142         int factor;
5143         int count, i;
5144         u32 val = 0;
5145
5146         if (CHIP_REV_IS_FPGA(bp))
5147                 factor = 120;
5148         else if (CHIP_REV_IS_EMUL(bp))
5149                 factor = 200;
5150         else
5151                 factor = 1;
5152
5153         DP(NETIF_MSG_HW, "start part1\n");
5154
5155         /* Disable inputs of parser neighbor blocks */
5156         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5157         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5158         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5159         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5160
5161         /*  Write 0 to parser credits for CFC search request */
5162         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5163
5164         /* send Ethernet packet */
5165         bnx2x_lb_pckt(bp);
5166
5167         /* TODO do I reset NIG statistics? */
5168         /* Wait until NIG register shows 1 packet of size 0x10 */
5169         count = 1000 * factor;
5170         while (count) {
5171
5172                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5173                 val = *bnx2x_sp(bp, wb_data[0]);
5174                 if (val == 0x10)
5175                         break;
5176
5177                 msleep(10);
5178                 count--;
5179         }
5180         if (val != 0x10) {
5181                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5182                 return -1;
5183         }
5184
5185         /* Wait until PRS register shows 1 packet */
5186         count = 1000 * factor;
5187         while (count) {
5188                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5189                 if (val == 1)
5190                         break;
5191
5192                 msleep(10);
5193                 count--;
5194         }
5195         if (val != 0x1) {
5196                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5197                 return -2;
5198         }
5199
5200         /* Reset and init BRB, PRS */
5201         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5202         msleep(50);
5203         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5204         msleep(50);
5205         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5206         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5207
5208         DP(NETIF_MSG_HW, "part2\n");
5209
5210         /* Disable inputs of parser neighbor blocks */
5211         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5212         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5213         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5214         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5215
5216         /* Write 0 to parser credits for CFC search request */
5217         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5218
5219         /* send 10 Ethernet packets */
5220         for (i = 0; i < 10; i++)
5221                 bnx2x_lb_pckt(bp);
5222
5223         /* Wait until NIG register shows 10 + 1
5224            packets of size 11*0x10 = 0xb0 */
5225         count = 1000 * factor;
5226         while (count) {
5227
5228                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5229                 val = *bnx2x_sp(bp, wb_data[0]);
5230                 if (val == 0xb0)
5231                         break;
5232
5233                 msleep(10);
5234                 count--;
5235         }
5236         if (val != 0xb0) {
5237                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5238                 return -3;
5239         }
5240
5241         /* Wait until PRS register shows 2 packets */
5242         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5243         if (val != 2)
5244                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5245
5246         /* Write 1 to parser credits for CFC search request */
5247         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5248
5249         /* Wait until PRS register shows 3 packets */
5250         msleep(10 * factor);
5251         /* Check that the PRS register now shows 3 packets */
5252         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5253         if (val != 3)
5254                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5255
5256         /* clear NIG EOP FIFO */
5257         for (i = 0; i < 11; i++)
5258                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5259         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5260         if (val != 1) {
5261                 BNX2X_ERR("clear of NIG failed\n");
5262                 return -4;
5263         }
5264
5265         /* Reset and init BRB, PRS, NIG */
5266         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5267         msleep(50);
5268         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5269         msleep(50);
5270         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5271         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5272 #ifndef BCM_ISCSI
5273         /* set NIC mode */
5274         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5275 #endif
5276
5277         /* Enable inputs of parser neighbor blocks */
5278         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5279         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5280         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5281         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5282
5283         DP(NETIF_MSG_HW, "done\n");
5284
5285         return 0; /* OK */
5286 }
5287
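/* Unmask the attention interrupt sources of the individual HW blocks
 * (writing 0 to a block's INT_MASK register apparently enables all of its
 * sources).  PXP2 gets a different mask on FPGA vs. ASIC, and PBF keeps
 * bits 3 and 4 masked.
 */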
5288 static void enable_blocks_attention(struct bnx2x *bp)
5289 {
5290         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5291         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5292         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5293         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5294         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5295         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5296         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5297         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5298         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5299 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5300 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5301         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5302         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5303         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5304 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5305 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5306         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5307         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5308         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5309         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5310 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5311 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5312         if (CHIP_REV_IS_FPGA(bp))
5313                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5314         else
5315                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5316         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5317         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5318         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5319 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5320 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5321         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5322         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5323 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5324         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3, 4 masked */
5325 }
5326
5327
5328 static void bnx2x_reset_common(struct bnx2x *bp)
5329 {
5330         /* reset_common */
5331         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5332                0xd3ffff7f);
5333         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5334 }
5335
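/*
 * bnx2x_init_common() runs the chip-wide part of the HW init sequence
 * that is shared by both ports: take the blocks out of reset, program
 * them through bnx2x_init_block(), and on E1 run the internal memory
 * self test when the NIG statistics read back as zero (i.e. on the
 * first bring-up after power-up).
 */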
5336 static int bnx2x_init_common(struct bnx2x *bp)
5337 {
5338         u32 val, i;
5339
5340         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
5341
5342         bnx2x_reset_common(bp);
5343         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5344         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5345
5346         bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5347         if (CHIP_IS_E1H(bp))
5348                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5349
5350         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5351         msleep(30);
5352         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5353
5354         bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5355         if (CHIP_IS_E1(bp)) {
5356                 /* enable HW interrupt from PXP on USDM overflow
5357                    bit 16 on INT_MASK_0 */
5358                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5359         }
5360
5361         bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5362         bnx2x_init_pxp(bp);
5363
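        /* On big-endian hosts the PXP2 request/read clients are put into
         * swapped mode so host memory is accessed in the byte order the
         * chip expects; the HC client is deliberately kept at 0 (see the
         * "make sure this value is 0" write below). */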
5364 #ifdef __BIG_ENDIAN
5365         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5366         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5367         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5368         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5369         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5370         /* make sure this value is 0 */
5371         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5372
5373 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5374         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5375         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5376         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5377         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5378 #endif
5379
5380         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5381 #ifdef BCM_ISCSI
5382         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5383         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5384         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5385 #endif
5386
5387         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5388                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5389
5390         /* let the HW do its magic ... */
5391         msleep(100);
5392         /* finish PXP init */
5393         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5394         if (val != 1) {
5395                 BNX2X_ERR("PXP2 CFG failed\n");
5396                 return -EBUSY;
5397         }
5398         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5399         if (val != 1) {
5400                 BNX2X_ERR("PXP2 RD_INIT failed\n");
5401                 return -EBUSY;
5402         }
5403
5404         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5405         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5406
5407         bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5408
5409         /* clean the DMAE memory */
5410         bp->dmae_ready = 1;
5411         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5412
5413         bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5414         bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5415         bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5416         bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5417
5418         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5419         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5420         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5421         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5422
5423         bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5424         /* soft reset pulse */
5425         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5426         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5427
5428 #ifdef BCM_ISCSI
5429         bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5430 #endif
5431
5432         bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5433         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5434         if (!CHIP_REV_IS_SLOW(bp)) {
5435                 /* enable hw interrupt from doorbell Q */
5436                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5437         }
5438
5439         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5440         if (CHIP_REV_IS_SLOW(bp)) {
5441                 /* fix for emulation and FPGA for no pause */
5442                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5443                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5444                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5445                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5446         }
5447
5448         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5449         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5450         /* set NIC mode */
5451         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5452         if (CHIP_IS_E1H(bp))
5453                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5454
5455         bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5456         bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5457         bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5458         bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5459
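        /* zero the four STORM internal memories; on E1H each one is
         * filled in two halves, on E1 in a single pass */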
5460         if (CHIP_IS_E1H(bp)) {
5461                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5462                                 STORM_INTMEM_SIZE_E1H/2);
5463                 bnx2x_init_fill(bp,
5464                                 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5465                                 0, STORM_INTMEM_SIZE_E1H/2);
5466                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5467                                 STORM_INTMEM_SIZE_E1H/2);
5468                 bnx2x_init_fill(bp,
5469                                 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5470                                 0, STORM_INTMEM_SIZE_E1H/2);
5471                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5472                                 STORM_INTMEM_SIZE_E1H/2);
5473                 bnx2x_init_fill(bp,
5474                                 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5475                                 0, STORM_INTMEM_SIZE_E1H/2);
5476                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5477                                 STORM_INTMEM_SIZE_E1H/2);
5478                 bnx2x_init_fill(bp,
5479                                 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5480                                 0, STORM_INTMEM_SIZE_E1H/2);
5481         } else { /* E1 */
5482                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5483                                 STORM_INTMEM_SIZE_E1);
5484                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5485                                 STORM_INTMEM_SIZE_E1);
5486                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5487                                 STORM_INTMEM_SIZE_E1);
5488                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5489                                 STORM_INTMEM_SIZE_E1);
5490         }
5491
5492         bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5493         bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5494         bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5495         bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5496
5497         /* sync semi rtc */
5498         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5499                0x80000000);
5500         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5501                0x80000000);
5502
5503         bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5504         bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5505         bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5506
5507         REG_WR(bp, SRC_REG_SOFT_RST, 1);
5508         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5509                 REG_WR(bp, i, 0xc0cac01a);
5510                 /* TODO: replace with something meaningful */
5511         }
5512         bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5513         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5514
5515         if (sizeof(union cdu_context) != 1024)
5516                 /* we currently assume that a context is 1024 bytes */
5517                 printk(KERN_ALERT PFX "please adjust the size of"
5518                        " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5519
5520         bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
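        /* CDU global params: the low bits carry the per-connection context
         * size (1024, matching the sizeof(union cdu_context) check above);
         * the (4 << 24) and (0 << 12) fields are the hard-coded values from
         * the HW init recipe for this chip family */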
5521         val = (4 << 24) + (0 << 12) + 1024;
5522         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5523         if (CHIP_IS_E1(bp)) {
5524                 /* !!! fix pxp client credit until excel update */
5525                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5526                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5527         }
5528
5529         bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5530         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5531         /* enable context validation interrupt from CFC */
5532         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5533
5534         /* set the thresholds to prevent CFC/CDU race */
5535         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5536
5537         bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5538         bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5539
5540         /* PXPCS COMMON comes here */
5541         /* Reset PCIE errors for debug */
5542         REG_WR(bp, 0x2814, 0xffffffff);
5543         REG_WR(bp, 0x3820, 0xffffffff);
5544
5545         /* EMAC0 COMMON comes here */
5546         /* EMAC1 COMMON comes here */
5547         /* DBU COMMON comes here */
5548         /* DBG COMMON comes here */
5549
5550         bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5551         if (CHIP_IS_E1H(bp)) {
5552                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5553                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5554         }
5555
5556         if (CHIP_REV_IS_SLOW(bp))
5557                 msleep(200);
5558
5559         /* finish CFC init */
5560         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5561         if (val != 1) {
5562                 BNX2X_ERR("CFC LL_INIT failed\n");
5563                 return -EBUSY;
5564         }
5565         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5566         if (val != 1) {
5567                 BNX2X_ERR("CFC AC_INIT failed\n");
5568                 return -EBUSY;
5569         }
5570         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5571         if (val != 1) {
5572                 BNX2X_ERR("CFC CAM_INIT failed\n");
5573                 return -EBUSY;
5574         }
5575         REG_WR(bp, CFC_REG_DEBUG0, 0);
5576
5577         /* read NIG statistic
5578            to see if this is our first bring-up since power-up */
5579         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5580         val = *bnx2x_sp(bp, wb_data[0]);
5581
5582         /* do internal memory self test */
5583         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5584                 BNX2X_ERR("internal mem self test failed\n");
5585                 return -EBUSY;
5586         }
5587
5588         switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5589         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5590         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5591                 /* Fan failure is indicated by SPIO 5 */
5592                 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5593                                MISC_REGISTERS_SPIO_INPUT_HI_Z);
5594
5595                 /* set to active low mode */
5596                 val = REG_RD(bp, MISC_REG_SPIO_INT);
5597                 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5598                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5599                 REG_WR(bp, MISC_REG_SPIO_INT, val);
5600
5601                 /* enable interrupt to signal the IGU */
5602                 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5603                 val |= (1 << MISC_REGISTERS_SPIO_5);
5604                 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5605                 break;
5606
5607         default:
5608                 break;
5609         }
5610
5611         /* clear PXP2 attentions */
5612         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5613
5614         enable_blocks_attention(bp);
5615
5616         if (!BP_NOMCP(bp)) {
5617                 bnx2x_acquire_phy_lock(bp);
5618                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5619                 bnx2x_release_phy_lock(bp);
5620         } else
5621                 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5622
5623         return 0;
5624 }
5625
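/*
 * bnx2x_init_port() runs the per-port part of the HW init sequence:
 * the per-port instances of the blocks (CMs, SEMs, PBF, HC, AEU, NIG)
 * are programmed along with the port thresholds and attention masks.
 * The "<block> comes here" comments are placeholders kept from the
 * generated init sequence for blocks that appear to need no explicit
 * per-port writes in this version.
 */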
5626 static int bnx2x_init_port(struct bnx2x *bp)
5627 {
5628         int port = BP_PORT(bp);
5629         u32 val;
5630
5631         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
5632
5633         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5634
5635         /* Port PXP comes here */
5636         /* Port PXP2 comes here */
5637 #ifdef BCM_ISCSI
5638         /* Port0  1
5639          * Port1  385 */
5640         i++;
5641         wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5642         wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5643         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5644         REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5645
5646         /* Port0  2
5647          * Port1  386 */
5648         i++;
5649         wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5650         wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5651         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5652         REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5653
5654         /* Port0  3
5655          * Port1  387 */
5656         i++;
5657         wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5658         wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5659         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5660         REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5661 #endif
5662         /* Port CMs come here */
5663         bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
5664                              (port ? XCM_PORT1_END : XCM_PORT0_END));
5665
5666         /* Port QM comes here */
5667 #ifdef BCM_ISCSI
5668         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5669         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5670
5671         bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5672                              func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5673 #endif
5674         /* Port DQ comes here */
5675         /* Port BRB1 comes here */
5676         /* Port PRS comes here */
5677         /* Port TSDM comes here */
5678         /* Port CSDM comes here */
5679         /* Port USDM comes here */
5680         /* Port XSDM comes here */
5681         bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5682                              port ? TSEM_PORT1_END : TSEM_PORT0_END);
5683         bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5684                              port ? USEM_PORT1_END : USEM_PORT0_END);
5685         bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5686                              port ? CSEM_PORT1_END : CSEM_PORT0_END);
5687         bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5688                              port ? XSEM_PORT1_END : XSEM_PORT0_END);
5689         /* Port UPB comes here */
5690         /* Port XPB comes here */
5691
5692         bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5693                              port ? PBF_PORT1_END : PBF_PORT0_END);
5694
5695         /* configure PBF to work without PAUSE, MTU 9000 */
5696         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5697
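        /* 9040 is presumably the 9000-byte jumbo MTU plus L2 overhead;
         * the threshold and credit registers below appear to be in
         * 16-byte units (hence the /16), with 553 - 22 of extra initial
         * credit taken from the HW init recipe */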
5698         /* update threshold */
5699         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5700         /* update init credit */
5701         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5702
5703         /* probe changes */
5704         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5705         msleep(5);
5706         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5707
5708 #ifdef BCM_ISCSI
5709         /* tell the searcher where the T2 table is */
5710         REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5711
5712         wb_write[0] = U64_LO(bp->t2_mapping);
5713         wb_write[1] = U64_HI(bp->t2_mapping);
5714         REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5715         wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5716         wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5717         REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5718
5719         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5720         /* Port SRCH comes here */
5721 #endif
5722         /* Port CDU comes here */
5723         /* Port CFC comes here */
5724
5725         if (CHIP_IS_E1(bp)) {
5726                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5727                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5728         }
5729         bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5730                              port ? HC_PORT1_END : HC_PORT0_END);
5731
5732         bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5733                                     MISC_AEU_PORT0_START,
5734                              port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5735         /* init aeu_mask_attn_func_0/1:
5736          *  - SF mode: bits 3-7 are masked, only bits 0-2 are in use (0x7)
5737          *  - MF mode: bit 3 is masked, bits 0-2 are in use as in SF and
5738          *             bits 4-7 are used for "per vn group attention" (0xF7) */
5739         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5740                (IS_E1HMF(bp) ? 0xF7 : 0x7));
5741
5742         /* Port PXPCS comes here */
5743         /* Port EMAC0 comes here */
5744         /* Port EMAC1 comes here */
5745         /* Port DBU comes here */
5746         /* Port DBG comes here */
5747         bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5748                              port ? NIG_PORT1_END : NIG_PORT0_END);
5749
5750         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5751
5752         if (CHIP_IS_E1H(bp)) {
5753                 /* 0x2 disable e1hov, 0x1 enable */
5754                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5755                        (IS_E1HMF(bp) ? 0x1 : 0x2));
5756
5757         }
5758
5759         /* Port MCP comes here */
5760         /* Port DMAE comes here */
5761
5762         switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_M