bnx2x: Spelling mistakes
[linux-2.6.git] drivers/net/bnx2x_main.c
1 /* bnx2x_main.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2008 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h>  /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #ifdef NETIF_F_HW_VLAN_TX
42         #include <linux/if_vlan.h>
43 #endif
44 #include <net/ip.h>
45 #include <net/tcp.h>
46 #include <net/checksum.h>
47 #include <linux/version.h>
48 #include <net/ip6_checksum.h>
49 #include <linux/workqueue.h>
50 #include <linux/crc32.h>
51 #include <linux/crc32c.h>
52 #include <linux/prefetch.h>
53 #include <linux/zlib.h>
54 #include <linux/io.h>
55
56 #include "bnx2x_reg.h"
57 #include "bnx2x_fw_defs.h"
58 #include "bnx2x_hsi.h"
59 #include "bnx2x_link.h"
60 #include "bnx2x.h"
61 #include "bnx2x_init.h"
62
63 #define DRV_MODULE_VERSION      "1.45.6"
64 #define DRV_MODULE_RELDATE      "2008/06/23"
65 #define BNX2X_BC_VER            0x040200
66
67 /* Time in jiffies before concluding the transmitter is hung */
68 #define TX_TIMEOUT              (5*HZ)
69
70 static char version[] __devinitdata =
71         "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
72         DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
73
74 MODULE_AUTHOR("Eliezer Tamir");
75 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
76 MODULE_LICENSE("GPL");
77 MODULE_VERSION(DRV_MODULE_VERSION);
78
79 static int disable_tpa;
80 static int use_inta;
81 static int poll;
82 static int debug;
83 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
84 static int use_multi;
85
86 module_param(disable_tpa, int, 0);
87 module_param(use_inta, int, 0);
88 module_param(poll, int, 0);
89 module_param(debug, int, 0);
90 MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
91 MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
92 MODULE_PARM_DESC(poll, "use polling (for debug)");
93 MODULE_PARM_DESC(debug, "default debug msglevel");
94
95 #ifdef BNX2X_MULTI
96 module_param(use_multi, int, 0);
97 MODULE_PARM_DESC(use_multi, "use per-CPU queues");
98 #endif
99
100 enum bnx2x_board_type {
101         BCM57710 = 0,
102         BCM57711 = 1,
103         BCM57711E = 2,
104 };
105
106 /* indexed by board_type, above */
107 static struct {
108         char *name;
109 } board_info[] __devinitdata = {
110         { "Broadcom NetXtreme II BCM57710 XGb" },
111         { "Broadcom NetXtreme II BCM57711 XGb" },
112         { "Broadcom NetXtreme II BCM57711E XGb" }
113 };
114
115
116 static const struct pci_device_id bnx2x_pci_tbl[] = {
117         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
118                 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
119         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
120                 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
121         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
122                 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
123         { 0 }
124 };
125
126 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
127
128 /****************************************************************************
129 * General service functions
130 ****************************************************************************/
131
132 /* used only at init
133  * locking is done by mcp
134  */
135 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
136 {
137         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
138         pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
139         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
140                                PCICFG_VENDOR_ID_OFFSET);
141 }
142
143 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
144 {
145         u32 val;
146
147         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
148         pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
149         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
150                                PCICFG_VENDOR_ID_OFFSET);
151
152         return val;
153 }
154
155 static const u32 dmae_reg_go_c[] = {
156         DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
157         DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
158         DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
159         DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
160 };
161
162 /* copy command into DMAE command memory and set DMAE command go */
163 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
164                             int idx)
165 {
166         u32 cmd_offset;
167         int i;
168
169         cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
170         for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
171                 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
172
173                 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
174                    idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
175         }
176         REG_WR(bp, dmae_reg_go_c[idx], 1);
177 }
178
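/* DMA len32 dwords from a host buffer at dma_addr to the device address
 * dst_addr; while DMAE is not ready yet, fall back to indirect writes
 */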
179 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
180                       u32 len32)
181 {
182         struct dmae_command *dmae = &bp->init_dmae;
183         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
184         int cnt = 200;
185
186         if (!bp->dmae_ready) {
187                 u32 *data = bnx2x_sp(bp, wb_data[0]);
188
189                 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
190                    "  using indirect\n", dst_addr, len32);
191                 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
192                 return;
193         }
194
195         mutex_lock(&bp->dmae_mutex);
196
197         memset(dmae, 0, sizeof(struct dmae_command));
198
199         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
200                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
201                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
202 #ifdef __BIG_ENDIAN
203                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
204 #else
205                         DMAE_CMD_ENDIANITY_DW_SWAP |
206 #endif
207                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
208                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
209         dmae->src_addr_lo = U64_LO(dma_addr);
210         dmae->src_addr_hi = U64_HI(dma_addr);
211         dmae->dst_addr_lo = dst_addr >> 2;
212         dmae->dst_addr_hi = 0;
213         dmae->len = len32;
214         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
215         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
216         dmae->comp_val = DMAE_COMP_VAL;
217
218         DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
219            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
220                     "dst_addr [%x:%08x (%08x)]\n"
221            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
222            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
223            dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
224            dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
225         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
226            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
227            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
228
229         *wb_comp = 0;
230
231         bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
232
233         udelay(5);
234
235         while (*wb_comp != DMAE_COMP_VAL) {
236                 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
237
238                 if (!cnt) {
239                         BNX2X_ERR("dmae timeout!\n");
240                         break;
241                 }
242                 cnt--;
243                 /* adjust delay for emulation/FPGA */
244                 if (CHIP_REV_IS_SLOW(bp))
245                         msleep(100);
246                 else
247                         udelay(5);
248         }
249
250         mutex_unlock(&bp->dmae_mutex);
251 }
252
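/* DMA len32 dwords from the device address src_addr into the slowpath
 * wb_data buffer; while DMAE is not ready yet, fall back to indirect reads
 */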
253 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
254 {
255         struct dmae_command *dmae = &bp->init_dmae;
256         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
257         int cnt = 200;
258
259         if (!bp->dmae_ready) {
260                 u32 *data = bnx2x_sp(bp, wb_data[0]);
261                 int i;
262
263                 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
264                    "  using indirect\n", src_addr, len32);
265                 for (i = 0; i < len32; i++)
266                         data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
267                 return;
268         }
269
270         mutex_lock(&bp->dmae_mutex);
271
272         memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
273         memset(dmae, 0, sizeof(struct dmae_command));
274
275         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
276                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
277                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
278 #ifdef __BIG_ENDIAN
279                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
280 #else
281                         DMAE_CMD_ENDIANITY_DW_SWAP |
282 #endif
283                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
284                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
285         dmae->src_addr_lo = src_addr >> 2;
286         dmae->src_addr_hi = 0;
287         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
288         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
289         dmae->len = len32;
290         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
291         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
292         dmae->comp_val = DMAE_COMP_VAL;
293
294         DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
295            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
296                     "dst_addr [%x:%08x (%08x)]\n"
297            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
298            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
299            dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
300            dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
301
302         *wb_comp = 0;
303
304         bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
305
306         udelay(5);
307
308         while (*wb_comp != DMAE_COMP_VAL) {
309
310                 if (!cnt) {
311                         BNX2X_ERR("dmae timeout!\n");
312                         break;
313                 }
314                 cnt--;
315                 /* adjust delay for emulation/FPGA */
316                 if (CHIP_REV_IS_SLOW(bp))
317                         msleep(100);
318                 else
319                         udelay(5);
320         }
321         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
322            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
323            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
324
325         mutex_unlock(&bp->dmae_mutex);
326 }
327
328 /* used only for slowpath so not inlined */
329 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
330 {
331         u32 wb_write[2];
332
333         wb_write[0] = val_hi;
334         wb_write[1] = val_lo;
335         REG_WR_DMAE(bp, reg, wb_write, 2);
336 }
337
338 #ifdef USE_WB_RD
339 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
340 {
341         u32 wb_data[2];
342
343         REG_RD_DMAE(bp, reg, wb_data, 2);
344
345         return HILO_U64(wb_data[0], wb_data[1]);
346 }
347 #endif
348
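/* dump the assert lists of the X/T/C/U STORM processors and return the
 * number of asserts found
 */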
349 static int bnx2x_mc_assert(struct bnx2x *bp)
350 {
351         char last_idx;
352         int i, rc = 0;
353         u32 row0, row1, row2, row3;
354
355         /* XSTORM */
356         last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
357                            XSTORM_ASSERT_LIST_INDEX_OFFSET);
358         if (last_idx)
359                 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
360
361         /* print the asserts */
362         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
363
364                 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
365                               XSTORM_ASSERT_LIST_OFFSET(i));
366                 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
367                               XSTORM_ASSERT_LIST_OFFSET(i) + 4);
368                 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
369                               XSTORM_ASSERT_LIST_OFFSET(i) + 8);
370                 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
371                               XSTORM_ASSERT_LIST_OFFSET(i) + 12);
372
373                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
374                         BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
375                                   " 0x%08x 0x%08x 0x%08x\n",
376                                   i, row3, row2, row1, row0);
377                         rc++;
378                 } else {
379                         break;
380                 }
381         }
382
383         /* TSTORM */
384         last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
385                            TSTORM_ASSERT_LIST_INDEX_OFFSET);
386         if (last_idx)
387                 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
388
389         /* print the asserts */
390         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
391
392                 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
393                               TSTORM_ASSERT_LIST_OFFSET(i));
394                 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
395                               TSTORM_ASSERT_LIST_OFFSET(i) + 4);
396                 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
397                               TSTORM_ASSERT_LIST_OFFSET(i) + 8);
398                 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
399                               TSTORM_ASSERT_LIST_OFFSET(i) + 12);
400
401                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
402                         BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
403                                   " 0x%08x 0x%08x 0x%08x\n",
404                                   i, row3, row2, row1, row0);
405                         rc++;
406                 } else {
407                         break;
408                 }
409         }
410
411         /* CSTORM */
412         last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
413                            CSTORM_ASSERT_LIST_INDEX_OFFSET);
414         if (last_idx)
415                 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
416
417         /* print the asserts */
418         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
419
420                 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
421                               CSTORM_ASSERT_LIST_OFFSET(i));
422                 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
423                               CSTORM_ASSERT_LIST_OFFSET(i) + 4);
424                 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
425                               CSTORM_ASSERT_LIST_OFFSET(i) + 8);
426                 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
427                               CSTORM_ASSERT_LIST_OFFSET(i) + 12);
428
429                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
430                         BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
431                                   " 0x%08x 0x%08x 0x%08x\n",
432                                   i, row3, row2, row1, row0);
433                         rc++;
434                 } else {
435                         break;
436                 }
437         }
438
439         /* USTORM */
440         last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
441                            USTORM_ASSERT_LIST_INDEX_OFFSET);
442         if (last_idx)
443                 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
444
445         /* print the asserts */
446         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
447
448                 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
449                               USTORM_ASSERT_LIST_OFFSET(i));
450                 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
451                               USTORM_ASSERT_LIST_OFFSET(i) + 4);
452                 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
453                               USTORM_ASSERT_LIST_OFFSET(i) + 8);
454                 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
455                               USTORM_ASSERT_LIST_OFFSET(i) + 12);
456
457                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
458                         BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
459                                   " 0x%08x 0x%08x 0x%08x\n",
460                                   i, row3, row2, row1, row0);
461                         rc++;
462                 } else {
463                         break;
464                 }
465         }
466
467         return rc;
468 }
469
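/* print the firmware (MCP) trace from the scratchpad memory */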
470 static void bnx2x_fw_dump(struct bnx2x *bp)
471 {
472         u32 mark, offset;
473         u32 data[9];
474         int word;
475
476         mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
477         mark = ((mark + 0x3) & ~0x3);
478         printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
479
480         for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
481                 for (word = 0; word < 8; word++)
482                         data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
483                                                   offset + 4*word));
484                 data[8] = 0x0;
485                 printk(KERN_CONT "%s", (char *)data);
486         }
487         for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
488                 for (word = 0; word < 8; word++)
489                         data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
490                                                   offset + 4*word));
491                 data[8] = 0x0;
492                 printk(KERN_CONT "%s", (char *)data);
493         }
494         printk("\n" KERN_ERR PFX "end of fw dump\n");
495 }
496
497 static void bnx2x_panic_dump(struct bnx2x *bp)
498 {
499         int i;
500         u16 j, start, end;
501
502         bp->stats_state = STATS_STATE_DISABLED;
503         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
504
505         BNX2X_ERR("begin crash dump -----------------\n");
506
507         for_each_queue(bp, i) {
508                 struct bnx2x_fastpath *fp = &bp->fp[i];
509                 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
510
511                 BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
512                           "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
513                           i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
514                           fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
515                 BNX2X_ERR("          rx_bd_prod(%x)  rx_bd_cons(%x)"
516                           "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
517                           "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
518                           fp->rx_bd_prod, fp->rx_bd_cons,
519                           le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
520                           fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
521                 BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
522                           "  fp_c_idx(%x)  *sb_c_idx(%x)  fp_u_idx(%x)"
523                           "  *sb_u_idx(%x)  bd data(%x,%x)\n",
524                           fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
525                           fp->status_blk->c_status_block.status_block_index,
526                           fp->fp_u_idx,
527                           fp->status_blk->u_status_block.status_block_index,
528                           hw_prods->packets_prod, hw_prods->bds_prod);
529
530                 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
531                 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
532                 for (j = start; j < end; j++) {
533                         struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
534
535                         BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
536                                   sw_bd->skb, sw_bd->first_bd);
537                 }
538
539                 start = TX_BD(fp->tx_bd_cons - 10);
540                 end = TX_BD(fp->tx_bd_cons + 254);
541                 for (j = start; j < end; j++) {
542                         u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
543
544                         BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
545                                   j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
546                 }
547
548                 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
549                 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
550                 for (j = start; j < end; j++) {
551                         u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
552                         struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
553
554                         BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
555                                   j, rx_bd[1], rx_bd[0], sw_bd->skb);
556                 }
557
558                 start = RX_SGE(fp->rx_sge_prod);
559                 end = RX_SGE(fp->last_max_sge);
560                 for (j = start; j < end; j++) {
561                         u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
562                         struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
563
564                         BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
565                                   j, rx_sge[1], rx_sge[0], sw_page->page);
566                 }
567
568                 start = RCQ_BD(fp->rx_comp_cons - 10);
569                 end = RCQ_BD(fp->rx_comp_cons + 503);
570                 for (j = start; j < end; j++) {
571                         u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
572
573                         BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
574                                   j, cqe[0], cqe[1], cqe[2], cqe[3]);
575                 }
576         }
577
578         BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
579                   "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
580                   "  spq_prod_idx(%u)\n",
581                   bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
582                   bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
583
584         bnx2x_fw_dump(bp);
585         bnx2x_mc_assert(bp);
586         BNX2X_ERR("end crash dump -----------------\n");
587 }
588
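/* enable interrupts in the HC block (single ISR or MSI/MSI-X mode) and,
 * on E1H, program the leading/trailing edge attention masks
 */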
589 static void bnx2x_int_enable(struct bnx2x *bp)
590 {
591         int port = BP_PORT(bp);
592         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
593         u32 val = REG_RD(bp, addr);
594         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
595
596         if (msix) {
597                 val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
598                 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
599                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
600         } else {
601                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
602                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
603                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
604                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
605
606                 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
607                    val, port, addr, msix);
608
609                 REG_WR(bp, addr, val);
610
611                 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
612         }
613
614         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
615            val, port, addr, msix);
616
617         REG_WR(bp, addr, val);
618
619         if (CHIP_IS_E1H(bp)) {
620                 /* init leading/trailing edge */
621                 if (IS_E1HMF(bp)) {
622                         val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
623                         if (bp->port.pmf)
624                                 /* enable nig attention */
625                                 val |= 0x0100;
626                 } else
627                         val = 0xffff;
628
629                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
630                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
631         }
632 }
633
634 static void bnx2x_int_disable(struct bnx2x *bp)
635 {
636         int port = BP_PORT(bp);
637         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
638         u32 val = REG_RD(bp, addr);
639
640         val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
641                  HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
642                  HC_CONFIG_0_REG_INT_LINE_EN_0 |
643                  HC_CONFIG_0_REG_ATTN_BIT_EN_0);
644
645         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
646            val, port, addr);
647
648         REG_WR(bp, addr, val);
649         if (REG_RD(bp, addr) != val)
650                 BNX2X_ERR("BUG! proper val not read from IGU!\n");
651 }
652
653 static void bnx2x_int_disable_sync(struct bnx2x *bp)
654 {
655         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
656         int i;
657
658         /* disable interrupt handling */
659         atomic_inc(&bp->intr_sem);
660         /* prevent the HW from sending interrupts */
661         bnx2x_int_disable(bp);
662
663         /* make sure all ISRs are done */
664         if (msix) {
665                 for_each_queue(bp, i)
666                         synchronize_irq(bp->msix_table[i].vector);
667
668                 /* one more for the Slow Path IRQ */
669                 synchronize_irq(bp->msix_table[i].vector);
670         } else
671                 synchronize_irq(bp->pdev->irq);
672
673         /* make sure sp_task is not running */
674         cancel_work_sync(&bp->sp_task);
675 }
676
677 /* fast path */
678
679 /*
680  * General service functions
681  */
682
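/* acknowledge a status block index to the IGU through the HC command register */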
683 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
684                                 u8 storm, u16 index, u8 op, u8 update)
685 {
686         u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
687                        COMMAND_REG_INT_ACK);
688         struct igu_ack_register igu_ack;
689
690         igu_ack.status_block_index = index;
691         igu_ack.sb_id_and_flags =
692                         ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
693                          (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
694                          (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
695                          (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
696
697         DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
698            (*(u32 *)&igu_ack), hc_addr);
699         REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
700 }
701
702 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
703 {
704         struct host_status_block *fpsb = fp->status_blk;
705         u16 rc = 0;
706
707         barrier(); /* status block is written to by the chip */
708         if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
709                 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
710                 rc |= 1;
711         }
712         if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
713                 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
714                 rc |= 2;
715         }
716         return rc;
717 }
718
719 static u16 bnx2x_ack_int(struct bnx2x *bp)
720 {
721         u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
722                        COMMAND_REG_SIMD_MASK);
723         u32 result = REG_RD(bp, hc_addr);
724
725         DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
726            result, hc_addr);
727
728         return result;
729 }
730
731
732 /*
733  * fast path service functions
734  */
735
736 /* free skb in the packet ring at pos idx
737  * return idx of last bd freed
738  */
739 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
740                              u16 idx)
741 {
742         struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
743         struct eth_tx_bd *tx_bd;
744         struct sk_buff *skb = tx_buf->skb;
745         u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
746         int nbd;
747
748         DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
749            idx, tx_buf, skb);
750
751         /* unmap first bd */
752         DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
753         tx_bd = &fp->tx_desc_ring[bd_idx];
754         pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
755                          BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
756
757         nbd = le16_to_cpu(tx_bd->nbd) - 1;
758         new_cons = nbd + tx_buf->first_bd;
759 #ifdef BNX2X_STOP_ON_ERROR
760         if (nbd > (MAX_SKB_FRAGS + 2)) {
761                 BNX2X_ERR("BAD nbd!\n");
762                 bnx2x_panic();
763         }
764 #endif
765
766         /* Skip a parse bd and the TSO split header bd
767            since they have no mapping */
768         if (nbd)
769                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
770
771         if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
772                                            ETH_TX_BD_FLAGS_TCP_CSUM |
773                                            ETH_TX_BD_FLAGS_SW_LSO)) {
774                 if (--nbd)
775                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
776                 tx_bd = &fp->tx_desc_ring[bd_idx];
777                 /* is this a TSO split header bd? */
778                 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
779                         if (--nbd)
780                                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
781                 }
782         }
783
784         /* now free frags */
785         while (nbd > 0) {
786
787                 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
788                 tx_bd = &fp->tx_desc_ring[bd_idx];
789                 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
790                                BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
791                 if (--nbd)
792                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
793         }
794
795         /* release skb */
796         WARN_ON(!skb);
797         dev_kfree_skb(skb);
798         tx_buf->first_bd = 0;
799         tx_buf->skb = NULL;
800
801         return new_cons;
802 }
803
804 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
805 {
806         s16 used;
807         u16 prod;
808         u16 cons;
809
810         barrier(); /* Tell compiler that prod and cons can change */
811         prod = fp->tx_bd_prod;
812         cons = fp->tx_bd_cons;
813
814         /* NUM_TX_RINGS = number of "next-page" entries
815            It will be used as a threshold */
816         used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
817
818 #ifdef BNX2X_STOP_ON_ERROR
819         WARN_ON(used < 0);
820         WARN_ON(used > fp->bp->tx_ring_size);
821         WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
822 #endif
823
824         return (s16)(fp->bp->tx_ring_size) - used;
825 }
826
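/* complete up to 'work' transmitted packets, free their skbs and wake the
 * queue if it was stopped and room became available
 */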
827 static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
828 {
829         struct bnx2x *bp = fp->bp;
830         u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
831         int done = 0;
832
833 #ifdef BNX2X_STOP_ON_ERROR
834         if (unlikely(bp->panic))
835                 return;
836 #endif
837
838         hw_cons = le16_to_cpu(*fp->tx_cons_sb);
839         sw_cons = fp->tx_pkt_cons;
840
841         while (sw_cons != hw_cons) {
842                 u16 pkt_cons;
843
844                 pkt_cons = TX_BD(sw_cons);
845
846                 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
847
848                 DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
849                    hw_cons, sw_cons, pkt_cons);
850
851 /*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
852                         rmb();
853                         prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
854                 }
855 */
856                 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
857                 sw_cons++;
858                 done++;
859
860                 if (done == work)
861                         break;
862         }
863
864         fp->tx_pkt_cons = sw_cons;
865         fp->tx_bd_cons = bd_cons;
866
867         /* Need to make the tx_cons update visible to start_xmit()
868          * before checking for netif_queue_stopped().  Without the
869          * memory barrier, there is a small possibility that start_xmit()
870          * will miss it and cause the queue to be stopped forever.
871          */
872         smp_mb();
873
874         /* TBD need a thresh? */
875         if (unlikely(netif_queue_stopped(bp->dev))) {
876
877                 netif_tx_lock(bp->dev);
878
879                 if (netif_queue_stopped(bp->dev) &&
880                     (bp->state == BNX2X_STATE_OPEN) &&
881                     (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
882                         netif_wake_queue(bp->dev);
883
884                 netif_tx_unlock(bp->dev);
885         }
886 }
887
888
889 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
890                            union eth_rx_cqe *rr_cqe)
891 {
892         struct bnx2x *bp = fp->bp;
893         int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
894         int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
895
896         DP(BNX2X_MSG_SP,
897            "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
898            FP_IDX(fp), cid, command, bp->state,
899            rr_cqe->ramrod_cqe.ramrod_type);
900
901         bp->spq_left++;
902
903         if (FP_IDX(fp)) {
904                 switch (command | fp->state) {
905                 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
906                                                 BNX2X_FP_STATE_OPENING):
907                         DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
908                            cid);
909                         fp->state = BNX2X_FP_STATE_OPEN;
910                         break;
911
912                 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
913                         DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
914                            cid);
915                         fp->state = BNX2X_FP_STATE_HALTED;
916                         break;
917
918                 default:
919                         BNX2X_ERR("unexpected MC reply (%d)  "
920                                   "fp->state is %x\n", command, fp->state);
921                         break;
922                 }
923                 mb(); /* force bnx2x_wait_ramrod() to see the change */
924                 return;
925         }
926
927         switch (command | bp->state) {
928         case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
929                 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
930                 bp->state = BNX2X_STATE_OPEN;
931                 break;
932
933         case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
934                 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
935                 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
936                 fp->state = BNX2X_FP_STATE_HALTED;
937                 break;
938
939         case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
940                 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
941                 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
942                 break;
943
944
945         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
946         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
947                 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
948                 bp->set_mac_pending = 0;
949                 break;
950
951         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
952                 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
953                 break;
954
955         default:
956                 BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
957                           command, bp->state);
958                 break;
959         }
960         mb(); /* force bnx2x_wait_ramrod() to see the change */
961 }
962
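/* unmap and free the page behind an RX SGE ring entry (if any) */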
963 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
964                                      struct bnx2x_fastpath *fp, u16 index)
965 {
966         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
967         struct page *page = sw_buf->page;
968         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
969
970         /* Skip "next page" elements */
971         if (!page)
972                 return;
973
974         pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
975                        BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
976         __free_pages(page, PAGES_PER_SGE_SHIFT);
977
978         sw_buf->page = NULL;
979         sge->addr_hi = 0;
980         sge->addr_lo = 0;
981 }
982
983 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
984                                            struct bnx2x_fastpath *fp, int last)
985 {
986         int i;
987
988         for (i = 0; i < last; i++)
989                 bnx2x_free_rx_sge(bp, fp, i);
990 }
991
992 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
993                                      struct bnx2x_fastpath *fp, u16 index)
994 {
995         struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
996         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
997         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
998         dma_addr_t mapping;
999
1000         if (unlikely(page == NULL))
1001                 return -ENOMEM;
1002
1003         mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
1004                                PCI_DMA_FROMDEVICE);
1005         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1006                 __free_pages(page, PAGES_PER_SGE_SHIFT);
1007                 return -ENOMEM;
1008         }
1009
1010         sw_buf->page = page;
1011         pci_unmap_addr_set(sw_buf, mapping, mapping);
1012
1013         sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1014         sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1015
1016         return 0;
1017 }
1018
1019 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1020                                      struct bnx2x_fastpath *fp, u16 index)
1021 {
1022         struct sk_buff *skb;
1023         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1024         struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1025         dma_addr_t mapping;
1026
1027         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1028         if (unlikely(skb == NULL))
1029                 return -ENOMEM;
1030
1031         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1032                                  PCI_DMA_FROMDEVICE);
1033         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1034                 dev_kfree_skb(skb);
1035                 return -ENOMEM;
1036         }
1037
1038         rx_buf->skb = skb;
1039         pci_unmap_addr_set(rx_buf, mapping, mapping);
1040
1041         rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1042         rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1043
1044         return 0;
1045 }
1046
1047 /* note that we are not allocating a new skb,
1048  * we are just moving one from cons to prod;
1049  * we are not creating a new mapping,
1050  * so there is no need to check for dma_mapping_error().
1051  */
1052 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1053                                struct sk_buff *skb, u16 cons, u16 prod)
1054 {
1055         struct bnx2x *bp = fp->bp;
1056         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1057         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1058         struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1059         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1060
1061         pci_dma_sync_single_for_device(bp->pdev,
1062                                        pci_unmap_addr(cons_rx_buf, mapping),
1063                                        bp->rx_offset + RX_COPY_THRESH,
1064                                        PCI_DMA_FROMDEVICE);
1065
1066         prod_rx_buf->skb = cons_rx_buf->skb;
1067         pci_unmap_addr_set(prod_rx_buf, mapping,
1068                            pci_unmap_addr(cons_rx_buf, mapping));
1069         *prod_bd = *cons_bd;
1070 }
1071
1072 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1073                                              u16 idx)
1074 {
1075         u16 last_max = fp->last_max_sge;
1076
1077         if (SUB_S16(idx, last_max) > 0)
1078                 fp->last_max_sge = idx;
1079 }
1080
1081 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1082 {
1083         int i, j;
1084
1085         for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1086                 int idx = RX_SGE_CNT * i - 1;
1087
1088                 for (j = 0; j < 2; j++) {
1089                         SGE_MASK_CLEAR_BIT(fp, idx);
1090                         idx--;
1091                 }
1092         }
1093 }
1094
1095 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1096                                   struct eth_fast_path_rx_cqe *fp_cqe)
1097 {
1098         struct bnx2x *bp = fp->bp;
1099         u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1100                                      le16_to_cpu(fp_cqe->len_on_bd)) >>
1101                       BCM_PAGE_SHIFT;
1102         u16 last_max, last_elem, first_elem;
1103         u16 delta = 0;
1104         u16 i;
1105
1106         if (!sge_len)
1107                 return;
1108
1109         /* First mark all used pages */
1110         for (i = 0; i < sge_len; i++)
1111                 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1112
1113         DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1114            sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1115
1116         /* Here we assume that the last SGE index is the biggest */
1117         prefetch((void *)(fp->sge_mask));
1118         bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1119
1120         last_max = RX_SGE(fp->last_max_sge);
1121         last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1122         first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1123
1124         /* If ring is not full */
1125         if (last_elem + 1 != first_elem)
1126                 last_elem++;
1127
1128         /* Now update the prod */
1129         for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1130                 if (likely(fp->sge_mask[i]))
1131                         break;
1132
1133                 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1134                 delta += RX_SGE_MASK_ELEM_SZ;
1135         }
1136
1137         if (delta > 0) {
1138                 fp->rx_sge_prod += delta;
1139                 /* clear page-end entries */
1140                 bnx2x_clear_sge_mask_next_elems(fp);
1141         }
1142
1143         DP(NETIF_MSG_RX_STATUS,
1144            "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
1145            fp->last_max_sge, fp->rx_sge_prod);
1146 }
1147
1148 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1149 {
1150         /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1151         memset(fp->sge_mask, 0xff,
1152                (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1153
1154         /* Clear the two last indices in the page to 1:
1155            these are the indices that correspond to the "next" element,
1156            hence will never be indicated and should be removed from
1157            the calculations. */
1158         bnx2x_clear_sge_mask_next_elems(fp);
1159 }
1160
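/* start a TPA aggregation: park the consumer RX buffer in the per-queue TPA
 * pool and map the pool's spare skb into the producer slot
 */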
1161 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1162                             struct sk_buff *skb, u16 cons, u16 prod)
1163 {
1164         struct bnx2x *bp = fp->bp;
1165         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1166         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1167         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1168         dma_addr_t mapping;
1169
1170         /* move empty skb from pool to prod and map it */
1171         prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1172         mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1173                                  bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
1174         pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1175
1176         /* move partial skb from cons to pool (don't unmap yet) */
1177         fp->tpa_pool[queue] = *cons_rx_buf;
1178
1179         /* mark bin state as start - print error if current state != stop */
1180         if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1181                 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1182
1183         fp->tpa_state[queue] = BNX2X_TPA_START;
1184
1185         /* point prod_bd to new skb */
1186         prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1187         prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1188
1189 #ifdef BNX2X_STOP_ON_ERROR
1190         fp->tpa_queue_used |= (1 << queue);
1191 #ifdef __powerpc64__
1192         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1193 #else
1194         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1195 #endif
1196            fp->tpa_queue_used);
1197 #endif
1198 }
1199
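/* attach the SGE pages of an aggregated packet to skb as page fragments,
 * allocating a substitute page for each consumed SGE ring entry
 */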
1200 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1201                                struct sk_buff *skb,
1202                                struct eth_fast_path_rx_cqe *fp_cqe,
1203                                u16 cqe_idx)
1204 {
1205         struct sw_rx_page *rx_pg, old_rx_pg;
1206         struct page *sge;
1207         u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1208         u32 i, frag_len, frag_size, pages;
1209         int err;
1210         int j;
1211
1212         frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1213         pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT;
1214
1215         /* This is needed in order to enable forwarding support */
1216         if (frag_size)
1217                 skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE,
1218                                                max(frag_size, (u32)len_on_bd));
1219
1220 #ifdef BNX2X_STOP_ON_ERROR
1221         if (pages > 8*PAGES_PER_SGE) {
1222                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1223                           pages, cqe_idx);
1224                 BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
1225                           fp_cqe->pkt_len, len_on_bd);
1226                 bnx2x_panic();
1227                 return -EINVAL;
1228         }
1229 #endif
1230
1231         /* Run through the SGL and compose the fragmented skb */
1232         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1233                 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1234
1235                 /* FW gives the indices of the SGE as if the ring is an array
1236                    (meaning that "next" element will consume 2 indices) */
1237                 frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE));
1238                 rx_pg = &fp->rx_page_ring[sge_idx];
1239                 sge = rx_pg->page;
1240                 old_rx_pg = *rx_pg;
1241
1242                 /* If we fail to allocate a substitute page, we simply stop
1243                    where we are and drop the whole packet */
1244                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1245                 if (unlikely(err)) {
1246                         bp->eth_stats.rx_skb_alloc_failed++;
1247                         return err;
1248                 }
1249
1250                 /* Unmap the page as we are going to pass it to the stack */
1251                 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1252                               BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1253
1254                 /* Add one frag and update the appropriate fields in the skb */
1255                 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1256
1257                 skb->data_len += frag_len;
1258                 skb->truesize += frag_len;
1259                 skb->len += frag_len;
1260
1261                 frag_size -= frag_len;
1262         }
1263
1264         return 0;
1265 }
1266
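/* complete a TPA aggregation: fix the IP checksum, attach the SGE frags and
 * pass the skb to the stack, or drop the packet if a new skb or the frag
 * pages cannot be allocated
 */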
1267 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1268                            u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1269                            u16 cqe_idx)
1270 {
1271         struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1272         struct sk_buff *skb = rx_buf->skb;
1273         /* alloc new skb */
1274         struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1275
1276         /* Unmap skb in the pool anyway, as we are going to change
1277            pool entry status to BNX2X_TPA_STOP even if new skb allocation
1278            fails. */
1279         pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1280                          bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
1281
1282         if (likely(new_skb)) {
1283                 /* fix ip xsum and give it to the stack */
1284                 /* (no need to map the new skb) */
1285
1286                 prefetch(skb);
1287                 prefetch(((char *)(skb)) + 128);
1288
1289 #ifdef BNX2X_STOP_ON_ERROR
1290                 if (pad + len > bp->rx_buf_size) {
1291                         BNX2X_ERR("skb_put is about to fail...  "
1292                                   "pad %d  len %d  rx_buf_size %d\n",
1293                                   pad, len, bp->rx_buf_size);
1294                         bnx2x_panic();
1295                         return;
1296                 }
1297 #endif
1298
1299                 skb_reserve(skb, pad);
1300                 skb_put(skb, len);
1301
1302                 skb->protocol = eth_type_trans(skb, bp->dev);
1303                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1304
1305                 {
1306                         struct iphdr *iph;
1307
1308                         iph = (struct iphdr *)skb->data;
1309                         iph->check = 0;
1310                         iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1311                 }
1312
1313                 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1314                                          &cqe->fast_path_cqe, cqe_idx)) {
1315 #ifdef BCM_VLAN
1316                         if ((bp->vlgrp != NULL) &&
1317                             (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1318                              PARSING_FLAGS_VLAN))
1319                                 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1320                                                 le16_to_cpu(cqe->fast_path_cqe.
1321                                                             vlan_tag));
1322                         else
1323 #endif
1324                                 netif_receive_skb(skb);
1325                 } else {
1326                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1327                            " - dropping packet!\n");
1328                         dev_kfree_skb(skb);
1329                 }
1330
1331                 bp->dev->last_rx = jiffies;
1332
1333                 /* put new skb in bin */
1334                 fp->tpa_pool[queue].skb = new_skb;
1335
1336         } else {
1337                 /* else drop the packet and keep the buffer in the bin */
1338                 DP(NETIF_MSG_RX_STATUS,
1339                    "Failed to allocate new skb - dropping packet!\n");
1340                 bp->eth_stats.rx_skb_alloc_failed++;
1341         }
1342
1343         fp->tpa_state[queue] = BNX2X_TPA_STOP;
1344 }
1345
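/* publish the new RX BD, CQE and SGE producers to the TSTORM */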
1346 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1347                                         struct bnx2x_fastpath *fp,
1348                                         u16 bd_prod, u16 rx_comp_prod,
1349                                         u16 rx_sge_prod)
1350 {
1351         struct tstorm_eth_rx_producers rx_prods = {0};
1352         int i;
1353
1354         /* Update producers */
1355         rx_prods.bd_prod = bd_prod;
1356         rx_prods.cqe_prod = rx_comp_prod;
1357         rx_prods.sge_prod = rx_sge_prod;
1358
1359         for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
1360                 REG_WR(bp, BAR_TSTRORM_INTMEM +
1361                        TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
1362                        ((u32 *)&rx_prods)[i]);
1363
1364         DP(NETIF_MSG_RX_STATUS,
1365            "Wrote: bd_prod %u  cqe_prod %u  sge_prod %u\n",
1366            bd_prod, rx_comp_prod, rx_sge_prod);
1367 }
1368
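/* RX completion handler: walk the RCQ up to the index reported in the status
 * block, dispatching slowpath events and TPA start/stop, within the budget
 */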
1369 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1370 {
1371         struct bnx2x *bp = fp->bp;
1372         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1373         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1374         int rx_pkt = 0;
1375
1376 #ifdef BNX2X_STOP_ON_ERROR
1377         if (unlikely(bp->panic))
1378                 return 0;
1379 #endif
1380
1381         /* CQ "next element" is of the same size as a regular element,
1382            that's why it's ok here */
1383         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1384         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1385                 hw_comp_cons++;
1386
1387         bd_cons = fp->rx_bd_cons;
1388         bd_prod = fp->rx_bd_prod;
1389         bd_prod_fw = bd_prod;
1390         sw_comp_cons = fp->rx_comp_cons;
1391         sw_comp_prod = fp->rx_comp_prod;
1392
1393         /* Memory barrier necessary as speculative reads of the rx
1394          * buffer can be ahead of the index in the status block
1395          */
1396         rmb();
1397
1398         DP(NETIF_MSG_RX_STATUS,
1399            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1400            FP_IDX(fp), hw_comp_cons, sw_comp_cons);
1401
1402         while (sw_comp_cons != hw_comp_cons) {
1403                 struct sw_rx_bd *rx_buf = NULL;
1404                 struct sk_buff *skb;
1405                 union eth_rx_cqe *cqe;
1406                 u8 cqe_fp_flags;
1407                 u16 len, pad;
1408
1409                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1410                 bd_prod = RX_BD(bd_prod);
1411                 bd_cons = RX_BD(bd_cons);
1412
1413                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1414                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1415
1416                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1417                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1418                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1419                    cqe->fast_path_cqe.rss_hash_result,
1420                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1421                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1422
1423                 /* is this a slowpath msg? */
1424                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1425                         bnx2x_sp_event(fp, cqe);
1426                         goto next_cqe;
1427
1428                 /* this is an rx packet */
1429                 } else {
1430                         rx_buf = &fp->rx_buf_ring[bd_cons];
1431                         skb = rx_buf->skb;
1432                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1433                         pad = cqe->fast_path_cqe.placement_offset;
1434
1435                         /* If CQE is marked both TPA_START and TPA_END
1436                            it is a non-TPA CQE */
1437                         if ((!fp->disable_tpa) &&
1438                             (TPA_TYPE(cqe_fp_flags) !=
1439                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1440                                 u16 queue = cqe->fast_path_cqe.queue_index;
1441
1442                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1443                                         DP(NETIF_MSG_RX_STATUS,
1444                                            "calling tpa_start on queue %d\n",
1445                                            queue);
1446
1447                                         bnx2x_tpa_start(fp, queue, skb,
1448                                                         bd_cons, bd_prod);
1449                                         goto next_rx;
1450                                 }
1451
1452                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1453                                         DP(NETIF_MSG_RX_STATUS,
1454                                            "calling tpa_stop on queue %d\n",
1455                                            queue);
1456
1457                                         if (!BNX2X_RX_SUM_FIX(cqe))
1458                                                 BNX2X_ERR("STOP on non-TCP "
1459                                                           "data\n");
1460
1461                                         /* This is the size of the linear data
1462                                            on this skb */
1463                                         len = le16_to_cpu(cqe->fast_path_cqe.
1464                                                                 len_on_bd);
1465                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1466                                                     len, cqe, comp_ring_cons);
1467 #ifdef BNX2X_STOP_ON_ERROR
1468                                         if (bp->panic)
1469                                                 return -EINVAL;
1470 #endif
1471
1472                                         bnx2x_update_sge_prod(fp,
1473                                                         &cqe->fast_path_cqe);
1474                                         goto next_cqe;
1475                                 }
1476                         }
1477
1478                         pci_dma_sync_single_for_device(bp->pdev,
1479                                         pci_unmap_addr(rx_buf, mapping),
1480                                                        pad + RX_COPY_THRESH,
1481                                                        PCI_DMA_FROMDEVICE);
1482                         prefetch(skb);
1483                         prefetch(((char *)(skb)) + 128);
1484
1485                         /* is this an error packet? */
1486                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1487                                 DP(NETIF_MSG_RX_ERR,
1488                                    "ERROR  flags %x  rx packet %u\n",
1489                                    cqe_fp_flags, sw_comp_cons);
1490                                 bp->eth_stats.rx_err_discard_pkt++;
1491                                 goto reuse_rx;
1492                         }
1493
1494                         /* Since we don't have a jumbo ring,
1495                          * copy small packets if mtu > 1500
1496                          */
1497                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1498                             (len <= RX_COPY_THRESH)) {
1499                                 struct sk_buff *new_skb;
1500
1501                                 new_skb = netdev_alloc_skb(bp->dev,
1502                                                            len + pad);
1503                                 if (new_skb == NULL) {
1504                                         DP(NETIF_MSG_RX_ERR,
1505                                            "ERROR  packet dropped "
1506                                            "because of alloc failure\n");
1507                                         bp->eth_stats.rx_skb_alloc_failed++;
1508                                         goto reuse_rx;
1509                                 }
1510
1511                                 /* aligned copy */
1512                                 skb_copy_from_linear_data_offset(skb, pad,
1513                                                     new_skb->data + pad, len);
1514                                 skb_reserve(new_skb, pad);
1515                                 skb_put(new_skb, len);
1516
1517                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1518
1519                                 skb = new_skb;
1520
1521                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1522                                 pci_unmap_single(bp->pdev,
1523                                         pci_unmap_addr(rx_buf, mapping),
1524                                                  bp->rx_buf_use_size,
1525                                                  PCI_DMA_FROMDEVICE);
1526                                 skb_reserve(skb, pad);
1527                                 skb_put(skb, len);
1528
1529                         } else {
1530                                 DP(NETIF_MSG_RX_ERR,
1531                                    "ERROR  packet dropped because "
1532                                    "of alloc failure\n");
1533                                 bp->eth_stats.rx_skb_alloc_failed++;
1534 reuse_rx:
1535                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1536                                 goto next_rx;
1537                         }
1538
1539                         skb->protocol = eth_type_trans(skb, bp->dev);
1540
1541                         skb->ip_summed = CHECKSUM_NONE;
1542                         if (bp->rx_csum) {
1543                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1544                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1545                                 else
1546                                         bp->eth_stats.hw_csum_err++;
1547                         }
1548                 }
1549
1550 #ifdef BCM_VLAN
1551                 if ((bp->vlgrp != NULL) &&
1552                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1553                      PARSING_FLAGS_VLAN))
1554                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1555                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1556                 else
1557 #endif
1558                         netif_receive_skb(skb);
1559
1560                 bp->dev->last_rx = jiffies;
1561
1562 next_rx:
1563                 rx_buf->skb = NULL;
1564
1565                 bd_cons = NEXT_RX_IDX(bd_cons);
1566                 bd_prod = NEXT_RX_IDX(bd_prod);
1567                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1568                 rx_pkt++;
1569 next_cqe:
1570                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1571                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1572
1573                 if (rx_pkt == budget)
1574                         break;
1575         } /* while */
1576
1577         fp->rx_bd_cons = bd_cons;
1578         fp->rx_bd_prod = bd_prod_fw;
1579         fp->rx_comp_cons = sw_comp_cons;
1580         fp->rx_comp_prod = sw_comp_prod;
1581
1582         /* Update producers */
1583         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1584                              fp->rx_sge_prod);
1585         mmiowb(); /* keep prod updates ordered */
1586
1587         fp->rx_pkt += rx_pkt;
1588         fp->rx_calls++;
1589
1590         return rx_pkt;
1591 }
1592
1593 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1594 {
1595         struct bnx2x_fastpath *fp = fp_cookie;
1596         struct bnx2x *bp = fp->bp;
1597         struct net_device *dev = bp->dev;
1598         int index = FP_IDX(fp);
1599
1600         /* Return here if interrupt is disabled */
1601         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1602                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1603                 return IRQ_HANDLED;
1604         }
1605
1606         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1607            index, FP_SB_ID(fp));
1608         bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
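        /* The SB was acked with IGU_INT_DISABLE, so further interrupts for
           this SB stay masked until re-enabled (this is expected to happen
           from the NAPI poll routine once the pending work is processed) */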
1609
1610 #ifdef BNX2X_STOP_ON_ERROR
1611         if (unlikely(bp->panic))
1612                 return IRQ_HANDLED;
1613 #endif
1614
1615         prefetch(fp->rx_cons_sb);
1616         prefetch(fp->tx_cons_sb);
1617         prefetch(&fp->status_blk->c_status_block.status_block_index);
1618         prefetch(&fp->status_blk->u_status_block.status_block_index);
1619
1620         netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));
1621
1622         return IRQ_HANDLED;
1623 }
1624
1625 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1626 {
1627         struct net_device *dev = dev_instance;
1628         struct bnx2x *bp = netdev_priv(dev);
1629         u16 status = bnx2x_ack_int(bp);
1630         u16 mask;
1631
1632         /* Return here if interrupt is shared and it's not for us */
1633         if (unlikely(status == 0)) {
1634                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1635                 return IRQ_NONE;
1636         }
1637         DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);
1638
1639         /* Return here if interrupt is disabled */
1640         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1641                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1642                 return IRQ_HANDLED;
1643         }
1644
1645 #ifdef BNX2X_STOP_ON_ERROR
1646         if (unlikely(bp->panic))
1647                 return IRQ_HANDLED;
1648 #endif
1649
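        /* Status bit 0 belongs to the default (slow path) status block; a
           fast path status block with id sb_id maps to bit (sb_id + 1),
           hence the 0x2 shifted by sb_id below */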
1650         mask = 0x2 << bp->fp[0].sb_id;
1651         if (status & mask) {
1652                 struct bnx2x_fastpath *fp = &bp->fp[0];
1653
1654                 prefetch(fp->rx_cons_sb);
1655                 prefetch(fp->tx_cons_sb);
1656                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1657                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1658
1659                 netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));
1660
1661                 status &= ~mask;
1662         }
1663
1664
1665         if (unlikely(status & 0x1)) {
1666                 schedule_work(&bp->sp_task);
1667
1668                 status &= ~0x1;
1669                 if (!status)
1670                         return IRQ_HANDLED;
1671         }
1672
1673         if (status)
1674                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1675                    status);
1676
1677         return IRQ_HANDLED;
1678 }
1679
1680 /* end of fast path */
1681
1682 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1683
1684 /* Link */
1685
1686 /*
1687  * General service functions
1688  */
1689
1690 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1691 {
1692         u32 lock_status;
1693         u32 resource_bit = (1 << resource);
1694         int func = BP_FUNC(bp);
1695         u32 hw_lock_control_reg;
1696         int cnt;
1697
1698         /* Validating that the resource is within range */
1699         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1700                 DP(NETIF_MSG_HW,
1701                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1702                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1703                 return -EINVAL;
1704         }
1705
1706         if (func <= 5) {
1707                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1708         } else {
1709                 hw_lock_control_reg =
1710                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1711         }
1712
1713         /* Validating that the resource is not already taken */
1714         lock_status = REG_RD(bp, hw_lock_control_reg);
1715         if (lock_status & resource_bit) {
1716                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1717                    lock_status, resource_bit);
1718                 return -EEXIST;
1719         }
1720
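        /* Request the lock by writing the resource bit to the register at
           hw_lock_control_reg + 4 (which appears to act as the set-request
           register); the lock is owned once the bit reads back as set in
           the control register itself */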
1721         /* Try for 1 second every 5ms */
1722         for (cnt = 0; cnt < 200; cnt++) {
1723                 /* Try to acquire the lock */
1724                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1725                 lock_status = REG_RD(bp, hw_lock_control_reg);
1726                 if (lock_status & resource_bit)
1727                         return 0;
1728
1729                 msleep(5);
1730         }
1731         DP(NETIF_MSG_HW, "Timeout\n");
1732         return -EAGAIN;
1733 }
1734
1735 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1736 {
1737         u32 lock_status;
1738         u32 resource_bit = (1 << resource);
1739         int func = BP_FUNC(bp);
1740         u32 hw_lock_control_reg;
1741
1742         /* Validating that the resource is within range */
1743         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1744                 DP(NETIF_MSG_HW,
1745                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1746                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1747                 return -EINVAL;
1748         }
1749
1750         if (func <= 5) {
1751                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1752         } else {
1753                 hw_lock_control_reg =
1754                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1755         }
1756
1757         /* Validating that the resource is currently taken */
1758         lock_status = REG_RD(bp, hw_lock_control_reg);
1759         if (!(lock_status & resource_bit)) {
1760                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1761                    lock_status, resource_bit);
1762                 return -EFAULT;
1763         }
1764
1765         REG_WR(bp, hw_lock_control_reg, resource_bit);
1766         return 0;
1767 }
1768
1769 /* HW Lock for shared dual port PHYs */
1770 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1771 {
1772         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1773
1774         mutex_lock(&bp->port.phy_mutex);
1775
1776         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1777             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1778                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1779 }
1780
1781 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1782 {
1783         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1784
1785         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1786             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1787                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1788
1789         mutex_unlock(&bp->port.phy_mutex);
1790 }
1791
1792 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1793 {
1794         /* The GPIO should be swapped if swap register is set and active */
1795         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1796                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1797         int gpio_shift = gpio_num +
1798                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1799         u32 gpio_mask = (1 << gpio_shift);
1800         u32 gpio_reg;
1801
1802         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1803                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1804                 return -EINVAL;
1805         }
1806
1807         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1808         /* read GPIO and mask out everything except the float bits */
1809         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1810
1811         switch (mode) {
1812         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1813                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1814                    gpio_num, gpio_shift);
1815                 /* clear FLOAT and set CLR */
1816                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1817                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1818                 break;
1819
1820         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1821                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1822                    gpio_num, gpio_shift);
1823                 /* clear FLOAT and set SET */
1824                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1825                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1826                 break;
1827
1828         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1829                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1830                    gpio_num, gpio_shift);
1831                 /* set FLOAT */
1832                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1833                 break;
1834
1835         default:
1836                 break;
1837         }
1838
1839         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1840         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1841
1842         return 0;
1843 }
1844
1845 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1846 {
1847         u32 spio_mask = (1 << spio_num);
1848         u32 spio_reg;
1849
1850         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1851             (spio_num > MISC_REGISTERS_SPIO_7)) {
1852                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1853                 return -EINVAL;
1854         }
1855
1856         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1857         /* read SPIO and mask out everything except the float bits */
1858         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1859
1860         switch (mode) {
1861         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1862                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1863                 /* clear FLOAT and set CLR */
1864                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1865                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1866                 break;
1867
1868         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1869                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1870                 /* clear FLOAT and set SET */
1871                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1872                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1873                 break;
1874
1875         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1876                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1877                 /* set FLOAT */
1878                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1879                 break;
1880
1881         default:
1882                 break;
1883         }
1884
1885         REG_WR(bp, MISC_REG_SPIO, spio_reg);
1886         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1887
1888         return 0;
1889 }
1890
1891 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
1892 {
1893         switch (bp->link_vars.ieee_fc) {
1894         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1895                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1896                                           ADVERTISED_Pause);
1897                 break;
1898         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1899                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
1900                                          ADVERTISED_Pause);
1901                 break;
1902         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1903                 bp->port.advertising |= ADVERTISED_Asym_Pause;
1904                 break;
1905         default:
1906                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1907                                           ADVERTISED_Pause);
1908                 break;
1909         }
1910 }
1911
1912 static void bnx2x_link_report(struct bnx2x *bp)
1913 {
1914         if (bp->link_vars.link_up) {
1915                 if (bp->state == BNX2X_STATE_OPEN)
1916                         netif_carrier_on(bp->dev);
1917                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
1918
1919                 printk("%d Mbps ", bp->link_vars.line_speed);
1920
1921                 if (bp->link_vars.duplex == DUPLEX_FULL)
1922                         printk("full duplex");
1923                 else
1924                         printk("half duplex");
1925
1926                 if (bp->link_vars.flow_ctrl != FLOW_CTRL_NONE) {
1927                         if (bp->link_vars.flow_ctrl & FLOW_CTRL_RX) {
1928                                 printk(", receive ");
1929                                 if (bp->link_vars.flow_ctrl & FLOW_CTRL_TX)
1930                                         printk("& transmit ");
1931                         } else {
1932                                 printk(", transmit ");
1933                         }
1934                         printk("flow control ON");
1935                 }
1936                 printk("\n");
1937
1938         } else { /* link_down */
1939                 netif_carrier_off(bp->dev);
1940                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
1941         }
1942 }
1943
1944 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1945 {
1946         if (!BP_NOMCP(bp)) {
1947                 u8 rc;
1948
1949                 /* Initialize link parameters structure variables */
1950                 /* It is recommended to turn off RX FC for jumbo frames
1951                    for better performance */
1952                 if (IS_E1HMF(bp))
1953                         bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;
1954                 else if (bp->dev->mtu > 5000)
1955                         bp->link_params.req_fc_auto_adv = FLOW_CTRL_TX;
1956                 else
1957                         bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;
1958
1959                 bnx2x_acquire_phy_lock(bp);
1960                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1961                 bnx2x_release_phy_lock(bp);
1962
1963                 if (bp->link_vars.link_up)
1964                         bnx2x_link_report(bp);
1965
1966                 bnx2x_calc_fc_adv(bp);
1967
1968                 return rc;
1969         }
1970         BNX2X_ERR("Bootcode is missing - not initializing link\n");
1971         return -EINVAL;
1972 }
1973
1974 static void bnx2x_link_set(struct bnx2x *bp)
1975 {
1976         if (!BP_NOMCP(bp)) {
1977                 bnx2x_acquire_phy_lock(bp);
1978                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1979                 bnx2x_release_phy_lock(bp);
1980
1981                 bnx2x_calc_fc_adv(bp);
1982         } else
1983                 BNX2X_ERR("Bootcode is missing - not setting link\n");
1984 }
1985
1986 static void bnx2x__link_reset(struct bnx2x *bp)
1987 {
1988         if (!BP_NOMCP(bp)) {
1989                 bnx2x_acquire_phy_lock(bp);
1990                 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
1991                 bnx2x_release_phy_lock(bp);
1992         } else
1993                 BNX2X_ERR("Bootcode is missing - not resetting link\n");
1994 }
1995
1996 static u8 bnx2x_link_test(struct bnx2x *bp)
1997 {
1998         u8 rc;
1999
2000         bnx2x_acquire_phy_lock(bp);
2001         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2002         bnx2x_release_phy_lock(bp);
2003
2004         return rc;
2005 }
2006
2007 /* Calculates the sum of vn_min_rates.
2008    It's needed for further normalizing of the min_rates.
2009
2010    Returns:
2011      sum of vn_min_rates
2012        or
2013      0 - if all the min_rates are 0.
2014      In the latter case the fairness algorithm should be deactivated.
2015      If not all min_rates are zero then those that are zero will
2016      be set to 1.
2017  */
2018 static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2019 {
2020         int i, port = BP_PORT(bp);
2021         u32 wsum = 0;
2022         int all_zero = 1;
2023
2024         for (i = 0; i < E1HVN_MAX; i++) {
2025                 u32 vn_cfg =
2026                         SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2027                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2028                                      FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2029                 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2030                         /* If min rate is zero - set it to 1 */
2031                         if (!vn_min_rate)
2032                                 vn_min_rate = DEF_MIN_RATE;
2033                         else
2034                                 all_zero = 0;
2035
2036                         wsum += vn_min_rate;
2037                 }
2038         }
2039
2040         /* ... only if all min rates are zeros - disable FAIRNESS */
2041         if (all_zero)
2042                 return 0;
2043
2044         return wsum;
2045 }
2046
2047 static void bnx2x_init_port_minmax(struct bnx2x *bp,
2048                                    int en_fness,
2049                                    u16 port_rate,
2050                                    struct cmng_struct_per_port *m_cmng_port)
2051 {
2052         u32 r_param = port_rate / 8;
2053         int port = BP_PORT(bp);
2054         int i;
2055
2056         memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2057
2058         /* Enable minmax only if we are in e1hmf mode */
2059         if (IS_E1HMF(bp)) {
2060                 u32 fair_periodic_timeout_usec;
2061                 u32 t_fair;
2062
2063                 /* Enable rate shaping and fairness */
2064                 m_cmng_port->flags.cmng_vn_enable = 1;
2065                 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2066                 m_cmng_port->flags.rate_shaping_enable = 1;
2067
2068                 if (!en_fness)
2069                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2070                            "  fairness will be disabled\n");
2071
2072                 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2073                 m_cmng_port->rs_vars.rs_periodic_timeout =
2074                                                 RS_PERIODIC_TIMEOUT_USEC / 4;
2075
2076                 /* this is the threshold below which no timer arming will occur;
2077                    the 1.25 coefficient makes the threshold a little bigger
2078                    than the real time, to compensate for timer inaccuracy */
2079                 m_cmng_port->rs_vars.rs_threshold =
2080                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
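                /* e.g. with RS_PERIODIC_TIMEOUT_USEC = 100 (see the SDM ticks
                   comment above) this evaluates to 1.25 * 100 * r_param,
                   i.e. 125 * r_param */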
2081
2082                 /* resolution of fairness timer */
2083                 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2084                 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2085                 t_fair = T_FAIR_COEF / port_rate;
2086
2087                 /* this is the threshold below which we won't arm
2088                    the timer anymore */
2089                 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2090
2091                 /* we multiply by 1e3/8 to get bytes/msec.
2092                    We don't want the credits to exceed a credit
2093                    of T_FAIR*FAIR_MEM (the algorithm resolution) */
2094                 m_cmng_port->fair_vars.upper_bound =
2095                                                 r_param * t_fair * FAIR_MEM;
2096                 /* since each tick is 4 usec */
2097                 m_cmng_port->fair_vars.fairness_timeout =
2098                                                 fair_periodic_timeout_usec / 4;
2099
2100         } else {
2101                 /* Disable rate shaping and fairness */
2102                 m_cmng_port->flags.cmng_vn_enable = 0;
2103                 m_cmng_port->flags.fairness_enable = 0;
2104                 m_cmng_port->flags.rate_shaping_enable = 0;
2105
2106                 DP(NETIF_MSG_IFUP,
2107                    "Single function mode  minmax will be disabled\n");
2108         }
2109
2110         /* Store it to internal memory */
2111         for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2112                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2113                        XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2114                        ((u32 *)(m_cmng_port))[i]);
2115 }
2116
2117 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2118                                    u32 wsum, u16 port_rate,
2119                                  struct cmng_struct_per_port *m_cmng_port)
2120 {
2121         struct rate_shaping_vars_per_vn m_rs_vn;
2122         struct fairness_vars_per_vn m_fair_vn;
2123         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2124         u16 vn_min_rate, vn_max_rate;
2125         int i;
2126
2127         /* If function is hidden - set min and max to zeroes */
2128         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2129                 vn_min_rate = 0;
2130                 vn_max_rate = 0;
2131
2132         } else {
2133                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2134                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2135                 /* If FAIRNESS is enabled (not all min rates are zeroes) and
2136                    if current min rate is zero - set it to 1.
2137                    This is a requirement of the algorithm. */
2138                 if ((vn_min_rate == 0) && wsum)
2139                         vn_min_rate = DEF_MIN_RATE;
2140                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2141                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2142         }
2143
2144         DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d  vn_max_rate=%d  "
2145            "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2146
2147         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2148         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2149
2150         /* global vn counter - maximal Mbps for this vn */
2151         m_rs_vn.vn_counter.rate = vn_max_rate;
2152
2153         /* quota - number of bytes transmitted in this period */
2154         m_rs_vn.vn_counter.quota =
2155                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2156
2157 #ifdef BNX2X_PER_PROT_QOS
2158         /* per protocol counter */
2159         for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2160                 /* maximal Mbps for this protocol */
2161                 m_rs_vn.protocol_counters[protocol].rate =
2162                                                 protocol_max_rate[protocol];
2163                 /* the quota in each timer period -
2164                    number of bytes transmitted in this period */
2165                 m_rs_vn.protocol_counters[protocol].quota =
2166                         (u32)(rs_periodic_timeout_usec *
2167                           ((double)m_rs_vn.
2168                                    protocol_counters[protocol].rate/8));
2169         }
2170 #endif
2171
2172         if (wsum) {
2173                 /* credit for each period of the fairness algorithm:
2174                    number of bytes in T_FAIR (the vns share the port rate).
2175                    wsum should not be larger than 10000, thus
2176                    T_FAIR_COEF / (8 * wsum) will always be greater than zero */
2177                 m_fair_vn.vn_credit_delta =
2178                         max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2179                             (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2180                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2181                    m_fair_vn.vn_credit_delta);
2182         }
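        /* Worked example (assuming T_FAIR_COEF is 10^7, consistent with the
           10G/1G t_fair comment in bnx2x_init_port_minmax): with two vns at
           min rates 3000 and 7000, wsum = 10000 and T_FAIR_COEF / (8 * wsum)
           = 125, so the vn with min rate 3000 gets a credit delta of at
           least 375000 */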
2183
2184 #ifdef BNX2X_PER_PROT_QOS
2185         do {
2186                 u32 protocolWeightSum = 0;
2187
2188                 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2189                         protocolWeightSum +=
2190                                         drvInit.protocol_min_rate[protocol];
2191                 /* per protocol counter -
2192                    NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2193                 if (protocolWeightSum > 0) {
2194                         for (protocol = 0;
2195                              protocol < NUM_OF_PROTOCOLS; protocol++)
2196                                 /* credit for each period of the
2197                                    fairness algorithm - number of bytes in
2198                                    T_FAIR (the protocols share the vn rate) */
2199                                 m_fair_vn.protocol_credit_delta[protocol] =
2200                                         (u32)((vn_min_rate / 8) * t_fair *
2201                                         protocol_min_rate / protocolWeightSum);
2202                 }
2203         } while (0);
2204 #endif
2205
2206         /* Store it to internal memory */
2207         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2208                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2209                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2210                        ((u32 *)(&m_rs_vn))[i]);
2211
2212         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2213                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2214                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2215                        ((u32 *)(&m_fair_vn))[i]);
2216 }
2217
2218 /* This function is called upon link interrupt */
2219 static void bnx2x_link_attn(struct bnx2x *bp)
2220 {
2221         int vn;
2222
2223         /* Make sure that we are synced with the current statistics */
2224         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2225
2226         bnx2x_acquire_phy_lock(bp);
2227         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2228         bnx2x_release_phy_lock(bp);
2229
2230         if (bp->link_vars.link_up) {
2231
2232                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2233                         struct host_port_stats *pstats;
2234
2235                         pstats = bnx2x_sp(bp, port_stats);
2236                         /* reset old bmac stats */
2237                         memset(&(pstats->mac_stx[0]), 0,
2238                                sizeof(struct mac_stx));
2239                 }
2240                 if ((bp->state == BNX2X_STATE_OPEN) ||
2241                     (bp->state == BNX2X_STATE_DISABLED))
2242                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2243         }
2244
2245         /* indicate link status */
2246         bnx2x_link_report(bp);
2247
2248         if (IS_E1HMF(bp)) {
2249                 int func;
2250
2251                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2252                         if (vn == BP_E1HVN(bp))
2253                                 continue;
2254
2255                         func = ((vn << 1) | BP_PORT(bp));
2256
2257                         /* Set the attention towards other drivers
2258                            on the same port */
2259                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2260                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2261                 }
2262         }
2263
2264         if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2265                 struct cmng_struct_per_port m_cmng_port;
2266                 u32 wsum;
2267                 int port = BP_PORT(bp);
2268
2269                 /* Init RATE SHAPING and FAIRNESS contexts */
2270                 wsum = bnx2x_calc_vn_wsum(bp);
2271                 bnx2x_init_port_minmax(bp, (int)wsum,
2272                                         bp->link_vars.line_speed,
2273                                         &m_cmng_port);
2274                 if (IS_E1HMF(bp))
2275                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2276                                 bnx2x_init_vn_minmax(bp, 2*vn + port,
2277                                         wsum, bp->link_vars.line_speed,
2278                                                      &m_cmng_port);
2279         }
2280 }
2281
2282 static void bnx2x__link_status_update(struct bnx2x *bp)
2283 {
2284         if (bp->state != BNX2X_STATE_OPEN)
2285                 return;
2286
2287         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2288
2289         if (bp->link_vars.link_up)
2290                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2291         else
2292                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2293
2294         /* indicate link status */
2295         bnx2x_link_report(bp);
2296 }
2297
2298 static void bnx2x_pmf_update(struct bnx2x *bp)
2299 {
2300         int port = BP_PORT(bp);
2301         u32 val;
2302
2303         bp->port.pmf = 1;
2304         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2305
2306         /* enable nig attention */
2307         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2308         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2309         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2310
2311         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2312 }
2313
2314 /* end of Link */
2315
2316 /* slow path */
2317
2318 /*
2319  * General service functions
2320  */
2321
2322 /* the slow path queue is odd since completions arrive on the fastpath ring */
2323 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2324                          u32 data_hi, u32 data_lo, int common)
2325 {
2326         int func = BP_FUNC(bp);
2327
2328         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2329            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2330            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2331            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2332            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2333
2334 #ifdef BNX2X_STOP_ON_ERROR
2335         if (unlikely(bp->panic))
2336                 return -EIO;
2337 #endif
2338
2339         spin_lock_bh(&bp->spq_lock);
2340
2341         if (!bp->spq_left) {
2342                 BNX2X_ERR("BUG! SPQ ring full!\n");
2343                 spin_unlock_bh(&bp->spq_lock);
2344                 bnx2x_panic();
2345                 return -EBUSY;
2346         }
2347
2348         /* CID needs port number to be encoded in it */
2349         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2350                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2351                                      HW_CID(bp, cid)));
2352         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2353         if (common)
2354                 bp->spq_prod_bd->hdr.type |=
2355                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2356
2357         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2358         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2359
2360         bp->spq_left--;
2361
2362         if (bp->spq_prod_bd == bp->spq_last_bd) {
2363                 bp->spq_prod_bd = bp->spq;
2364                 bp->spq_prod_idx = 0;
2365                 DP(NETIF_MSG_TIMER, "end of spq\n");
2366
2367         } else {
2368                 bp->spq_prod_bd++;
2369                 bp->spq_prod_idx++;
2370         }
2371
2372         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2373                bp->spq_prod_idx);
2374
2375         spin_unlock_bh(&bp->spq_lock);
2376         return 0;
2377 }
2378
2379 /* acquire split MCP access lock register */
2380 static int bnx2x_acquire_alr(struct bnx2x *bp)
2381 {
2382         u32 i, j, val;
2383         int rc = 0;
2384
2385         might_sleep();
2386         i = 100;
2387         for (j = 0; j < i*10; j++) {
2388                 val = (1UL << 31);
2389                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2390                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2391                 if (val & (1L << 31))
2392                         break;
2393
2394                 msleep(5);
2395         }
2396         if (!(val & (1L << 31))) {
2397                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2398                 rc = -EBUSY;
2399         }
2400
2401         return rc;
2402 }
2403
2404 /* release split MCP access lock register */
2405 static void bnx2x_release_alr(struct bnx2x *bp)
2406 {
2407         u32 val = 0;
2408
2409         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2410 }
2411
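/* Returns a bitmask of which default status block indices have advanced:
   bit 0 - attention bits, bit 1 - CStorm, bit 2 - UStorm, bit 3 - XStorm,
   bit 4 - TStorm */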
2412 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2413 {
2414         struct host_def_status_block *def_sb = bp->def_status_blk;
2415         u16 rc = 0;
2416
2417         barrier(); /* status block is written to by the chip */
2418         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2419                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2420                 rc |= 1;
2421         }
2422         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2423                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2424                 rc |= 2;
2425         }
2426         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2427                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2428                 rc |= 4;
2429         }
2430         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2431                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2432                 rc |= 8;
2433         }
2434         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2435                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2436                 rc |= 16;
2437         }
2438         return rc;
2439 }
2440
2441 /*
2442  * slow path service functions
2443  */
2444
2445 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2446 {
2447         int port = BP_PORT(bp);
2448         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2449                        COMMAND_REG_ATTN_BITS_SET);
2450         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2451                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2452         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2453                                        NIG_REG_MASK_INTERRUPT_PORT0;
2454         u32 aeu_mask;
2455
2456         if (bp->attn_state & asserted)
2457                 BNX2X_ERR("IGU ERROR\n");
2458
2459         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2460         aeu_mask = REG_RD(bp, aeu_addr);
2461
2462         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2463            aeu_mask, asserted);
2464         aeu_mask &= ~(asserted & 0xff);
2465         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2466
2467         REG_WR(bp, aeu_addr, aeu_mask);
2468         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2469
2470         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2471         bp->attn_state |= asserted;
2472         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2473
2474         if (asserted & ATTN_HARD_WIRED_MASK) {
2475                 if (asserted & ATTN_NIG_FOR_FUNC) {
2476
2477                         /* save nig interrupt mask */
2478                         bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2479                         REG_WR(bp, nig_int_mask_addr, 0);
2480
2481                         bnx2x_link_attn(bp);
2482
2483                         /* handle unicore attn? */
2484                 }
2485                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2486                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2487
2488                 if (asserted & GPIO_2_FUNC)
2489                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2490
2491                 if (asserted & GPIO_3_FUNC)
2492                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2493
2494                 if (asserted & GPIO_4_FUNC)
2495                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2496
2497                 if (port == 0) {
2498                         if (asserted & ATTN_GENERAL_ATTN_1) {
2499                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2500                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2501                         }
2502                         if (asserted & ATTN_GENERAL_ATTN_2) {
2503                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2504                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2505                         }
2506                         if (asserted & ATTN_GENERAL_ATTN_3) {
2507                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2508                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2509                         }
2510                 } else {
2511                         if (asserted & ATTN_GENERAL_ATTN_4) {
2512                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2513                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2514                         }
2515                         if (asserted & ATTN_GENERAL_ATTN_5) {
2516                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2517                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2518                         }
2519                         if (asserted & ATTN_GENERAL_ATTN_6) {
2520                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2521                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2522                         }
2523                 }
2524
2525         } /* if hardwired */
2526
2527         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2528            asserted, hc_addr);
2529         REG_WR(bp, hc_addr, asserted);
2530
2531         /* now set back the mask */
2532         if (asserted & ATTN_NIG_FOR_FUNC)
2533                 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2534 }
2535
2536 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2537 {
2538         int port = BP_PORT(bp);
2539         int reg_offset;
2540         u32 val;
2541
2542         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2543                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2544
2545         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2546
2547                 val = REG_RD(bp, reg_offset);
2548                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2549                 REG_WR(bp, reg_offset, val);
2550
2551                 BNX2X_ERR("SPIO5 hw attention\n");
2552
2553                 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2554                 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2555                         /* Fan failure attention */
2556
2557                         /* The PHY reset is controlled by GPIO 1 */
2558                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2559                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2560                         /* Low power mode is controlled by GPIO 2 */
2561                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2562                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2563                         /* mark the failure */
2564                         bp->link_params.ext_phy_config &=
2565                                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2566                         bp->link_params.ext_phy_config |=
2567                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2568                         SHMEM_WR(bp,
2569                                  dev_info.port_hw_config[port].
2570                                                         external_phy_config,
2571                                  bp->link_params.ext_phy_config);
2572                         /* log the failure */
2573                         printk(KERN_ERR PFX "Fan Failure on Network"
2574                                " Controller %s has caused the driver to"
2575                                " shutdown the card to prevent permanent"
2576                                " damage.  Please contact Dell Support for"
2577                                " assistance\n", bp->dev->name);
2578                         break;
2579
2580                 default:
2581                         break;
2582                 }
2583         }
2584
2585         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2586
2587                 val = REG_RD(bp, reg_offset);
2588                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2589                 REG_WR(bp, reg_offset, val);
2590
2591                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2592                           (attn & HW_INTERRUT_ASSERT_SET_0));
2593                 bnx2x_panic();
2594         }
2595 }
2596
2597 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2598 {
2599         u32 val;
2600
2601         if (attn & BNX2X_DOORQ_ASSERT) {
2602
2603                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2604                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2605                 /* DORQ discard attention */
2606                 if (val & 0x2)
2607                         BNX2X_ERR("FATAL error from DORQ\n");
2608         }
2609
2610         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2611
2612                 int port = BP_PORT(bp);
2613                 int reg_offset;
2614
2615                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2616                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2617
2618                 val = REG_RD(bp, reg_offset);
2619                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2620                 REG_WR(bp, reg_offset, val);
2621
2622                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2623                           (attn & HW_INTERRUT_ASSERT_SET_1));
2624                 bnx2x_panic();
2625         }
2626 }
2627
2628 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2629 {
2630         u32 val;
2631
2632         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2633
2634                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2635                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2636                 /* CFC error attention */
2637                 if (val & 0x2)
2638                         BNX2X_ERR("FATAL error from CFC\n");
2639         }
2640
2641         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2642
2643                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2644                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2645                 /* RQ_USDMDP_FIFO_OVERFLOW */
2646                 if (val & 0x18000)
2647                         BNX2X_ERR("FATAL error from PXP\n");
2648         }
2649
2650         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2651
2652                 int port = BP_PORT(bp);
2653                 int reg_offset;
2654
2655                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2656                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2657
2658                 val = REG_RD(bp, reg_offset);
2659                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2660                 REG_WR(bp, reg_offset, val);
2661
2662                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2663                           (attn & HW_INTERRUT_ASSERT_SET_2));
2664                 bnx2x_panic();
2665         }
2666 }
2667
2668 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2669 {
2670         u32 val;
2671
2672         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2673
2674                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2675                         int func = BP_FUNC(bp);
2676
2677                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2678                         bnx2x__link_status_update(bp);
2679                         if (SHMEM_RD(bp, func_mb[func].drv_status) &
2680                                                         DRV_STATUS_PMF)
2681                                 bnx2x_pmf_update(bp);
2682
2683                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2684
2685                         BNX2X_ERR("MC assert!\n");
2686                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2687                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2688                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2689                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2690                         bnx2x_panic();
2691
2692                 } else if (attn & BNX2X_MCP_ASSERT) {
2693
2694                         BNX2X_ERR("MCP assert!\n");
2695                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2696                         bnx2x_fw_dump(bp);
2697
2698                 } else
2699                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2700         }
2701
2702         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2703                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2704                 if (attn & BNX2X_GRC_TIMEOUT) {
2705                         val = CHIP_IS_E1H(bp) ?
2706                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2707                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2708                 }
2709                 if (attn & BNX2X_GRC_RSV) {
2710                         val = CHIP_IS_E1H(bp) ?
2711                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2712                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2713                 }
2714                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2715         }
2716 }
2717
2718 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2719 {
2720         struct attn_route attn;
2721         struct attn_route group_mask;
2722         int port = BP_PORT(bp);
2723         int index;
2724         u32 reg_addr;
2725         u32 val;
2726         u32 aeu_mask;
2727
2728         /* need to take HW lock because MCP or other port might also
2729            try to handle this event */
2730         bnx2x_acquire_alr(bp);
2731
2732         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2733         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2734         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2735         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2736         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2737            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2738
2739         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2740                 if (deasserted & (1 << index)) {
2741                         group_mask = bp->attn_group[index];
2742
2743                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2744                            index, group_mask.sig[0], group_mask.sig[1],
2745                            group_mask.sig[2], group_mask.sig[3]);
2746
2747                         bnx2x_attn_int_deasserted3(bp,
2748                                         attn.sig[3] & group_mask.sig[3]);
2749                         bnx2x_attn_int_deasserted1(bp,
2750                                         attn.sig[1] & group_mask.sig[1]);
2751                         bnx2x_attn_int_deasserted2(bp,
2752                                         attn.sig[2] & group_mask.sig[2]);
2753                         bnx2x_attn_int_deasserted0(bp,
2754                                         attn.sig[0] & group_mask.sig[0]);
2755
2756                         if ((attn.sig[0] & group_mask.sig[0] &
2757                                                 HW_PRTY_ASSERT_SET_0) ||
2758                             (attn.sig[1] & group_mask.sig[1] &
2759                                                 HW_PRTY_ASSERT_SET_1) ||
2760                             (attn.sig[2] & group_mask.sig[2] &
2761                                                 HW_PRTY_ASSERT_SET_2))
2762                                BNX2X_ERR("FATAL HW block parity attention\n");
2763                 }
2764         }
2765
2766         bnx2x_release_alr(bp);
2767
2768         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2769
2770         val = ~deasserted;
2771         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2772            val, reg_addr);
2773         REG_WR(bp, reg_addr, val);
2774
2775         if (~bp->attn_state & deasserted)
2776                 BNX2X_ERR("IGU ERROR\n");
2777
2778         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2779                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2780
2781         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2782         aeu_mask = REG_RD(bp, reg_addr);
2783
2784         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2785            aeu_mask, deasserted);
2786         aeu_mask |= (deasserted & 0xff);
2787         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2788
2789         REG_WR(bp, reg_addr, aeu_mask);
2790         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2791
2792         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2793         bp->attn_state &= ~deasserted;
2794         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2795 }
2796
2797 static void bnx2x_attn_int(struct bnx2x *bp)
2798 {
2799         /* read local copy of bits */
2800         u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
2801         u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
2802         u32 attn_state = bp->attn_state;
2803
2804         /* look for changed bits */
2805         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2806         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
2807
2808         DP(NETIF_MSG_HW,
2809            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2810            attn_bits, attn_ack, asserted, deasserted);
2811
2812         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2813                 BNX2X_ERR("BAD attention state\n");
2814
2815         /* handle bits that were raised */
2816         if (asserted)
2817                 bnx2x_attn_int_asserted(bp, asserted);
2818
2819         if (deasserted)
2820                 bnx2x_attn_int_deasserted(bp, deasserted);
2821 }
2822
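/* Slow path work item: figure out which default status block indices have
 * advanced, handle HW attentions and statistics completions, then ack the
 * default status block and re-enable the slow path interrupt.
 */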
2823 static void bnx2x_sp_task(struct work_struct *work)
2824 {
2825         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
2826         u16 status;
2827
2828
2829         /* Return here if interrupt is disabled */
2830         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2831                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2832                 return;
2833         }
2834
2835         status = bnx2x_update_dsb_idx(bp);
2836 /*      if (status == 0)                                     */
2837 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2838
2839         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2840
2841         /* HW attentions */
2842         if (status & 0x1)
2843                 bnx2x_attn_int(bp);
2844
2845         /* CStorm events: query_stats, port delete ramrod */
2846         if (status & 0x2)
2847                 bp->stats_pending = 0;
2848
2849         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
2850                      IGU_INT_NOP, 1);
2851         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2852                      IGU_INT_NOP, 1);
2853         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2854                      IGU_INT_NOP, 1);
2855         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2856                      IGU_INT_NOP, 1);
2857         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2858                      IGU_INT_ENABLE, 1);
2859
2860 }
2861
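/* Slow path MSI-X handler: mask further slow path interrupts through the
 * IGU and defer the actual handling to the sp_task work item.
 */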
2862 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2863 {
2864         struct net_device *dev = dev_instance;
2865         struct bnx2x *bp = netdev_priv(dev);
2866
2867         /* Return here if interrupt is disabled */
2868         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2869                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2870                 return IRQ_HANDLED;
2871         }
2872
2873         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
2874
2875 #ifdef BNX2X_STOP_ON_ERROR
2876         if (unlikely(bp->panic))
2877                 return IRQ_HANDLED;
2878 #endif
2879
2880         schedule_work(&bp->sp_task);
2881
2882         return IRQ_HANDLED;
2883 }
2884
2885 /* end of slow path */
2886
2887 /* Statistics */
2888
2889 /****************************************************************************
2890 * Macros
2891 ****************************************************************************/
2892
2893 /* sum[hi:lo] += add[hi:lo] */
2894 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2895         do { \
2896                 s_lo += a_lo; \
2897                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2898         } while (0)
2899
2900 /* difference = minuend - subtrahend */
2901 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2902         do { \
2903                 if (m_lo < s_lo) { \
2904                         /* underflow */ \
2905                         d_hi = m_hi - s_hi; \
2906                         if (d_hi > 0) { \
2907                         /* we can 'borrow' 1 */ \
2908                                 d_hi--; \
2909                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2910                         } else { \
2911                         /* m_hi <= s_hi */ \
2912                                 d_hi = 0; \
2913                                 d_lo = 0; \
2914                         } \
2915                 } else { \
2916                         /* m_lo >= s_lo */ \
2917                         if (m_hi < s_hi) { \
2918                                 d_hi = 0; \
2919                                 d_lo = 0; \
2920                         } else { \
2921                         /* m_hi >= s_hi */ \
2922                                 d_hi = m_hi - s_hi; \
2923                                 d_lo = m_lo - s_lo; \
2924                         } \
2925                 } \
2926         } while (0)
2927
2928 #define UPDATE_STAT64(s, t) \
2929         do { \
2930                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2931                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2932                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2933                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2934                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2935                        pstats->mac_stx[1].t##_lo, diff.lo); \
2936         } while (0)
2937
2938 #define UPDATE_STAT64_NIG(s, t) \
2939         do { \
2940                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2941                         diff.lo, new->s##_lo, old->s##_lo); \
2942                 ADD_64(estats->t##_hi, diff.hi, \
2943                        estats->t##_lo, diff.lo); \
2944         } while (0)
2945
2946 /* sum[hi:lo] += add */
2947 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2948         do { \
2949                 s_lo += a; \
2950                 s_hi += (s_lo < a) ? 1 : 0; \
2951         } while (0)
2952
2953 #define UPDATE_EXTEND_STAT(s) \
2954         do { \
2955                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2956                               pstats->mac_stx[1].s##_lo, \
2957                               new->s); \
2958         } while (0)
2959
2960 #define UPDATE_EXTEND_TSTAT(s, t) \
2961         do { \
2962                 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2963                 old_tclient->s = le32_to_cpu(tclient->s); \
2964                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2965         } while (0)
2966
2967 #define UPDATE_EXTEND_XSTAT(s, t) \
2968         do { \
2969                 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2970                 old_xclient->s = le32_to_cpu(xclient->s); \
2971                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2972         } while (0)
2973
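/* Illustrative sketch (not part of the driver): the hardware reports each
 * statistics counter as a hi/lo pair of 32 bit words, and the macros above
 * maintain 64 bit software totals from them.  For example, ADD_64 carries
 * the overflow of the low word into the high word:
 *
 *	u32 s_hi = 0, s_lo = 0xfffffff0;
 *
 *	ADD_64(s_hi, 0, s_lo, 0x20);
 *	(s_lo wraps to 0x10, so s_hi is incremented to 1)
 */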
2974 /*
2975  * General service functions
2976  */
2977
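/* Return the full 64 bit value of a hi/lo statistics pair on 64 bit hosts,
 * or just the low 32 bits on 32 bit hosts.
 */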
2978 static inline long bnx2x_hilo(u32 *hiref)
2979 {
2980         u32 lo = *(hiref + 1);
2981 #if (BITS_PER_LONG == 64)
2982         u32 hi = *hiref;
2983
2984         return HILO_U64(hi, lo);
2985 #else
2986         return lo;
2987 #endif
2988 }
2989
2990 /*
2991  * Init service functions
2992  */
2993
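/* Post a statistics query ramrod to the storms unless one is still pending;
 * the query has its own slot on the slow path queue, so the credit taken by
 * bnx2x_sp_post() is given back.
 */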
2994 static void bnx2x_storm_stats_post(struct bnx2x *bp)
2995 {
2996         if (!bp->stats_pending) {
2997                 struct eth_query_ramrod_data ramrod_data = {0};
2998                 int rc;
2999
3000                 ramrod_data.drv_counter = bp->stats_counter++;
3001                 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
3002                 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3003
3004                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3005                                    ((u32 *)&ramrod_data)[1],
3006                                    ((u32 *)&ramrod_data)[0], 0);
3007                 if (rc == 0) {
3008                         /* stats ramrod has its own slot on the spq */
3009                         bp->spq_left++;
3010                         bp->stats_pending = 1;
3011                 }
3012         }
3013 }
3014
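/* Reset the statistics machinery: locate the port statistics area in shared
 * memory, take a baseline snapshot of the NIG counters and clear all
 * software statistics.
 */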
3015 static void bnx2x_stats_init(struct bnx2x *bp)
3016 {
3017         int port = BP_PORT(bp);
3018
3019         bp->executer_idx = 0;
3020         bp->stats_counter = 0;
3021
3022         /* port stats */
3023         if (!BP_NOMCP(bp))
3024                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3025         else
3026                 bp->port.port_stx = 0;
3027         DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3028
3029         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3030         bp->port.old_nig_stats.brb_discard =
3031                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3032         bp->port.old_nig_stats.brb_truncate =
3033                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3034         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3035                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3036         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3037                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3038
3039         /* function stats */
3040         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3041         memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3042         memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3043         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3044
3045         bp->stats_state = STATS_STATE_DISABLED;
3046         if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3047                 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3048 }
3049
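/* Kick the DMAE engine: if a chain of commands was prepared (PMF path),
 * post a loader command that feeds them to the engine; otherwise post the
 * single function statistics command directly.
 */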
3050 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3051 {
3052         struct dmae_command *dmae = &bp->stats_dmae;
3053         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3054
3055         *stats_comp = DMAE_COMP_VAL;
3056
3057         /* loader */
3058         if (bp->executer_idx) {
3059                 int loader_idx = PMF_DMAE_C(bp);
3060
3061                 memset(dmae, 0, sizeof(struct dmae_command));
3062
3063                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3064                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3065                                 DMAE_CMD_DST_RESET |
3066 #ifdef __BIG_ENDIAN
3067                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3068 #else
3069                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3070 #endif
3071                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3072                                                DMAE_CMD_PORT_0) |
3073                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3074                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3075                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3076                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3077                                      sizeof(struct dmae_command) *
3078                                      (loader_idx + 1)) >> 2;
3079                 dmae->dst_addr_hi = 0;
3080                 dmae->len = sizeof(struct dmae_command) >> 2;
3081                 if (CHIP_IS_E1(bp))
3082                         dmae->len--;
3083                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3084                 dmae->comp_addr_hi = 0;
3085                 dmae->comp_val = 1;
3086
3087                 *stats_comp = 0;
3088                 bnx2x_post_dmae(bp, dmae, loader_idx);
3089
3090         } else if (bp->func_stx) {
3091                 *stats_comp = 0;
3092                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3093         }
3094 }
3095
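/* Poll the completion word of the last DMAE command, giving up after
 * roughly 10ms.
 */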
3096 static int bnx2x_stats_comp(struct bnx2x *bp)
3097 {
3098         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3099         int cnt = 10;
3100
3101         might_sleep();
3102         while (*stats_comp != DMAE_COMP_VAL) {
3103                 if (!cnt) {
3104                         BNX2X_ERR("timed out waiting for stats to finish\n");
3105                         break;
3106                 }
3107                 cnt--;
3108                 msleep(1);
3109         }
3110         return 1;
3111 }
3112
3113 /*
3114  * Statistics service functions
3115  */
3116
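/* On becoming the PMF, read back the port statistics accumulated so far
 * from the shared memory area maintained by the previous PMF.
 */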
3117 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3118 {
3119         struct dmae_command *dmae;
3120         u32 opcode;
3121         int loader_idx = PMF_DMAE_C(bp);
3122         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3123
3124         /* sanity */
3125         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3126                 BNX2X_ERR("BUG!\n");
3127                 return;
3128         }
3129
3130         bp->executer_idx = 0;
3131
3132         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3133                   DMAE_CMD_C_ENABLE |
3134                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3135 #ifdef __BIG_ENDIAN
3136                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3137 #else
3138                   DMAE_CMD_ENDIANITY_DW_SWAP |
3139 #endif
3140                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3141                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3142
3143         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3144         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3145         dmae->src_addr_lo = bp->port.port_stx >> 2;
3146         dmae->src_addr_hi = 0;
3147         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3148         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3149         dmae->len = DMAE_LEN32_RD_MAX;
3150         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3151         dmae->comp_addr_hi = 0;
3152         dmae->comp_val = 1;
3153
3154         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3155         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3156         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3157         dmae->src_addr_hi = 0;
3158         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3159                                    DMAE_LEN32_RD_MAX * 4);
3160         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3161                                    DMAE_LEN32_RD_MAX * 4);
3162         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3163         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3164         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3165         dmae->comp_val = DMAE_COMP_VAL;
3166
3167         *stats_comp = 0;
3168         bnx2x_hw_stats_post(bp);
3169         bnx2x_stats_comp(bp);
3170 }
3171
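/* Build the PMF DMAE command chain: push the host port and function
 * statistics out to shared memory for the MCP, and pull the BMAC/EMAC and
 * NIG hardware counters into host memory.  The last command signals
 * completion through stats_comp.
 */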
3172 static void bnx2x_port_stats_init(struct bnx2x *bp)
3173 {
3174         struct dmae_command *dmae;
3175         int port = BP_PORT(bp);
3176         int vn = BP_E1HVN(bp);
3177         u32 opcode;
3178         int loader_idx = PMF_DMAE_C(bp);
3179         u32 mac_addr;
3180         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3181
3182         /* sanity */
3183         if (!bp->link_vars.link_up || !bp->port.pmf) {
3184                 BNX2X_ERR("BUG!\n");
3185                 return;
3186         }
3187
3188         bp->executer_idx = 0;
3189
3190         /* MCP */
3191         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3192                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3193                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3194 #ifdef __BIG_ENDIAN
3195                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3196 #else
3197                   DMAE_CMD_ENDIANITY_DW_SWAP |
3198 #endif
3199                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3200                   (vn << DMAE_CMD_E1HVN_SHIFT));
3201
3202         if (bp->port.port_stx) {
3203
3204                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3205                 dmae->opcode = opcode;
3206                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3207                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3208                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3209                 dmae->dst_addr_hi = 0;
3210                 dmae->len = sizeof(struct host_port_stats) >> 2;
3211                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3212                 dmae->comp_addr_hi = 0;
3213                 dmae->comp_val = 1;
3214         }
3215
3216         if (bp->func_stx) {
3217
3218                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3219                 dmae->opcode = opcode;
3220                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3221                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3222                 dmae->dst_addr_lo = bp->func_stx >> 2;
3223                 dmae->dst_addr_hi = 0;
3224                 dmae->len = sizeof(struct host_func_stats) >> 2;
3225                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3226                 dmae->comp_addr_hi = 0;
3227                 dmae->comp_val = 1;
3228         }
3229
3230         /* MAC */
3231         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3232                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3233                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3234 #ifdef __BIG_ENDIAN
3235                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3236 #else
3237                   DMAE_CMD_ENDIANITY_DW_SWAP |
3238 #endif
3239                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3240                   (vn << DMAE_CMD_E1HVN_SHIFT));
3241
3242         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3243
3244                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3245                                    NIG_REG_INGRESS_BMAC0_MEM);
3246
3247                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3248                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3249                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3250                 dmae->opcode = opcode;
3251                 dmae->src_addr_lo = (mac_addr +
3252                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3253                 dmae->src_addr_hi = 0;
3254                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3255                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3256                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3257                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3258                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3259                 dmae->comp_addr_hi = 0;
3260                 dmae->comp_val = 1;
3261
3262                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3263                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3264                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3265                 dmae->opcode = opcode;
3266                 dmae->src_addr_lo = (mac_addr +
3267                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3268                 dmae->src_addr_hi = 0;
3269                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3270                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3271                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3272                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3273                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3274                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3275                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3276                 dmae->comp_addr_hi = 0;
3277                 dmae->comp_val = 1;
3278
3279         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3280
3281                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3282
3283                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT) */
3284                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3285                 dmae->opcode = opcode;
3286                 dmae->src_addr_lo = (mac_addr +
3287                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3288                 dmae->src_addr_hi = 0;
3289                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3290                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3291                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3292                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3293                 dmae->comp_addr_hi = 0;
3294                 dmae->comp_val = 1;
3295
3296                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3297                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3298                 dmae->opcode = opcode;
3299                 dmae->src_addr_lo = (mac_addr +
3300                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3301                 dmae->src_addr_hi = 0;
3302                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3303                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3304                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3305                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3306                 dmae->len = 1;
3307                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3308                 dmae->comp_addr_hi = 0;
3309                 dmae->comp_val = 1;
3310
3311                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT) */
3312                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3313                 dmae->opcode = opcode;
3314                 dmae->src_addr_lo = (mac_addr +
3315                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3316                 dmae->src_addr_hi = 0;
3317                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3318                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3319                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3320                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3321                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3322                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3323                 dmae->comp_addr_hi = 0;
3324                 dmae->comp_val = 1;
3325         }
3326
3327         /* NIG */
3328         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3329         dmae->opcode = opcode;
3330         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3331                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3332         dmae->src_addr_hi = 0;
3333         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3334         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3335         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3336         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3337         dmae->comp_addr_hi = 0;
3338         dmae->comp_val = 1;
3339
3340         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3341         dmae->opcode = opcode;
3342         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3343                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3344         dmae->src_addr_hi = 0;
3345         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3346                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3347         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3348                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3349         dmae->len = (2*sizeof(u32)) >> 2;
3350         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3351         dmae->comp_addr_hi = 0;
3352         dmae->comp_val = 1;
3353
3354         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3355         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3356                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3357                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3358 #ifdef __BIG_ENDIAN
3359                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3360 #else
3361                         DMAE_CMD_ENDIANITY_DW_SWAP |
3362 #endif
3363                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3364                         (vn << DMAE_CMD_E1HVN_SHIFT));
3365         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3366                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3367         dmae->src_addr_hi = 0;
3368         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3369                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3370         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3371                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3372         dmae->len = (2*sizeof(u32)) >> 2;
3373         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3374         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3375         dmae->comp_val = DMAE_COMP_VAL;
3376
3377         *stats_comp = 0;
3378 }
3379
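/* Non-PMF path: a single DMAE command that writes this function's
 * statistics block out to its shared memory area.
 */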
3380 static void bnx2x_func_stats_init(struct bnx2x *bp)
3381 {
3382         struct dmae_command *dmae = &bp->stats_dmae;
3383         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3384
3385         /* sanity */
3386         if (!bp->func_stx) {
3387                 BNX2X_ERR("BUG!\n");
3388                 return;
3389         }
3390
3391         bp->executer_idx = 0;
3392         memset(dmae, 0, sizeof(struct dmae_command));
3393
3394         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3395                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3396                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3397 #ifdef __BIG_ENDIAN
3398                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3399 #else
3400                         DMAE_CMD_ENDIANITY_DW_SWAP |
3401 #endif
3402                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3403                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3404         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3405         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3406         dmae->dst_addr_lo = bp->func_stx >> 2;
3407         dmae->dst_addr_hi = 0;
3408         dmae->len = sizeof(struct host_func_stats) >> 2;
3409         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3410         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3411         dmae->comp_val = DMAE_COMP_VAL;
3412
3413         *stats_comp = 0;
3414 }
3415
3416 static void bnx2x_stats_start(struct bnx2x *bp)
3417 {
3418         if (bp->port.pmf)
3419                 bnx2x_port_stats_init(bp);
3420
3421         else if (bp->func_stx)
3422                 bnx2x_func_stats_init(bp);
3423
3424         bnx2x_hw_stats_post(bp);
3425         bnx2x_storm_stats_post(bp);
3426 }
3427
3428 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3429 {
3430         bnx2x_stats_comp(bp);
3431         bnx2x_stats_pmf_update(bp);
3432         bnx2x_stats_start(bp);
3433 }
3434
3435 static void bnx2x_stats_restart(struct bnx2x *bp)
3436 {
3437         bnx2x_stats_comp(bp);
3438         bnx2x_stats_start(bp);
3439 }
3440
3441 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3442 {
3443         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3444         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3445         struct regpair diff;
3446
3447         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3448         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3449         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3450         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3451         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3452         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3453         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3454         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3455         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3456         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3457         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3458         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3459         UPDATE_STAT64(tx_stat_gt127,
3460                                 tx_stat_etherstatspkts65octetsto127octets);
3461         UPDATE_STAT64(tx_stat_gt255,
3462                                 tx_stat_etherstatspkts128octetsto255octets);
3463         UPDATE_STAT64(tx_stat_gt511,
3464                                 tx_stat_etherstatspkts256octetsto511octets);
3465         UPDATE_STAT64(tx_stat_gt1023,
3466                                 tx_stat_etherstatspkts512octetsto1023octets);
3467         UPDATE_STAT64(tx_stat_gt1518,
3468                                 tx_stat_etherstatspkts1024octetsto1522octets);
3469         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3470         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3471         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3472         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3473         UPDATE_STAT64(tx_stat_gterr,
3474                                 tx_stat_dot3statsinternalmactransmiterrors);
3475         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3476 }
3477
3478 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3479 {
3480         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3481         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3482
3483         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3484         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3485         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3486         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3487         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3488         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3489         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3490         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3491         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3492         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3493         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3494         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3495         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3496         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3497         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3498         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3499         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3500         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3501         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3502         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3503         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3504         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3505         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3506         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3507         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3508         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3509         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3510         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3511         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3512         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3513         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3514 }
3515
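/* Fold the MAC (BMAC or EMAC) and NIG counters that were just DMAEd into
 * host memory into the port and ethernet statistics.
 */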
3516 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3517 {
3518         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3519         struct nig_stats *old = &(bp->port.old_nig_stats);
3520         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3521         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3522         struct regpair diff;
3523
3524         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3525                 bnx2x_bmac_stats_update(bp);
3526
3527         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3528                 bnx2x_emac_stats_update(bp);
3529
3530         else { /* unreached */
3531                 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3532                 return -1;
3533         }
3534
3535         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3536                       new->brb_discard - old->brb_discard);
3537         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3538                       new->brb_truncate - old->brb_truncate);
3539
3540         UPDATE_STAT64_NIG(egress_mac_pkt0,
3541                                         etherstatspkts1024octetsto1522octets);
3542         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3543
3544         memcpy(old, new, sizeof(struct nig_stats));
3545
3546         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3547                sizeof(struct mac_stx));
3548         estats->brb_drop_hi = pstats->brb_drop_hi;
3549         estats->brb_drop_lo = pstats->brb_drop_lo;
3550
3551         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3552
3553         return 0;
3554 }
3555
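/* Merge the per-client statistics reported by the TSTORM and XSTORM into
 * the function and ethernet statistics; the snapshot is rejected if either
 * storm has not yet answered the last query.
 */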
3556 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3557 {
3558         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3559         int cl_id = BP_CL_ID(bp);
3560         struct tstorm_per_port_stats *tport =
3561                                 &stats->tstorm_common.port_statistics;
3562         struct tstorm_per_client_stats *tclient =
3563                         &stats->tstorm_common.client_statistics[cl_id];
3564         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3565         struct xstorm_per_client_stats *xclient =
3566                         &stats->xstorm_common.client_statistics[cl_id];
3567         struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3568         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3569         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3570         u32 diff;
3571
3572         /* are storm stats valid? */
3573         if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3574                                                         bp->stats_counter) {
3575                 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3576                    "  tstorm counter (%d) != stats_counter (%d)\n",
3577                    tclient->stats_counter, bp->stats_counter);
3578                 return -1;
3579         }
3580         if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3581                                                         bp->stats_counter) {
3582                 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3583                    "  xstorm counter (%d) != stats_counter (%d)\n",
3584                    xclient->stats_counter, bp->stats_counter);
3585                 return -2;
3586         }
3587
3588         fstats->total_bytes_received_hi =
3589         fstats->valid_bytes_received_hi =
3590                                 le32_to_cpu(tclient->total_rcv_bytes.hi);
3591         fstats->total_bytes_received_lo =
3592         fstats->valid_bytes_received_lo =
3593                                 le32_to_cpu(tclient->total_rcv_bytes.lo);
3594
3595         estats->error_bytes_received_hi =
3596                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
3597         estats->error_bytes_received_lo =
3598                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
3599         ADD_64(estats->error_bytes_received_hi,
3600                estats->rx_stat_ifhcinbadoctets_hi,
3601                estats->error_bytes_received_lo,
3602                estats->rx_stat_ifhcinbadoctets_lo);
3603
3604         ADD_64(fstats->total_bytes_received_hi,
3605                estats->error_bytes_received_hi,
3606                fstats->total_bytes_received_lo,
3607                estats->error_bytes_received_lo);
3608
3609         UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
3610         UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3611                                 total_multicast_packets_received);
3612         UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3613                                 total_broadcast_packets_received);
3614
3615         fstats->total_bytes_transmitted_hi =
3616                                 le32_to_cpu(xclient->total_sent_bytes.hi);
3617         fstats->total_bytes_transmitted_lo =
3618                                 le32_to_cpu(xclient->total_sent_bytes.lo);
3619
3620         UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3621                                 total_unicast_packets_transmitted);
3622         UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3623                                 total_multicast_packets_transmitted);
3624         UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3625                                 total_broadcast_packets_transmitted);
3626
3627         memcpy(estats, &(fstats->total_bytes_received_hi),
3628                sizeof(struct host_func_stats) - 2*sizeof(u32));
3629
3630         estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3631         estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3632         estats->brb_truncate_discard =
3633                                 le32_to_cpu(tport->brb_truncate_discard);
3634         estats->mac_discard = le32_to_cpu(tport->mac_discard);
3635
3636         old_tclient->rcv_unicast_bytes.hi =
3637                                 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
3638         old_tclient->rcv_unicast_bytes.lo =
3639                                 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
3640         old_tclient->rcv_broadcast_bytes.hi =
3641                                 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3642         old_tclient->rcv_broadcast_bytes.lo =
3643                                 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3644         old_tclient->rcv_multicast_bytes.hi =
3645                                 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
3646         old_tclient->rcv_multicast_bytes.lo =
3647                                 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
3648         old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
3649
3650         old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3651         old_tclient->packets_too_big_discard =
3652                                 le32_to_cpu(tclient->packets_too_big_discard);
3653         estats->no_buff_discard =
3654         old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3655         old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3656
3657         old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3658         old_xclient->unicast_bytes_sent.hi =
3659                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3660         old_xclient->unicast_bytes_sent.lo =
3661                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3662         old_xclient->multicast_bytes_sent.hi =
3663                                 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3664         old_xclient->multicast_bytes_sent.lo =
3665                                 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3666         old_xclient->broadcast_bytes_sent.hi =
3667                                 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3668         old_xclient->broadcast_bytes_sent.lo =
3669                                 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3670
3671         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3672
3673         return 0;
3674 }
3675
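/* Derive the standard net_device_stats counters from the driver's extended
 * ethernet statistics.
 */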
3676 static void bnx2x_net_stats_update(struct bnx2x *bp)
3677 {
3678         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3679         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3680         struct net_device_stats *nstats = &bp->dev->stats;
3681
3682         nstats->rx_packets =
3683                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3684                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3685                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3686
3687         nstats->tx_packets =
3688                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3689                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3690                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3691
3692         nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
3693
3694         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3695
3696         nstats->rx_dropped = old_tclient->checksum_discard +
3697                              estats->mac_discard;
3698         nstats->tx_dropped = 0;
3699
3700         nstats->multicast =
3701                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
3702
3703         nstats->collisions =
3704                         estats->tx_stat_dot3statssinglecollisionframes_lo +
3705                         estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3706                         estats->tx_stat_dot3statslatecollisions_lo +
3707                         estats->tx_stat_dot3statsexcessivecollisions_lo;
3708
3709         estats->jabber_packets_received =
3710                                 old_tclient->packets_too_big_discard +
3711                                 estats->rx_stat_dot3statsframestoolong_lo;
3712
3713         nstats->rx_length_errors =
3714                                 estats->rx_stat_etherstatsundersizepkts_lo +
3715                                 estats->jabber_packets_received;
3716         nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3717         nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3718         nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3719         nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3720         nstats->rx_missed_errors = estats->xxoverflow_discard;
3721
3722         nstats->rx_errors = nstats->rx_length_errors +
3723                             nstats->rx_over_errors +
3724                             nstats->rx_crc_errors +
3725                             nstats->rx_frame_errors +
3726                             nstats->rx_fifo_errors +
3727                             nstats->rx_missed_errors;
3728
3729         nstats->tx_aborted_errors =
3730                         estats->tx_stat_dot3statslatecollisions_lo +
3731                         estats->tx_stat_dot3statsexcessivecollisions_lo;
3732         nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3733         nstats->tx_fifo_errors = 0;
3734         nstats->tx_heartbeat_errors = 0;
3735         nstats->tx_window_errors = 0;
3736
3737         nstats->tx_errors = nstats->tx_aborted_errors +
3738                             nstats->tx_carrier_errors;
3739 }
3740
3741 static void bnx2x_stats_update(struct bnx2x *bp)
3742 {
3743         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3744         int update = 0;
3745
3746         if (*stats_comp != DMAE_COMP_VAL)
3747                 return;
3748
3749         if (bp->port.pmf)
3750                 update = (bnx2x_hw_stats_update(bp) == 0);
3751
3752         update |= (bnx2x_storm_stats_update(bp) == 0);
3753
3754         if (update)
3755                 bnx2x_net_stats_update(bp);
3756
3757         else {
3758                 if (bp->stats_pending) {
3759                         bp->stats_pending++;
3760                         if (bp->stats_pending == 3) {
3761                                 BNX2X_ERR("stats were not updated 3 times in a row\n");
3762                                 bnx2x_panic();
3763                                 return;
3764                         }
3765                 }
3766         }
3767
3768         if (bp->msglevel & NETIF_MSG_TIMER) {
3769                 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3770                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3771                 struct net_device_stats *nstats = &bp->dev->stats;
3772                 int i;
3773
3774                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3775                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
3776                                   "  tx pkt (%lx)\n",
3777                        bnx2x_tx_avail(bp->fp),
3778                        le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3779                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
3780                                   "  rx pkt (%lx)\n",
3781                        (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3782                              bp->fp->rx_comp_cons),
3783                        le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3784                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u\n",
3785                        netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3786                        estats->driver_xoff, estats->brb_drop_lo);
3787                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
3788                         "packets_too_big_discard %u  no_buff_discard %u  "
3789                         "mac_discard %u  mac_filter_discard %u  "
3790                         "xxoverflow_discard %u  brb_truncate_discard %u  "
3791                         "ttl0_discard %u\n",
3792                        old_tclient->checksum_discard,
3793                        old_tclient->packets_too_big_discard,
3794                        old_tclient->no_buff_discard, estats->mac_discard,
3795                        estats->mac_filter_discard, estats->xxoverflow_discard,
3796                        estats->brb_truncate_discard,
3797                        old_tclient->ttl0_discard);
3798
3799                 for_each_queue(bp, i) {
3800                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3801                                bnx2x_fp(bp, i, tx_pkt),
3802                                bnx2x_fp(bp, i, rx_pkt),
3803                                bnx2x_fp(bp, i, rx_calls));
3804                 }
3805         }
3806
3807         bnx2x_hw_stats_post(bp);
3808         bnx2x_storm_stats_post(bp);
3809 }
3810
3811 static void bnx2x_port_stats_stop(struct bnx2x *bp)
3812 {
3813         struct dmae_command *dmae;
3814         u32 opcode;
3815         int loader_idx = PMF_DMAE_C(bp);
3816         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3817
3818         bp->executer_idx = 0;
3819
3820         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3821                   DMAE_CMD_C_ENABLE |
3822                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3823 #ifdef __BIG_ENDIAN
3824                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3825 #else
3826                   DMAE_CMD_ENDIANITY_DW_SWAP |
3827 #endif
3828                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3829                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3830
3831         if (bp->port.port_stx) {
3832
3833                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3834                 if (bp->func_stx)
3835                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3836                 else
3837                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3838                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3839                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3840                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3841                 dmae->dst_addr_hi = 0;
3842                 dmae->len = sizeof(struct host_port_stats) >> 2;
3843                 if (bp->func_stx) {
3844                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3845                         dmae->comp_addr_hi = 0;
3846                         dmae->comp_val = 1;
3847                 } else {
3848                         dmae->comp_addr_lo =
3849                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3850                         dmae->comp_addr_hi =
3851                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3852                         dmae->comp_val = DMAE_COMP_VAL;
3853
3854                         *stats_comp = 0;
3855                 }
3856         }
3857
3858         if (bp->func_stx) {
3859
3860                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3861                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3862                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3863                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3864                 dmae->dst_addr_lo = bp->func_stx >> 2;
3865                 dmae->dst_addr_hi = 0;
3866                 dmae->len = sizeof(struct host_func_stats) >> 2;
3867                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3868                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3869                 dmae->comp_val = DMAE_COMP_VAL;
3870
3871                 *stats_comp = 0;
3872         }
3873 }
3874
3875 static void bnx2x_stats_stop(struct bnx2x *bp)
3876 {
3877         int update = 0;
3878
3879         bnx2x_stats_comp(bp);
3880
3881         if (bp->port.pmf)
3882                 update = (bnx2x_hw_stats_update(bp) == 0);
3883
3884         update |= (bnx2x_storm_stats_update(bp) == 0);
3885
3886         if (update) {
3887                 bnx2x_net_stats_update(bp);
3888
3889                 if (bp->port.pmf)
3890                         bnx2x_port_stats_stop(bp);
3891
3892                 bnx2x_hw_stats_post(bp);
3893                 bnx2x_stats_comp(bp);
3894         }
3895 }
3896
3897 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3898 {
3899 }
3900
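/* Statistics state machine: indexed by the current state and the incoming
 * event, each entry names the handler to run and the next state to enter
 * (see bnx2x_stats_handle()).
 */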
3901 static const struct {
3902         void (*action)(struct bnx2x *bp);
3903         enum bnx2x_stats_state next_state;
3904 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3905 /* state        event   */
3906 {
3907 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3908 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
3909 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3910 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3911 },
3912 {
3913 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
3914 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
3915 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
3916 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
3917 }
3918 };
3919
3920 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3921 {
3922         enum bnx2x_stats_state state = bp->stats_state;
3923
3924         bnx2x_stats_stm[state][event].action(bp);
3925         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3926
3927         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3928                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3929                    state, event, bp->stats_state);
3930 }
3931
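/* Periodic timer: in poll mode service ring 0, exchange the driver/MCP
 * heartbeat pulse, request a statistics update and re-arm itself.
 */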
3932 static void bnx2x_timer(unsigned long data)
3933 {
3934         struct bnx2x *bp = (struct bnx2x *) data;
3935
3936         if (!netif_running(bp->dev))
3937                 return;
3938
3939         if (atomic_read(&bp->intr_sem) != 0)
3940                 goto timer_restart;
3941
3942         if (poll) {
3943                 struct bnx2x_fastpath *fp = &bp->fp[0];
3944                 int rc;
3945
3946                 bnx2x_tx_int(fp, 1000);
3947                 rc = bnx2x_rx_int(fp, 1000);
3948         }
3949
3950         if (!BP_NOMCP(bp)) {
3951                 int func = BP_FUNC(bp);
3952                 u32 drv_pulse;
3953                 u32 mcp_pulse;
3954
3955                 ++bp->fw_drv_pulse_wr_seq;
3956                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3957                 /* TBD - add SYSTEM_TIME */
3958                 drv_pulse = bp->fw_drv_pulse_wr_seq;
3959                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
3960
3961                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
3962                              MCP_PULSE_SEQ_MASK);
3963                 /* The delta between driver pulse and mcp response
3964                  * should be 1 (before mcp response) or 0 (after mcp response)
3965                  */
3966                 if ((drv_pulse != mcp_pulse) &&
3967                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3968                         /* someone lost a heartbeat... */
3969                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3970                                   drv_pulse, mcp_pulse);
3971                 }
3972         }
3973
3974         if ((bp->state == BNX2X_STATE_OPEN) ||
3975             (bp->state == BNX2X_STATE_DISABLED))
3976                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
3977
3978 timer_restart:
3979         mod_timer(&bp->timer, jiffies + bp->current_interval);
3980 }
3981
3982 /* end of Statistics */
3983
3984 /* nic init */
3985
3986 /*
3987  * nic init service functions
3988  */
3989
3990 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
3991 {
3992         int port = BP_PORT(bp);
3993
3994         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
3995                         USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3996                         sizeof(struct ustorm_status_block)/4);
3997         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
3998                         CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3999                         sizeof(struct cstorm_status_block)/4);
4000 }
4001
4002 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4003                           dma_addr_t mapping, int sb_id)
4004 {
4005         int port = BP_PORT(bp);
4006         int func = BP_FUNC(bp);
4007         int index;
4008         u64 section;
4009
4010         /* USTORM */
4011         section = ((u64)mapping) + offsetof(struct host_status_block,
4012                                             u_status_block);
4013         sb->u_status_block.status_block_id = sb_id;
4014
4015         REG_WR(bp, BAR_USTRORM_INTMEM +
4016                USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4017         REG_WR(bp, BAR_USTRORM_INTMEM +
4018                ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4019                U64_HI(section));
4020         REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4021                 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4022
4023         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4024                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4025                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4026
4027         /* CSTORM */
4028         section = ((u64)mapping) + offsetof(struct host_status_block,
4029                                             c_status_block);
4030         sb->c_status_block.status_block_id = sb_id;
4031
4032         REG_WR(bp, BAR_CSTRORM_INTMEM +
4033                CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4034         REG_WR(bp, BAR_CSTRORM_INTMEM +
4035                ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4036                U64_HI(section));
4037         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4038                 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4039
4040         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4041                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4042                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4043
4044         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4045 }
4046
4047 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4048 {
4049         int func = BP_FUNC(bp);
4050
4051         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4052                         USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4053                         sizeof(struct ustorm_def_status_block)/4);
4054         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4055                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4056                         sizeof(struct cstorm_def_status_block)/4);
4057         bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4058                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4059                         sizeof(struct xstorm_def_status_block)/4);
4060         bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4061                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4062                         sizeof(struct tstorm_def_status_block)/4);
4063 }
4064
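/* Set up the default status block: latch the AEU attention group
 * signals, register the attention block address with the HC and point
 * each storm (U/C/T/X) at its section of the block.
 */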
4065 static void bnx2x_init_def_sb(struct bnx2x *bp,
4066                               struct host_def_status_block *def_sb,
4067                               dma_addr_t mapping, int sb_id)
4068 {
4069         int port = BP_PORT(bp);
4070         int func = BP_FUNC(bp);
4071         int index, val, reg_offset;
4072         u64 section;
4073
4074         /* ATTN */
4075         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4076                                             atten_status_block);
4077         def_sb->atten_status_block.status_block_id = sb_id;
4078
4079         bp->attn_state = 0;
4080
4081         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4082                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4083
4084         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4085                 bp->attn_group[index].sig[0] = REG_RD(bp,
4086                                                      reg_offset + 0x10*index);
4087                 bp->attn_group[index].sig[1] = REG_RD(bp,
4088                                                reg_offset + 0x4 + 0x10*index);
4089                 bp->attn_group[index].sig[2] = REG_RD(bp,
4090                                                reg_offset + 0x8 + 0x10*index);
4091                 bp->attn_group[index].sig[3] = REG_RD(bp,
4092                                                reg_offset + 0xc + 0x10*index);
4093         }
4094
4095         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4096                              HC_REG_ATTN_MSG0_ADDR_L);
4097
4098         REG_WR(bp, reg_offset, U64_LO(section));
4099         REG_WR(bp, reg_offset + 4, U64_HI(section));
4100
4101         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4102
4103         val = REG_RD(bp, reg_offset);
4104         val |= sb_id;
4105         REG_WR(bp, reg_offset, val);
4106
4107         /* USTORM */
4108         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4109                                             u_def_status_block);
4110         def_sb->u_def_status_block.status_block_id = sb_id;
4111
4112         REG_WR(bp, BAR_USTRORM_INTMEM +
4113                USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4114         REG_WR(bp, BAR_USTRORM_INTMEM +
4115                ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4116                U64_HI(section));
4117         REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4118                 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4119
4120         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4121                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4122                          USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4123
4124         /* CSTORM */
4125         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4126                                             c_def_status_block);
4127         def_sb->c_def_status_block.status_block_id = sb_id;
4128
4129         REG_WR(bp, BAR_CSTRORM_INTMEM +
4130                CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4131         REG_WR(bp, BAR_CSTRORM_INTMEM +
4132                ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4133                U64_HI(section));
4134         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4135                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4136
4137         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4138                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4139                          CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4140
4141         /* TSTORM */
4142         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4143                                             t_def_status_block);
4144         def_sb->t_def_status_block.status_block_id = sb_id;
4145
4146         REG_WR(bp, BAR_TSTRORM_INTMEM +
4147                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4148         REG_WR(bp, BAR_TSTRORM_INTMEM +
4149                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4150                U64_HI(section));
4151         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4152                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4153
4154         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4155                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4156                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4157
4158         /* XSTORM */
4159         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4160                                             x_def_status_block);
4161         def_sb->x_def_status_block.status_block_id = sb_id;
4162
4163         REG_WR(bp, BAR_XSTRORM_INTMEM +
4164                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4165         REG_WR(bp, BAR_XSTRORM_INTMEM +
4166                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4167                U64_HI(section));
4168         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4169                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4170
4171         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4172                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4173                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4174
4175         bp->stats_pending = 0;
4176         bp->set_mac_pending = 0;
4177
4178         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4179 }
4180
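/* Write the interrupt coalescing parameters for every queue: the
 * timeout written to the HC is ticks/12, and host coalescing on an
 * index is disabled whenever the corresponding ticks value is zero.
 */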
4181 static void bnx2x_update_coalesce(struct bnx2x *bp)
4182 {
4183         int port = BP_PORT(bp);
4184         int i;
4185
4186         for_each_queue(bp, i) {
4187                 int sb_id = bp->fp[i].sb_id;
4188
4189                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4190                 REG_WR8(bp, BAR_USTRORM_INTMEM +
4191                         USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4192                                                     U_SB_ETH_RX_CQ_INDEX),
4193                         bp->rx_ticks/12);
4194                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4195                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4196                                                      U_SB_ETH_RX_CQ_INDEX),
4197                          bp->rx_ticks ? 0 : 1);
4198                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4199                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4200                                                      U_SB_ETH_RX_BD_INDEX),
4201                          bp->rx_ticks ? 0 : 1);
4202
4203                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4204                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4205                         CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4206                                                     C_SB_ETH_TX_CQ_INDEX),
4207                         bp->tx_ticks/12);
4208                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4209                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4210                                                      C_SB_ETH_TX_CQ_INDEX),
4211                          bp->tx_ticks ? 0 : 1);
4212         }
4213 }
4214
4215 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4216                                        struct bnx2x_fastpath *fp, int last)
4217 {
4218         int i;
4219
4220         for (i = 0; i < last; i++) {
4221                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4222                 struct sk_buff *skb = rx_buf->skb;
4223
4224                 if (skb == NULL) {
4225                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4226                         continue;
4227                 }
4228
4229                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4230                         pci_unmap_single(bp->pdev,
4231                                          pci_unmap_addr(rx_buf, mapping),
4232                                          bp->rx_buf_use_size,
4233                                          PCI_DMA_FROMDEVICE);
4234
4235                 dev_kfree_skb(skb);
4236                 rx_buf->skb = NULL;
4237         }
4238 }
4239
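/* Build the Rx rings for every queue: pre-allocate the TPA skb pool
 * (when TPA is enabled), chain the "next page" elements of the SGE,
 * BD and CQ rings, allocate SGEs and skbs, and publish the initial
 * producers to the chip.
 */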
4240 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4241 {
4242         int func = BP_FUNC(bp);
4243         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4244                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4245         u16 ring_prod, cqe_ring_prod;
4246         int i, j;
4247
4248         bp->rx_buf_use_size = bp->dev->mtu;
4249         bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
4250         bp->rx_buf_size = bp->rx_buf_use_size + 64;
4251
4252         if (bp->flags & TPA_ENABLE_FLAG) {
4253                 DP(NETIF_MSG_IFUP,
4254                    "rx_buf_use_size %d  rx_buf_size %d  effective_mtu %d\n",
4255                    bp->rx_buf_use_size, bp->rx_buf_size,
4256                    bp->dev->mtu + ETH_OVREHEAD);
4257
4258                 for_each_queue(bp, j) {
4259                         struct bnx2x_fastpath *fp = &bp->fp[j];
4260
4261                         for (i = 0; i < max_agg_queues; i++) {
4262                                 fp->tpa_pool[i].skb =
4263                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4264                                 if (!fp->tpa_pool[i].skb) {
4265                                         BNX2X_ERR("Failed to allocate TPA "
4266                                                   "skb pool for queue[%d] - "
4267                                                   "disabling TPA on this "
4268                                                   "queue!\n", j);
4269                                         bnx2x_free_tpa_pool(bp, fp, i);
4270                                         fp->disable_tpa = 1;
4271                                         break;
4272                                 }
4273                                 /* no DMA mapping yet for this bin */
4274                                 pci_unmap_addr_set(&fp->tpa_pool[i],
4275                                                    mapping, 0);
4276                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4277                         }
4278                 }
4279         }
4280
4281         for_each_queue(bp, j) {
4282                 struct bnx2x_fastpath *fp = &bp->fp[j];
4283
4284                 fp->rx_bd_cons = 0;
4285                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4286                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4287
4288                 /* "next page" elements initialization */
4289                 /* SGE ring */
4290                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4291                         struct eth_rx_sge *sge;
4292
4293                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4294                         sge->addr_hi =
4295                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4296                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4297                         sge->addr_lo =
4298                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4299                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4300                 }
4301
4302                 bnx2x_init_sge_ring_bit_mask(fp);
4303
4304                 /* RX BD ring */
4305                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4306                         struct eth_rx_bd *rx_bd;
4307
4308                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4309                         rx_bd->addr_hi =
4310                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4311                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4312                         rx_bd->addr_lo =
4313                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4314                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4315                 }
4316
4317                 /* CQ ring */
4318                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4319                         struct eth_rx_cqe_next_page *nextpg;
4320
4321                         nextpg = (struct eth_rx_cqe_next_page *)
4322                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4323                         nextpg->addr_hi =
4324                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4325                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4326                         nextpg->addr_lo =
4327                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4328                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4329                 }
4330
4331                 /* Allocate SGEs and initialize the ring elements */
4332                 for (i = 0, ring_prod = 0;
4333                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4334
4335                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4336                                 BNX2X_ERR("was only able to allocate "
4337                                           "%d rx sges\n", i);
4338                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4339                                 /* Cleanup already allocated elements */
4340                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4341                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4342                                 fp->disable_tpa = 1;
4343                                 ring_prod = 0;
4344                                 break;
4345                         }
4346                         ring_prod = NEXT_SGE_IDX(ring_prod);
4347                 }
4348                 fp->rx_sge_prod = ring_prod;
4349
4350                 /* Allocate BDs and initialize BD ring */
4351                 fp->rx_comp_cons = 0;
4352                 cqe_ring_prod = ring_prod = 0;
4353                 for (i = 0; i < bp->rx_ring_size; i++) {
4354                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4355                                 BNX2X_ERR("was only able to allocate "
4356                                           "%d rx skbs\n", i);
4357                                 bp->eth_stats.rx_skb_alloc_failed++;
4358                                 break;
4359                         }
4360                         ring_prod = NEXT_RX_IDX(ring_prod);
4361                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4362                         WARN_ON(ring_prod <= i);
4363                 }
4364
4365                 fp->rx_bd_prod = ring_prod;
4366                 /* must not have more available CQEs than BDs */
4367                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4368                                        cqe_ring_prod);
4369                 fp->rx_pkt = fp->rx_calls = 0;
4370
4371                 /* Warning!
4372                  * This will generate an interrupt (to the TSTORM);
4373                  * it must only be done after the chip is initialized.
4374                  */
4375                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4376                                      fp->rx_sge_prod);
4377                 if (j != 0)
4378                         continue;
4379
4380                 REG_WR(bp, BAR_USTRORM_INTMEM +
4381                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4382                        U64_LO(fp->rx_comp_mapping));
4383                 REG_WR(bp, BAR_USTRORM_INTMEM +
4384                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4385                        U64_HI(fp->rx_comp_mapping));
4386         }
4387 }
4388
4389 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4390 {
4391         int i, j;
4392
4393         for_each_queue(bp, j) {
4394                 struct bnx2x_fastpath *fp = &bp->fp[j];
4395
4396                 for (i = 1; i <= NUM_TX_RINGS; i++) {
4397                         struct eth_tx_bd *tx_bd =
4398                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4399
4400                         tx_bd->addr_hi =
4401                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4402                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4403                         tx_bd->addr_lo =
4404                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4405                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4406                 }
4407
4408                 fp->tx_pkt_prod = 0;
4409                 fp->tx_pkt_cons = 0;
4410                 fp->tx_bd_prod = 0;
4411                 fp->tx_bd_cons = 0;
4412                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4413                 fp->tx_pkt = 0;
4414         }
4415 }
4416
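/* Initialize the slow path (SPQ) ring and tell the XSTORM where it
 * lives and what the current producer index is.
 */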
4417 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4418 {
4419         int func = BP_FUNC(bp);
4420
4421         spin_lock_init(&bp->spq_lock);
4422
4423         bp->spq_left = MAX_SPQ_PENDING;
4424         bp->spq_prod_idx = 0;
4425         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4426         bp->spq_prod_bd = bp->spq;
4427         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4428
4429         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4430                U64_LO(bp->spq_mapping));
4431         REG_WR(bp,
4432                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4433                U64_HI(bp->spq_mapping));
4434
4435         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4436                bp->spq_prod_idx);
4437 }
4438
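/* Fill the per-queue Ethernet context: Tx BD ring and doorbell
 * addresses for the XSTORM, Rx BD/SGE ring addresses and buffer sizes
 * for the USTORM, and the CDU reserved/usage fields.
 */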
4439 static void bnx2x_init_context(struct bnx2x *bp)
4440 {
4441         int i;
4442
4443         for_each_queue(bp, i) {
4444                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4445                 struct bnx2x_fastpath *fp = &bp->fp[i];
4446                 u8 sb_id = FP_SB_ID(fp);
4447
4448                 context->xstorm_st_context.tx_bd_page_base_hi =
4449                                                 U64_HI(fp->tx_desc_mapping);
4450                 context->xstorm_st_context.tx_bd_page_base_lo =
4451                                                 U64_LO(fp->tx_desc_mapping);
4452                 context->xstorm_st_context.db_data_addr_hi =
4453                                                 U64_HI(fp->tx_prods_mapping);
4454                 context->xstorm_st_context.db_data_addr_lo =
4455                                                 U64_LO(fp->tx_prods_mapping);
4456                 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4457                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4458
4459                 context->ustorm_st_context.common.sb_index_numbers =
4460                                                 BNX2X_RX_SB_INDEX_NUM;
4461                 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4462                 context->ustorm_st_context.common.status_block_id = sb_id;
4463                 context->ustorm_st_context.common.flags =
4464                         USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4465                 context->ustorm_st_context.common.mc_alignment_size = 64;
4466                 context->ustorm_st_context.common.bd_buff_size =
4467                                                 bp->rx_buf_use_size;
4468                 context->ustorm_st_context.common.bd_page_base_hi =
4469                                                 U64_HI(fp->rx_desc_mapping);
4470                 context->ustorm_st_context.common.bd_page_base_lo =
4471                                                 U64_LO(fp->rx_desc_mapping);
4472                 if (!fp->disable_tpa) {
4473                         context->ustorm_st_context.common.flags |=
4474                                 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4475                                  USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4476                         context->ustorm_st_context.common.sge_buff_size =
4477                                         (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4478                         context->ustorm_st_context.common.sge_page_base_hi =
4479                                                 U64_HI(fp->rx_sge_mapping);
4480                         context->ustorm_st_context.common.sge_page_base_lo =
4481                                                 U64_LO(fp->rx_sge_mapping);
4482                 }
4483
4484                 context->cstorm_st_context.sb_index_number =
4485                                                 C_SB_ETH_TX_CQ_INDEX;
4486                 context->cstorm_st_context.status_block_id = sb_id;
4487
4488                 context->xstorm_ag_context.cdu_reserved =
4489                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4490                                                CDU_REGION_NUMBER_XCM_AG,
4491                                                ETH_CONNECTION_TYPE);
4492                 context->ustorm_ag_context.cdu_usage =
4493                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4494                                                CDU_REGION_NUMBER_UCM_AG,
4495                                                ETH_CONNECTION_TYPE);
4496         }
4497 }
4498
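/* Program the RSS indirection table (multi-queue only): spread the
 * entries round-robin over the active queues.
 */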
4499 static void bnx2x_init_ind_table(struct bnx2x *bp)
4500 {
4501         int port = BP_PORT(bp);
4502         int i;
4503
4504         if (!is_multi(bp))
4505                 return;
4506
4507         DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
4508         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4509                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4510                         TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
4511                         i % bp->num_queues);
4512
4513         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4514 }
4515
4516 static void bnx2x_set_client_config(struct bnx2x *bp)
4517 {
4518         struct tstorm_eth_client_config tstorm_client = {0};
4519         int port = BP_PORT(bp);
4520         int i;
4521
4522         tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
4523         tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4524         tstorm_client.config_flags =
4525                                 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4526 #ifdef BCM_VLAN
4527         if (bp->rx_mode && bp->vlgrp) {
4528                 tstorm_client.config_flags |=
4529                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4530                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4531         }
4532 #endif
4533
4534         if (bp->flags & TPA_ENABLE_FLAG) {
4535                 tstorm_client.max_sges_for_packet =
4536                         BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT;
4537                 tstorm_client.max_sges_for_packet =
4538                         ((tstorm_client.max_sges_for_packet +
4539                           PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4540                         PAGES_PER_SGE_SHIFT;
4541
4542                 tstorm_client.config_flags |=
4543                                 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4544         }
4545
4546         for_each_queue(bp, i) {
4547                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4548                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4549                        ((u32 *)&tstorm_client)[0]);
4550                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4551                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4552                        ((u32 *)&tstorm_client)[1]);
4553         }
4554
4555         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4556            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4557 }
4558
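/* Translate the requested Rx mode into the TSTORM MAC filter
 * configuration (drop-all / accept-all masks per address type) and,
 * unless Rx is disabled, refresh the client configuration.
 */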
4559 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4560 {
4561         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4562         int mode = bp->rx_mode;
4563         int mask = (1 << BP_L_ID(bp));
4564         int func = BP_FUNC(bp);
4565         int i;
4566
4567         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
4568
4569         switch (mode) {
4570         case BNX2X_RX_MODE_NONE: /* no Rx */
4571                 tstorm_mac_filter.ucast_drop_all = mask;
4572                 tstorm_mac_filter.mcast_drop_all = mask;
4573                 tstorm_mac_filter.bcast_drop_all = mask;
4574                 break;
4575         case BNX2X_RX_MODE_NORMAL:
4576                 tstorm_mac_filter.bcast_accept_all = mask;
4577                 break;
4578         case BNX2X_RX_MODE_ALLMULTI:
4579                 tstorm_mac_filter.mcast_accept_all = mask;
4580                 tstorm_mac_filter.bcast_accept_all = mask;
4581                 break;
4582         case BNX2X_RX_MODE_PROMISC:
4583                 tstorm_mac_filter.ucast_accept_all = mask;
4584                 tstorm_mac_filter.mcast_accept_all = mask;
4585                 tstorm_mac_filter.bcast_accept_all = mask;
4586                 break;
4587         default:
4588                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4589                 break;
4590         }
4591
4592         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4593                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4594                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4595                        ((u32 *)&tstorm_mac_filter)[i]);
4596
4597 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4598                    ((u32 *)&tstorm_mac_filter)[i]); */
4599         }
4600
4601         if (mode != BNX2X_RX_MODE_NONE)
4602                 bnx2x_set_client_config(bp);
4603 }
4604
4605 static void bnx2x_init_internal_common(struct bnx2x *bp)
4606 {
4607         int i;
4608
4609         /* Zero this manually as its initialization is
4610            currently missing in the initTool */
4611         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4612                 REG_WR(bp, BAR_USTRORM_INTMEM +
4613                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4614 }
4615
4616 static void bnx2x_init_internal_port(struct bnx2x *bp)
4617 {
4618         int port = BP_PORT(bp);
4619
4620         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4621         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4622         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4623         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4624 }
4625
4626 static void bnx2x_init_internal_func(struct bnx2x *bp)
4627 {
4628         struct tstorm_eth_function_common_config tstorm_config = {0};
4629         struct stats_indication_flags stats_flags = {0};
4630         int port = BP_PORT(bp);
4631         int func = BP_FUNC(bp);
4632         int i;
4633         u16 max_agg_size;
4634
4635         if (is_multi(bp)) {
4636                 tstorm_config.config_flags = MULTI_FLAGS;
4637                 tstorm_config.rss_result_mask = MULTI_MASK;
4638         }
4639
4640         tstorm_config.leading_client_id = BP_L_ID(bp);
4641
4642         REG_WR(bp, BAR_TSTRORM_INTMEM +
4643                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4644                (*(u32 *)&tstorm_config));
4645
4646         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4647         bnx2x_set_storm_rx_mode(bp);
4648
4649         /* reset xstorm per client statistics */
4650         for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4651                 REG_WR(bp, BAR_XSTRORM_INTMEM +
4652                        XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4653                        i*4, 0);
4654         }
4655         /* reset tstorm per client statistics */
4656         for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4657                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4658                        TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4659                        i*4, 0);
4660         }
4661
4662         /* Init statistics related context */
4663         stats_flags.collect_eth = 1;
4664
4665         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4666                ((u32 *)&stats_flags)[0]);
4667         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4668                ((u32 *)&stats_flags)[1]);
4669
4670         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4671                ((u32 *)&stats_flags)[0]);
4672         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4673                ((u32 *)&stats_flags)[1]);
4674
4675         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4676                ((u32 *)&stats_flags)[0]);
4677         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4678                ((u32 *)&stats_flags)[1]);
4679
4680         REG_WR(bp, BAR_XSTRORM_INTMEM +
4681                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4682                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4683         REG_WR(bp, BAR_XSTRORM_INTMEM +
4684                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4685                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4686
4687         REG_WR(bp, BAR_TSTRORM_INTMEM +
4688                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4689                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4690         REG_WR(bp, BAR_TSTRORM_INTMEM +
4691                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4692                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4693
4694         if (CHIP_IS_E1H(bp)) {
4695                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4696                         IS_E1HMF(bp));
4697                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4698                         IS_E1HMF(bp));
4699                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4700                         IS_E1HMF(bp));
4701                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4702                         IS_E1HMF(bp));
4703
4704                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4705                          bp->e1hov);
4706         }
4707
4708         /* Init CQ ring mapping and aggregation size */
4709         max_agg_size = min((u32)(bp->rx_buf_use_size +
4710                                  8*BCM_PAGE_SIZE*PAGES_PER_SGE),
4711                            (u32)0xffff);
4712         for_each_queue(bp, i) {
4713                 struct bnx2x_fastpath *fp = &bp->fp[i];
4714
4715                 REG_WR(bp, BAR_USTRORM_INTMEM +
4716                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4717                        U64_LO(fp->rx_comp_mapping));
4718                 REG_WR(bp, BAR_USTRORM_INTMEM +
4719                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4720                        U64_HI(fp->rx_comp_mapping));
4721
4722                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4723                          USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4724                          max_agg_size);
4725         }
4726 }
4727
4728 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4729 {
4730         switch (load_code) {
4731         case FW_MSG_CODE_DRV_LOAD_COMMON:
4732                 bnx2x_init_internal_common(bp);
4733                 /* no break */
4734
4735         case FW_MSG_CODE_DRV_LOAD_PORT:
4736                 bnx2x_init_internal_port(bp);
4737                 /* no break */
4738
4739         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4740                 bnx2x_init_internal_func(bp);
4741                 break;
4742
4743         default:
4744                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4745                 break;
4746         }
4747 }
4748
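/* Top level NIC init: set up the fastpath structures and their status
 * blocks, then the default status block, coalescing, rings, contexts,
 * internal memories and the indirection table, and finally enable
 * interrupts.
 */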
4749 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4750 {
4751         int i;
4752
4753         for_each_queue(bp, i) {
4754                 struct bnx2x_fastpath *fp = &bp->fp[i];
4755
4756                 fp->bp = bp;
4757                 fp->state = BNX2X_FP_STATE_CLOSED;
4758                 fp->index = i;
4759                 fp->cl_id = BP_L_ID(bp) + i;
4760                 fp->sb_id = fp->cl_id;
4761                 DP(NETIF_MSG_IFUP,
4762                    "bnx2x_init_sb(%p,%p) index %d  cl_id %d  sb %d\n",
4763                    bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4764                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4765                               FP_SB_ID(fp));
4766                 bnx2x_update_fpsb_idx(fp);
4767         }
4768
4769         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4770                           DEF_SB_ID);
4771         bnx2x_update_dsb_idx(bp);
4772         bnx2x_update_coalesce(bp);
4773         bnx2x_init_rx_rings(bp);
4774         bnx2x_init_tx_ring(bp);
4775         bnx2x_init_sp_ring(bp);
4776         bnx2x_init_context(bp);
4777         bnx2x_init_internal(bp, load_code);
4778         bnx2x_init_ind_table(bp);
4779         bnx2x_int_enable(bp);
4780 }
4781
4782 /* end of nic init */
4783
4784 /*
4785  * gzip service functions
4786  */
4787
4788 static int bnx2x_gunzip_init(struct bnx2x *bp)
4789 {
4790         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4791                                               &bp->gunzip_mapping);
4792         if (bp->gunzip_buf  == NULL)
4793                 goto gunzip_nomem1;
4794
4795         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4796         if (bp->strm  == NULL)
4797                 goto gunzip_nomem2;
4798
4799         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4800                                       GFP_KERNEL);
4801         if (bp->strm->workspace == NULL)
4802                 goto gunzip_nomem3;
4803
4804         return 0;
4805
4806 gunzip_nomem3:
4807         kfree(bp->strm);
4808         bp->strm = NULL;
4809
4810 gunzip_nomem2:
4811         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4812                             bp->gunzip_mapping);
4813         bp->gunzip_buf = NULL;
4814
4815 gunzip_nomem1:
4816         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
4817                " decompression\n", bp->dev->name);
4818         return -ENOMEM;
4819 }
4820
4821 static void bnx2x_gunzip_end(struct bnx2x *bp)
4822 {
4823         kfree(bp->strm->workspace);
4824
4825         kfree(bp->strm);
4826         bp->strm = NULL;
4827
4828         if (bp->gunzip_buf) {
4829                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4830                                     bp->gunzip_mapping);
4831                 bp->gunzip_buf = NULL;
4832         }
4833 }
4834
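/* Inflate a gzip-compressed firmware image into gunzip_buf; returns 0
 * on success and leaves the decompressed length (in dwords) in
 * gunzip_outlen.
 */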
4835 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4836 {
4837         int n, rc;
4838
4839         /* check gzip header */
4840         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4841                 return -EINVAL;
4842
4843         n = 10;
4844
4845 #define FNAME                           0x8
4846
4847         if (zbuf[3] & FNAME)
4848                 while ((zbuf[n++] != 0) && (n < len));
4849
4850         bp->strm->next_in = zbuf + n;
4851         bp->strm->avail_in = len - n;
4852         bp->strm->next_out = bp->gunzip_buf;
4853         bp->strm->avail_out = FW_BUF_SIZE;
4854
4855         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4856         if (rc != Z_OK)
4857                 return rc;
4858
4859         rc = zlib_inflate(bp->strm, Z_FINISH);
4860         if ((rc != Z_OK) && (rc != Z_STREAM_END))
4861                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4862                        bp->dev->name, bp->strm->msg);
4863
4864         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4865         if (bp->gunzip_outlen & 0x3)
4866                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4867                                     " gunzip_outlen (%d) not aligned\n",
4868                        bp->dev->name, bp->gunzip_outlen);
4869         bp->gunzip_outlen >>= 2;
4870
4871         zlib_inflateEnd(bp->strm);
4872
4873         if (rc == Z_STREAM_END)
4874                 return 0;
4875
4876         return rc;
4877 }
4878
4879 /* nic load/unload */
4880
4881 /*
4882  * General service functions
4883  */
4884
4885 /* send a NIG loopback debug packet */
4886 static void bnx2x_lb_pckt(struct bnx2x *bp)
4887 {
4888         u32 wb_write[3];
4889
4890         /* Ethernet source and destination addresses */
4891         wb_write[0] = 0x55555555;
4892         wb_write[1] = 0x55555555;
4893         wb_write[2] = 0x20;             /* SOP */
4894         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4895
4896         /* NON-IP protocol */
4897         wb_write[0] = 0x09000000;
4898         wb_write[1] = 0x55555555;
4899         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
4900         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4901 }
4902
4903 /* Some of the internal memories
4904  * are not directly readable from the driver.
4905  * To test them we send debug packets.
4906  */
4907 static int bnx2x_int_mem_test(struct bnx2x *bp)
4908 {
4909         int factor;
4910         int count, i;
4911         u32 val = 0;
4912
4913         if (CHIP_REV_IS_FPGA(bp))
4914                 factor = 120;
4915         else if (CHIP_REV_IS_EMUL(bp))
4916                 factor = 200;
4917         else
4918                 factor = 1;
4919
4920         DP(NETIF_MSG_HW, "start part1\n");
4921
4922         /* Disable inputs of parser neighbor blocks */
4923         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4924         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4925         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4926         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4927
4928         /*  Write 0 to parser credits for CFC search request */
4929         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4930
4931         /* send Ethernet packet */
4932         bnx2x_lb_pckt(bp);
4933
4934         /* TODO: do we need to reset the NIG statistic? */
4935         /* Wait until NIG register shows 1 packet of size 0x10 */
4936         count = 1000 * factor;
4937         while (count) {
4938
4939                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4940                 val = *bnx2x_sp(bp, wb_data[0]);
4941                 if (val == 0x10)
4942                         break;
4943
4944                 msleep(10);
4945                 count--;
4946         }
4947         if (val != 0x10) {
4948                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
4949                 return -1;
4950         }
4951
4952         /* Wait until PRS register shows 1 packet */
4953         count = 1000 * factor;
4954         while (count) {
4955                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4956                 if (val == 1)
4957                         break;
4958
4959                 msleep(10);
4960                 count--;
4961         }
4962         if (val != 0x1) {
4963                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4964                 return -2;
4965         }
4966
4967         /* Reset and init BRB, PRS */
4968         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4969         msleep(50);
4970         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4971         msleep(50);
4972         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
4973         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
4974
4975         DP(NETIF_MSG_HW, "part2\n");
4976
4977         /* Disable inputs of parser neighbor blocks */
4978         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4979         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4980         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4981         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4982
4983         /* Write 0 to parser credits for CFC search request */
4984         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4985
4986         /* send 10 Ethernet packets */
4987         for (i = 0; i < 10; i++)
4988                 bnx2x_lb_pckt(bp);
4989
4990         /* Wait until NIG register shows 10 + 1
4991            packets of size 11*0x10 = 0xb0 */
4992         count = 1000 * factor;
4993         while (count) {
4994
4995                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4996                 val = *bnx2x_sp(bp, wb_data[0]);
4997                 if (val == 0xb0)
4998                         break;
4999
5000                 msleep(10);
5001                 count--;
5002         }
5003         if (val != 0xb0) {
5004                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5005                 return -3;
5006         }
5007
5008         /* Wait until PRS register shows 2 packets */
5009         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5010         if (val != 2)
5011                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5012
5013         /* Write 1 to parser credits for CFC search request */
5014         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5015
5016         /* Wait until PRS register shows 3 packets */
5017         msleep(10 * factor);
5018         /* Check that the PRS register now shows 3 packets */
5019         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5020         if (val != 3)
5021                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5022
5023         /* clear NIG EOP FIFO */
5024         for (i = 0; i < 11; i++)
5025                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5026         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5027         if (val != 1) {
5028                 BNX2X_ERR("clear of NIG failed\n");
5029                 return -4;
5030         }
5031
5032         /* Reset and init BRB, PRS, NIG */
5033         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5034         msleep(50);
5035         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5036         msleep(50);
5037         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5038         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5039 #ifndef BCM_ISCSI
5040         /* set NIC mode */
5041         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5042 #endif
5043
5044         /* Enable inputs of parser neighbor blocks */
5045         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5046         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5047         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5048         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5049
5050         DP(NETIF_MSG_HW, "done\n");
5051
5052         return 0; /* OK */
5053 }
5054
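/* Unmask the attention sources of the various HW blocks (a set bit in
 * an INT_MASK register masks the corresponding source).
 */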
5055 static void enable_blocks_attention(struct bnx2x *bp)
5056 {
5057         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5058         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5059         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5060         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5061         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5062         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5063         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5064         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5065         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5066 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5067 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5068         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5069         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5070         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5071 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5072 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5073         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5074         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5075         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5076         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5077 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5078 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5079         if (CHIP_REV_IS_FPGA(bp))
5080                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5081         else
5082                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5083         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5084         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5085         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5086 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5087 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5088         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5089         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5090 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5091         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bit 3,4 masked */
5092 }
5093
5094
5095 static int bnx2x_init_common(struct bnx2x *bp)
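/* Chip-wide (common) initialization stage: reset and bring up the
 * shared HW blocks, zero the storm internal memories and, on E1, run
 * the internal memory self test.
 */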
5096 {
5097         u32 val, i;
5098
5099         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
5100
5101         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5102         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5103
5104         bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5105         if (CHIP_IS_E1H(bp))
5106                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5107
5108         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5109         msleep(30);
5110         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5111
5112         bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5113         if (CHIP_IS_E1(bp)) {
5114                 /* enable HW interrupt from PXP on USDM overflow
5115                    bit 16 on INT_MASK_0 */
5116                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5117         }
5118
5119         bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5120         bnx2x_init_pxp(bp);
5121
5122 #ifdef __BIG_ENDIAN
5123         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5124         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5125         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5126         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5127         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5128         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
5129
5130 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5131         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5132         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5133         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5134         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5135 #endif
5136
5137         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5138 #ifdef BCM_ISCSI
5139         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5140         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5141         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5142 #endif
5143
5144         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5145                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5146
5147         /* let the HW do its magic ... */
5148         msleep(100);
5149         /* finish PXP init */
5150         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5151         if (val != 1) {
5152                 BNX2X_ERR("PXP2 CFG failed\n");
5153                 return -EBUSY;
5154         }
5155         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5156         if (val != 1) {
5157                 BNX2X_ERR("PXP2 RD_INIT failed\n");
5158                 return -EBUSY;
5159         }
5160
5161         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5162         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5163
5164         bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5165
5166         /* clean the DMAE memory */
5167         bp->dmae_ready = 1;
5168         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5169
5170         bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5171         bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5172         bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5173         bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5174
5175         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5176         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5177         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5178         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5179
5180         bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5181         /* soft reset pulse */
5182         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5183         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5184
5185 #ifdef BCM_ISCSI
5186         bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5187 #endif
5188
5189         bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5190         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5191         if (!CHIP_REV_IS_SLOW(bp)) {
5192                 /* enable hw interrupt from doorbell Q */
5193                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5194         }
5195
5196         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5197         if (CHIP_REV_IS_SLOW(bp)) {
5198                 /* fix for emulation and FPGA: disable pause */
5199                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5200                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5201                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5202                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5203         }
5204
5205         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5206         /* set NIC mode */
5207         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5208         if (CHIP_IS_E1H(bp))
5209                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5210
5211         bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5212         bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5213         bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5214         bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5215
5216         if (CHIP_IS_E1H(bp)) {
5217                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5218                                 STORM_INTMEM_SIZE_E1H/2);
5219                 bnx2x_init_fill(bp,
5220                                 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5221                                 0, STORM_INTMEM_SIZE_E1H/2);
5222                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5223                                 STORM_INTMEM_SIZE_E1H/2);
5224                 bnx2x_init_fill(bp,
5225                                 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5226                                 0, STORM_INTMEM_SIZE_E1H/2);
5227                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5228                                 STORM_INTMEM_SIZE_E1H/2);
5229                 bnx2x_init_fill(bp,
5230                                 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5231                                 0, STORM_INTMEM_SIZE_E1H/2);
5232                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5233                                 STORM_INTMEM_SIZE_E1H/2);
5234                 bnx2x_init_fill(bp,
5235                                 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5236                                 0, STORM_INTMEM_SIZE_E1H/2);
5237         } else { /* E1 */
5238                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5239                                 STORM_INTMEM_SIZE_E1);
5240                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5241                                 STORM_INTMEM_SIZE_E1);
5242                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5243                                 STORM_INTMEM_SIZE_E1);
5244                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5245                                 STORM_INTMEM_SIZE_E1);
5246         }
5247
5248         bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5249         bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5250         bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5251         bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5252
5253         /* sync semi rtc */
5254         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5255                0x80000000);
5256         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5257                0x80000000);
5258
5259         bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5260         bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5261         bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5262
5263         REG_WR(bp, SRC_REG_SOFT_RST, 1);
5264         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5265                 REG_WR(bp, i, 0xc0cac01a);
5266                 /* TODO: replace with something meaningful */
5267         }
5268         if (CHIP_IS_E1H(bp))
5269                 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5270         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5271
5272         if (sizeof(union cdu_context) != 1024)
5273                 /* we currently assume that a context is 1024 bytes */
5274                 printk(KERN_ALERT PFX "please adjust the size of"
5275                        " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5276
5277         bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5278         val = (4 << 24) + (0 << 12) + 1024;
5279         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5280         if (CHIP_IS_E1(bp)) {
5281                 /* !!! fix pxp client credit until excel update */
5282                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5283                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5284         }
5285
5286         bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5287         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5288
5289         bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5290         bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5291
5292         /* PXPCS COMMON comes here */
5293         /* Reset PCIE errors for debug */
5294         REG_WR(bp, 0x2814, 0xffffffff);
5295         REG_WR(bp, 0x3820, 0xffffffff);
5296
5297         /* EMAC0 COMMON comes here */
5298         /* EMAC1 COMMON comes here */
5299         /* DBU COMMON comes here */
5300         /* DBG COMMON comes here */
5301
5302         bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5303         if (CHIP_IS_E1H(bp)) {
5304                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5305                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5306         }
5307
5308         if (CHIP_REV_IS_SLOW(bp))
5309                 msleep(200);
5310
5311         /* finish CFC init */
5312         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5313         if (val != 1) {
5314                 BNX2X_ERR("CFC LL_INIT failed\n");
5315                 return -EBUSY;
5316         }
5317         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5318         if (val != 1) {
5319                 BNX2X_ERR("CFC AC_INIT failed\n");
5320                 return -EBUSY;
5321         }
5322         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5323         if (val != 1) {
5324                 BNX2X_ERR("CFC CAM_INIT failed\n");
5325                 return -EBUSY;
5326         }
5327         REG_WR(bp, CFC_REG_DEBUG0, 0);
5328
5329         /* read NIG statistic
5330            to see if this is our first time up since power up */
5331         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5332         val = *bnx2x_sp(bp, wb_data[0]);
5333
5334         /* do internal memory self test */
5335         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5336                 BNX2X_ERR("internal mem self test failed\n");
5337                 return -EBUSY;
5338         }
5339
5340         switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5341         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5342                 /* Fan failure is indicated by SPIO 5 */
5343                 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5344                                MISC_REGISTERS_SPIO_INPUT_HI_Z);
5345
5346                 /* set to active low mode */
5347                 val = REG_RD(bp, MISC_REG_SPIO_INT);
5348                 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5349                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5350                 REG_WR(bp, MISC_REG_SPIO_INT, val);
5351
5352                 /* enable interrupt to signal the IGU */
5353                 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5354                 val |= (1 << MISC_REGISTERS_SPIO_5);
5355                 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5356                 break;
5357
5358         default:
5359                 break;
5360         }
5361
5362         /* clear PXP2 attentions */
5363         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5364
5365         enable_blocks_attention(bp);
5366
5367         if (bp->flags & TPA_ENABLE_FLAG) {
5368                 struct tstorm_eth_tpa_exist tmp = {0};
5369
5370                 tmp.tpa_exist = 1;
5371
5372                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
5373                        ((u32 *)&tmp)[0]);
5374                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
5375                        ((u32 *)&tmp)[1]);
5376         }
5377
5378         if (!BP_NOMCP(bp)) {
5379                 bnx2x_acquire_phy_lock(bp);
5380                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5381                 bnx2x_release_phy_lock(bp);
5382         } else
5383                 BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
5384
5385         return 0;
5386 }
5387
5388 static int bnx2x_init_port(struct bnx2x *bp)
5389 {
5390         int port = BP_PORT(bp);
5391         u32 val;
5392
5393         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
5394
5395         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5396
5397         /* Port PXP comes here */
5398         /* Port PXP2 comes here */
5399 #ifdef BCM_ISCSI
5400         /* Port0  1
5401          * Port1  385 */
5402         i++;
5403         wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5404         wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5405         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5406         REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5407
5408         /* Port0  2
5409          * Port1  386 */
5410         i++;
5411         wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5412         wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5413         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5414         REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5415
5416         /* Port0  3
5417          * Port1  387 */
5418         i++;
5419         wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5420         wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5421         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5422         REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5423 #endif
5424         /* Port CMs come here */
5425
5426         /* Port QM comes here */
5427 #ifdef BCM_ISCSI
5428         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5429         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5430
5431         bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5432                              func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5433 #endif
5434         /* Port DQ comes here */
5435         /* Port BRB1 comes here */
5436         /* Port PRS comes here */
5437         /* Port TSDM comes here */
5438         /* Port CSDM comes here */
5439         /* Port USDM comes here */
5440         /* Port XSDM comes here */
5441         bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5442                              port ? TSEM_PORT1_END : TSEM_PORT0_END);
5443         bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5444                              port ? USEM_PORT1_END : USEM_PORT0_END);
5445         bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5446                              port ? CSEM_PORT1_END : CSEM_PORT0_END);
5447         bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5448                              port ? XSEM_PORT1_END : XSEM_PORT0_END);
5449         /* Port UPB comes here */
5450         /* Port XPB comes here */
5451
5452         bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5453                              port ? PBF_PORT1_END : PBF_PORT0_END);
5454
5455         /* configure PBF to work without PAUSE for MTU 9000 */
5456         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5457
5458         /* update threshold */
5459         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5460         /* update init credit */
5461         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5462
5463         /* probe changes */
5464         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5465         msleep(5);
5466         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5467
5468 #ifdef BCM_ISCSI
5469         /* tell the searcher where the T2 table is */
5470         REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5471
5472         wb_write[0] = U64_LO(bp->t2_mapping);
5473         wb_write[1] = U64_HI(bp->t2_mapping);
5474         REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5475         wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5476         wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5477         REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5478
5479         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5480         /* Port SRCH comes here */
5481 #endif
5482         /* Port CDU comes here */
5483         /* Port CFC comes here */
5484
5485         if (CHIP_IS_E1(bp)) {
5486                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5487                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5488         }
5489         bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5490                              port ? HC_PORT1_END : HC_PORT0_END);
5491
5492         bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5493                                     MISC_AEU_PORT0_START,
5494                              port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5495         /* init aeu_mask_attn_func_0/1:
5496          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5497          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5498          *             bits 4-7 are used for "per vn group attention" */
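        /* The values written below follow the comment above: 0x7 (0000_0111b)
         * leaves only bits 0-2 unmasked for SF mode, while 0xF7 (1111_0111b)
         * also leaves the per-vn group attention bits 4-7 unmasked for MF. */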
5499         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5500                (IS_E1HMF(bp) ? 0xF7 : 0x7));
5501
5502         /* Port PXPCS comes here */
5503         /* Port EMAC0 comes here */
5504         /* Port EMAC1 comes here */
5505         /* Port DBU comes here */
5506         /* Port DBG comes here */
5507         bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5508                              port ? NIG_PORT1_END : NIG_PORT0_END);
5509
5510         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5511
5512         if (CHIP_IS_E1H(bp)) {
5513                 u32 wsum;
5514                 struct cmng_struct_per_port m_cmng_port;
5515                 int vn;
5516
5517                 /* 0x2 disable e1hov, 0x1 enable */
5518                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5519                        (IS_E1HMF(bp) ? 0x1 : 0x2));
5520
5521                 /* Init RATE SHAPING and FAIRNESS contexts.
5522                    Initialize as if there is a 10G link. */
5523                 wsum = bnx2x_calc_vn_wsum(bp);
5524                 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
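                /* Assumption for illustration: on E1H the absolute function
                 * number is 2*vn + port, which is the id handed to
                 * bnx2x_init_vn_minmax() below. */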
5525                 if (IS_E1HMF(bp))
5526                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
5527                                 bnx2x_init_vn_minmax(bp, 2*vn + port,
5528                                         wsum, 10000, &m_cmng_port);
5529         }
5530
5531         /* Port MCP comes here */
5532         /* Port DMAE comes here */
5533
5534         switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5535         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5536                 /* add SPIO 5 to group 0 */
5537                 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5538                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5539                 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5540                 break;
5541
5542         default:
5543                 break;
5544         }
5545
5546         bnx2x__link_reset(bp);
5547
5548         return 0;
5549 }
5550
5551 #define ILT_PER_FUNC            (768/2)
5552 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
5553 /* the phys address is shifted right 12 bits and has a
5554    1=valid bit added as the 53rd bit
5555    then since this is a wide register(TM)
5556    we split it into two 32 bit writes
5557  */
5558 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5559 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
5560 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
5561 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
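/* Worked example (illustrative): a DMA address of 0x0000001234567000 gives
 * ONCHIP_ADDR1() = 0x01234567 (the address shifted right by 12 bits) and
 * ONCHIP_ADDR2() = 0x00100000 (only the valid bit, since bits above 44 are
 * zero here); PXP_ONE_ILT(5) packs the index into both halves: 0x1405.
 */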
5562
5563 #define CNIC_ILT_LINES          0
5564
5565 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5566 {
5567         int reg;
5568
5569         if (CHIP_IS_E1H(bp))
5570                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5571         else /* E1 */
5572                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5573
5574         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5575 }
5576
5577 static int bnx2x_init_func(struct bnx2x *bp)
5578 {
5579         int port = BP_PORT(bp);
5580         int func = BP_FUNC(bp);
5581         int i;
5582
5583         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
5584
5585         i = FUNC_ILT_BASE(func);
5586
5587         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5588         if (CHIP_IS_E1H(bp)) {
5589                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5590                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5591         } else /* E1 */
5592                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5593                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5594
5595
5596         if (CHIP_IS_E1H(bp)) {
5597                 for (i = 0; i < 9; i++)
5598                         bnx2x_init_block(bp,
5599                                          cm_start[func][i], cm_end[func][i]);
5600
5601                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5602                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5603         }
5604
5605         /* HC init per function */
5606         if (CHIP_IS_E1H(bp)) {
5607                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5608
5609                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5610                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5611         }
5612         bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5613
5614         if (CHIP_IS_E1H(bp))
5615                 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5616
5617         /* Reset PCIE errors for debug */
5618         REG_WR(bp, 0x2114, 0xffffffff);
5619         REG_WR(bp, 0x2120, 0xffffffff);
5620
5621         return 0;
5622 }
5623
5624 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5625 {
5626         int i, rc = 0;
5627
5628         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
5629            BP_FUNC(bp), load_code);
5630
5631         bp->dmae_ready = 0;
5632         mutex_init(&bp->dmae_mutex);
5633         bnx2x_gunzip_init(bp);
5634
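        /* The cases below intentionally fall through: a COMMON load also runs
         * the PORT and FUNCTION init, and a PORT load also runs the FUNCTION
         * init (hence the "no break" markers). */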
5635         switch (load_code) {
5636         case FW_MSG_CODE_DRV_LOAD_COMMON:
5637                 rc = bnx2x_init_common(bp);
5638                 if (rc)
5639                         goto init_hw_err;
5640                 /* no break */
5641
5642         case FW_MSG_CODE_DRV_LOAD_PORT:
5643                 bp->dmae_ready = 1;
5644                 rc = bnx2x_init_port(bp);
5645                 if (rc)
5646                         goto init_hw_err;
5647                 /* no break */
5648
5649         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5650                 bp->dmae_ready = 1;
5651                 rc = bnx2x_init_func(bp);
5652                 if (rc)
5653                         goto init_hw_err;
5654                 break;
5655
5656         default:
5657                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5658                 break;
5659         }
5660
5661         if (!BP_NOMCP(bp)) {
5662                 int func = BP_FUNC(bp);
5663
5664                 bp->fw_drv_pulse_wr_seq =
5665                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
5666                                  DRV_PULSE_SEQ_MASK);
5667                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5668                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
5669                    bp->fw_drv_pulse_wr_seq, bp->func_stx);
5670         } else
5671                 bp->func_stx = 0;
5672
5673         /* this needs to be done before gunzip end */
5674         bnx2x_zero_def_sb(bp);
5675         for_each_queue(bp, i)
5676                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5677
5678 init_hw_err:
5679         bnx2x_gunzip_end(bp);
5680
5681         return rc;
5682 }
5683
5684 /* send the MCP a request, block until there is a reply */
5685 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5686 {
5687         int func = BP_FUNC(bp);
5688         u32 seq = ++bp->fw_seq;
5689         u32 rc = 0;
5690         u32 cnt = 1;
5691         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
5692
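        /* The exchange below writes (command | seq) to the driver mailbox and
         * then polls the firmware mailbox until it echoes the same sequence
         * number, or until the 200-iteration retry budget is exhausted. */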
5693         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
5694         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
5695
5696         do {
5697                 /* let the FW do its magic ... */
5698                 msleep(delay);
5699
5700                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
5701
5702                 /* Give the FW up to 2 seconds (200*10ms) */
5703         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5704
5705         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5706            cnt*delay, rc, seq);
5707
5708         /* is this a reply to our command? */
5709         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5710                 rc &= FW_MSG_CODE_MASK;
5711
5712         } else {
5713                 /* FW BUG! */
5714                 BNX2X_ERR("FW failed to respond!\n");
5715                 bnx2x_fw_dump(bp);
5716                 rc = 0;
5717         }
5718
5719         return rc;
5720 }
5721
5722 static void bnx2x_free_mem(struct bnx2x *bp)
5723 {
5724
5725 #define BNX2X_PCI_FREE(x, y, size) \
5726         do { \
5727                 if (x) { \
5728                         pci_free_consistent(bp->pdev, size, x, y); \
5729                         x = NULL; \
5730                         y = 0; \
5731                 } \
5732         } while (0)
5733
5734 #define BNX2X_FREE(x) \
5735         do { \
5736                 if (x) { \
5737                         vfree(x); \
5738                         x = NULL; \
5739                 } \
5740         } while (0)
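/* Both helpers check for NULL and clear the pointer (and DMA handle) after
 * freeing, so it is safe to call bnx2x_free_mem() on a partially allocated bp. */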
5741
5742         int i;
5743
5744         /* fastpath */
5745         for_each_queue(bp, i) {
5746
5747                 /* Status blocks */
5748                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5749                                bnx2x_fp(bp, i, status_blk_mapping),
5750                                sizeof(struct host_status_block) +
5751                                sizeof(struct eth_tx_db_data));
5752
5753                 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5754                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5755                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5756                                bnx2x_fp(bp, i, tx_desc_mapping),
5757                                sizeof(struct eth_tx_bd) * NUM_TX_BD);
5758
5759                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5760                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5761                                bnx2x_fp(bp, i, rx_desc_mapping),
5762                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
5763
5764                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5765                                bnx2x_fp(bp, i, rx_comp_mapping),
5766                                sizeof(struct eth_fast_path_rx_cqe) *
5767                                NUM_RCQ_BD);
5768
5769                 /* SGE ring */
5770                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5771                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5772                                bnx2x_fp(bp, i, rx_sge_mapping),
5773                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5774         }
5775         /* end of fastpath */
5776
5777         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5778                        sizeof(struct host_def_status_block));
5779
5780         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5781                        sizeof(struct bnx2x_slowpath));
5782
5783 #ifdef BCM_ISCSI
5784         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5785         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);