bnx2x: Re-arrange module parameters
drivers/net/bnx2x_main.c (linux-2.6.git)
1 /* bnx2x_main.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2009 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h>  /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
51 #include <linux/io.h>
52
53
54 #include "bnx2x.h"
55 #include "bnx2x_init.h"
56
57 #define DRV_MODULE_VERSION      "1.45.26"
58 #define DRV_MODULE_RELDATE      "2009/01/26"
59 #define BNX2X_BC_VER            0x040200
60
61 /* Time in jiffies before concluding the transmitter is hung */
62 #define TX_TIMEOUT              (5*HZ)
63
64 static char version[] __devinitdata =
65         "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
66         DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
67
68 MODULE_AUTHOR("Eliezer Tamir");
69 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
70 MODULE_LICENSE("GPL");
71 MODULE_VERSION(DRV_MODULE_VERSION);
72
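/* multi_mode: 0 - single queue, otherwise multi-queue operation (default) */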
73 static int multi_mode = 1;
74 module_param(multi_mode, int, 0);
75
76 static int disable_tpa;
77 module_param(disable_tpa, int, 0);
78 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
79
80 static int int_mode;
81 module_param(int_mode, int, 0);
82 MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
83
84 static int poll;
85 module_param(poll, int, 0);
86 MODULE_PARM_DESC(poll, " Use polling (for debug)");
87
88 static int mrrs = -1;
89 module_param(mrrs, int, 0);
90 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
91
92 static int debug;
93 module_param(debug, int, 0);
94 MODULE_PARM_DESC(debug, " Default debug msglevel");
95
96 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
97
98 static struct workqueue_struct *bnx2x_wq;
99
100 enum bnx2x_board_type {
101         BCM57710 = 0,
102         BCM57711 = 1,
103         BCM57711E = 2,
104 };
105
106 /* indexed by board_type, above */
107 static struct {
108         char *name;
109 } board_info[] __devinitdata = {
110         { "Broadcom NetXtreme II BCM57710 XGb" },
111         { "Broadcom NetXtreme II BCM57711 XGb" },
112         { "Broadcom NetXtreme II BCM57711E XGb" }
113 };
114
115
116 static const struct pci_device_id bnx2x_pci_tbl[] = {
117         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
118                 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
119         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
120                 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
121         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
122                 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
123         { 0 }
124 };
125
126 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
127
128 /****************************************************************************
129 * General service functions
130 ****************************************************************************/
131
132 /* used only at init
133  * locking is done by mcp
134  */
135 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
136 {
137         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
138         pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
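        /* point the GRC address window back at a harmless register (vendor ID) */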
139         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
140                                PCICFG_VENDOR_ID_OFFSET);
141 }
142
143 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
144 {
145         u32 val;
146
147         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
148         pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
149         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
150                                PCICFG_VENDOR_ID_OFFSET);
151
152         return val;
153 }
154
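/* DMAE command "go" registers, one per DMAE command channel */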
155 static const u32 dmae_reg_go_c[] = {
156         DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
157         DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
158         DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
159         DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
160 };
161
162 /* copy command into DMAE command memory and set DMAE command go */
163 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
164                             int idx)
165 {
166         u32 cmd_offset;
167         int i;
168
169         cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
170         for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
171                 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
172
173                 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
174                    idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
175         }
176         REG_WR(bp, dmae_reg_go_c[idx], 1);
177 }
178
179 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
180                       u32 len32)
181 {
182         struct dmae_command *dmae = &bp->init_dmae;
183         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
184         int cnt = 200;
185
186         if (!bp->dmae_ready) {
187                 u32 *data = bnx2x_sp(bp, wb_data[0]);
188
189                 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
190                    "  using indirect\n", dst_addr, len32);
191                 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
192                 return;
193         }
194
195         mutex_lock(&bp->dmae_mutex);
196
197         memset(dmae, 0, sizeof(struct dmae_command));
198
199         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
200                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
201                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
202 #ifdef __BIG_ENDIAN
203                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
204 #else
205                         DMAE_CMD_ENDIANITY_DW_SWAP |
206 #endif
207                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
208                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
209         dmae->src_addr_lo = U64_LO(dma_addr);
210         dmae->src_addr_hi = U64_HI(dma_addr);
211         dmae->dst_addr_lo = dst_addr >> 2;
212         dmae->dst_addr_hi = 0;
213         dmae->len = len32;
214         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
215         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
216         dmae->comp_val = DMAE_COMP_VAL;
217
218         DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
219            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
220                     "dst_addr [%x:%08x (%08x)]\n"
221            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
222            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
223            dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
224            dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
225         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
226            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
227            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
228
229         *wb_comp = 0;
230
231         bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
232
233         udelay(5);
234
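        /* poll until the DMAE engine writes the completion value to wb_comp */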
235         while (*wb_comp != DMAE_COMP_VAL) {
236                 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
237
238                 if (!cnt) {
239                         BNX2X_ERR("dmae timeout!\n");
240                         break;
241                 }
242                 cnt--;
243                 /* adjust delay for emulation/FPGA */
244                 if (CHIP_REV_IS_SLOW(bp))
245                         msleep(100);
246                 else
247                         udelay(5);
248         }
249
250         mutex_unlock(&bp->dmae_mutex);
251 }
252
253 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
254 {
255         struct dmae_command *dmae = &bp->init_dmae;
256         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
257         int cnt = 200;
258
259         if (!bp->dmae_ready) {
260                 u32 *data = bnx2x_sp(bp, wb_data[0]);
261                 int i;
262
263                 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
264                    "  using indirect\n", src_addr, len32);
265                 for (i = 0; i < len32; i++)
266                         data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
267                 return;
268         }
269
270         mutex_lock(&bp->dmae_mutex);
271
272         memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
273         memset(dmae, 0, sizeof(struct dmae_command));
274
275         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
276                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
277                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
278 #ifdef __BIG_ENDIAN
279                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
280 #else
281                         DMAE_CMD_ENDIANITY_DW_SWAP |
282 #endif
283                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
284                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
285         dmae->src_addr_lo = src_addr >> 2;
286         dmae->src_addr_hi = 0;
287         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
288         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
289         dmae->len = len32;
290         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
291         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
292         dmae->comp_val = DMAE_COMP_VAL;
293
294         DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
295            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
296                     "dst_addr [%x:%08x (%08x)]\n"
297            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
298            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
299            dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
300            dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
301
302         *wb_comp = 0;
303
304         bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
305
306         udelay(5);
307
308         while (*wb_comp != DMAE_COMP_VAL) {
309
310                 if (!cnt) {
311                         BNX2X_ERR("dmae timeout!\n");
312                         break;
313                 }
314                 cnt--;
315                 /* adjust delay for emulation/FPGA */
316                 if (CHIP_REV_IS_SLOW(bp))
317                         msleep(100);
318                 else
319                         udelay(5);
320         }
321         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
322            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
323            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
324
325         mutex_unlock(&bp->dmae_mutex);
326 }
327
328 /* used only for slowpath so not inlined */
329 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
330 {
331         u32 wb_write[2];
332
333         wb_write[0] = val_hi;
334         wb_write[1] = val_lo;
335         REG_WR_DMAE(bp, reg, wb_write, 2);
336 }
337
338 #ifdef USE_WB_RD
339 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
340 {
341         u32 wb_data[2];
342
343         REG_RD_DMAE(bp, reg, wb_data, 2);
344
345         return HILO_U64(wb_data[0], wb_data[1]);
346 }
347 #endif
348
349 static int bnx2x_mc_assert(struct bnx2x *bp)
350 {
351         char last_idx;
352         int i, rc = 0;
353         u32 row0, row1, row2, row3;
354
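        /* scan each STORM assert list; the invalid-assert opcode marks its end */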
355         /* XSTORM */
356         last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
357                            XSTORM_ASSERT_LIST_INDEX_OFFSET);
358         if (last_idx)
359                 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
360
361         /* print the asserts */
362         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
363
364                 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
365                               XSTORM_ASSERT_LIST_OFFSET(i));
366                 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
367                               XSTORM_ASSERT_LIST_OFFSET(i) + 4);
368                 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
369                               XSTORM_ASSERT_LIST_OFFSET(i) + 8);
370                 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
371                               XSTORM_ASSERT_LIST_OFFSET(i) + 12);
372
373                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
374                         BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
375                                   " 0x%08x 0x%08x 0x%08x\n",
376                                   i, row3, row2, row1, row0);
377                         rc++;
378                 } else {
379                         break;
380                 }
381         }
382
383         /* TSTORM */
384         last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
385                            TSTORM_ASSERT_LIST_INDEX_OFFSET);
386         if (last_idx)
387                 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
388
389         /* print the asserts */
390         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
391
392                 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
393                               TSTORM_ASSERT_LIST_OFFSET(i));
394                 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
395                               TSTORM_ASSERT_LIST_OFFSET(i) + 4);
396                 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
397                               TSTORM_ASSERT_LIST_OFFSET(i) + 8);
398                 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
399                               TSTORM_ASSERT_LIST_OFFSET(i) + 12);
400
401                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
402                         BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
403                                   " 0x%08x 0x%08x 0x%08x\n",
404                                   i, row3, row2, row1, row0);
405                         rc++;
406                 } else {
407                         break;
408                 }
409         }
410
411         /* CSTORM */
412         last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
413                            CSTORM_ASSERT_LIST_INDEX_OFFSET);
414         if (last_idx)
415                 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
416
417         /* print the asserts */
418         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
419
420                 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
421                               CSTORM_ASSERT_LIST_OFFSET(i));
422                 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
423                               CSTORM_ASSERT_LIST_OFFSET(i) + 4);
424                 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
425                               CSTORM_ASSERT_LIST_OFFSET(i) + 8);
426                 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
427                               CSTORM_ASSERT_LIST_OFFSET(i) + 12);
428
429                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
430                         BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
431                                   " 0x%08x 0x%08x 0x%08x\n",
432                                   i, row3, row2, row1, row0);
433                         rc++;
434                 } else {
435                         break;
436                 }
437         }
438
439         /* USTORM */
440         last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
441                            USTORM_ASSERT_LIST_INDEX_OFFSET);
442         if (last_idx)
443                 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
444
445         /* print the asserts */
446         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
447
448                 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
449                               USTORM_ASSERT_LIST_OFFSET(i));
450                 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
451                               USTORM_ASSERT_LIST_OFFSET(i) + 4);
452                 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
453                               USTORM_ASSERT_LIST_OFFSET(i) + 8);
454                 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
455                               USTORM_ASSERT_LIST_OFFSET(i) + 12);
456
457                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
458                         BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
459                                   " 0x%08x 0x%08x 0x%08x\n",
460                                   i, row3, row2, row1, row0);
461                         rc++;
462                 } else {
463                         break;
464                 }
465         }
466
467         return rc;
468 }
469
470 static void bnx2x_fw_dump(struct bnx2x *bp)
471 {
472         u32 mark, offset;
473         __be32 data[9];
474         int word;
475
476         mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
477         mark = ((mark + 0x3) & ~0x3);
478         printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
479
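        /* dump the scratchpad log in two chunks: from the mark to the end of
         * the buffer, then from the start of the buffer up to the mark */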
480         for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
481                 for (word = 0; word < 8; word++)
482                         data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
483                                                   offset + 4*word));
484                 data[8] = 0x0;
485                 printk(KERN_CONT "%s", (char *)data);
486         }
487         for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
488                 for (word = 0; word < 8; word++)
489                         data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
490                                                   offset + 4*word));
491                 data[8] = 0x0;
492                 printk(KERN_CONT "%s", (char *)data);
493         }
494         printk("\n" KERN_ERR PFX "end of fw dump\n");
495 }
496
497 static void bnx2x_panic_dump(struct bnx2x *bp)
498 {
499         int i;
500         u16 j, start, end;
501
502         bp->stats_state = STATS_STATE_DISABLED;
503         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
504
505         BNX2X_ERR("begin crash dump -----------------\n");
506
507         /* Indices */
508         /* Common */
509         BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
510                   "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
511                   "  spq_prod_idx(%u)\n",
512                   bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
513                   bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
514
515         /* Rx */
516         for_each_rx_queue(bp, i) {
517                 struct bnx2x_fastpath *fp = &bp->fp[i];
518
519                 BNX2X_ERR("queue[%d]: rx_bd_prod(%x)  rx_bd_cons(%x)"
520                           "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
521                           "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
522                           i, fp->rx_bd_prod, fp->rx_bd_cons,
523                           le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
524                           fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
525                 BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
526                           "  fp_u_idx(%x) *sb_u_idx(%x)\n",
527                           fp->rx_sge_prod, fp->last_max_sge,
528                           le16_to_cpu(fp->fp_u_idx),
529                           fp->status_blk->u_status_block.status_block_index);
530         }
531
532         /* Tx */
533         for_each_tx_queue(bp, i) {
534                 struct bnx2x_fastpath *fp = &bp->fp[i];
535                 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
536
537                 BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
538                           "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
539                           i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
540                           fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
541                 BNX2X_ERR("          fp_c_idx(%x)  *sb_c_idx(%x)"
542                           "  bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
543                           fp->status_blk->c_status_block.status_block_index,
544                           hw_prods->packets_prod, hw_prods->bds_prod);
545         }
546
547         /* Rings */
548         /* Rx */
549         for_each_rx_queue(bp, i) {
550                 struct bnx2x_fastpath *fp = &bp->fp[i];
551
552                 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
553                 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
554                 for (j = start; j != end; j = RX_BD(j + 1)) {
555                         u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
556                         struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
557
558                         BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
559                                   j, rx_bd[1], rx_bd[0], sw_bd->skb);
560                 }
561
562                 start = RX_SGE(fp->rx_sge_prod);
563                 end = RX_SGE(fp->last_max_sge);
564                 for (j = start; j != end; j = RX_SGE(j + 1)) {
565                         u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
566                         struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
567
568                         BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
569                                   j, rx_sge[1], rx_sge[0], sw_page->page);
570                 }
571
572                 start = RCQ_BD(fp->rx_comp_cons - 10);
573                 end = RCQ_BD(fp->rx_comp_cons + 503);
574                 for (j = start; j != end; j = RCQ_BD(j + 1)) {
575                         u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
576
577                         BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
578                                   j, cqe[0], cqe[1], cqe[2], cqe[3]);
579                 }
580         }
581
582         /* Tx */
583         for_each_tx_queue(bp, i) {
584                 struct bnx2x_fastpath *fp = &bp->fp[i];
585
586                 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
587                 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
588                 for (j = start; j != end; j = TX_BD(j + 1)) {
589                         struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
590
591                         BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
592                                   sw_bd->skb, sw_bd->first_bd);
593                 }
594
595                 start = TX_BD(fp->tx_bd_cons - 10);
596                 end = TX_BD(fp->tx_bd_cons + 254);
597                 for (j = start; j != end; j = TX_BD(j + 1)) {
598                         u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
599
600                         BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
601                                   j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
602                 }
603         }
604
605         bnx2x_fw_dump(bp);
606         bnx2x_mc_assert(bp);
607         BNX2X_ERR("end crash dump -----------------\n");
608 }
609
610 static void bnx2x_int_enable(struct bnx2x *bp)
611 {
612         int port = BP_PORT(bp);
613         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
614         u32 val = REG_RD(bp, addr);
615         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
616         int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
617
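        /* select how the HC delivers interrupts (MSI-X, MSI or INTx);
         * attention bits are enabled in every mode */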
618         if (msix) {
619                 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
620                          HC_CONFIG_0_REG_INT_LINE_EN_0);
621                 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
622                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
623         } else if (msi) {
624                 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
625                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
626                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
627                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
628         } else {
629                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
630                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
631                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
632                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
633
634                 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
635                    val, port, addr);
636
637                 REG_WR(bp, addr, val);
638
639                 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
640         }
641
642         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
643            val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
644
645         REG_WR(bp, addr, val);
646
647         if (CHIP_IS_E1H(bp)) {
648                 /* init leading/trailing edge */
649                 if (IS_E1HMF(bp)) {
650                         val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
651                         if (bp->port.pmf)
652                                 /* enable nig and gpio3 attention */
653                                 val |= 0x1100;
654                 } else
655                         val = 0xffff;
656
657                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
658                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
659         }
660 }
661
662 static void bnx2x_int_disable(struct bnx2x *bp)
663 {
664         int port = BP_PORT(bp);
665         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
666         u32 val = REG_RD(bp, addr);
667
668         val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
669                  HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
670                  HC_CONFIG_0_REG_INT_LINE_EN_0 |
671                  HC_CONFIG_0_REG_ATTN_BIT_EN_0);
672
673         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
674            val, port, addr);
675
676         /* flush all outstanding writes */
677         mmiowb();
678
679         REG_WR(bp, addr, val);
680         if (REG_RD(bp, addr) != val)
681                 BNX2X_ERR("BUG! proper val not read from IGU!\n");
682 }
683
684 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
685 {
686         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
687         int i, offset;
688
689         /* disable interrupt handling */
690         atomic_inc(&bp->intr_sem);
691         if (disable_hw)
692                 /* prevent the HW from sending interrupts */
693                 bnx2x_int_disable(bp);
694
695         /* make sure all ISRs are done */
696         if (msix) {
697                 synchronize_irq(bp->msix_table[0].vector);
698                 offset = 1;
699                 for_each_queue(bp, i)
700                         synchronize_irq(bp->msix_table[i + offset].vector);
701         } else
702                 synchronize_irq(bp->pdev->irq);
703
704         /* make sure sp_task is not running */
705         cancel_delayed_work(&bp->sp_task);
706         flush_workqueue(bnx2x_wq);
707 }
708
709 /* fast path */
710
711 /*
712  * General service functions
713  */
714
715 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
716                                 u8 storm, u16 index, u8 op, u8 update)
717 {
718         u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
719                        COMMAND_REG_INT_ACK);
720         struct igu_ack_register igu_ack;
721
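        /* pack sb id, storm id, index, op and update flag into one IGU ack dword */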
722         igu_ack.status_block_index = index;
723         igu_ack.sb_id_and_flags =
724                         ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
725                          (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
726                          (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
727                          (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
728
729         DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
730            (*(u32 *)&igu_ack), hc_addr);
731         REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
732 }
733
734 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
735 {
736         struct host_status_block *fpsb = fp->status_blk;
737         u16 rc = 0;
738
739         barrier(); /* status block is written to by the chip */
740         if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
741                 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
742                 rc |= 1;
743         }
744         if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
745                 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
746                 rc |= 2;
747         }
748         return rc;
749 }
750
751 static u16 bnx2x_ack_int(struct bnx2x *bp)
752 {
753         u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
754                        COMMAND_REG_SIMD_MASK);
755         u32 result = REG_RD(bp, hc_addr);
756
757         DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
758            result, hc_addr);
759
760         return result;
761 }
762
763
764 /*
765  * fast path service functions
766  */
767
768 static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
769 {
770         u16 tx_cons_sb;
771
772         /* Tell compiler that status block fields can change */
773         barrier();
774         tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
775         return (fp->tx_pkt_cons != tx_cons_sb);
776 }
777
778 static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
779 {
780         /* Tell compiler that consumer and producer can change */
781         barrier();
782         return (fp->tx_pkt_prod != fp->tx_pkt_cons);
783
784 }
785
786 /* free skb in the packet ring at pos idx
787  * return idx of last bd freed
788  */
789 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
790                              u16 idx)
791 {
792         struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
793         struct eth_tx_bd *tx_bd;
794         struct sk_buff *skb = tx_buf->skb;
795         u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
796         int nbd;
797
798         DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
799            idx, tx_buf, skb);
800
801         /* unmap first bd */
802         DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
803         tx_bd = &fp->tx_desc_ring[bd_idx];
804         pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
805                          BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
806
807         nbd = le16_to_cpu(tx_bd->nbd) - 1;
808         new_cons = nbd + tx_buf->first_bd;
809 #ifdef BNX2X_STOP_ON_ERROR
810         if (nbd > (MAX_SKB_FRAGS + 2)) {
811                 BNX2X_ERR("BAD nbd!\n");
812                 bnx2x_panic();
813         }
814 #endif
815
816         /* Skip a parse bd and the TSO split header bd
817            since they have no mapping */
818         if (nbd)
819                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
820
821         if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
822                                            ETH_TX_BD_FLAGS_TCP_CSUM |
823                                            ETH_TX_BD_FLAGS_SW_LSO)) {
824                 if (--nbd)
825                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
826                 tx_bd = &fp->tx_desc_ring[bd_idx];
827                 /* is this a TSO split header bd? */
828                 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
829                         if (--nbd)
830                                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
831                 }
832         }
833
834         /* now free frags */
835         while (nbd > 0) {
836
837                 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
838                 tx_bd = &fp->tx_desc_ring[bd_idx];
839                 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
840                                BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
841                 if (--nbd)
842                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
843         }
844
845         /* release skb */
846         WARN_ON(!skb);
847         dev_kfree_skb(skb);
848         tx_buf->first_bd = 0;
849         tx_buf->skb = NULL;
850
851         return new_cons;
852 }
853
854 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
855 {
856         s16 used;
857         u16 prod;
858         u16 cons;
859
860         barrier(); /* Tell compiler that prod and cons can change */
861         prod = fp->tx_bd_prod;
862         cons = fp->tx_bd_cons;
863
864         /* NUM_TX_RINGS = number of "next-page" entries
865            It will be used as a threshold */
866         used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
867
868 #ifdef BNX2X_STOP_ON_ERROR
869         WARN_ON(used < 0);
870         WARN_ON(used > fp->bp->tx_ring_size);
871         WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
872 #endif
873
874         return (s16)(fp->bp->tx_ring_size) - used;
875 }
876
877 static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
878 {
879         struct bnx2x *bp = fp->bp;
880         struct netdev_queue *txq;
881         u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
882         int done = 0;
883
884 #ifdef BNX2X_STOP_ON_ERROR
885         if (unlikely(bp->panic))
886                 return;
887 #endif
888
889         txq = netdev_get_tx_queue(bp->dev, fp->index);
890         hw_cons = le16_to_cpu(*fp->tx_cons_sb);
891         sw_cons = fp->tx_pkt_cons;
892
893         while (sw_cons != hw_cons) {
894                 u16 pkt_cons;
895
896                 pkt_cons = TX_BD(sw_cons);
897
898                 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
899
900                 DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
901                    hw_cons, sw_cons, pkt_cons);
902
903 /*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
904                         rmb();
905                         prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
906                 }
907 */
908                 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
909                 sw_cons++;
910                 done++;
911
912                 if (done == work)
913                         break;
914         }
915
916         fp->tx_pkt_cons = sw_cons;
917         fp->tx_bd_cons = bd_cons;
918
919         /* Need to make the tx_bd_cons update visible to start_xmit()
920          * before checking for netif_tx_queue_stopped().  Without the
921          * memory barrier, there is a small possibility that start_xmit()
922          * will miss it and cause the queue to be stopped forever.
923          */
924         smp_mb();
925
926         /* TBD need a thresh? */
927         if (unlikely(netif_tx_queue_stopped(txq))) {
928
929                 __netif_tx_lock(txq, smp_processor_id());
930
931                 if ((netif_tx_queue_stopped(txq)) &&
932                     (bp->state == BNX2X_STATE_OPEN) &&
933                     (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
934                         netif_tx_wake_queue(txq);
935
936                 __netif_tx_unlock(txq);
937         }
938 }
939
940
941 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
942                            union eth_rx_cqe *rr_cqe)
943 {
944         struct bnx2x *bp = fp->bp;
945         int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
946         int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
947
948         DP(BNX2X_MSG_SP,
949            "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
950            fp->index, cid, command, bp->state,
951            rr_cqe->ramrod_cqe.ramrod_type);
952
953         bp->spq_left++;
954
955         if (fp->index) {
956                 switch (command | fp->state) {
957                 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
958                                                 BNX2X_FP_STATE_OPENING):
959                         DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
960                            cid);
961                         fp->state = BNX2X_FP_STATE_OPEN;
962                         break;
963
964                 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
965                         DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
966                            cid);
967                         fp->state = BNX2X_FP_STATE_HALTED;
968                         break;
969
970                 default:
971                         BNX2X_ERR("unexpected MC reply (%d)  "
972                                   "fp->state is %x\n", command, fp->state);
973                         break;
974                 }
975                 mb(); /* force bnx2x_wait_ramrod() to see the change */
976                 return;
977         }
978
979         switch (command | bp->state) {
980         case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
981                 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
982                 bp->state = BNX2X_STATE_OPEN;
983                 break;
984
985         case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
986                 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
987                 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
988                 fp->state = BNX2X_FP_STATE_HALTED;
989                 break;
990
991         case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
992                 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
993                 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
994                 break;
995
996
997         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
998         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
999                 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
1000                 bp->set_mac_pending = 0;
1001                 break;
1002
1003         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
1004                 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
1005                 break;
1006
1007         default:
1008                 BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
1009                           command, bp->state);
1010                 break;
1011         }
1012         mb(); /* force bnx2x_wait_ramrod() to see the change */
1013 }
1014
1015 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1016                                      struct bnx2x_fastpath *fp, u16 index)
1017 {
1018         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1019         struct page *page = sw_buf->page;
1020         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1021
1022         /* Skip "next page" elements */
1023         if (!page)
1024                 return;
1025
1026         pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
1027                        SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1028         __free_pages(page, PAGES_PER_SGE_SHIFT);
1029
1030         sw_buf->page = NULL;
1031         sge->addr_hi = 0;
1032         sge->addr_lo = 0;
1033 }
1034
1035 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1036                                            struct bnx2x_fastpath *fp, int last)
1037 {
1038         int i;
1039
1040         for (i = 0; i < last; i++)
1041                 bnx2x_free_rx_sge(bp, fp, i);
1042 }
1043
1044 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1045                                      struct bnx2x_fastpath *fp, u16 index)
1046 {
1047         struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1048         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1049         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1050         dma_addr_t mapping;
1051
1052         if (unlikely(page == NULL))
1053                 return -ENOMEM;
1054
1055         mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
1056                                PCI_DMA_FROMDEVICE);
1057         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1058                 __free_pages(page, PAGES_PER_SGE_SHIFT);
1059                 return -ENOMEM;
1060         }
1061
1062         sw_buf->page = page;
1063         pci_unmap_addr_set(sw_buf, mapping, mapping);
1064
1065         sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1066         sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1067
1068         return 0;
1069 }
1070
1071 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1072                                      struct bnx2x_fastpath *fp, u16 index)
1073 {
1074         struct sk_buff *skb;
1075         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1076         struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1077         dma_addr_t mapping;
1078
1079         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1080         if (unlikely(skb == NULL))
1081                 return -ENOMEM;
1082
1083         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
1084                                  PCI_DMA_FROMDEVICE);
1085         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1086                 dev_kfree_skb(skb);
1087                 return -ENOMEM;
1088         }
1089
1090         rx_buf->skb = skb;
1091         pci_unmap_addr_set(rx_buf, mapping, mapping);
1092
1093         rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1094         rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1095
1096         return 0;
1097 }
1098
1099 /* note that we are not allocating a new skb,
1100  * we are just moving one from cons to prod
1101  * we are not creating a new mapping,
1102  * so there is no need to check for dma_mapping_error().
1103  */
1104 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1105                                struct sk_buff *skb, u16 cons, u16 prod)
1106 {
1107         struct bnx2x *bp = fp->bp;
1108         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1109         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1110         struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1111         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1112
1113         pci_dma_sync_single_for_device(bp->pdev,
1114                                        pci_unmap_addr(cons_rx_buf, mapping),
1115                                        RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1116
1117         prod_rx_buf->skb = cons_rx_buf->skb;
1118         pci_unmap_addr_set(prod_rx_buf, mapping,
1119                            pci_unmap_addr(cons_rx_buf, mapping));
1120         *prod_bd = *cons_bd;
1121 }
1122
1123 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1124                                              u16 idx)
1125 {
1126         u16 last_max = fp->last_max_sge;
1127
1128         if (SUB_S16(idx, last_max) > 0)
1129                 fp->last_max_sge = idx;
1130 }
1131
1132 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1133 {
1134         int i, j;
1135
1136         for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1137                 int idx = RX_SGE_CNT * i - 1;
1138
1139                 for (j = 0; j < 2; j++) {
1140                         SGE_MASK_CLEAR_BIT(fp, idx);
1141                         idx--;
1142                 }
1143         }
1144 }
1145
1146 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1147                                   struct eth_fast_path_rx_cqe *fp_cqe)
1148 {
1149         struct bnx2x *bp = fp->bp;
1150         u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1151                                      le16_to_cpu(fp_cqe->len_on_bd)) >>
1152                       SGE_PAGE_SHIFT;
1153         u16 last_max, last_elem, first_elem;
1154         u16 delta = 0;
1155         u16 i;
1156
1157         if (!sge_len)
1158                 return;
1159
1160         /* First mark all used pages */
1161         for (i = 0; i < sge_len; i++)
1162                 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1163
1164         DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1165            sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1166
1167         /* Here we assume that the last SGE index is the biggest */
1168         prefetch((void *)(fp->sge_mask));
1169         bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1170
1171         last_max = RX_SGE(fp->last_max_sge);
1172         last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1173         first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1174
1175         /* If ring is not full */
1176         if (last_elem + 1 != first_elem)
1177                 last_elem++;
1178
1179         /* Now update the prod */
1180         for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1181                 if (likely(fp->sge_mask[i]))
1182                         break;
1183
1184                 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1185                 delta += RX_SGE_MASK_ELEM_SZ;
1186         }
1187
1188         if (delta > 0) {
1189                 fp->rx_sge_prod += delta;
1190                 /* clear page-end entries */
1191                 bnx2x_clear_sge_mask_next_elems(fp);
1192         }
1193
1194         DP(NETIF_MSG_RX_STATUS,
1195            "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
1196            fp->last_max_sge, fp->rx_sge_prod);
1197 }
1198
1199 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1200 {
1201         /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1202         memset(fp->sge_mask, 0xff,
1203                (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1204
1205         /* Clear the two last indices in the page to 1:
1206            these are the indices that correspond to the "next" element,
1207            hence will never be indicated and should be removed from
1208            the calculations. */
1209         bnx2x_clear_sge_mask_next_elems(fp);
1210 }
1211
1212 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1213                             struct sk_buff *skb, u16 cons, u16 prod)
1214 {
1215         struct bnx2x *bp = fp->bp;
1216         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1217         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1218         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1219         dma_addr_t mapping;
1220
1221         /* move empty skb from pool to prod and map it */
1222         prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1223         mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1224                                  bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1225         pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1226
1227         /* move partial skb from cons to pool (don't unmap yet) */
1228         fp->tpa_pool[queue] = *cons_rx_buf;
1229
1230         /* mark bin state as start - print error if current state != stop */
1231         if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1232                 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1233
1234         fp->tpa_state[queue] = BNX2X_TPA_START;
1235
1236         /* point prod_bd to new skb */
1237         prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1238         prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1239
1240 #ifdef BNX2X_STOP_ON_ERROR
1241         fp->tpa_queue_used |= (1 << queue);
1242 #ifdef __powerpc64__
1243         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1244 #else
1245         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1246 #endif
1247            fp->tpa_queue_used);
1248 #endif
1249 }
1250
1251 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1252                                struct sk_buff *skb,
1253                                struct eth_fast_path_rx_cqe *fp_cqe,
1254                                u16 cqe_idx)
1255 {
1256         struct sw_rx_page *rx_pg, old_rx_pg;
1257         u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1258         u32 i, frag_len, frag_size, pages;
1259         int err;
1260         int j;
1261
1262         frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1263         pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1264
1265         /* This is needed in order to enable forwarding support */
1266         if (frag_size)
1267                 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1268                                                max(frag_size, (u32)len_on_bd));
1269
1270 #ifdef BNX2X_STOP_ON_ERROR
1271         if (pages >
1272             min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
1273                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1274                           pages, cqe_idx);
1275                 BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
1276                           fp_cqe->pkt_len, len_on_bd);
1277                 bnx2x_panic();
1278                 return -EINVAL;
1279         }
1280 #endif
1281
1282         /* Run through the SGL and compose the fragmented skb */
1283         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1284                 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1285
1286                 /* FW gives the indices of the SGE as if the ring is an array
1287                    (meaning that "next" element will consume 2 indices) */
1288                 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1289                 rx_pg = &fp->rx_page_ring[sge_idx];
1290                 old_rx_pg = *rx_pg;
1291
1292                 /* If we fail to allocate a substitute page, we simply stop
1293                    where we are and drop the whole packet */
1294                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1295                 if (unlikely(err)) {
1296                         fp->eth_q_stats.rx_skb_alloc_failed++;
1297                         return err;
1298                 }
1299
1300                 /* Unmap the page as we're going to pass it to the stack */
1301                 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1302                               SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1303
1304                 /* Add one frag and update the appropriate fields in the skb */
1305                 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1306
1307                 skb->data_len += frag_len;
1308                 skb->truesize += frag_len;
1309                 skb->len += frag_len;
1310
1311                 frag_size -= frag_len;
1312         }
1313
1314         return 0;
1315 }
1316
1317 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1318                            u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1319                            u16 cqe_idx)
1320 {
1321         struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1322         struct sk_buff *skb = rx_buf->skb;
1323         /* alloc new skb */
1324         struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1325
1326         /* Unmap skb in the pool anyway, as we are going to change
1327            pool entry status to BNX2X_TPA_STOP even if new skb allocation
1328            fails. */
1329         pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1330                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1331
1332         if (likely(new_skb)) {
1333                 /* fix ip xsum and give it to the stack */
1334                 /* (no need to map the new skb) */
1335 #ifdef BCM_VLAN
1336                 int is_vlan_cqe =
1337                         (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1338                          PARSING_FLAGS_VLAN);
1339                 int is_not_hwaccel_vlan_cqe =
1340                         (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1341 #endif
1342
1343                 prefetch(skb);
1344                 prefetch(((char *)(skb)) + 128);
1345
1346 #ifdef BNX2X_STOP_ON_ERROR
1347                 if (pad + len > bp->rx_buf_size) {
1348                         BNX2X_ERR("skb_put is about to fail...  "
1349                                   "pad %d  len %d  rx_buf_size %d\n",
1350                                   pad, len, bp->rx_buf_size);
1351                         bnx2x_panic();
1352                         return;
1353                 }
1354 #endif
1355
1356                 skb_reserve(skb, pad);
1357                 skb_put(skb, len);
1358
1359                 skb->protocol = eth_type_trans(skb, bp->dev);
1360                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1361
1362                 {
1363                         struct iphdr *iph;
1364
1365                         iph = (struct iphdr *)skb->data;
1366 #ifdef BCM_VLAN
1367                         /* If there is no Rx VLAN offloading -
1368                            take VLAN tag into an account */
1369                         if (unlikely(is_not_hwaccel_vlan_cqe))
1370                                 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1371 #endif
1372                         iph->check = 0;
1373                         iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1374                 }
1375
1376                 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1377                                          &cqe->fast_path_cqe, cqe_idx)) {
1378 #ifdef BCM_VLAN
1379                         if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1380                             (!is_not_hwaccel_vlan_cqe))
1381                                 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1382                                                 le16_to_cpu(cqe->fast_path_cqe.
1383                                                             vlan_tag));
1384                         else
1385 #endif
1386                                 netif_receive_skb(skb);
1387                 } else {
1388                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1389                            " - dropping packet!\n");
1390                         dev_kfree_skb(skb);
1391                 }
1392
1393
1394                 /* put new skb in bin */
1395                 fp->tpa_pool[queue].skb = new_skb;
1396
1397         } else {
1398                 /* else drop the packet and keep the buffer in the bin */
1399                 DP(NETIF_MSG_RX_STATUS,
1400                    "Failed to allocate new skb - dropping packet!\n");
1401                 fp->eth_q_stats.rx_skb_alloc_failed++;
1402         }
1403
1404         fp->tpa_state[queue] = BNX2X_TPA_STOP;
1405 }
1406
1407 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1408                                         struct bnx2x_fastpath *fp,
1409                                         u16 bd_prod, u16 rx_comp_prod,
1410                                         u16 rx_sge_prod)
1411 {
1412         struct ustorm_eth_rx_producers rx_prods = {0};
1413         int i;
1414
1415         /* Update producers */
1416         rx_prods.bd_prod = bd_prod;
1417         rx_prods.cqe_prod = rx_comp_prod;
1418         rx_prods.sge_prod = rx_sge_prod;
1419
1420         /*
1421          * Make sure that the BD and SGE data is updated before updating the
1422          * producers since FW might read the BD/SGE right after the producer
1423          * is updated.
1424          * This is only applicable for weak-ordered memory model archs such
1425          * as IA-64. The following barrier is also mandatory since FW will
1426          * assume BDs must have buffers.
1427          */
1428         wmb();
1429
1430         for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1431                 REG_WR(bp, BAR_USTRORM_INTMEM +
1432                        USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1433                        ((u32 *)&rx_prods)[i]);
1434
1435         mmiowb(); /* keep prod updates ordered */
1436
1437         DP(NETIF_MSG_RX_STATUS,
1438            "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
1439            fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1440 }
1441
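/* Main Rx completion handler: walks the RCQ until it catches up with the
 * HW completion index or the NAPI budget is exhausted.  Slow path CQEs are
 * forwarded to bnx2x_sp_event(), TPA start/stop CQEs to the TPA handlers,
 * and regular packets are passed up the stack (with an optional copy-break
 * for small frames when a jumbo MTU is used).  Finally the new consumer
 * and producer values are written back to the chip.
 */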
1442 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1443 {
1444         struct bnx2x *bp = fp->bp;
1445         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1446         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1447         int rx_pkt = 0;
1448
1449 #ifdef BNX2X_STOP_ON_ERROR
1450         if (unlikely(bp->panic))
1451                 return 0;
1452 #endif
1453
1454         /* The CQ "next element" is the same size as a regular element,
1455            that's why it's OK here */
1456         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1457         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1458                 hw_comp_cons++;
1459
1460         bd_cons = fp->rx_bd_cons;
1461         bd_prod = fp->rx_bd_prod;
1462         bd_prod_fw = bd_prod;
1463         sw_comp_cons = fp->rx_comp_cons;
1464         sw_comp_prod = fp->rx_comp_prod;
1465
1466         /* Memory barrier necessary as speculative reads of the rx
1467          * buffer can be ahead of the index in the status block
1468          */
1469         rmb();
1470
1471         DP(NETIF_MSG_RX_STATUS,
1472            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1473            fp->index, hw_comp_cons, sw_comp_cons);
1474
1475         while (sw_comp_cons != hw_comp_cons) {
1476                 struct sw_rx_bd *rx_buf = NULL;
1477                 struct sk_buff *skb;
1478                 union eth_rx_cqe *cqe;
1479                 u8 cqe_fp_flags;
1480                 u16 len, pad;
1481
1482                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1483                 bd_prod = RX_BD(bd_prod);
1484                 bd_cons = RX_BD(bd_cons);
1485
1486                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1487                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1488
1489                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1490                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1491                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1492                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1493                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1494                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1495
1496                 /* is this a slowpath msg? */
1497                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1498                         bnx2x_sp_event(fp, cqe);
1499                         goto next_cqe;
1500
1501                 /* this is an rx packet */
1502                 } else {
1503                         rx_buf = &fp->rx_buf_ring[bd_cons];
1504                         skb = rx_buf->skb;
1505                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1506                         pad = cqe->fast_path_cqe.placement_offset;
1507
1508                         /* If CQE is marked both TPA_START and TPA_END
1509                            it is a non-TPA CQE */
1510                         if ((!fp->disable_tpa) &&
1511                             (TPA_TYPE(cqe_fp_flags) !=
1512                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1513                                 u16 queue = cqe->fast_path_cqe.queue_index;
1514
1515                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1516                                         DP(NETIF_MSG_RX_STATUS,
1517                                            "calling tpa_start on queue %d\n",
1518                                            queue);
1519
1520                                         bnx2x_tpa_start(fp, queue, skb,
1521                                                         bd_cons, bd_prod);
1522                                         goto next_rx;
1523                                 }
1524
1525                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1526                                         DP(NETIF_MSG_RX_STATUS,
1527                                            "calling tpa_stop on queue %d\n",
1528                                            queue);
1529
1530                                         if (!BNX2X_RX_SUM_FIX(cqe))
1531                                                 BNX2X_ERR("STOP on non-TCP "
1532                                                           "data\n");
1533
1534                                         /* This is the size of the linear data
1535                                            on this skb */
1536                                         len = le16_to_cpu(cqe->fast_path_cqe.
1537                                                                 len_on_bd);
1538                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1539                                                     len, cqe, comp_ring_cons);
1540 #ifdef BNX2X_STOP_ON_ERROR
1541                                         if (bp->panic)
1542                                                 return -EINVAL;
1543 #endif
1544
1545                                         bnx2x_update_sge_prod(fp,
1546                                                         &cqe->fast_path_cqe);
1547                                         goto next_cqe;
1548                                 }
1549                         }
1550
1551                         pci_dma_sync_single_for_device(bp->pdev,
1552                                         pci_unmap_addr(rx_buf, mapping),
1553                                                        pad + RX_COPY_THRESH,
1554                                                        PCI_DMA_FROMDEVICE);
1555                         prefetch(skb);
1556                         prefetch(((char *)(skb)) + 128);
1557
1558                         /* is this an error packet? */
1559                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1560                                 DP(NETIF_MSG_RX_ERR,
1561                                    "ERROR  flags %x  rx packet %u\n",
1562                                    cqe_fp_flags, sw_comp_cons);
1563                                 fp->eth_q_stats.rx_err_discard_pkt++;
1564                                 goto reuse_rx;
1565                         }
1566
1567                         /* Since we don't have a jumbo ring,
1568                          * copy small packets if the MTU is larger than 1500
1569                          */
1570                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1571                             (len <= RX_COPY_THRESH)) {
1572                                 struct sk_buff *new_skb;
1573
1574                                 new_skb = netdev_alloc_skb(bp->dev,
1575                                                            len + pad);
1576                                 if (new_skb == NULL) {
1577                                         DP(NETIF_MSG_RX_ERR,
1578                                            "ERROR  packet dropped "
1579                                            "because of alloc failure\n");
1580                                         fp->eth_q_stats.rx_skb_alloc_failed++;
1581                                         goto reuse_rx;
1582                                 }
1583
1584                                 /* aligned copy */
1585                                 skb_copy_from_linear_data_offset(skb, pad,
1586                                                     new_skb->data + pad, len);
1587                                 skb_reserve(new_skb, pad);
1588                                 skb_put(new_skb, len);
1589
1590                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1591
1592                                 skb = new_skb;
1593
1594                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1595                                 pci_unmap_single(bp->pdev,
1596                                         pci_unmap_addr(rx_buf, mapping),
1597                                                  bp->rx_buf_size,
1598                                                  PCI_DMA_FROMDEVICE);
1599                                 skb_reserve(skb, pad);
1600                                 skb_put(skb, len);
1601
1602                         } else {
1603                                 DP(NETIF_MSG_RX_ERR,
1604                                    "ERROR  packet dropped because "
1605                                    "of alloc failure\n");
1606                                 fp->eth_q_stats.rx_skb_alloc_failed++;
1607 reuse_rx:
1608                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1609                                 goto next_rx;
1610                         }
1611
1612                         skb->protocol = eth_type_trans(skb, bp->dev);
1613
1614                         skb->ip_summed = CHECKSUM_NONE;
1615                         if (bp->rx_csum) {
1616                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1617                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1618                                 else
1619                                         fp->eth_q_stats.hw_csum_err++;
1620                         }
1621                 }
1622
1623                 skb_record_rx_queue(skb, fp->index);
1624 #ifdef BCM_VLAN
1625                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1626                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1627                      PARSING_FLAGS_VLAN))
1628                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1629                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1630                 else
1631 #endif
1632                         netif_receive_skb(skb);
1633
1634
1635 next_rx:
1636                 rx_buf->skb = NULL;
1637
1638                 bd_cons = NEXT_RX_IDX(bd_cons);
1639                 bd_prod = NEXT_RX_IDX(bd_prod);
1640                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1641                 rx_pkt++;
1642 next_cqe:
1643                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1644                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1645
1646                 if (rx_pkt == budget)
1647                         break;
1648         } /* while */
1649
1650         fp->rx_bd_cons = bd_cons;
1651         fp->rx_bd_prod = bd_prod_fw;
1652         fp->rx_comp_cons = sw_comp_cons;
1653         fp->rx_comp_prod = sw_comp_prod;
1654
1655         /* Update producers */
1656         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1657                              fp->rx_sge_prod);
1658
1659         fp->rx_pkt += rx_pkt;
1660         fp->rx_calls++;
1661
1662         return rx_pkt;
1663 }
1664
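/* MSI-X fast-path interrupt handler: acknowledge the status block with
 * interrupts disabled and let NAPI do the actual Rx/Tx work.
 */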
1665 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1666 {
1667         struct bnx2x_fastpath *fp = fp_cookie;
1668         struct bnx2x *bp = fp->bp;
1669         int index = fp->index;
1670
1671         /* Return here if interrupt is disabled */
1672         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1673                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1674                 return IRQ_HANDLED;
1675         }
1676
1677         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1678            index, fp->sb_id);
1679         bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1680
1681 #ifdef BNX2X_STOP_ON_ERROR
1682         if (unlikely(bp->panic))
1683                 return IRQ_HANDLED;
1684 #endif
1685
1686         prefetch(fp->rx_cons_sb);
1687         prefetch(fp->tx_cons_sb);
1688         prefetch(&fp->status_blk->c_status_block.status_block_index);
1689         prefetch(&fp->status_blk->u_status_block.status_block_index);
1690
1691         napi_schedule(&bnx2x_fp(bp, index, napi));
1692
1693         return IRQ_HANDLED;
1694 }
1695
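/* INTA/MSI interrupt handler: a single vector is shared by fast-path
 * queue 0 and the slow path.  Fast-path work is deferred to NAPI while
 * slow-path work is queued on the bnx2x workqueue.
 */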
1696 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1697 {
1698         struct bnx2x *bp = netdev_priv(dev_instance);
1699         u16 status = bnx2x_ack_int(bp);
1700         u16 mask;
1701
1702         /* Return here if interrupt is shared and it's not for us */
1703         if (unlikely(status == 0)) {
1704                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1705                 return IRQ_NONE;
1706         }
1707         DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);
1708
1709         /* Return here if interrupt is disabled */
1710         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1711                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1712                 return IRQ_HANDLED;
1713         }
1714
1715 #ifdef BNX2X_STOP_ON_ERROR
1716         if (unlikely(bp->panic))
1717                 return IRQ_HANDLED;
1718 #endif
1719
1720         mask = 0x2 << bp->fp[0].sb_id;
1721         if (status & mask) {
1722                 struct bnx2x_fastpath *fp = &bp->fp[0];
1723
1724                 prefetch(fp->rx_cons_sb);
1725                 prefetch(fp->tx_cons_sb);
1726                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1727                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1728
1729                 napi_schedule(&bnx2x_fp(bp, 0, napi));
1730
1731                 status &= ~mask;
1732         }
1733
1734
1735         if (unlikely(status & 0x1)) {
1736                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1737
1738                 status &= ~0x1;
1739                 if (!status)
1740                         return IRQ_HANDLED;
1741         }
1742
1743         if (status)
1744                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1745                    status);
1746
1747         return IRQ_HANDLED;
1748 }
1749
1750 /* end of fast path */
1751
1752 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1753
1754 /* Link */
1755
1756 /*
1757  * General service functions
1758  */
1759
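/* Acquire one of the HW resource locks (shared with the MCP and the other
 * functions) by polling the per-function DRIVER_CONTROL register;
 * gives up after roughly 5 seconds.
 */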
1760 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1761 {
1762         u32 lock_status;
1763         u32 resource_bit = (1 << resource);
1764         int func = BP_FUNC(bp);
1765         u32 hw_lock_control_reg;
1766         int cnt;
1767
1768         /* Validating that the resource is within range */
1769         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1770                 DP(NETIF_MSG_HW,
1771                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1772                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1773                 return -EINVAL;
1774         }
1775
1776         if (func <= 5) {
1777                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1778         } else {
1779                 hw_lock_control_reg =
1780                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1781         }
1782
1783         /* Validating that the resource is not already taken */
1784         lock_status = REG_RD(bp, hw_lock_control_reg);
1785         if (lock_status & resource_bit) {
1786                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1787                    lock_status, resource_bit);
1788                 return -EEXIST;
1789         }
1790
1791         /* Try for 5 seconds, every 5ms */
1792         for (cnt = 0; cnt < 1000; cnt++) {
1793                 /* Try to acquire the lock */
1794                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1795                 lock_status = REG_RD(bp, hw_lock_control_reg);
1796                 if (lock_status & resource_bit)
1797                         return 0;
1798
1799                 msleep(5);
1800         }
1801         DP(NETIF_MSG_HW, "Timeout\n");
1802         return -EAGAIN;
1803 }
1804
1805 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1806 {
1807         u32 lock_status;
1808         u32 resource_bit = (1 << resource);
1809         int func = BP_FUNC(bp);
1810         u32 hw_lock_control_reg;
1811
1812         /* Validating that the resource is within range */
1813         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1814                 DP(NETIF_MSG_HW,
1815                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1816                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1817                 return -EINVAL;
1818         }
1819
1820         if (func <= 5) {
1821                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1822         } else {
1823                 hw_lock_control_reg =
1824                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1825         }
1826
1827         /* Validating that the resource is currently taken */
1828         lock_status = REG_RD(bp, hw_lock_control_reg);
1829         if (!(lock_status & resource_bit)) {
1830                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1831                    lock_status, resource_bit);
1832                 return -EFAULT;
1833         }
1834
1835         REG_WR(bp, hw_lock_control_reg, resource_bit);
1836         return 0;
1837 }
1838
1839 /* HW Lock for shared dual port PHYs */
1840 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1841 {
1842         mutex_lock(&bp->port.phy_mutex);
1843
1844         if (bp->port.need_hw_lock)
1845                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1846 }
1847
1848 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1849 {
1850         if (bp->port.need_hw_lock)
1851                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1852
1853         mutex_unlock(&bp->port.phy_mutex);
1854 }
1855
1856 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1857 {
1858         /* The GPIO should be swapped if swap register is set and active */
1859         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1860                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1861         int gpio_shift = gpio_num +
1862                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1863         u32 gpio_mask = (1 << gpio_shift);
1864         u32 gpio_reg;
1865         int value;
1866
1867         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1868                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1869                 return -EINVAL;
1870         }
1871
1872         /* read GPIO value */
1873         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1874
1875         /* get the requested pin value */
1876         if ((gpio_reg & gpio_mask) == gpio_mask)
1877                 value = 1;
1878         else
1879                 value = 0;
1880
1881         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
1882
1883         return value;
1884 }
1885
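/* Drive a GPIO pin low, high or float it, taking the port-swap strap into
 * account and protecting the access with the GPIO HW lock.
 */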
1886 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1887 {
1888         /* The GPIO should be swapped if swap register is set and active */
1889         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1890                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1891         int gpio_shift = gpio_num +
1892                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1893         u32 gpio_mask = (1 << gpio_shift);
1894         u32 gpio_reg;
1895
1896         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1897                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1898                 return -EINVAL;
1899         }
1900
1901         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1902         /* read GPIO and mask except the float bits */
1903         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1904
1905         switch (mode) {
1906         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1907                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1908                    gpio_num, gpio_shift);
1909                 /* clear FLOAT and set CLR */
1910                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1911                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1912                 break;
1913
1914         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1915                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1916                    gpio_num, gpio_shift);
1917                 /* clear FLOAT and set SET */
1918                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1919                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1920                 break;
1921
1922         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1923                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1924                    gpio_num, gpio_shift);
1925                 /* set FLOAT */
1926                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1927                 break;
1928
1929         default:
1930                 break;
1931         }
1932
1933         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1934         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1935
1936         return 0;
1937 }
1938
1939 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1940 {
1941         /* The GPIO should be swapped if swap register is set and active */
1942         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1943                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1944         int gpio_shift = gpio_num +
1945                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1946         u32 gpio_mask = (1 << gpio_shift);
1947         u32 gpio_reg;
1948
1949         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1950                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1951                 return -EINVAL;
1952         }
1953
1954         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1955         /* read GPIO int */
1956         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1957
1958         switch (mode) {
1959         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1960                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1961                                    "output low\n", gpio_num, gpio_shift);
1962                 /* clear SET and set CLR */
1963                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1964                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1965                 break;
1966
1967         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1968                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1969                                    "output high\n", gpio_num, gpio_shift);
1970                 /* clear CLR and set SET */
1971                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1972                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1973                 break;
1974
1975         default:
1976                 break;
1977         }
1978
1979         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1980         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1981
1982         return 0;
1983 }
1984
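/* Same as bnx2x_set_gpio() but for the SPIO pins (SPIO4..SPIO7). */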
1985 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1986 {
1987         u32 spio_mask = (1 << spio_num);
1988         u32 spio_reg;
1989
1990         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1991             (spio_num > MISC_REGISTERS_SPIO_7)) {
1992                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1993                 return -EINVAL;
1994         }
1995
1996         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1997         /* read SPIO and mask except the float bits */
1998         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1999
2000         switch (mode) {
2001         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2002                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2003                 /* clear FLOAT and set CLR */
2004                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2005                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2006                 break;
2007
2008         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2009                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2010                 /* clear FLOAT and set SET */
2011                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2012                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2013                 break;
2014
2015         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2016                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2017                 /* set FLOAT */
2018                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2019                 break;
2020
2021         default:
2022                 break;
2023         }
2024
2025         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2026         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2027
2028         return 0;
2029 }
2030
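/* Translate the negotiated IEEE pause settings into the
 * ADVERTISED_Pause / ADVERTISED_Asym_Pause bits of bp->port.advertising.
 */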
2031 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2032 {
2033         switch (bp->link_vars.ieee_fc &
2034                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2035         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2036                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2037                                           ADVERTISED_Pause);
2038                 break;
2039         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2040                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2041                                          ADVERTISED_Pause);
2042                 break;
2043         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2044                 bp->port.advertising |= ADVERTISED_Asym_Pause;
2045                 break;
2046         default:
2047                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2048                                           ADVERTISED_Pause);
2049                 break;
2050         }
2051 }
2052
2053 static void bnx2x_link_report(struct bnx2x *bp)
2054 {
2055         if (bp->link_vars.link_up) {
2056                 if (bp->state == BNX2X_STATE_OPEN)
2057                         netif_carrier_on(bp->dev);
2058                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2059
2060                 printk("%d Mbps ", bp->link_vars.line_speed);
2061
2062                 if (bp->link_vars.duplex == DUPLEX_FULL)
2063                         printk("full duplex");
2064                 else
2065                         printk("half duplex");
2066
2067                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2068                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2069                                 printk(", receive ");
2070                                 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2071                                         printk("& transmit ");
2072                         } else {
2073                                 printk(", transmit ");
2074                         }
2075                         printk("flow control ON");
2076                 }
2077                 printk("\n");
2078
2079         } else { /* link_down */
2080                 netif_carrier_off(bp->dev);
2081                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2082         }
2083 }
2084
2085 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2086 {
2087         if (!BP_NOMCP(bp)) {
2088                 u8 rc;
2089
2090                 /* Initialize link parameters structure variables */
2091                 /* It is recommended to turn off RX FC for jumbo frames
2092                    to get better performance */
2093                 if (IS_E1HMF(bp))
2094                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2095                 else if (bp->dev->mtu > 5000)
2096                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2097                 else
2098                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2099
2100                 bnx2x_acquire_phy_lock(bp);
2101
2102                 if (load_mode == LOAD_DIAG)
2103                         bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2104
2105                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2106
2107                 bnx2x_release_phy_lock(bp);
2108
2109                 bnx2x_calc_fc_adv(bp);
2110
2111                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2112                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2113                         bnx2x_link_report(bp);
2114                 }
2115
2116                 return rc;
2117         }
2118         BNX2X_ERR("Bootcode is missing - not initializing link\n");
2119         return -EINVAL;
2120 }
2121
2122 static void bnx2x_link_set(struct bnx2x *bp)
2123 {
2124         if (!BP_NOMCP(bp)) {
2125                 bnx2x_acquire_phy_lock(bp);
2126                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2127                 bnx2x_release_phy_lock(bp);
2128
2129                 bnx2x_calc_fc_adv(bp);
2130         } else
2131                 BNX2X_ERR("Bootcode is missing - not setting link\n");
2132 }
2133
2134 static void bnx2x__link_reset(struct bnx2x *bp)
2135 {
2136         if (!BP_NOMCP(bp)) {
2137                 bnx2x_acquire_phy_lock(bp);
2138                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2139                 bnx2x_release_phy_lock(bp);
2140         } else
2141                 BNX2X_ERR("Bootcode is missing - not resetting link\n");
2142 }
2143
2144 static u8 bnx2x_link_test(struct bnx2x *bp)
2145 {
2146         u8 rc;
2147
2148         bnx2x_acquire_phy_lock(bp);
2149         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2150         bnx2x_release_phy_lock(bp);
2151
2152         return rc;
2153 }
2154
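/* Initialize the per-port rate shaping and fairness parameters that are
 * derived from the current line speed (timeouts, thresholds and the
 * per-vn credit upper bound).
 */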
2155 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2156 {
2157         u32 r_param = bp->link_vars.line_speed / 8;
2158         u32 fair_periodic_timeout_usec;
2159         u32 t_fair;
2160
2161         memset(&(bp->cmng.rs_vars), 0,
2162                sizeof(struct rate_shaping_vars_per_port));
2163         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2164
2165         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2166         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2167
2168         /* this is the threshold below which no timer arming will occur;
2169            the 1.25 coefficient makes the threshold a little bigger than
2170            the real time, to compensate for timer inaccuracy */
2171         bp->cmng.rs_vars.rs_threshold =
2172                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2173
2174         /* resolution of fairness timer */
2175         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2176         /* for 10G it is 1000 usec; for 1G it is 10000 usec */
2177         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2178
2179         /* this is the threshold below which we won't arm the timer anymore */
2180         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2181
2182         /* we multiply by 1e3/8 to get bytes/msec.
2183            We don't want the credits to exceed a credit of
2184            t_fair*FAIR_MEM (the algorithm resolution) */
2185         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2186         /* since each tick is 4 usec */
2187         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2188 }
2189
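/* Build the per-vn rate shaping and fairness contexts from the min/max
 * bandwidth configured in the MF configuration and write them to the
 * XSTORM internal memory.
 */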
2190 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2191 {
2192         struct rate_shaping_vars_per_vn m_rs_vn;
2193         struct fairness_vars_per_vn m_fair_vn;
2194         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2195         u16 vn_min_rate, vn_max_rate;
2196         int i;
2197
2198         /* If the function is hidden - set min and max to zero */
2199         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2200                 vn_min_rate = 0;
2201                 vn_max_rate = 0;
2202
2203         } else {
2204                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2205                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2206                 /* If fairness is enabled (not all min rates are zero) and
2207                    the current min rate is zero - set it to 1.
2208                    This is a requirement of the algorithm. */
2209                 if (bp->vn_weight_sum && (vn_min_rate == 0))
2210                         vn_min_rate = DEF_MIN_RATE;
2211                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2212                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2213         }
2214
2215         DP(NETIF_MSG_IFUP,
2216            "func %d: vn_min_rate=%d  vn_max_rate=%d  vn_weight_sum=%d\n",
2217            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2218
2219         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2220         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2221
2222         /* global vn counter - maximal Mbps for this vn */
2223         m_rs_vn.vn_counter.rate = vn_max_rate;
2224
2225         /* quota - number of bytes transmitted in this period */
2226         m_rs_vn.vn_counter.quota =
2227                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2228
2229         if (bp->vn_weight_sum) {
2230                 /* credit for each period of the fairness algorithm:
2231                    number of bytes in T_FAIR (the vn's share of the port rate).
2232                    vn_weight_sum should not be larger than 10000, thus
2233                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2234                    than zero */
2235                 m_fair_vn.vn_credit_delta =
2236                         max((u32)(vn_min_rate * (T_FAIR_COEF /
2237                                                  (8 * bp->vn_weight_sum))),
2238                             (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2239                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2240                    m_fair_vn.vn_credit_delta);
2241         }
2242
2243         /* Store it to internal memory */
2244         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2245                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2246                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2247                        ((u32 *)(&m_rs_vn))[i]);
2248
2249         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2250                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2251                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2252                        ((u32 *)(&m_fair_vn))[i]);
2253 }
2254
2255
2256 /* This function is called upon link interrupt */
2257 static void bnx2x_link_attn(struct bnx2x *bp)
2258 {
2259         /* Make sure that we are synced with the current statistics */
2260         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2261
2262         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2263
2264         if (bp->link_vars.link_up) {
2265
2266                 /* dropless flow control */
2267                 if (CHIP_IS_E1H(bp)) {
2268                         int port = BP_PORT(bp);
2269                         u32 pause_enabled = 0;
2270
2271                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2272                                 pause_enabled = 1;
2273
2274                         REG_WR(bp, BAR_USTRORM_INTMEM +
2275                                USTORM_PAUSE_ENABLED_OFFSET(port),
2276                                pause_enabled);
2277                 }
2278
2279                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2280                         struct host_port_stats *pstats;
2281
2282                         pstats = bnx2x_sp(bp, port_stats);
2283                         /* reset old bmac stats */
2284                         memset(&(pstats->mac_stx[0]), 0,
2285                                sizeof(struct mac_stx));
2286                 }
2287                 if ((bp->state == BNX2X_STATE_OPEN) ||
2288                     (bp->state == BNX2X_STATE_DISABLED))
2289                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2290         }
2291
2292         /* indicate link status */
2293         bnx2x_link_report(bp);
2294
2295         if (IS_E1HMF(bp)) {
2296                 int port = BP_PORT(bp);
2297                 int func;
2298                 int vn;
2299
2300                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2301                         if (vn == BP_E1HVN(bp))
2302                                 continue;
2303
2304                         func = ((vn << 1) | port);
2305
2306                         /* Set the attention towards other drivers
2307                            on the same port */
2308                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2309                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2310                 }
2311
2312                 if (bp->link_vars.link_up) {
2313                         int i;
2314
2315                         /* Init rate shaping and fairness contexts */
2316                         bnx2x_init_port_minmax(bp);
2317
2318                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2319                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2320
2321                         /* Store it to internal memory */
2322                         for (i = 0;
2323                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
2324                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2325                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2326                                        ((u32 *)(&bp->cmng))[i]);
2327                 }
2328         }
2329 }
2330
2331 static void bnx2x__link_status_update(struct bnx2x *bp)
2332 {
2333         if (bp->state != BNX2X_STATE_OPEN)
2334                 return;
2335
2336         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2337
2338         if (bp->link_vars.link_up)
2339                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2340         else
2341                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2342
2343         /* indicate link status */
2344         bnx2x_link_report(bp);
2345 }
2346
2347 static void bnx2x_pmf_update(struct bnx2x *bp)
2348 {
2349         int port = BP_PORT(bp);
2350         u32 val;
2351
2352         bp->port.pmf = 1;
2353         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2354
2355         /* enable nig attention */
2356         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2357         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2358         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2359
2360         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2361 }
2362
2363 /* end of Link */
2364
2365 /* slow path */
2366
2367 /*
2368  * General service functions
2369  */
2370
2371 /* the slow path queue is odd since completions arrive on the fastpath ring */
2372 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2373                          u32 data_hi, u32 data_lo, int common)
2374 {
2375         int func = BP_FUNC(bp);
2376
2377         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2378            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2379            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2380            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2381            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2382
2383 #ifdef BNX2X_STOP_ON_ERROR
2384         if (unlikely(bp->panic))
2385                 return -EIO;
2386 #endif
2387
2388         spin_lock_bh(&bp->spq_lock);
2389
2390         if (!bp->spq_left) {
2391                 BNX2X_ERR("BUG! SPQ ring full!\n");
2392                 spin_unlock_bh(&bp->spq_lock);
2393                 bnx2x_panic();
2394                 return -EBUSY;
2395         }
2396
2397         /* The CID needs the port number to be encoded in it */
2398         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2399                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2400                                      HW_CID(bp, cid)));
2401         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2402         if (common)
2403                 bp->spq_prod_bd->hdr.type |=
2404                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2405
2406         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2407         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2408
2409         bp->spq_left--;
2410
2411         if (bp->spq_prod_bd == bp->spq_last_bd) {
2412                 bp->spq_prod_bd = bp->spq;
2413                 bp->spq_prod_idx = 0;
2414                 DP(NETIF_MSG_TIMER, "end of spq\n");
2415
2416         } else {
2417                 bp->spq_prod_bd++;
2418                 bp->spq_prod_idx++;
2419         }
2420
2421         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2422                bp->spq_prod_idx);
2423
2424         spin_unlock_bh(&bp->spq_lock);
2425         return 0;
2426 }
2427
2428 /* acquire split MCP access lock register */
2429 static int bnx2x_acquire_alr(struct bnx2x *bp)
2430 {
2431         u32 i, j, val;
2432         int rc = 0;
2433
2434         might_sleep();
2435         i = 100;
2436         for (j = 0; j < i*10; j++) {
2437                 val = (1UL << 31);
2438                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2439                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2440                 if (val & (1L << 31))
2441                         break;
2442
2443                 msleep(5);
2444         }
2445         if (!(val & (1L << 31))) {
2446                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2447                 rc = -EBUSY;
2448         }
2449
2450         return rc;
2451 }
2452
2453 /* release split MCP access lock register */
2454 static void bnx2x_release_alr(struct bnx2x *bp)
2455 {
2456         u32 val = 0;
2457
2458         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2459 }
2460
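/* Sample the default status block indices and return a bitmask of the
 * ones that changed: 1 - attention, 2 - CSTORM, 4 - USTORM, 8 - XSTORM,
 * 16 - TSTORM.
 */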
2461 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2462 {
2463         struct host_def_status_block *def_sb = bp->def_status_blk;
2464         u16 rc = 0;
2465
2466         barrier(); /* status block is written to by the chip */
2467         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2468                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2469                 rc |= 1;
2470         }
2471         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2472                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2473                 rc |= 2;
2474         }
2475         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2476                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2477                 rc |= 4;
2478         }
2479         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2480                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2481                 rc |= 8;
2482         }
2483         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2484                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2485                 rc |= 16;
2486         }
2487         return rc;
2488 }
2489
2490 /*
2491  * slow path service functions
2492  */
2493
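/* Handle newly asserted attention bits: mask them in the AEU, process the
 * hard-wired attentions (NIG/link, GPIOs and general attentions) and then
 * write the bits to the HC "attn bits set" register.
 */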
2494 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2495 {
2496         int port = BP_PORT(bp);
2497         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2498                        COMMAND_REG_ATTN_BITS_SET);
2499         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2500                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2501         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2502                                        NIG_REG_MASK_INTERRUPT_PORT0;
2503         u32 aeu_mask;
2504         u32 nig_mask = 0;
2505
2506         if (bp->attn_state & asserted)
2507                 BNX2X_ERR("IGU ERROR\n");
2508
2509         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2510         aeu_mask = REG_RD(bp, aeu_addr);
2511
2512         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2513            aeu_mask, asserted);
2514         aeu_mask &= ~(asserted & 0xff);
2515         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2516
2517         REG_WR(bp, aeu_addr, aeu_mask);
2518         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2519
2520         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2521         bp->attn_state |= asserted;
2522         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2523
2524         if (asserted & ATTN_HARD_WIRED_MASK) {
2525                 if (asserted & ATTN_NIG_FOR_FUNC) {
2526
2527                         bnx2x_acquire_phy_lock(bp);
2528
2529                         /* save nig interrupt mask */
2530                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2531                         REG_WR(bp, nig_int_mask_addr, 0);
2532
2533                         bnx2x_link_attn(bp);
2534
2535                         /* handle unicore attn? */
2536                 }
2537                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2538                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2539
2540                 if (asserted & GPIO_2_FUNC)
2541                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2542
2543                 if (asserted & GPIO_3_FUNC)
2544                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2545
2546                 if (asserted & GPIO_4_FUNC)
2547                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2548
2549                 if (port == 0) {
2550                         if (asserted & ATTN_GENERAL_ATTN_1) {
2551                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2552                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2553                         }
2554                         if (asserted & ATTN_GENERAL_ATTN_2) {
2555                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2556                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2557                         }
2558                         if (asserted & ATTN_GENERAL_ATTN_3) {
2559                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2560                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2561                         }
2562                 } else {
2563                         if (asserted & ATTN_GENERAL_ATTN_4) {
2564                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2565                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2566                         }
2567                         if (asserted & ATTN_GENERAL_ATTN_5) {
2568                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2569                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2570                         }
2571                         if (asserted & ATTN_GENERAL_ATTN_6) {
2572                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2573                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2574                         }
2575                 }
2576
2577         } /* if hardwired */
2578
2579         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2580            asserted, hc_addr);
2581         REG_WR(bp, hc_addr, asserted);
2582
2583         /* now set back the mask */
2584         if (asserted & ATTN_NIG_FOR_FUNC) {
2585                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2586                 bnx2x_release_phy_lock(bp);
2587         }
2588 }
2589
2590 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2591 {
2592         int port = BP_PORT(bp);
2593         int reg_offset;
2594         u32 val;
2595
2596         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2597                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2598
2599         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2600
2601                 val = REG_RD(bp, reg_offset);
2602                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2603                 REG_WR(bp, reg_offset, val);
2604
2605                 BNX2X_ERR("SPIO5 hw attention\n");
2606
2607                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2608                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2609                         /* Fan failure attention */
2610
2611                         /* The PHY reset is controlled by GPIO 1 */
2612                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2613                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2614                         /* Low power mode is controlled by GPIO 2 */
2615                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2616                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2617                         /* mark the failure */
2618                         bp->link_params.ext_phy_config &=
2619                                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2620                         bp->link_params.ext_phy_config |=
2621                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2622                         SHMEM_WR(bp,
2623                                  dev_info.port_hw_config[port].
2624                                                         external_phy_config,
2625                                  bp->link_params.ext_phy_config);
2626                         /* log the failure */
2627                         printk(KERN_ERR PFX "Fan Failure on Network"
2628                                " Controller %s has caused the driver to"
2629                                " shutdown the card to prevent permanent"
2630                                " damage.  Please contact Dell Support for"
2631                                " assistance\n", bp->dev->name);
2632                         break;
2633
2634                 default:
2635                         break;
2636                 }
2637         }
2638
2639         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2640                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2641                 bnx2x_acquire_phy_lock(bp);
2642                 bnx2x_handle_module_detect_int(&bp->link_params);
2643                 bnx2x_release_phy_lock(bp);
2644         }
2645
2646         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2647
2648                 val = REG_RD(bp, reg_offset);
2649                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2650                 REG_WR(bp, reg_offset, val);
2651
2652                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2653                           (attn & HW_INTERRUT_ASSERT_SET_0));
2654                 bnx2x_panic();
2655         }
2656 }
2657
2658 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2659 {
2660         u32 val;
2661
2662         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2663
2664                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2665                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2666                 /* DORQ discard attention */
2667                 if (val & 0x2)
2668                         BNX2X_ERR("FATAL error from DORQ\n");
2669         }
2670
2671         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2672
2673                 int port = BP_PORT(bp);
2674                 int reg_offset;
2675
2676                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2677                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2678
2679                 val = REG_RD(bp, reg_offset);
2680                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2681                 REG_WR(bp, reg_offset, val);
2682
2683                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2684                           (attn & HW_INTERRUT_ASSERT_SET_1));
2685                 bnx2x_panic();
2686         }
2687 }
2688
2689 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2690 {
2691         u32 val;
2692
2693         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2694
2695                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2696                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2697                 /* CFC error attention */
2698                 if (val & 0x2)
2699                         BNX2X_ERR("FATAL error from CFC\n");
2700         }
2701
2702         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2703
2704                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2705                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2706                 /* RQ_USDMDP_FIFO_OVERFLOW */
2707                 if (val & 0x18000)
2708                         BNX2X_ERR("FATAL error from PXP\n");
2709         }
2710
2711         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2712
2713                 int port = BP_PORT(bp);
2714                 int reg_offset;
2715
2716                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2717                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2718
2719                 val = REG_RD(bp, reg_offset);
2720                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2721                 REG_WR(bp, reg_offset, val);
2722
2723                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2724                           (attn & HW_INTERRUT_ASSERT_SET_2));
2725                 bnx2x_panic();
2726         }
2727 }
2728
2729 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2730 {
2731         u32 val;
2732
2733         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2734
2735                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2736                         int func = BP_FUNC(bp);
2737
2738                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2739                         bnx2x__link_status_update(bp);
2740                         if (SHMEM_RD(bp, func_mb[func].drv_status) &
2741                                                         DRV_STATUS_PMF)
2742                                 bnx2x_pmf_update(bp);
2743
2744                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2745
2746                         BNX2X_ERR("MC assert!\n");
2747                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2748                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2749                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2750                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2751                         bnx2x_panic();
2752
2753                 } else if (attn & BNX2X_MCP_ASSERT) {
2754
2755                         BNX2X_ERR("MCP assert!\n");
2756                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2757                         bnx2x_fw_dump(bp);
2758
2759                 } else
2760                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2761         }
2762
2763         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2764                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2765                 if (attn & BNX2X_GRC_TIMEOUT) {
2766                         val = CHIP_IS_E1H(bp) ?
2767                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2768                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2769                 }
2770                 if (attn & BNX2X_GRC_RSV) {
2771                         val = CHIP_IS_E1H(bp) ?
2772                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2773                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2774                 }
2775                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2776         }
2777 }
2778
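/* Handle deasserted attention bits: read the after-invert AEU registers,
 * dispatch each attention group to its handler, then re-enable the bits
 * in the AEU mask and clear them in the HC.
 */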
2779 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2780 {
2781         struct attn_route attn;
2782         struct attn_route group_mask;
2783         int port = BP_PORT(bp);
2784         int index;
2785         u32 reg_addr;
2786         u32 val;
2787         u32 aeu_mask;
2788
2789         /* need to take HW lock because MCP or other port might also
2790            try to handle this event */
2791         bnx2x_acquire_alr(bp);
2792
2793         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2794         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2795         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2796         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2797         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2798            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2799
2800         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2801                 if (deasserted & (1 << index)) {
2802                         group_mask = bp->attn_group[index];
2803
2804                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2805                            index, group_mask.sig[0], group_mask.sig[1],
2806                            group_mask.sig[2], group_mask.sig[3]);
2807
2808                         bnx2x_attn_int_deasserted3(bp,
2809                                         attn.sig[3] & group_mask.sig[3]);
2810                         bnx2x_attn_int_deasserted1(bp,
2811                                         attn.sig[1] & group_mask.sig[1]);
2812                         bnx2x_attn_int_deasserted2(bp,
2813                                         attn.sig[2] & group_mask.sig[2]);
2814                         bnx2x_attn_int_deasserted0(bp,
2815                                         attn.sig[0] & group_mask.sig[0]);
2816
2817                         if ((attn.sig[0] & group_mask.sig[0] &
2818                                                 HW_PRTY_ASSERT_SET_0) ||
2819                             (attn.sig[1] & group_mask.sig[1] &
2820                                                 HW_PRTY_ASSERT_SET_1) ||
2821                             (attn.sig[2] & group_mask.sig[2] &
2822                                                 HW_PRTY_ASSERT_SET_2))
2823                                 BNX2X_ERR("FATAL HW block parity attention\n");
2824                 }
2825         }
2826
2827         bnx2x_release_alr(bp);
2828
2829         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2830
2831         val = ~deasserted;
2832         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2833            val, reg_addr);
2834         REG_WR(bp, reg_addr, val);
2835
2836         if (~bp->attn_state & deasserted)
2837                 BNX2X_ERR("IGU ERROR\n");
2838
2839         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2840                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2841
2842         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2843         aeu_mask = REG_RD(bp, reg_addr);
2844
2845         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2846            aeu_mask, deasserted);
2847         aeu_mask |= (deasserted & 0xff);
2848         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2849
2850         REG_WR(bp, reg_addr, aeu_mask);
2851         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2852
2853         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2854         bp->attn_state &= ~deasserted;
2855         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2856 }
2857
2858 static void bnx2x_attn_int(struct bnx2x *bp)
2859 {
2860         /* read local copy of bits */
2861         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2862                                                                 attn_bits);
2863         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2864                                                                 attn_bits_ack);
2865         u32 attn_state = bp->attn_state;
2866
2867         /* look for changed bits */
2868         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2869         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
2870
2871         DP(NETIF_MSG_HW,
2872            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2873            attn_bits, attn_ack, asserted, deasserted);
2874
2875         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2876                 BNX2X_ERR("BAD attention state\n");
2877
2878         /* handle bits that were raised */
2879         if (asserted)
2880                 bnx2x_attn_int_asserted(bp, asserted);
2881
2882         if (deasserted)
2883                 bnx2x_attn_int_deasserted(bp, deasserted);
2884 }
2885
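/* slow path work handler: processes HW attentions signalled through the
   default status block and re-enables the slow path interrupt */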
2886 static void bnx2x_sp_task(struct work_struct *work)
2887 {
2888         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2889         u16 status;
2890
2891
2892         /* Return here if interrupt is disabled */
2893         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2894                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2895                 return;
2896         }
2897
2898         status = bnx2x_update_dsb_idx(bp);
2899 /*      if (status == 0)                                     */
2900 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2901
2902         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2903
2904         /* HW attentions */
2905         if (status & 0x1)
2906                 bnx2x_attn_int(bp);
2907
2908         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2909                      IGU_INT_NOP, 1);
2910         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2911                      IGU_INT_NOP, 1);
2912         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2913                      IGU_INT_NOP, 1);
2914         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2915                      IGU_INT_NOP, 1);
2916         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2917                      IGU_INT_ENABLE, 1);
2918
2919 }
2920
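/* slow path MSI-X handler: mask further slow path interrupts and defer the
   actual work to bnx2x_sp_task on the driver workqueue */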
2921 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2922 {
2923         struct net_device *dev = dev_instance;
2924         struct bnx2x *bp = netdev_priv(dev);
2925
2926         /* Return here if interrupt is disabled */
2927         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2928                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2929                 return IRQ_HANDLED;
2930         }
2931
2932         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2933
2934 #ifdef BNX2X_STOP_ON_ERROR
2935         if (unlikely(bp->panic))
2936                 return IRQ_HANDLED;
2937 #endif
2938
2939         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2940
2941         return IRQ_HANDLED;
2942 }
2943
2944 /* end of slow path */
2945
2946 /* Statistics */
2947
2948 /****************************************************************************
2949 * Macros
2950 ****************************************************************************/
2951
2952 /* sum[hi:lo] += add[hi:lo] */
2953 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2954         do { \
2955                 s_lo += a_lo; \
2956                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2957         } while (0)
2958
2959 /* difference = minuend - subtrahend */
2960 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2961         do { \
2962                 if (m_lo < s_lo) { \
2963                         /* underflow */ \
2964                         d_hi = m_hi - s_hi; \
2965                         if (d_hi > 0) { \
2966                                 /* we can borrow 1 from the high word */ \
2967                                 d_hi--; \
2968                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2969                         } else { \
2970                                 /* m_hi <= s_hi */ \
2971                                 d_hi = 0; \
2972                                 d_lo = 0; \
2973                         } \
2974                 } else { \
2975                         /* m_lo >= s_lo */ \
2976                         if (m_hi < s_hi) { \
2977                                 d_hi = 0; \
2978                                 d_lo = 0; \
2979                         } else { \
2980                                 /* m_hi >= s_hi */ \
2981                                 d_hi = m_hi - s_hi; \
2982                                 d_lo = m_lo - s_lo; \
2983                         } \
2984                 } \
2985         } while (0)
2986
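/* fold a MAC counter delta into the running totals: mac_stx[0] keeps the
   last raw value read, mac_stx[1] accumulates; the NIG variant adds the
   delta between the old and new NIG snapshots to estats */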
2987 #define UPDATE_STAT64(s, t) \
2988         do { \
2989                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2990                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2991                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2992                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2993                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2994                        pstats->mac_stx[1].t##_lo, diff.lo); \
2995         } while (0)
2996
2997 #define UPDATE_STAT64_NIG(s, t) \
2998         do { \
2999                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3000                         diff.lo, new->s##_lo, old->s##_lo); \
3001                 ADD_64(estats->t##_hi, diff.hi, \
3002                        estats->t##_lo, diff.lo); \
3003         } while (0)
3004
3005 /* sum[hi:lo] += add */
3006 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3007         do { \
3008                 s_lo += a; \
3009                 s_hi += (s_lo < a) ? 1 : 0; \
3010         } while (0)
3011
3012 #define UPDATE_EXTEND_STAT(s) \
3013         do { \
3014                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3015                               pstats->mac_stx[1].s##_lo, \
3016                               new->s); \
3017         } while (0)
3018
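/* the UPDATE_EXTEND_[T|U|X]STAT helpers below add the delta of a 32-bit
   storm counter to the matching 64-bit queue statistic and remember the
   new raw value */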
3019 #define UPDATE_EXTEND_TSTAT(s, t) \
3020         do { \
3021                 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3022                 old_tclient->s = tclient->s; \
3023                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3024         } while (0)
3025
3026 #define UPDATE_EXTEND_USTAT(s, t) \
3027         do { \
3028                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3029                 old_uclient->s = uclient->s; \
3030                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3031         } while (0)
3032
3033 #define UPDATE_EXTEND_XSTAT(s, t) \
3034         do { \
3035                 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3036                 old_xclient->s = xclient->s; \
3037                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3038         } while (0)
3039
3040 /* minuend -= subtrahend */
3041 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3042         do { \
3043                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3044         } while (0)
3045
3046 /* minuend[hi:lo] -= subtrahend */
3047 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3048         do { \
3049                 SUB_64(m_hi, 0, m_lo, s); \
3050         } while (0)
3051
3052 #define SUB_EXTEND_USTAT(s, t) \
3053         do { \
3054                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3055                 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3056         } while (0)
3057
3058 /*
3059  * General service functions
3060  */
3061
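/* fold a {hi, lo} 64-bit counter (hi word stored first) into a long;
   on 32-bit builds only the low 32 bits are returned */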
3062 static inline long bnx2x_hilo(u32 *hiref)
3063 {
3064         u32 lo = *(hiref + 1);
3065 #if (BITS_PER_LONG == 64)
3066         u32 hi = *hiref;
3067
3068         return HILO_U64(hi, lo);
3069 #else
3070         return lo;
3071 #endif
3072 }
3073
3074 /*
3075  * Init service functions
3076  */
3077
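/* ask the storms for fresh per-client statistics by posting a STAT_QUERY
   ramrod; at most one query is kept in flight (stats_pending) */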
3078 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3079 {
3080         if (!bp->stats_pending) {
3081                 struct eth_query_ramrod_data ramrod_data = {0};
3082                 int i, rc;
3083
3084                 ramrod_data.drv_counter = bp->stats_counter++;
3085                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3086                 for_each_queue(bp, i)
3087                         ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3088
3089                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3090                                    ((u32 *)&ramrod_data)[1],
3091                                    ((u32 *)&ramrod_data)[0], 0);
3092                 if (rc == 0) {
3093                         /* stats ramrod has its own slot on the spq */
3094                         bp->spq_left++;
3095                         bp->stats_pending = 1;
3096                 }
3097         }
3098 }
3099
3100 static void bnx2x_stats_init(struct bnx2x *bp)
3101 {
3102         int port = BP_PORT(bp);
3103         int i;
3104
3105         bp->stats_pending = 0;
3106         bp->executer_idx = 0;
3107         bp->stats_counter = 0;
3108
3109         /* port stats */
3110         if (!BP_NOMCP(bp))
3111                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3112         else
3113                 bp->port.port_stx = 0;
3114         DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3115
3116         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3117         bp->port.old_nig_stats.brb_discard =
3118                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3119         bp->port.old_nig_stats.brb_truncate =
3120                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3121         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3122                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3123         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3124                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3125
3126         /* function stats */
3127         for_each_queue(bp, i) {
3128                 struct bnx2x_fastpath *fp = &bp->fp[i];
3129
3130                 memset(&fp->old_tclient, 0,
3131                        sizeof(struct tstorm_per_client_stats));
3132                 memset(&fp->old_uclient, 0,
3133                        sizeof(struct ustorm_per_client_stats));
3134                 memset(&fp->old_xclient, 0,
3135                        sizeof(struct xstorm_per_client_stats));
3136                 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3137         }
3138
3139         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3140         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3141
3142         bp->stats_state = STATS_STATE_DISABLED;
3143         if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3144                 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3145 }
3146
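/* post the DMAE commands prepared by the stats code: queued commands are
   executed through a loader command, a lone function-stats command is
   posted directly */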
3147 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3148 {
3149         struct dmae_command *dmae = &bp->stats_dmae;
3150         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3151
3152         *stats_comp = DMAE_COMP_VAL;
3153         if (CHIP_REV_IS_SLOW(bp))
3154                 return;
3155
3156         /* loader */
3157         if (bp->executer_idx) {
3158                 int loader_idx = PMF_DMAE_C(bp);
3159
3160                 memset(dmae, 0, sizeof(struct dmae_command));
3161
3162                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3163                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3164                                 DMAE_CMD_DST_RESET |
3165 #ifdef __BIG_ENDIAN
3166                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3167 #else
3168                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3169 #endif
3170                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3171                                                DMAE_CMD_PORT_0) |
3172                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3173                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3174                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3175                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3176                                      sizeof(struct dmae_command) *
3177                                      (loader_idx + 1)) >> 2;
3178                 dmae->dst_addr_hi = 0;
3179                 dmae->len = sizeof(struct dmae_command) >> 2;
3180                 if (CHIP_IS_E1(bp))
3181                         dmae->len--;
3182                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3183                 dmae->comp_addr_hi = 0;
3184                 dmae->comp_val = 1;
3185
3186                 *stats_comp = 0;
3187                 bnx2x_post_dmae(bp, dmae, loader_idx);
3188
3189         } else if (bp->func_stx) {
3190                 *stats_comp = 0;
3191                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3192         }
3193 }
3194
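/* poll for the DMAE completion word, sleeping 1ms between checks (at most
   10 tries); log an error and give up if it never arrives */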
3195 static int bnx2x_stats_comp(struct bnx2x *bp)
3196 {
3197         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3198         int cnt = 10;
3199
3200         might_sleep();
3201         while (*stats_comp != DMAE_COMP_VAL) {
3202                 if (!cnt) {
3203                         BNX2X_ERR("timeout waiting for stats to finish\n");
3204                         break;
3205                 }
3206                 cnt--;
3207                 msleep(1);
3208         }
3209         return 1;
3210 }
3211
3212 /*
3213  * Statistics service functions
3214  */
3215
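/* read back the port statistics block (two DMAE chunks) so that a newly
   elected PMF continues the running totals instead of restarting them */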
3216 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3217 {
3218         struct dmae_command *dmae;
3219         u32 opcode;
3220         int loader_idx = PMF_DMAE_C(bp);
3221         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3222
3223         /* sanity */
3224         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3225                 BNX2X_ERR("BUG!\n");
3226                 return;
3227         }
3228
3229         bp->executer_idx = 0;
3230
3231         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3232                   DMAE_CMD_C_ENABLE |
3233                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3234 #ifdef __BIG_ENDIAN
3235                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3236 #else
3237                   DMAE_CMD_ENDIANITY_DW_SWAP |
3238 #endif
3239                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3240                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3241
3242         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3243         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3244         dmae->src_addr_lo = bp->port.port_stx >> 2;
3245         dmae->src_addr_hi = 0;
3246         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3247         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3248         dmae->len = DMAE_LEN32_RD_MAX;
3249         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3250         dmae->comp_addr_hi = 0;
3251         dmae->comp_val = 1;
3252
3253         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3254         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3255         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3256         dmae->src_addr_hi = 0;
3257         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3258                                    DMAE_LEN32_RD_MAX * 4);
3259         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3260                                    DMAE_LEN32_RD_MAX * 4);
3261         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3262         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3263         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3264         dmae->comp_val = DMAE_COMP_VAL;
3265
3266         *stats_comp = 0;
3267         bnx2x_hw_stats_post(bp);
3268         bnx2x_stats_comp(bp);
3269 }
3270
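/* queue the DMAE commands used on every stats cycle: copy the port and
   function stats blocks out to their port_stx/func_stx locations and read
   the MAC (BMAC or EMAC) and NIG hardware counters into driver memory */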
3271 static void bnx2x_port_stats_init(struct bnx2x *bp)
3272 {
3273         struct dmae_command *dmae;
3274         int port = BP_PORT(bp);
3275         int vn = BP_E1HVN(bp);
3276         u32 opcode;
3277         int loader_idx = PMF_DMAE_C(bp);
3278         u32 mac_addr;
3279         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3280
3281         /* sanity */
3282         if (!bp->link_vars.link_up || !bp->port.pmf) {
3283                 BNX2X_ERR("BUG!\n");
3284                 return;
3285         }
3286
3287         bp->executer_idx = 0;
3288
3289         /* MCP */
3290         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3291                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3292                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3293 #ifdef __BIG_ENDIAN
3294                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3295 #else
3296                   DMAE_CMD_ENDIANITY_DW_SWAP |
3297 #endif
3298                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3299                   (vn << DMAE_CMD_E1HVN_SHIFT));
3300
3301         if (bp->port.port_stx) {
3302
3303                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3304                 dmae->opcode = opcode;
3305                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3306                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3307                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3308                 dmae->dst_addr_hi = 0;
3309                 dmae->len = sizeof(struct host_port_stats) >> 2;
3310                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3311                 dmae->comp_addr_hi = 0;
3312                 dmae->comp_val = 1;
3313         }
3314
3315         if (bp->func_stx) {
3316
3317                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3318                 dmae->opcode = opcode;
3319                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3320                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3321                 dmae->dst_addr_lo = bp->func_stx >> 2;
3322                 dmae->dst_addr_hi = 0;
3323                 dmae->len = sizeof(struct host_func_stats) >> 2;
3324                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3325                 dmae->comp_addr_hi = 0;
3326                 dmae->comp_val = 1;
3327         }
3328
3329         /* MAC */
3330         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3331                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3332                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3333 #ifdef __BIG_ENDIAN
3334                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3335 #else
3336                   DMAE_CMD_ENDIANITY_DW_SWAP |
3337 #endif
3338                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3339                   (vn << DMAE_CMD_E1HVN_SHIFT));
3340
3341         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3342
3343                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3344                                    NIG_REG_INGRESS_BMAC0_MEM);
3345
3346                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3347                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3348                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3349                 dmae->opcode = opcode;
3350                 dmae->src_addr_lo = (mac_addr +
3351                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3352                 dmae->src_addr_hi = 0;
3353                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3354                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3355                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3356                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3357                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3358                 dmae->comp_addr_hi = 0;
3359                 dmae->comp_val = 1;
3360
3361                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3362                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3363                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3364                 dmae->opcode = opcode;
3365                 dmae->src_addr_lo = (mac_addr +
3366                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3367                 dmae->src_addr_hi = 0;
3368                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3369                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3370                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3371                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3372                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3373                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3374                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3375                 dmae->comp_addr_hi = 0;
3376                 dmae->comp_val = 1;
3377
3378         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3379
3380                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3381
3382                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3383                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3384                 dmae->opcode = opcode;
3385                 dmae->src_addr_lo = (mac_addr +
3386                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3387                 dmae->src_addr_hi = 0;
3388                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3389                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3390                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3391                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3392                 dmae->comp_addr_hi = 0;
3393                 dmae->comp_val = 1;
3394
3395                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3396                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3397                 dmae->opcode = opcode;
3398                 dmae->src_addr_lo = (mac_addr +
3399                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3400                 dmae->src_addr_hi = 0;
3401                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3402                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3403                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3404                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3405                 dmae->len = 1;
3406                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3407                 dmae->comp_addr_hi = 0;
3408                 dmae->comp_val = 1;
3409
3410                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3411                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3412                 dmae->opcode = opcode;
3413                 dmae->src_addr_lo = (mac_addr +
3414                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3415                 dmae->src_addr_hi = 0;
3416                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3417                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3418                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3419                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3420                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3421                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3422                 dmae->comp_addr_hi = 0;
3423                 dmae->comp_val = 1;
3424         }
3425
3426         /* NIG */
3427         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3428         dmae->opcode = opcode;
3429         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3430                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3431         dmae->src_addr_hi = 0;
3432         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3433         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3434         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3435         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3436         dmae->comp_addr_hi = 0;
3437         dmae->comp_val = 1;
3438
3439         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3440         dmae->opcode = opcode;
3441         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3442                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3443         dmae->src_addr_hi = 0;
3444         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3445                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3446         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3447                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3448         dmae->len = (2*sizeof(u32)) >> 2;
3449         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3450         dmae->comp_addr_hi = 0;
3451         dmae->comp_val = 1;
3452
3453         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3454         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3455                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3456                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3457 #ifdef __BIG_ENDIAN
3458                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3459 #else
3460                         DMAE_CMD_ENDIANITY_DW_SWAP |
3461 #endif
3462                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3463                         (vn << DMAE_CMD_E1HVN_SHIFT));
3464         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3465                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3466         dmae->src_addr_hi = 0;
3467         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3468                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3469         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3470                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3471         dmae->len = (2*sizeof(u32)) >> 2;
3472         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3473         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3474         dmae->comp_val = DMAE_COMP_VAL;
3475
3476         *stats_comp = 0;
3477 }
3478
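/* prepare (but do not post) the single DMAE command that copies the
   function statistics block out to the address held in bp->func_stx */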
3479 static void bnx2x_func_stats_init(struct bnx2x *bp)
3480 {
3481         struct dmae_command *dmae = &bp->stats_dmae;
3482         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3483
3484         /* sanity */
3485         if (!bp->func_stx) {
3486                 BNX2X_ERR("BUG!\n");
3487                 return;
3488         }
3489
3490         bp->executer_idx = 0;
3491         memset(dmae, 0, sizeof(struct dmae_command));
3492
3493         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3494                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3495                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3496 #ifdef __BIG_ENDIAN
3497                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3498 #else
3499                         DMAE_CMD_ENDIANITY_DW_SWAP |
3500 #endif
3501                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3502                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3503         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3504         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3505         dmae->dst_addr_lo = bp->func_stx >> 2;
3506         dmae->dst_addr_hi = 0;
3507         dmae->len = sizeof(struct host_func_stats) >> 2;
3508         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3509         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3510         dmae->comp_val = DMAE_COMP_VAL;
3511
3512         *stats_comp = 0;
3513 }
3514
3515 static void bnx2x_stats_start(struct bnx2x *bp)
3516 {
3517         if (bp->port.pmf)
3518                 bnx2x_port_stats_init(bp);
3519
3520         else if (bp->func_stx)
3521                 bnx2x_func_stats_init(bp);
3522
3523         bnx2x_hw_stats_post(bp);
3524         bnx2x_storm_stats_post(bp);
3525 }
3526
3527 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3528 {
3529         bnx2x_stats_comp(bp);
3530         bnx2x_stats_pmf_update(bp);
3531         bnx2x_stats_start(bp);
3532 }
3533
3534 static void bnx2x_stats_restart(struct bnx2x *bp)
3535 {
3536         bnx2x_stats_comp(bp);
3537         bnx2x_stats_start(bp);
3538 }
3539
3540 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3541 {
3542         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3543         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3544         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3545         struct {
3546                 u32 lo;
3547                 u32 hi;
3548         } diff;
3549
3550         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3551         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3552         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3553         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3554         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3555         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3556         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3557         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3558         UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3559         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3560         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3561         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3562         UPDATE_STAT64(tx_stat_gt127,
3563                                 tx_stat_etherstatspkts65octetsto127octets);
3564         UPDATE_STAT64(tx_stat_gt255,
3565                                 tx_stat_etherstatspkts128octetsto255octets);
3566         UPDATE_STAT64(tx_stat_gt511,
3567                                 tx_stat_etherstatspkts256octetsto511octets);
3568         UPDATE_STAT64(tx_stat_gt1023,
3569                                 tx_stat_etherstatspkts512octetsto1023octets);
3570         UPDATE_STAT64(tx_stat_gt1518,
3571                                 tx_stat_etherstatspkts1024octetsto1522octets);
3572         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3573         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3574         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3575         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3576         UPDATE_STAT64(tx_stat_gterr,
3577                                 tx_stat_dot3statsinternalmactransmiterrors);
3578         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3579
3580         estats->pause_frames_received_hi =
3581                                 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3582         estats->pause_frames_received_lo =
3583                                 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3584
3585         estats->pause_frames_sent_hi =
3586                                 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3587         estats->pause_frames_sent_lo =
3588                                 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3589 }
3590
3591 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3592 {
3593         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3594         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3595         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3596
3597         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3598         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3599         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3600         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3601         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3602         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3603         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3604         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3605         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3606         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3607         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3608         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3609         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3610         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3611         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3612         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3613         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3614         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3615         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3616         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3617         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3618         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3619         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3620         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3621         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3622         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3623         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3624         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3625         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3626         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3627         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3628
3629         estats->pause_frames_received_hi =
3630                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3631         estats->pause_frames_received_lo =
3632                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3633         ADD_64(estats->pause_frames_received_hi,
3634                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3635                estats->pause_frames_received_lo,
3636                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3637
3638         estats->pause_frames_sent_hi =
3639                         pstats->mac_stx[1].tx_stat_outxonsent_hi;
3640         estats->pause_frames_sent_lo =
3641                         pstats->mac_stx[1].tx_stat_outxonsent_lo;
3642         ADD_64(estats->pause_frames_sent_hi,
3643                pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3644                estats->pause_frames_sent_lo,
3645                pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3646 }
3647
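/* fold the freshly DMAE'd MAC and NIG counters into the port and driver
   statistics (PMF only) */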
3648 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3649 {
3650         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3651         struct nig_stats *old = &(bp->port.old_nig_stats);
3652         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3653         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3654         struct {
3655                 u32 lo;
3656                 u32 hi;
3657         } diff;
3658         u32 nig_timer_max;
3659
3660         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3661                 bnx2x_bmac_stats_update(bp);
3662
3663         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3664                 bnx2x_emac_stats_update(bp);
3665
3666         else { /* unreached */
3667                 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3668                 return -1;
3669         }
3670
3671         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3672                       new->brb_discard - old->brb_discard);
3673         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3674                       new->brb_truncate - old->brb_truncate);
3675
3676         UPDATE_STAT64_NIG(egress_mac_pkt0,
3677                                         etherstatspkts1024octetsto1522octets);
3678         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3679
3680         memcpy(old, new, sizeof(struct nig_stats));
3681
3682         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3683                sizeof(struct mac_stx));
3684         estats->brb_drop_hi = pstats->brb_drop_hi;
3685         estats->brb_drop_lo = pstats->brb_drop_lo;
3686
3687         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3688
3689         nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3690         if (nig_timer_max != estats->nig_timer_max) {
3691                 estats->nig_timer_max = nig_timer_max;
3692                 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3693         }
3694
3695         return 0;
3696 }
3697
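/* merge the per-client statistics reported by the t/u/x storms into the
   per-queue, per-function and driver-wide counters; returns non-zero if
   some storm has not answered the last query yet */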
3698 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3699 {
3700         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3701         struct tstorm_per_port_stats *tport =
3702                                         &stats->tstorm_common.port_statistics;
3703         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3704         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3705         int i;
3706
3707         memset(&(fstats->total_bytes_received_hi), 0,
3708                sizeof(struct host_func_stats) - 2*sizeof(u32));
3709         estats->error_bytes_received_hi = 0;
3710         estats->error_bytes_received_lo = 0;
3711         estats->etherstatsoverrsizepkts_hi = 0;
3712         estats->etherstatsoverrsizepkts_lo = 0;
3713         estats->no_buff_discard_hi = 0;
3714         estats->no_buff_discard_lo = 0;
3715
3716         for_each_queue(bp, i) {
3717                 struct bnx2x_fastpath *fp = &bp->fp[i];
3718                 int cl_id = fp->cl_id;
3719                 struct tstorm_per_client_stats *tclient =
3720                                 &stats->tstorm_common.client_statistics[cl_id];
3721                 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3722                 struct ustorm_per_client_stats *uclient =
3723                                 &stats->ustorm_common.client_statistics[cl_id];
3724                 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3725                 struct xstorm_per_client_stats *xclient =
3726                                 &stats->xstorm_common.client_statistics[cl_id];
3727                 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3728                 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3729                 u32 diff;
3730
3731                 /* are storm stats valid? */
3732                 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3733                                                         bp->stats_counter) {
3734                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3735                            "  xstorm counter (%d) != stats_counter (%d)\n",
3736                            i, xclient->stats_counter, bp->stats_counter);
3737                         return -1;
3738                 }
3739                 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3740                                                         bp->stats_counter) {
3741                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3742                            "  tstorm counter (%d) != stats_counter (%d)\n",
3743                            i, tclient->stats_counter, bp->stats_counter);
3744                         return -2;
3745                 }
3746                 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3747                                                         bp->stats_counter) {
3748                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3749                            "  ustorm counter (%d) != stats_counter (%d)\n",
3750                            i, uclient->stats_counter, bp->stats_counter);
3751                         return -4;
3752                 }
3753
3754                 qstats->total_bytes_received_hi =
3755                 qstats->valid_bytes_received_hi =
3756                                 le32_to_cpu(tclient->total_rcv_bytes.hi);
3757                 qstats->total_bytes_received_lo =
3758                 qstats->valid_bytes_received_lo =
3759                                 le32_to_cpu(tclient->total_rcv_bytes.lo);
3760
3761                 qstats->error_bytes_received_hi =
3762                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
3763                 qstats->error_bytes_received_lo =
3764                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
3765
3766                 ADD_64(qstats->total_bytes_received_hi,
3767                        qstats->error_bytes_received_hi,
3768                        qstats->total_bytes_received_lo,
3769                        qstats->error_bytes_received_lo);
3770
3771                 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3772                                         total_unicast_packets_received);
3773                 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3774                                         total_multicast_packets_received);
3775                 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3776                                         total_broadcast_packets_received);
3777                 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3778                                         etherstatsoverrsizepkts);
3779                 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3780
3781                 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3782                                         total_unicast_packets_received);
3783                 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3784                                         total_multicast_packets_received);
3785                 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3786                                         total_broadcast_packets_received);
3787                 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3788                 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3789                 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3790
3791                 qstats->total_bytes_transmitted_hi =
3792                                 le32_to_cpu(xclient->total_sent_bytes.hi);
3793                 qstats->total_bytes_transmitted_lo =
3794                                 le32_to_cpu(xclient->total_sent_bytes.lo);
3795
3796                 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3797                                         total_unicast_packets_transmitted);
3798                 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3799                                         total_multicast_packets_transmitted);
3800                 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3801                                         total_broadcast_packets_transmitted);
3802
3803                 old_tclient->checksum_discard = tclient->checksum_discard;
3804                 old_tclient->ttl0_discard = tclient->ttl0_discard;
3805
3806                 ADD_64(fstats->total_bytes_received_hi,
3807                        qstats->total_bytes_received_hi,
3808                        fstats->total_bytes_received_lo,
3809                        qstats->total_bytes_received_lo);
3810                 ADD_64(fstats->total_bytes_transmitted_hi,
3811                        qstats->total_bytes_transmitted_hi,
3812                        fstats->total_bytes_transmitted_lo,
3813                        qstats->total_bytes_transmitted_lo);
3814                 ADD_64(fstats->total_unicast_packets_received_hi,
3815                        qstats->total_unicast_packets_received_hi,
3816                        fstats->total_unicast_packets_received_lo,
3817                        qstats->total_unicast_packets_received_lo);
3818                 ADD_64(fstats->total_multicast_packets_received_hi,
3819                        qstats->total_multicast_packets_received_hi,
3820                        fstats->total_multicast_packets_received_lo,
3821                        qstats->total_multicast_packets_received_lo);
3822                 ADD_64(fstats->total_broadcast_packets_received_hi,
3823                        qstats->total_broadcast_packets_received_hi,
3824                        fstats->total_broadcast_packets_received_lo,
3825                        qstats->total_broadcast_packets_received_lo);
3826                 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3827                        qstats->total_unicast_packets_transmitted_hi,
3828                        fstats->total_unicast_packets_transmitted_lo,
3829                        qstats->total_unicast_packets_transmitted_lo);
3830                 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3831                        qstats->total_multicast_packets_transmitted_hi,
3832                        fstats->total_multicast_packets_transmitted_lo,
3833                        qstats->total_multicast_packets_transmitted_lo);
3834                 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3835                        qstats->total_broadcast_packets_transmitted_hi,
3836                        fstats->total_broadcast_packets_transmitted_lo,
3837                        qstats->total_broadcast_packets_transmitted_lo);
3838                 ADD_64(fstats->valid_bytes_received_hi,
3839                        qstats->valid_bytes_received_hi,
3840                        fstats->valid_bytes_received_lo,
3841                        qstats->valid_bytes_received_lo);
3842
3843                 ADD_64(estats->error_bytes_received_hi,
3844                        qstats->error_bytes_received_hi,
3845                        estats->error_bytes_received_lo,
3846                        qstats->error_bytes_received_lo);
3847                 ADD_64(estats->etherstatsoverrsizepkts_hi,
3848                        qstats->etherstatsoverrsizepkts_hi,
3849                        estats->etherstatsoverrsizepkts_lo,
3850                        qstats->etherstatsoverrsizepkts_lo);
3851                 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3852                        estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3853         }
3854
3855         ADD_64(fstats->total_bytes_received_hi,
3856                estats->rx_stat_ifhcinbadoctets_hi,
3857                fstats->total_bytes_received_lo,
3858                estats->rx_stat_ifhcinbadoctets_lo);
3859
3860         memcpy(estats, &(fstats->total_bytes_received_hi),
3861                sizeof(struct host_func_stats) - 2*sizeof(u32));
3862
3863         ADD_64(estats->etherstatsoverrsizepkts_hi,
3864                estats->rx_stat_dot3statsframestoolong_hi,
3865                estats->etherstatsoverrsizepkts_lo,
3866                estats->rx_stat_dot3statsframestoolong_lo);
3867         ADD_64(estats->error_bytes_received_hi,
3868                estats->rx_stat_ifhcinbadoctets_hi,
3869                estats->error_bytes_received_lo,
3870                estats->rx_stat_ifhcinbadoctets_lo);
3871
3872         if (bp->port.pmf) {
3873                 estats->mac_filter_discard =
3874                                 le32_to_cpu(tport->mac_filter_discard);
3875                 estats->xxoverflow_discard =
3876                                 le32_to_cpu(tport->xxoverflow_discard);
3877                 estats->brb_truncate_discard =
3878                                 le32_to_cpu(tport->brb_truncate_discard);
3879                 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3880         }
3881
3882         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3883
3884         bp->stats_pending = 0;
3885
3886         return 0;
3887 }
3888
3889 static void bnx2x_net_stats_update(struct bnx2x *bp)
3890 {
3891         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3892         struct net_device_stats *nstats = &bp->dev->stats;
3893         int i;
3894
3895         nstats->rx_packets =
3896                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3897                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3898                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3899
3900         nstats->tx_packets =
3901                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3902                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3903                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3904
3905         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
3906
3907         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3908
3909         nstats->rx_dropped = estats->mac_discard;
3910         for_each_queue(bp, i)
3911                 nstats->rx_dropped +=
3912                         le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3913
3914         nstats->tx_dropped = 0;
3915
3916         nstats->multicast =
3917                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
3918
3919         nstats->collisions =
3920                 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
3921
3922         nstats->rx_length_errors =
3923                 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3924                 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3925         nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3926                                  bnx2x_hilo(&estats->brb_truncate_hi);
3927         nstats->rx_crc_errors =
3928                 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3929         nstats->rx_frame_errors =
3930                 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3931         nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
3932         nstats->rx_missed_errors = estats->xxoverflow_discard;
3933
3934         nstats->rx_errors = nstats->rx_length_errors +
3935                             nstats->rx_over_errors +
3936                             nstats->rx_crc_errors +
3937                             nstats->rx_frame_errors +
3938                             nstats->rx_fifo_errors +
3939                             nstats->rx_missed_errors;
3940
3941         nstats->tx_aborted_errors =
3942                 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3943                 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3944         nstats->tx_carrier_errors =
3945                 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
3946         nstats->tx_fifo_errors = 0;
3947         nstats->tx_heartbeat_errors = 0;
3948         nstats->tx_window_errors = 0;
3949
3950         nstats->tx_errors = nstats->tx_aborted_errors +
3951                             nstats->tx_carrier_errors +
3952             bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3953 }
3954
3955 static void bnx2x_drv_stats_update(struct bnx2x *bp)
3956 {
3957         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3958         int i;
3959
3960         estats->driver_xoff = 0;
3961         estats->rx_err_discard_pkt = 0;
3962         estats->rx_skb_alloc_failed = 0;
3963         estats->hw_csum_err = 0;
3964         for_each_queue(bp, i) {
3965                 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
3966
3967                 estats->driver_xoff += qstats->driver_xoff;
3968                 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
3969                 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
3970                 estats->hw_csum_err += qstats->hw_csum_err;
3971         }
3972 }
3973
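/* consume the completed hardware and storm statistics, refresh the netdev
   and driver counters, then kick off the next collection cycle */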
3974 static void bnx2x_stats_update(struct bnx2x *bp)
3975 {
3976         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3977
3978         if (*stats_comp != DMAE_COMP_VAL)
3979                 return;
3980
3981         if (bp->port.pmf)
3982                 bnx2x_hw_stats_update(bp);
3983
3984         if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
3985                 BNX2X_ERR("storm stats were not updated 3 times in a row\n");
3986                 bnx2x_panic();
3987                 return;
3988         }
3989
3990         bnx2x_net_stats_update(bp);
3991         bnx2x_drv_stats_update(bp);
3992
3993         if (bp->msglevel & NETIF_MSG_TIMER) {
3994                 struct tstorm_per_client_stats *old_tclient =
3995                                                         &bp->fp->old_tclient;
3996                 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
3997                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3998                 struct net_device_stats *nstats = &bp->dev->stats;
3999                 int i;
4000
4001                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4002                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
4003                                   "  tx pkt (%lx)\n",
4004                        bnx2x_tx_avail(bp->fp),
4005                        le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
4006                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
4007                                   "  rx pkt (%lx)\n",
4008                        (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
4009                              bp->fp->rx_comp_cons),
4010                        le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
4011                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
4012                                   "brb truncate %u\n",
4013                        (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4014                        qstats->driver_xoff,
4015                        estats->brb_drop_lo, estats->brb_truncate_lo);
4016                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
4017                         "packets_too_big_discard %lu  no_buff_discard %lu  "
4018                         "mac_discard %u  mac_filter_discard %u  "
4019                         "xxoverflow_discard %u  brb_truncate_discard %u  "
4020                         "ttl0_discard %u\n",
4021                        le32_to_cpu(old_tclient->checksum_discard),
4022                        bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4023                        bnx2x_hilo(&qstats->no_buff_discard_hi),
4024                        estats->mac_discard, estats->mac_filter_discard,
4025                        estats->xxoverflow_discard, estats->brb_truncate_discard,
4026                        le32_to_cpu(old_tclient->ttl0_discard));
4027
4028                 for_each_queue(bp, i) {
4029                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4030                                bnx2x_fp(bp, i, tx_pkt),
4031                                bnx2x_fp(bp, i, rx_pkt),
4032                                bnx2x_fp(bp, i, rx_calls));
4033                 }
4034         }
4035
4036         bnx2x_hw_stats_post(bp);
4037         bnx2x_storm_stats_post(bp);
4038 }
4039
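/* Build the final DMAE command(s) that copy the host port statistics to
 * port_stx and the host function statistics to func_stx in device memory.
 * The commands are only set up here; they are posted by bnx2x_hw_stats_post()
 * from bnx2x_stats_stop() below.
 */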
4040 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4041 {
4042         struct dmae_command *dmae;
4043         u32 opcode;
4044         int loader_idx = PMF_DMAE_C(bp);
4045         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4046
4047         bp->executer_idx = 0;
4048
4049         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4050                   DMAE_CMD_C_ENABLE |
4051                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4052 #ifdef __BIG_ENDIAN
4053                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
4054 #else
4055                   DMAE_CMD_ENDIANITY_DW_SWAP |
4056 #endif
4057                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4058                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4059
4060         if (bp->port.port_stx) {
4061
4062                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4063                 if (bp->func_stx)
4064                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4065                 else
4066                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4067                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4068                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4069                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4070                 dmae->dst_addr_hi = 0;
4071                 dmae->len = sizeof(struct host_port_stats) >> 2;
4072                 if (bp->func_stx) {
4073                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4074                         dmae->comp_addr_hi = 0;
4075                         dmae->comp_val = 1;
4076                 } else {
4077                         dmae->comp_addr_lo =
4078                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4079                         dmae->comp_addr_hi =
4080                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4081                         dmae->comp_val = DMAE_COMP_VAL;
4082
4083                         *stats_comp = 0;
4084                 }
4085         }
4086
4087         if (bp->func_stx) {
4088
4089                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4090                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4091                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4092                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4093                 dmae->dst_addr_lo = bp->func_stx >> 2;
4094                 dmae->dst_addr_hi = 0;
4095                 dmae->len = sizeof(struct host_func_stats) >> 2;
4096                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4097                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4098                 dmae->comp_val = DMAE_COMP_VAL;
4099
4100                 *stats_comp = 0;
4101         }
4102 }
4103
4104 static void bnx2x_stats_stop(struct bnx2x *bp)
4105 {
4106         int update = 0;
4107
4108         bnx2x_stats_comp(bp);
4109
4110         if (bp->port.pmf)
4111                 update = (bnx2x_hw_stats_update(bp) == 0);
4112
4113         update |= (bnx2x_storm_stats_update(bp) == 0);
4114
4115         if (update) {
4116                 bnx2x_net_stats_update(bp);
4117
4118                 if (bp->port.pmf)
4119                         bnx2x_port_stats_stop(bp);
4120
4121                 bnx2x_hw_stats_post(bp);
4122                 bnx2x_stats_comp(bp);
4123         }
4124 }
4125
4126 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4127 {
4128 }
4129
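/* Statistics state machine, indexed by [current state][event]: each entry
 * names the action to run and the next state to move to.  bnx2x_stats_handle()
 * below performs the lookup and the transition.
 */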
4130 static const struct {
4131         void (*action)(struct bnx2x *bp);
4132         enum bnx2x_stats_state next_state;
4133 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4134 /* state        event   */
4135 {
4136 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4137 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
4138 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4139 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4140 },
4141 {
4142 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
4143 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
4144 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
4145 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
4146 }
4147 };
4148
4149 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4150 {
4151         enum bnx2x_stats_state state = bp->stats_state;
4152
4153         bnx2x_stats_stm[state][event].action(bp);
4154         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4155
4156         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4157                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4158                    state, event, bp->stats_state);
4159 }
4160
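/* Periodic driver timer: in poll mode it drains the Tx and Rx rings of
 * queue 0, then (unless the MCP is absent) writes an incrementing driver
 * pulse to shared memory and compares it against the MCP pulse to detect a
 * lost heartbeat, triggers a statistics update while the device state allows
 * it, and finally re-arms itself with bp->current_interval.
 */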
4161 static void bnx2x_timer(unsigned long data)
4162 {
4163         struct bnx2x *bp = (struct bnx2x *) data;
4164
4165         if (!netif_running(bp->dev))
4166                 return;
4167
4168         if (atomic_read(&bp->intr_sem) != 0)
4169                 goto timer_restart;
4170
4171         if (poll) {
4172                 struct bnx2x_fastpath *fp = &bp->fp[0];
4173                 int rc;
4174
4175                 bnx2x_tx_int(fp, 1000);
4176                 rc = bnx2x_rx_int(fp, 1000);
4177         }
4178
4179         if (!BP_NOMCP(bp)) {
4180                 int func = BP_FUNC(bp);
4181                 u32 drv_pulse;
4182                 u32 mcp_pulse;
4183
4184                 ++bp->fw_drv_pulse_wr_seq;
4185                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4186                 /* TBD - add SYSTEM_TIME */
4187                 drv_pulse = bp->fw_drv_pulse_wr_seq;
4188                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4189
4190                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4191                              MCP_PULSE_SEQ_MASK);
4192                 /* The delta between driver pulse and mcp response
4193                  * should be 1 (before mcp response) or 0 (after mcp response)
4194                  */
4195                 if ((drv_pulse != mcp_pulse) &&
4196                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4197                         /* someone lost a heartbeat... */
4198                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4199                                   drv_pulse, mcp_pulse);
4200                 }
4201         }
4202
4203         if ((bp->state == BNX2X_STATE_OPEN) ||
4204             (bp->state == BNX2X_STATE_DISABLED))
4205                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4206
4207 timer_restart:
4208         mod_timer(&bp->timer, jiffies + bp->current_interval);
4209 }
4210
4211 /* end of Statistics */
4212
4213 /* nic init */
4214
4215 /*
4216  * nic init service functions
4217  */
4218
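/* Clear the USTORM and CSTORM host status block areas of the given sb_id */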
4219 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4220 {
4221         int port = BP_PORT(bp);
4222
4223         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4224                         USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4225                         sizeof(struct ustorm_status_block)/4);
4226         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4227                         CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4228                         sizeof(struct cstorm_status_block)/4);
4229 }
4230
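/* Set up a per-queue host status block: write its DMA address and owning
 * function into the USTORM and CSTORM internal memories, disable host
 * coalescing on every index, and enable the IGU interrupt for this sb_id.
 */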
4231 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4232                           dma_addr_t mapping, int sb_id)
4233 {
4234         int port = BP_PORT(bp);
4235         int func = BP_FUNC(bp);
4236         int index;
4237         u64 section;
4238
4239         /* USTORM */
4240         section = ((u64)mapping) + offsetof(struct host_status_block,
4241                                             u_status_block);
4242         sb->u_status_block.status_block_id = sb_id;
4243
4244         REG_WR(bp, BAR_USTRORM_INTMEM +
4245                USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4246         REG_WR(bp, BAR_USTRORM_INTMEM +
4247                ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4248                U64_HI(section));
4249         REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4250                 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4251
4252         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4253                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4254                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4255
4256         /* CSTORM */
4257         section = ((u64)mapping) + offsetof(struct host_status_block,
4258                                             c_status_block);
4259         sb->c_status_block.status_block_id = sb_id;
4260
4261         REG_WR(bp, BAR_CSTRORM_INTMEM +
4262                CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4263         REG_WR(bp, BAR_CSTRORM_INTMEM +
4264                ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4265                U64_HI(section));
4266         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4267                 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4268
4269         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4270                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4271                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4272
4273         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4274 }
4275
4276 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4277 {
4278         int func = BP_FUNC(bp);
4279
4280         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4281                         USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4282                         sizeof(struct ustorm_def_status_block)/4);
4283         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4284                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4285                         sizeof(struct cstorm_def_status_block)/4);
4286         bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4287                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4288                         sizeof(struct xstorm_def_status_block)/4);
4289         bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4290                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4291                         sizeof(struct tstorm_def_status_block)/4);
4292 }
4293
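/* Set up the default status block: latch the AEU signal registers for each
 * dynamic attention group, program the attention message address and
 * attention number for this port, write the address and owning function of
 * the U/C/T/X default status block sections with host coalescing disabled on
 * every index, and finally enable the IGU interrupt.
 */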
4294 static void bnx2x_init_def_sb(struct bnx2x *bp,
4295                               struct host_def_status_block *def_sb,
4296                               dma_addr_t mapping, int sb_id)
4297 {
4298         int port = BP_PORT(bp);
4299         int func = BP_FUNC(bp);
4300         int index, val, reg_offset;
4301         u64 section;
4302
4303         /* ATTN */
4304         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4305                                             atten_status_block);
4306         def_sb->atten_status_block.status_block_id = sb_id;
4307
4308         bp->attn_state = 0;
4309
4310         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4311                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4312
4313         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4314                 bp->attn_group[index].sig[0] = REG_RD(bp,
4315                                                      reg_offset + 0x10*index);
4316                 bp->attn_group[index].sig[1] = REG_RD(bp,
4317                                                reg_offset + 0x4 + 0x10*index);
4318                 bp->attn_group[index].sig[2] = REG_RD(bp,
4319                                                reg_offset + 0x8 + 0x10*index);
4320                 bp->attn_group[index].sig[3] = REG_RD(bp,
4321                                                reg_offset + 0xc + 0x10*index);
4322         }
4323
4324         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4325                              HC_REG_ATTN_MSG0_ADDR_L);
4326
4327         REG_WR(bp, reg_offset, U64_LO(section));
4328         REG_WR(bp, reg_offset + 4, U64_HI(section));
4329
4330         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4331
4332         val = REG_RD(bp, reg_offset);
4333         val |= sb_id;
4334         REG_WR(bp, reg_offset, val);
4335
4336         /* USTORM */
4337         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4338                                             u_def_status_block);
4339         def_sb->u_def_status_block.status_block_id = sb_id;
4340
4341         REG_WR(bp, BAR_USTRORM_INTMEM +
4342                USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4343         REG_WR(bp, BAR_USTRORM_INTMEM +
4344                ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4345                U64_HI(section));
4346         REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4347                 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4348
4349         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4350                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4351                          USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4352
4353         /* CSTORM */
4354         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4355                                             c_def_status_block);
4356         def_sb->c_def_status_block.status_block_id = sb_id;
4357
4358         REG_WR(bp, BAR_CSTRORM_INTMEM +
4359                CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4360         REG_WR(bp, BAR_CSTRORM_INTMEM +
4361                ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4362                U64_HI(section));
4363         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4364                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4365
4366         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4367                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4368                          CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4369
4370         /* TSTORM */
4371         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4372                                             t_def_status_block);
4373         def_sb->t_def_status_block.status_block_id = sb_id;
4374
4375         REG_WR(bp, BAR_TSTRORM_INTMEM +
4376                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4377         REG_WR(bp, BAR_TSTRORM_INTMEM +
4378                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4379                U64_HI(section));
4380         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4381                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4382
4383         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4384                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4385                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4386
4387         /* XSTORM */
4388         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4389                                             x_def_status_block);
4390         def_sb->x_def_status_block.status_block_id = sb_id;
4391
4392         REG_WR(bp, BAR_XSTRORM_INTMEM +
4393                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4394         REG_WR(bp, BAR_XSTRORM_INTMEM +
4395                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4396                U64_HI(section));
4397         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4398                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4399
4400         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4401                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4402                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4403
4404         bp->stats_pending = 0;
4405         bp->set_mac_pending = 0;
4406
4407         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4408 }
4409
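/* Program the host-coalescing timeout of the Rx and Tx completion-queue
 * indices of every queue's status block from bp->rx_ticks and bp->tx_ticks
 * (the division by 12 presumably scales the microsecond value to the HC
 * timer resolution), and disable coalescing on an index whose tick value is
 * zero.
 */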
4410 static void bnx2x_update_coalesce(struct bnx2x *bp)
4411 {
4412         int port = BP_PORT(bp);
4413         int i;
4414
4415         for_each_queue(bp, i) {
4416                 int sb_id = bp->fp[i].sb_id;
4417
4418                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4419                 REG_WR8(bp, BAR_USTRORM_INTMEM +
4420                         USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4421                                                     U_SB_ETH_RX_CQ_INDEX),
4422                         bp->rx_ticks/12);
4423                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4424                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4425                                                      U_SB_ETH_RX_CQ_INDEX),
4426                          bp->rx_ticks ? 0 : 1);
4427
4428                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4429                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4430                         CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4431                                                     C_SB_ETH_TX_CQ_INDEX),
4432                         bp->tx_ticks/12);
4433                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4434                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4435                                                      C_SB_ETH_TX_CQ_INDEX),
4436                          bp->tx_ticks ? 0 : 1);
4437         }
4438 }
4439
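/* Release the first 'last' entries of the fastpath TPA pool, unmapping the
 * buffer only for bins still in the TPA_START state.
 */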
4440 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4441                                        struct bnx2x_fastpath *fp, int last)
4442 {
4443         int i;
4444
4445         for (i = 0; i < last; i++) {
4446                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4447                 struct sk_buff *skb = rx_buf->skb;
4448
4449                 if (skb == NULL) {
4450                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4451                         continue;
4452                 }
4453
4454                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4455                         pci_unmap_single(bp->pdev,
4456                                          pci_unmap_addr(rx_buf, mapping),
4457                                          bp->rx_buf_size,
4458                                          PCI_DMA_FROMDEVICE);
4459
4460                 dev_kfree_skb(skb);
4461                 rx_buf->skb = NULL;
4462         }
4463 }
4464
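/* For every Rx queue: allocate the TPA skb pool (TPA is disabled on a queue
 * if allocation fails), chain the "next page" entries of the SGE, BD and CQE
 * rings, fill the SGE and BD rings with buffers, publish the initial
 * producers to the chip, and for queue 0 also program the USTORM memory
 * workaround address with the CQE ring base.
 */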
4465 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4466 {
4467         int func = BP_FUNC(bp);
4468         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4469                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4470         u16 ring_prod, cqe_ring_prod;
4471         int i, j;
4472
4473         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4474         DP(NETIF_MSG_IFUP,
4475            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4476
4477         if (bp->flags & TPA_ENABLE_FLAG) {
4478
4479                 for_each_rx_queue(bp, j) {
4480                         struct bnx2x_fastpath *fp = &bp->fp[j];
4481
4482                         for (i = 0; i < max_agg_queues; i++) {
4483                                 fp->tpa_pool[i].skb =
4484                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4485                                 if (!fp->tpa_pool[i].skb) {
4486                                         BNX2X_ERR("Failed to allocate TPA "
4487                                                   "skb pool for queue[%d] - "
4488                                                   "disabling TPA on this "
4489                                                   "queue!\n", j);
4490                                         bnx2x_free_tpa_pool(bp, fp, i);
4491                                         fp->disable_tpa = 1;
4492                                         break;
4493                                 }
4494                                 pci_unmap_addr_set((struct sw_rx_bd *)
4495                                                         &fp->tpa_pool[i],
4496                                                    mapping, 0);
4497                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4498                         }
4499                 }
4500         }
4501
4502         for_each_rx_queue(bp, j) {
4503                 struct bnx2x_fastpath *fp = &bp->fp[j];
4504
4505                 fp->rx_bd_cons = 0;
4506                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4507                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4508
4509                 /* "next page" elements initialization */
4510                 /* SGE ring */
4511                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4512                         struct eth_rx_sge *sge;
4513
4514                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4515                         sge->addr_hi =
4516                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4517                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4518                         sge->addr_lo =
4519                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4520                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4521                 }
4522
4523                 bnx2x_init_sge_ring_bit_mask(fp);
4524
4525                 /* RX BD ring */
4526                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4527                         struct eth_rx_bd *rx_bd;
4528
4529                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4530                         rx_bd->addr_hi =
4531                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4532                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4533                         rx_bd->addr_lo =
4534                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4535                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4536                 }
4537
4538                 /* CQ ring */
4539                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4540                         struct eth_rx_cqe_next_page *nextpg;
4541
4542                         nextpg = (struct eth_rx_cqe_next_page *)
4543                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4544                         nextpg->addr_hi =
4545                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4546                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4547                         nextpg->addr_lo =
4548                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4549                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4550                 }
4551
4552                 /* Allocate SGEs and initialize the ring elements */
4553                 for (i = 0, ring_prod = 0;
4554                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4555
4556                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4557                                 BNX2X_ERR("was only able to allocate "
4558                                           "%d rx sges\n", i);
4559                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4560                                 /* Cleanup already allocated elements */
4561                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4562                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4563                                 fp->disable_tpa = 1;
4564                                 ring_prod = 0;
4565                                 break;
4566                         }
4567                         ring_prod = NEXT_SGE_IDX(ring_prod);
4568                 }
4569                 fp->rx_sge_prod = ring_prod;
4570
4571                 /* Allocate BDs and initialize BD ring */
4572                 fp->rx_comp_cons = 0;
4573                 cqe_ring_prod = ring_prod = 0;
4574                 for (i = 0; i < bp->rx_ring_size; i++) {
4575                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4576                                 BNX2X_ERR("was only able to allocate "
4577                                           "%d rx skbs on queue[%d]\n", i, j);
4578                                 fp->eth_q_stats.rx_skb_alloc_failed++;
4579                                 break;
4580                         }
4581                         ring_prod = NEXT_RX_IDX(ring_prod);
4582                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4583                         WARN_ON(ring_prod <= i);
4584                 }
4585
4586                 fp->rx_bd_prod = ring_prod;
4587                 /* must not have more available CQEs than BDs */
4588                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4589                                        cqe_ring_prod);
4590                 fp->rx_pkt = fp->rx_calls = 0;
4591
4592                 /* Warning!
4593                  * This will generate an interrupt (to the TSTORM);
4594                  * it must only be done after the chip is initialized.
4595                  */
4596                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4597                                      fp->rx_sge_prod);
4598                 if (j != 0)
4599                         continue;
4600
4601                 REG_WR(bp, BAR_USTRORM_INTMEM +
4602                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4603                        U64_LO(fp->rx_comp_mapping));
4604                 REG_WR(bp, BAR_USTRORM_INTMEM +
4605                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4606                        U64_HI(fp->rx_comp_mapping));
4607         }
4608 }
4609
4610 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4611 {
4612         int i, j;
4613
4614         for_each_tx_queue(bp, j) {
4615                 struct bnx2x_fastpath *fp = &bp->fp[j];
4616
4617                 for (i = 1; i <= NUM_TX_RINGS; i++) {
4618                         struct eth_tx_bd *tx_bd =
4619                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4620
4621                         tx_bd->addr_hi =
4622                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4623                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4624                         tx_bd->addr_lo =
4625                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4626                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4627                 }
4628
4629                 fp->tx_pkt_prod = 0;
4630                 fp->tx_pkt_cons = 0;
4631                 fp->tx_bd_prod = 0;
4632                 fp->tx_bd_cons = 0;
4633                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4634                 fp->tx_pkt = 0;
4635         }
4636 }
4637
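/* Initialize the slowpath (SPQ) ring: reset the producer index, point the
 * producer and last-BD pointers at the ring, and write the ring base address
 * and initial producer index to XSTORM fast memory.
 */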
4638 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4639 {
4640         int func = BP_FUNC(bp);
4641
4642         spin_lock_init(&bp->spq_lock);
4643
4644         bp->spq_left = MAX_SPQ_PENDING;
4645         bp->spq_prod_idx = 0;
4646         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4647         bp->spq_prod_bd = bp->spq;
4648         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4649
4650         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4651                U64_LO(bp->spq_mapping));
4652         REG_WR(bp,
4653                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4654                U64_HI(bp->spq_mapping));
4655
4656         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4657                bp->spq_prod_idx);
4658 }
4659
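/* Fill the ETH connection context of every queue: the USTORM Rx side (client
 * and status block ids, buffer sizes, BD and SGE page bases, TPA flags), the
 * XSTORM Tx side (BD page base, doorbell data address, statistics id), the
 * CSTORM Tx completion index, and the CDU reserved values of both
 * aggregation contexts.
 */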
4660 static void bnx2x_init_context(struct bnx2x *bp)
4661 {
4662         int i;
4663
4664         for_each_queue(bp, i) {
4665                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4666                 struct bnx2x_fastpath *fp = &bp->fp[i];
4667                 u8 cl_id = fp->cl_id;
4668                 u8 sb_id = fp->sb_id;
4669
4670                 context->ustorm_st_context.common.sb_index_numbers =
4671                                                 BNX2X_RX_SB_INDEX_NUM;
4672                 context->ustorm_st_context.common.clientId = cl_id;
4673                 context->ustorm_st_context.common.status_block_id = sb_id;
4674                 context->ustorm_st_context.common.flags =
4675                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4676                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4677                 context->ustorm_st_context.common.statistics_counter_id =
4678                                                 cl_id;
4679                 context->ustorm_st_context.common.mc_alignment_log_size =
4680                                                 BNX2X_RX_ALIGN_SHIFT;
4681                 context->ustorm_st_context.common.bd_buff_size =
4682                                                 bp->rx_buf_size;
4683                 context->ustorm_st_context.common.bd_page_base_hi =
4684                                                 U64_HI(fp->rx_desc_mapping);
4685                 context->ustorm_st_context.common.bd_page_base_lo =
4686                                                 U64_LO(fp->rx_desc_mapping);
4687                 if (!fp->disable_tpa) {
4688                         context->ustorm_st_context.common.flags |=
4689                                 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4690                                  USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4691                         context->ustorm_st_context.common.sge_buff_size =
4692                                 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4693                                          (u32)0xffff);
4694                         context->ustorm_st_context.common.sge_page_base_hi =
4695                                                 U64_HI(fp->rx_sge_mapping);
4696                         context->ustorm_st_context.common.sge_page_base_lo =
4697                                                 U64_LO(fp->rx_sge_mapping);
4698                 }
4699
4700                 context->ustorm_ag_context.cdu_usage =
4701                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4702                                                CDU_REGION_NUMBER_UCM_AG,
4703                                                ETH_CONNECTION_TYPE);
4704
4705                 context->xstorm_st_context.tx_bd_page_base_hi =
4706                                                 U64_HI(fp->tx_desc_mapping);
4707                 context->xstorm_st_context.tx_bd_page_base_lo =
4708                                                 U64_LO(fp->tx_desc_mapping);
4709                 context->xstorm_st_context.db_data_addr_hi =
4710                                                 U64_HI(fp->tx_prods_mapping);
4711                 context->xstorm_st_context.db_data_addr_lo =
4712                                                 U64_LO(fp->tx_prods_mapping);
4713                 context->xstorm_st_context.statistics_data = (cl_id |
4714                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4715                 context->cstorm_st_context.sb_index_number =
4716                                                 C_SB_ETH_TX_CQ_INDEX;
4717                 context->cstorm_st_context.status_block_id = sb_id;
4718
4719                 context->xstorm_ag_context.cdu_reserved =
4720                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4721                                                CDU_REGION_NUMBER_XCM_AG,
4722                                                ETH_CONNECTION_TYPE);
4723         }
4724 }
4725
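/* Fill the TSTORM RSS indirection table, spreading its entries round-robin
 * over the Rx queues' client ids; nothing to do when RSS is disabled.
 */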
4726 static void bnx2x_init_ind_table(struct bnx2x *bp)
4727 {
4728         int func = BP_FUNC(bp);
4729         int i;
4730
4731         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4732                 return;
4733
4734         DP(NETIF_MSG_IFUP,
4735            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
4736         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4737                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4738                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4739                         bp->fp->cl_id + (i % bp->num_rx_queues));
4740 }
4741
4742 static void bnx2x_set_client_config(struct bnx2x *bp)
4743 {
4744         struct tstorm_eth_client_config tstorm_client = {0};
4745         int port = BP_PORT(bp);
4746         int i;
4747
4748         tstorm_client.mtu = bp->dev->mtu;
4749         tstorm_client.config_flags =
4750                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4751                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
4752 #ifdef BCM_VLAN
4753         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4754                 tstorm_client.config_flags |=
4755                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
4756                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4757         }
4758 #endif
4759
4760         if (bp->flags & TPA_ENABLE_FLAG) {
4761                 tstorm_client.max_sges_for_packet =
4762                         SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4763                 tstorm_client.max_sges_for_packet =
4764                         ((tstorm_client.max_sges_for_packet +
4765                           PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4766                         PAGES_PER_SGE_SHIFT;
4767
4768                 tstorm_client.config_flags |=
4769                                 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4770         }
4771
4772         for_each_queue(bp, i) {
4773                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4774
4775                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4776                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4777                        ((u32 *)&tstorm_client)[0]);
4778                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4779                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4780                        ((u32 *)&tstorm_client)[1]);
4781         }
4782
4783         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4784            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4785 }
4786
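/* Translate the driver rx_mode into per-function drop-all/accept-all masks
 * for unicast, multicast and broadcast traffic in the TSTORM MAC filter
 * configuration, and refresh the client configuration unless Rx is fully
 * disabled.
 */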
4787 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4788 {
4789         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4790         int mode = bp->rx_mode;
4791         int mask = (1 << BP_L_ID(bp));
4792         int func = BP_FUNC(bp);
4793         int i;
4794
4795         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
4796
4797         switch (mode) {
4798         case BNX2X_RX_MODE_NONE: /* no Rx */
4799                 tstorm_mac_filter.ucast_drop_all = mask;
4800                 tstorm_mac_filter.mcast_drop_all = mask;
4801                 tstorm_mac_filter.bcast_drop_all = mask;
4802                 break;
4803         case BNX2X_RX_MODE_NORMAL:
4804                 tstorm_mac_filter.bcast_accept_all = mask;
4805                 break;
4806         case BNX2X_RX_MODE_ALLMULTI:
4807                 tstorm_mac_filter.mcast_accept_all = mask;
4808                 tstorm_mac_filter.bcast_accept_all = mask;
4809                 break;
4810         case BNX2X_RX_MODE_PROMISC:
4811                 tstorm_mac_filter.ucast_accept_all = mask;
4812                 tstorm_mac_filter.mcast_accept_all = mask;
4813                 tstorm_mac_filter.bcast_accept_all = mask;
4814                 break;
4815         default:
4816                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4817                 break;
4818         }
4819
4820         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4821                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4822                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4823                        ((u32 *)&tstorm_mac_filter)[i]);
4824
4825 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4826                    ((u32 *)&tstorm_mac_filter)[i]); */
4827         }
4828
4829         if (mode != BNX2X_RX_MODE_NONE)
4830                 bnx2x_set_client_config(bp);
4831 }
4832
4833 static void bnx2x_init_internal_common(struct bnx2x *bp)
4834 {
4835         int i;
4836
4837         if (bp->flags & TPA_ENABLE_FLAG) {
4838                 struct tstorm_eth_tpa_exist tpa = {0};
4839
4840                 tpa.tpa_exist = 1;
4841
4842                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4843                        ((u32 *)&tpa)[0]);
4844                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4845                        ((u32 *)&tpa)[1]);
4846         }
4847
4848         /* Zero this manually as its initialization is
4849            currently missing in the initTool */
4850         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4851                 REG_WR(bp, BAR_USTRORM_INTMEM +
4852                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4853 }
4854
4855 static void bnx2x_init_internal_port(struct bnx2x *bp)
4856 {
4857         int port = BP_PORT(bp);
4858
4859         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4860         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4861         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4862         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4863 }
4864
4865 /* Calculates the sum of vn_min_rates.
4866    It's needed for further normalizing of the min_rates.
4867    Sets bp->vn_weight_sum to:
4868      the sum of vn_min_rates
4869        or
4870      0 - if all the min_rates are 0.
4871      In the latter case the fairness algorithm should be deactivated.
4872      If not all min_rates are zero then those that are zero will be set to 1.
4873  */
4874 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4875 {
4876         int all_zero = 1;
4877         int port = BP_PORT(bp);
4878         int vn;
4879
4880         bp->vn_weight_sum = 0;
4881         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4882                 int func = 2*vn + port;
4883                 u32 vn_cfg =
4884                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4885                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4886                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4887
4888                 /* Skip hidden vns */
4889                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4890                         continue;
4891
4892                 /* If min rate is zero - set it to 1 */
4893                 if (!vn_min_rate)
4894                         vn_min_rate = DEF_MIN_RATE;
4895                 else
4896                         all_zero = 0;
4897
4898                 bp->vn_weight_sum += vn_min_rate;
4899         }
4900
4901         /* ... only if all min rates are zero - disable fairness */
4902         if (all_zero)
4903                 bp->vn_weight_sum = 0;
4904 }
4905
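/* Per-function internal memory init: program the TSTORM common configuration
 * (RSS, E1HOV in CAM for multi-function), start with Rx disabled, zero the
 * per-client X/T/USTORM statistics, point the X/T/U storms at the fw_stats
 * query buffer, program the per-client CQE page base and maximum aggregation
 * size, set the E1H dropless flow-control thresholds, and set up rate
 * shaping and fairness when running in multi-function mode.
 */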
4906 static void bnx2x_init_internal_func(struct bnx2x *bp)
4907 {
4908         struct tstorm_eth_function_common_config tstorm_config = {0};
4909         struct stats_indication_flags stats_flags = {0};
4910         int port = BP_PORT(bp);
4911         int func = BP_FUNC(bp);
4912         int i, j;
4913         u32 offset;
4914         u16 max_agg_size;
4915
4916         if (is_multi(bp)) {
4917                 tstorm_config.config_flags = MULTI_FLAGS(bp);
4918                 tstorm_config.rss_result_mask = MULTI_MASK;
4919         }
4920         if (IS_E1HMF(bp))
4921                 tstorm_config.config_flags |=
4922                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
4923
4924         tstorm_config.leading_client_id = BP_L_ID(bp);
4925
4926         REG_WR(bp, BAR_TSTRORM_INTMEM +
4927                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4928                (*(u32 *)&tstorm_config));
4929
4930         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4931         bnx2x_set_storm_rx_mode(bp);
4932
4933         for_each_queue(bp, i) {
4934                 u8 cl_id = bp->fp[i].cl_id;
4935
4936                 /* reset xstorm per client statistics */
4937                 offset = BAR_XSTRORM_INTMEM +
4938                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4939                 for (j = 0;
4940                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4941                         REG_WR(bp, offset + j*4, 0);
4942
4943                 /* reset tstorm per client statistics */
4944                 offset = BAR_TSTRORM_INTMEM +
4945                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4946                 for (j = 0;
4947                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4948                         REG_WR(bp, offset + j*4, 0);
4949
4950                 /* reset ustorm per client statistics */
4951                 offset = BAR_USTRORM_INTMEM +
4952                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4953                 for (j = 0;
4954                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4955                         REG_WR(bp, offset + j*4, 0);
4956         }
4957
4958         /* Init statistics related context */
4959         stats_flags.collect_eth = 1;
4960
4961         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4962                ((u32 *)&stats_flags)[0]);
4963         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4964                ((u32 *)&stats_flags)[1]);
4965
4966         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4967                ((u32 *)&stats_flags)[0]);
4968         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4969                ((u32 *)&stats_flags)[1]);
4970
4971         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
4972                ((u32 *)&stats_flags)[0]);
4973         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
4974                ((u32 *)&stats_flags)[1]);
4975
4976         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4977                ((u32 *)&stats_flags)[0]);
4978         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4979                ((u32 *)&stats_flags)[1]);
4980
4981         REG_WR(bp, BAR_XSTRORM_INTMEM +
4982                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4983                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4984         REG_WR(bp, BAR_XSTRORM_INTMEM +
4985                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4986                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4987
4988         REG_WR(bp, BAR_TSTRORM_INTMEM +
4989                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4990                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4991         REG_WR(bp, BAR_TSTRORM_INTMEM +
4992                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4993                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4994
4995         REG_WR(bp, BAR_USTRORM_INTMEM +
4996                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4997                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4998         REG_WR(bp, BAR_USTRORM_INTMEM +
4999                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5000                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5001
5002         if (CHIP_IS_E1H(bp)) {
5003                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5004                         IS_E1HMF(bp));
5005                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5006                         IS_E1HMF(bp));
5007                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5008                         IS_E1HMF(bp));
5009                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5010                         IS_E1HMF(bp));
5011
5012                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5013                          bp->e1hov);
5014         }
5015
5016         /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
5017         max_agg_size =
5018                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5019                           SGE_PAGE_SIZE * PAGES_PER_SGE),
5020                     (u32)0xffff);
5021         for_each_rx_queue(bp, i) {
5022                 struct bnx2x_fastpath *fp = &bp->fp[i];
5023
5024                 REG_WR(bp, BAR_USTRORM_INTMEM +
5025                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5026                        U64_LO(fp->rx_comp_mapping));
5027                 REG_WR(bp, BAR_USTRORM_INTMEM +
5028                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5029                        U64_HI(fp->rx_comp_mapping));
5030
5031                 REG_WR16(bp, BAR_USTRORM_INTMEM +
5032                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5033                          max_agg_size);
5034         }
5035
5036         /* dropless flow control */
5037         if (CHIP_IS_E1H(bp)) {
5038                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5039
5040                 rx_pause.bd_thr_low = 250;
5041                 rx_pause.cqe_thr_low = 250;
5042                 rx_pause.cos = 1;
5043                 rx_pause.sge_thr_low = 0;
5044                 rx_pause.bd_thr_high = 350;
5045                 rx_pause.cqe_thr_high = 350;
5046                 rx_pause.sge_thr_high = 0;
5047
5048                 for_each_rx_queue(bp, i) {
5049                         struct bnx2x_fastpath *fp = &bp->fp[i];
5050
5051                         if (!fp->disable_tpa) {
5052                                 rx_pause.sge_thr_low = 150;
5053                                 rx_pause.sge_thr_high = 250;
5054                         }
5055
5056
5057                         offset = BAR_USTRORM_INTMEM +
5058                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5059                                                                    fp->cl_id);
5060                         for (j = 0;
5061                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5062                              j++)
5063                                 REG_WR(bp, offset + j*4,
5064                                        ((u32 *)&rx_pause)[j]);
5065                 }
5066         }
5067
5068         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5069
5070         /* Init rate shaping and fairness contexts */
5071         if (IS_E1HMF(bp)) {
5072                 int vn;
5073
5074                 /* During init there is no active link;
5075                    until link is up, set the link rate to 10Gbps */
5076                 bp->link_vars.line_speed = SPEED_10000;
5077                 bnx2x_init_port_minmax(bp);
5078
5079                 bnx2x_calc_vn_weight_sum(bp);
5080
5081                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5082                         bnx2x_init_vn_minmax(bp, 2*vn + port);
5083
5084                 /* Enable rate shaping and fairness */
5085                 bp->cmng.flags.cmng_enables =
5086                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5087                 if (bp->vn_weight_sum)
5088                         bp->cmng.flags.cmng_enables |=
5089                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5090                 else
5091                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5092                            "  fairness will be disabled\n");
5093         } else {
5094                 /* rate shaping and fairness are disabled */
5095                 DP(NETIF_MSG_IFUP,
5096                    "single function mode  minmax will be disabled\n");
5097         }
5098
5099
5100         /* Store it to internal memory */
5101         if (bp->port.pmf)
5102                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5103                         REG_WR(bp, BAR_XSTRORM_INTMEM +
5104                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5105                                ((u32 *)(&bp->cmng))[i]);
5106 }
5107
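/* The cases below intentionally fall through: a COMMON load also performs
 * the PORT and FUNCTION init, and a PORT load also performs the FUNCTION
 * init.
 */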
5108 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5109 {
5110         switch (load_code) {
5111         case FW_MSG_CODE_DRV_LOAD_COMMON:
5112                 bnx2x_init_internal_common(bp);
5113                 /* no break */
5114
5115         case FW_MSG_CODE_DRV_LOAD_PORT:
5116                 bnx2x_init_internal_port(bp);
5117                 /* no break */
5118
5119         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5120                 bnx2x_init_internal_func(bp);
5121                 break;
5122
5123         default:
5124                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5125                 break;
5126         }
5127 }
5128
5129 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5130 {
5131         int i;
5132
5133         for_each_queue(bp, i) {
5134                 struct bnx2x_fastpath *fp = &bp->fp[i];
5135
5136                 fp->bp = bp;
5137                 fp->state = BNX2X_FP_STATE_CLOSED;
5138                 fp->index = i;
5139                 fp->cl_id = BP_L_ID(bp) + i;
5140                 fp->sb_id = fp->cl_id;
5141                 DP(NETIF_MSG_IFUP,
5142                    "bnx2x_init_sb(%p,%p) index %d  cl_id %d  sb %d\n",
5143                    bp, fp->status_blk, i, fp->cl_id, fp->sb_id);
5144                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5145                               fp->sb_id);
5146                 bnx2x_update_fpsb_idx(fp);
5147         }
5148
5149         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5150                           DEF_SB_ID);
5151         bnx2x_update_dsb_idx(bp);
5152         bnx2x_update_coalesce(bp);
5153         bnx2x_init_rx_rings(bp);
5154         bnx2x_init_tx_ring(bp);
5155         bnx2x_init_sp_ring(bp);
5156         bnx2x_init_context(bp);
5157         bnx2x_init_internal(bp, load_code);
5158         bnx2x_init_ind_table(bp);
5159         bnx2x_stats_init(bp);
5160
5161         /* At this point, we are ready for interrupts */
5162         atomic_set(&bp->intr_sem, 0);
5163
5164         /* flush all before enabling interrupts */
5165         mb();
5166         mmiowb();
5167
5168         bnx2x_int_enable(bp);
5169 }
5170
5171 /* end of nic init */
5172
5173 /*
5174  * gzip service functions
5175  */
5176
5177 static int bnx2x_gunzip_init(struct bnx2x *bp)
5178 {
5179         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5180                                               &bp->gunzip_mapping);
5181         if (bp->gunzip_buf  == NULL)
5182                 goto gunzip_nomem1;
5183
5184         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5185         if (bp->strm  == NULL)
5186                 goto gunzip_nomem2;
5187
5188         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5189                                       GFP_KERNEL);
5190         if (bp->strm->workspace == NULL)
5191                 goto gunzip_nomem3;
5192
5193         return 0;
5194
5195 gunzip_nomem3:
5196         kfree(bp->strm);
5197         bp->strm = NULL;
5198
5199 gunzip_nomem2:
5200         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5201                             bp->gunzip_mapping);
5202         bp->gunzip_buf = NULL;
5203
5204 gunzip_nomem1:
5205         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5206                " decompression\n", bp->dev->name);
5207         return -ENOMEM;
5208 }
5209
5210 static void bnx2x_gunzip_end(struct bnx2x *bp)
5211 {
5212         kfree(bp->strm->workspace);
5213
5214         kfree(bp->strm);
5215         bp->strm = NULL;
5216
5217         if (bp->gunzip_buf) {
5218                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5219                                     bp->gunzip_mapping);
5220                 bp->gunzip_buf = NULL;
5221         }
5222 }
5223
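/* Decompress a gzip image into the pre-allocated gunzip buffer: check the
 * gzip magic (0x1f 0x8b) and the deflate method, skip the 10-byte header and
 * the optional NUL-terminated file name, inflate as a raw deflate stream
 * (negative window bits), and report the output length in 32-bit words via
 * bp->gunzip_outlen.
 */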
5224 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
5225 {
5226         int n, rc;
5227
5228         /* check gzip header */
5229         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
5230                 return -EINVAL;
5231
5232         n = 10;
5233
5234 #define FNAME                           0x8
5235
5236         if (zbuf[3] & FNAME)
5237                 while ((zbuf[n++] != 0) && (n < len));
5238
5239         bp->strm->next_in = zbuf + n;
5240         bp->strm->avail_in = len - n;
5241         bp->strm->next_out = bp->gunzip_buf;
5242         bp->strm->avail_out = FW_BUF_SIZE;
5243
5244         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5245         if (rc != Z_OK)
5246                 return rc;
5247
5248         rc = zlib_inflate(bp->strm, Z_FINISH);
5249         if ((rc != Z_OK) && (rc != Z_STREAM_END))
5250                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5251                        bp->dev->name, bp->strm->msg);
5252
5253         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5254         if (bp->gunzip_outlen & 0x3)
5255                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5256                                     " gunzip_outlen (%d) not aligned\n",
5257                        bp->dev->name, bp->gunzip_outlen);
5258         bp->gunzip_outlen >>= 2;
5259
5260         zlib_inflateEnd(bp->strm);
5261
5262         if (rc == Z_STREAM_END)
5263                 return 0;
5264
5265         return rc;
5266 }
5267
5268 /* nic load/unload */
5269
5270 /*
5271  * General service functions
5272  */
5273
5274 /* send a NIG loopback debug packet */
5275 static void bnx2x_lb_pckt(struct bnx2x *bp)
5276 {
5277         u32 wb_write[3];
5278
5279         /* Ethernet source and destination addresses */
5280         wb_write[0] = 0x55555555;
5281         wb_write[1] = 0x55555555;
5282         wb_write[2] = 0x20;             /* SOP */
5283         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5284
5285         /* NON-IP protocol */
5286         wb_write[0] = 0x09000000;
5287         wb_write[1] = 0x55555555;
5288         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
5289         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5290 }
5291
5292 /* Some of the internal memories are not directly readable from
5293  * the driver, so we test them by sending debug packets through
5294  * the chip and checking the resulting counters.
5295  */
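/* The test runs in two parts: part 1 injects a single loopback packet and
 * checks the NIG octet counter (0x10 bytes) and the PRS packet counter;
 * part 2 resets BRB/PRS and repeats the check with ten more packets.
 */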
5296 static int bnx2x_int_mem_test(struct bnx2x *bp)
5297 {
5298         int factor;
5299         int count, i;
5300         u32 val = 0;
5301
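        /* emulation and FPGA platforms run far slower than real silicon,
         * so scale the polling counts and delays below accordingly */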
5302         if (CHIP_REV_IS_FPGA(bp))
5303                 factor = 120;
5304         else if (CHIP_REV_IS_EMUL(bp))
5305                 factor = 200;
5306         else
5307                 factor = 1;
5308
5309         DP(NETIF_MSG_HW, "start part1\n");
5310
5311         /* Disable inputs of parser neighbor blocks */
5312         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5313         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5314         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5315         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5316
5317         /*  Write 0 to parser credits for CFC search request */
5318         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5319
5320         /* send Ethernet packet */
5321         bnx2x_lb_pckt(bp);
5322
5323         /* TODO: should the NIG statistics be reset here? */
5324         /* Wait until NIG register shows 1 packet of size 0x10 */
5325         count = 1000 * factor;
5326         while (count) {
5327
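                /* the DMAE read deposits the counter value into the
                 * slowpath wb_data buffer */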
5328                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5329                 val = *bnx2x_sp(bp, wb_data[0]);
5330                 if (val == 0x10)
5331                         break;
5332
5333                 msleep(10);
5334                 count--;
5335         }
5336         if (val != 0x10) {
5337                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5338                 return -1;
5339         }
5340
5341         /* Wait until PRS register shows 1 packet */
5342         count = 1000 * factor;
5343         while (count) {
5344                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5345                 if (val == 1)
5346                         break;
5347
5348                 msleep(10);
5349                 count--;
5350         }
5351         if (val != 0x1) {
5352                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5353                 return -2;
5354         }
5355
5356         /* Reset and init BRB, PRS */
5357         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5358         msleep(50);
5359         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5360         msleep(50);
5361         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5362         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5363
5364         DP(NETIF_MSG_HW, "part2\n");
5365
5366         /* Disable inputs of parser neighbor blocks */
5367         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5368         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5369         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5370         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5371
5372         /* Write 0 to parser credits for CFC search request */
5373         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5374
5375         /* send 10 Ethernet packets */
5376         for (i = 0; i < 10; i++)
5377                 bnx2x_lb_pckt(bp);
5378
5379         /* Wait until the NIG register shows 10 + 1 packets of size
5380            0x10 each, i.e. 11 * 0x10 = 0xb0 octets */
5381         count = 1000 * factor;
5382         while (count) {
5383
5384                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5385                 val = *bnx2x_sp(bp, wb_data[0]);
5386                 if (val == 0xb0)
5387                         break;
5388
5389                 msleep(10);
5390                 count--;
5391         }
5392         if (val != 0xb0) {
5393                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5394                 return -3;
5395         }
5396
5397         /* Wait until PRS register shows 2 packets */
5398         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5399         if (val != 2)
5400                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5401
5402         /* Write 1 to parser credits for CFC search request */
5403         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5404
5405         /* Wait until the PRS register shows 3 packets */
5406         msleep(10 * factor);
5408         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5409         if (val != 3)
5410                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5411
5412         /* clear NIG EOP FIFO */
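        /* 11 reads: one EOP entry for the part-1 packet plus ten for the
         * part-2 packets; the FIFO should then report empty */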
5413         for (i = 0; i < 11; i++)
5414                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5415         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5416         if (val != 1) {
5417                 BNX2X_ERR("clear of NIG failed\n");
5418                 return -4;
5419         }
5420
5421         /* Reset and init BRB, PRS, NIG */
5422         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5423         msleep(50);
5424         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5425         msleep(50);
5426         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5427         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5428 #ifndef BCM_ISCSI
5429         /* set NIC mode */
5430         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5431 #endif
5432
5433         /* Enable inputs of parser neighbor blocks */
5434         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5435         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5436         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5437         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5438
5439         DP(NETIF_MSG_HW, "done\n");
5440
5441         return 0; /* OK */
5442 }
5443
5444 static void enable_blocks_attention(struct bnx2x *bp)
5445 {
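        /* writing 0 to a block's INT_MASK register unmasks all of its
         * attention sources; the non-zero PXP2 and PBF values below keep
         * selected bits masked */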
5446         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5447         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5448         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5449         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5450         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5451         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5452         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5453         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5454         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5455 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5456 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5457         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5458         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5459         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5460 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5461 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5462         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5463         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5464         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5465         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5466 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5467 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5468         if (CHIP_REV_IS_FPGA(bp))
5469                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5470         else
5471                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5472         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5473         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5474         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5475 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5476 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5477         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5478         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5479 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5480         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3 and 4 masked */
5481 }
5482
5483
5484 static void bnx2x_reset_common(struct bnx2x *bp)
5485 {
5486         /* reset_common */
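        /* writing to the _CLEAR register asserts reset for the selected
         * blocks; bnx2x_init_common() writes the matching _SET registers
         * to bring them back out of reset */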
5487         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5488                0xd3ffff7f);
5489         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5490 }
5491
5492 static int bnx2x_init_common(struct bnx2x *bp)
5493 {
5494         u32 val, i;
5495
5496         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
5497
5498         bnx2x_reset_common(bp);
5499         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5500         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5501
5502         bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5503         if (CHIP_IS_E1H(bp))
5504                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5505
5506         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5507         msleep(30);
5508         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5509
5510         bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5511         if (CHIP_IS_E1(bp)) {
5512                 /* enable HW interrupt from PXP on USDM overflow
5513                    (bit 16 of INT_MASK_0) */
5514                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5515         }
5516
5517         bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5518         bnx2x_init_pxp(bp);
5519
5520 #ifdef __BIG_ENDIAN
5521         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5522         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5523         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5524         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5525         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5526         /* make sure this value is 0 */
5527         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5528
5529 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5530         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5531         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5532         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5533         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5534 #endif
5535
5536         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5537 #ifdef BCM_ISCSI
5538         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5539         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5540         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5541 #endif
5542
5543         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5544                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5545
5546         /* let the HW do its magic ... */
5547         msleep(100);
5548         /* finish PXP init */
5549         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5550         if (val != 1) {
5551                 BNX2X_ERR("PXP2 CFG failed\n");
5552                 return -EBUSY;
5553         }
5554         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5555         if (val != 1) {
5556                 BNX2X_ERR("PXP2 RD_INIT failed\n");
5557                 return -EBUSY;
5558         }
5559
5560         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5561         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5562
5563         bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5564
5565         /* clean the DMAE memory */
5566         bp->dmae_ready = 1;
5567         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5568
5569         bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5570         bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5571         bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5572         bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5573
5574         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5575         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5576         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5577         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5578
5579         bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5580         /* soft reset pulse */
5581         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5582         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5583
5584 #ifdef BCM_ISCSI
5585         bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5586 #endif
5587
5588         bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5589         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5590         if (!CHIP_REV_IS_SLOW(bp)) {
5591                 /* enable hw interrupt from doorbell Q */
5592                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5593         }
5594
5595         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5596         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5597         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5598         /* set NIC mode */
5599         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5600         if (CHIP_IS_E1H(bp))
5601                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5602
5603         bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5604         bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5605         bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5606         bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5607
5608         if (CHIP_IS_E1H(bp)) {
5609                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5610                                 STORM_INTMEM_SIZE_E1H/2);
5611                 bnx2x_init_fill(bp,
5612                                 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5613                                 0, STORM_INTMEM_SIZE_E1H/2);
5614                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5615                                 STORM_INTMEM_SIZE_E1H/2);
5616                 bnx2x_init_fill(bp,
5617                                 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5618                                 0, STORM_INTMEM_SIZE_E1H/2);
5619                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5620                                 STORM_INTMEM_SIZE_E1H/2);
5621                 bnx2x_init_fill(bp,
5622                                 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5623                                 0, STORM_INTMEM_SIZE_E1H/2);
5624                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5625                                 STORM_INTMEM_SIZE_E1H/2);
5626                 bnx2x_init_fill(bp,
5627                                 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5628                                 0, STORM_INTMEM_SIZE_E1H/2);
5629         } else { /* E1 */
5630                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5631                                 STORM_INTMEM_SIZE_E1);
5632                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5633                                 STORM_INTMEM_SIZE_E1);
5634                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5635                                 STORM_INTMEM_SIZE_E1);
5636                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5637                                 STORM_INTMEM_SIZE_E1);
5638         }
5639
5640         bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5641         bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5642         bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5643         bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5644
5645         /* sync semi rtc */
5646         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5647                0x80000000);
5648         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5649                0x80000000);
5650
5651         bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5652         bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5653         bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5654
5655         REG_WR(bp, SRC_REG_SOFT_RST, 1);
5656         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5657                 REG_WR(bp, i, 0xc0cac01a);
5658                 /* TODO: replace with something meaningful */
5659         }
5660         bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5661         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5662
5663         if (sizeof(union cdu_context) != 1024)
5664                 /* we currently assume that a context is 1024 bytes */
5665                 printk(KERN_ALERT PFX "please adjust the size of"
5666                        " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5667
5668         bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5669         val = (4 << 24) + (0 << 12) + 1024;
5670         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5671         if (CHIP_IS_E1(bp)) {
5672                 /* !!! fix PXP client credit until excel update */
5673                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5674                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5675         }
5676
5677         bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5678         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5679         /* enable context validation interrupt from CFC */
5680         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5681
5682         /* set the thresholds to prevent CFC/CDU race */
5683         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5684
5685         bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5686         bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5687
5688         /* PXPCS COMMON comes here */
5689         /* Reset PCIE errors for debug */
5690         REG_WR(bp, 0x2814, 0xffffffff);
5691         REG_WR(bp, 0x3820, 0xffffffff);
5692
5693         /* EMAC0 COMMON comes here */
5694         /* EMAC1 COMMON comes here */
5695         /* DBU COMMON comes here */
5696         /* DBG COMMON comes here */
5697
5698         bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5699         if (CHIP_IS_E1H(bp)) {
5700                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5701                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5702         }
5703
5704         if (CHIP_REV_IS_SLOW(bp))
5705                 msleep(200);
5706
5707         /* finish CFC init */
5708         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5709         if (val != 1) {
5710                 BNX2X_ERR("CFC LL_INIT failed\n");
5711                 return -EBUSY;
5712         }
5713         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5714         if (val != 1) {
5715                 BNX2X_ERR("CFC AC_INIT failed\n");
5716                 return -EBUSY;
5717         }
5718         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5719         if (val != 1) {
5720                 BNX2X_ERR("CFC CAM_INIT failed\n");
5721                 return -EBUSY;
5722         }
5723         REG_WR(bp, CFC_REG_DEBUG0, 0);
5724
5725         /* read the NIG statistic to see if this is the first load
5726            since power-up */
5727         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5728         val = *bnx2x_sp(bp, wb_data[0]);
5729
5730         /* do internal memory self test */
5731         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5732                 BNX2X_ERR("internal mem self test failed\n");
5733                 return -EBUSY;
5734         }
5735
5736         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5737         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5738         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5739         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5740                 bp->port.need_hw_lock = 1;
5741                 break;
5742
5743         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5744                 /* Fan failure is indicated by SPIO 5 */
5745                 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5746                                MISC_REGISTERS_SPIO_INPUT_HI_Z);
5747
5748                 /* set to active low mode */
5749                 val = REG_RD(bp, MISC_REG_SPIO_INT);
5750<