bnx2x: Fix the behavior of ethtool when ONBOOT=no
[linux-2.6.git] drivers/net/bnx2x_main.c
1 /* bnx2x_main.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2009 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h>  /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
51 #include <linux/io.h>
52
53
54 #include "bnx2x.h"
55 #include "bnx2x_init.h"
56 #include "bnx2x_init_ops.h"
57 #include "bnx2x_dump.h"
58
59 #define DRV_MODULE_VERSION      "1.48.105-1"
60 #define DRV_MODULE_RELDATE      "2009/04/22"
61 #define BNX2X_BC_VER            0x040200
62
63 #include <linux/firmware.h>
64 #include "bnx2x_fw_file_hdr.h"
65 /* FW files */
66 #define FW_FILE_PREFIX_E1               "bnx2x-e1-"
67 #define FW_FILE_PREFIX_E1H              "bnx2x-e1h-"
68
69 /* Time in jiffies before concluding the transmitter is hung */
70 #define TX_TIMEOUT              (5*HZ)
71
72 static char version[] __devinitdata =
73         "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
74         DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
75
76 MODULE_AUTHOR("Eliezer Tamir");
77 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
78 MODULE_LICENSE("GPL");
79 MODULE_VERSION(DRV_MODULE_VERSION);
80
81 static int multi_mode = 1;
82 module_param(multi_mode, int, 0);
83 MODULE_PARM_DESC(multi_mode, " Use per-CPU queues");
84
85 static int disable_tpa;
86 module_param(disable_tpa, int, 0);
87 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
88
89 static int int_mode;
90 module_param(int_mode, int, 0);
91 MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
92
93 static int poll;
94 module_param(poll, int, 0);
95 MODULE_PARM_DESC(poll, " Use polling (for debug)");
96
97 static int mrrs = -1;
98 module_param(mrrs, int, 0);
99 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
100
101 static int debug;
102 module_param(debug, int, 0);
103 MODULE_PARM_DESC(debug, " Default debug msglevel");
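/* All of the above are load-time parameters (sysfs permission 0), passed on
 * the modprobe/insmod command line, e.g. (illustrative values only):
 *
 *     modprobe bnx2x multi_mode=1 int_mode=2 disable_tpa=1 debug=0
 */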
104
105 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
106
107 static struct workqueue_struct *bnx2x_wq;
108
109 enum bnx2x_board_type {
110         BCM57710 = 0,
111         BCM57711 = 1,
112         BCM57711E = 2,
113 };
114
115 /* indexed by board_type, above */
116 static struct {
117         char *name;
118 } board_info[] __devinitdata = {
119         { "Broadcom NetXtreme II BCM57710 XGb" },
120         { "Broadcom NetXtreme II BCM57711 XGb" },
121         { "Broadcom NetXtreme II BCM57711E XGb" }
122 };
123
124
125 static const struct pci_device_id bnx2x_pci_tbl[] = {
126         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
127                 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
128         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
129                 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
130         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
131                 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
132         { 0 }
133 };
134
135 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
136
137 /****************************************************************************
138 * General service functions
139 ****************************************************************************/
140
141 /* used only at init
142  * locking is done by mcp
143  */
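/* Indirect access to GRC registers goes through the PCI configuration space
 * window: the target address is written to PCICFG_GRC_ADDRESS, the data is
 * transferred through PCICFG_GRC_DATA, and the window is then restored to
 * PCICFG_VENDOR_ID_OFFSET so that later config cycles are not affected.
 */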
144 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
145 {
146         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
147         pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
148         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
149                                PCICFG_VENDOR_ID_OFFSET);
150 }
151
152 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
153 {
154         u32 val;
155
156         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
157         pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
158         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
159                                PCICFG_VENDOR_ID_OFFSET);
160
161         return val;
162 }
163
164 static const u32 dmae_reg_go_c[] = {
165         DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
166         DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
167         DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
168         DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
169 };
170
171 /* copy command into DMAE command memory and set DMAE command go */
172 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
173                             int idx)
174 {
175         u32 cmd_offset;
176         int i;
177
178         cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
179         for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
180                 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
181
182                 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
183                    idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
184         }
185         REG_WR(bp, dmae_reg_go_c[idx], 1);
186 }
187
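/* Copy len32 dwords from host memory at dma_addr to GRC address dst_addr
 * using the DMAE engine.  Before the DMAE block is initialized
 * (!bp->dmae_ready) the copy falls back to indirect register writes.
 * Completion is detected by polling the wb_comp word in the slowpath
 * buffer; the whole operation is serialized by bp->dmae_mutex.
 */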
188 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
189                       u32 len32)
190 {
191         struct dmae_command *dmae = &bp->init_dmae;
192         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
193         int cnt = 200;
194
195         if (!bp->dmae_ready) {
196                 u32 *data = bnx2x_sp(bp, wb_data[0]);
197
198                 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
199                    "  using indirect\n", dst_addr, len32);
200                 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
201                 return;
202         }
203
204         mutex_lock(&bp->dmae_mutex);
205
206         memset(dmae, 0, sizeof(struct dmae_command));
207
208         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
209                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
210                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
211 #ifdef __BIG_ENDIAN
212                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
213 #else
214                         DMAE_CMD_ENDIANITY_DW_SWAP |
215 #endif
216                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
217                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
218         dmae->src_addr_lo = U64_LO(dma_addr);
219         dmae->src_addr_hi = U64_HI(dma_addr);
220         dmae->dst_addr_lo = dst_addr >> 2;
221         dmae->dst_addr_hi = 0;
222         dmae->len = len32;
223         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
224         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
225         dmae->comp_val = DMAE_COMP_VAL;
226
227         DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
228            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
229                     "dst_addr [%x:%08x (%08x)]\n"
230            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
231            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
232            dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
233            dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
234         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
235            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
236            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
237
238         *wb_comp = 0;
239
240         bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
241
242         udelay(5);
243
244         while (*wb_comp != DMAE_COMP_VAL) {
245                 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
246
247                 if (!cnt) {
248                         BNX2X_ERR("DMAE timeout!\n");
249                         break;
250                 }
251                 cnt--;
252                 /* adjust delay for emulation/FPGA */
253                 if (CHIP_REV_IS_SLOW(bp))
254                         msleep(100);
255                 else
256                         udelay(5);
257         }
258
259         mutex_unlock(&bp->dmae_mutex);
260 }
261
262 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
263 {
264         struct dmae_command *dmae = &bp->init_dmae;
265         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
266         int cnt = 200;
267
268         if (!bp->dmae_ready) {
269                 u32 *data = bnx2x_sp(bp, wb_data[0]);
270                 int i;
271
272                 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
273                    "  using indirect\n", src_addr, len32);
274                 for (i = 0; i < len32; i++)
275                         data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
276                 return;
277         }
278
279         mutex_lock(&bp->dmae_mutex);
280
281         memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
282         memset(dmae, 0, sizeof(struct dmae_command));
283
284         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
285                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
286                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
287 #ifdef __BIG_ENDIAN
288                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
289 #else
290                         DMAE_CMD_ENDIANITY_DW_SWAP |
291 #endif
292                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
293                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
294         dmae->src_addr_lo = src_addr >> 2;
295         dmae->src_addr_hi = 0;
296         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
297         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
298         dmae->len = len32;
299         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
300         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
301         dmae->comp_val = DMAE_COMP_VAL;
302
303         DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
304            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
305                     "dst_addr [%x:%08x (%08x)]\n"
306            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
307            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
308            dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
309            dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
310
311         *wb_comp = 0;
312
313         bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
314
315         udelay(5);
316
317         while (*wb_comp != DMAE_COMP_VAL) {
318
319                 if (!cnt) {
320                         BNX2X_ERR("DMAE timeout!\n");
321                         break;
322                 }
323                 cnt--;
324                 /* adjust delay for emulation/FPGA */
325                 if (CHIP_REV_IS_SLOW(bp))
326                         msleep(100);
327                 else
328                         udelay(5);
329         }
330         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
331            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
332            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
333
334         mutex_unlock(&bp->dmae_mutex);
335 }
336
337 /* used only for slowpath so not inlined */
338 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
339 {
340         u32 wb_write[2];
341
342         wb_write[0] = val_hi;
343         wb_write[1] = val_lo;
344         REG_WR_DMAE(bp, reg, wb_write, 2);
345 }
346
347 #ifdef USE_WB_RD
348 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
349 {
350         u32 wb_data[2];
351
352         REG_RD_DMAE(bp, reg, wb_data, 2);
353
354         return HILO_U64(wb_data[0], wb_data[1]);
355 }
356 #endif
357
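/* Scan the assert lists of the four STORM processors (X/T/C/U) in internal
 * memory and print every valid entry; returns the number of asserts found.
 */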
358 static int bnx2x_mc_assert(struct bnx2x *bp)
359 {
360         char last_idx;
361         int i, rc = 0;
362         u32 row0, row1, row2, row3;
363
364         /* XSTORM */
365         last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
366                            XSTORM_ASSERT_LIST_INDEX_OFFSET);
367         if (last_idx)
368                 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
369
370         /* print the asserts */
371         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
372
373                 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
374                               XSTORM_ASSERT_LIST_OFFSET(i));
375                 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
376                               XSTORM_ASSERT_LIST_OFFSET(i) + 4);
377                 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
378                               XSTORM_ASSERT_LIST_OFFSET(i) + 8);
379                 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
380                               XSTORM_ASSERT_LIST_OFFSET(i) + 12);
381
382                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
383                         BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
384                                   " 0x%08x 0x%08x 0x%08x\n",
385                                   i, row3, row2, row1, row0);
386                         rc++;
387                 } else {
388                         break;
389                 }
390         }
391
392         /* TSTORM */
393         last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
394                            TSTORM_ASSERT_LIST_INDEX_OFFSET);
395         if (last_idx)
396                 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
397
398         /* print the asserts */
399         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
400
401                 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
402                               TSTORM_ASSERT_LIST_OFFSET(i));
403                 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
404                               TSTORM_ASSERT_LIST_OFFSET(i) + 4);
405                 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
406                               TSTORM_ASSERT_LIST_OFFSET(i) + 8);
407                 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
408                               TSTORM_ASSERT_LIST_OFFSET(i) + 12);
409
410                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
411                         BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
412                                   " 0x%08x 0x%08x 0x%08x\n",
413                                   i, row3, row2, row1, row0);
414                         rc++;
415                 } else {
416                         break;
417                 }
418         }
419
420         /* CSTORM */
421         last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
422                            CSTORM_ASSERT_LIST_INDEX_OFFSET);
423         if (last_idx)
424                 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
425
426         /* print the asserts */
427         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
428
429                 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
430                               CSTORM_ASSERT_LIST_OFFSET(i));
431                 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
432                               CSTORM_ASSERT_LIST_OFFSET(i) + 4);
433                 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
434                               CSTORM_ASSERT_LIST_OFFSET(i) + 8);
435                 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
436                               CSTORM_ASSERT_LIST_OFFSET(i) + 12);
437
438                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
439                         BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
440                                   " 0x%08x 0x%08x 0x%08x\n",
441                                   i, row3, row2, row1, row0);
442                         rc++;
443                 } else {
444                         break;
445                 }
446         }
447
448         /* USTORM */
449         last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
450                            USTORM_ASSERT_LIST_INDEX_OFFSET);
451         if (last_idx)
452                 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
453
454         /* print the asserts */
455         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
456
457                 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
458                               USTORM_ASSERT_LIST_OFFSET(i));
459                 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
460                               USTORM_ASSERT_LIST_OFFSET(i) + 4);
461                 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
462                               USTORM_ASSERT_LIST_OFFSET(i) + 8);
463                 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
464                               USTORM_ASSERT_LIST_OFFSET(i) + 12);
465
466                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
467                         BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
468                                   " 0x%08x 0x%08x 0x%08x\n",
469                                   i, row3, row2, row1, row0);
470                         rc++;
471                 } else {
472                         break;
473                 }
474         }
475
476         return rc;
477 }
478
479 static void bnx2x_fw_dump(struct bnx2x *bp)
480 {
481         u32 mark, offset;
482         __be32 data[9];
483         int word;
484
485         mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
486         mark = ((mark + 0x3) & ~0x3);
487         printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
488
489         for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
490                 for (word = 0; word < 8; word++)
491                         data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
492                                                   offset + 4*word));
493                 data[8] = 0x0;
494                 printk(KERN_CONT "%s", (char *)data);
495         }
496         for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
497                 for (word = 0; word < 8; word++)
498                         data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
499                                                   offset + 4*word));
500                 data[8] = 0x0;
501                 printk(KERN_CONT "%s", (char *)data);
502         }
503         printk("\n" KERN_ERR PFX "end of fw dump\n");
504 }
505
506 static void bnx2x_panic_dump(struct bnx2x *bp)
507 {
508         int i;
509         u16 j, start, end;
510
511         bp->stats_state = STATS_STATE_DISABLED;
512         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
513
514         BNX2X_ERR("begin crash dump -----------------\n");
515
516         /* Indices */
517         /* Common */
518         BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
519                   "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
520                   "  spq_prod_idx(%u)\n",
521                   bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
522                   bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
523
524         /* Rx */
525         for_each_rx_queue(bp, i) {
526                 struct bnx2x_fastpath *fp = &bp->fp[i];
527
528                 BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
529                           "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
530                           "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
531                           i, fp->rx_bd_prod, fp->rx_bd_cons,
532                           le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
533                           fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
534                 BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
535                           "  fp_u_idx(%x) *sb_u_idx(%x)\n",
536                           fp->rx_sge_prod, fp->last_max_sge,
537                           le16_to_cpu(fp->fp_u_idx),
538                           fp->status_blk->u_status_block.status_block_index);
539         }
540
541         /* Tx */
542         for_each_tx_queue(bp, i) {
543                 struct bnx2x_fastpath *fp = &bp->fp[i];
544                 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
545
546                 BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
547                           "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
548                           i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
549                           fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
550                 BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
551                           "  bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
552                           fp->status_blk->c_status_block.status_block_index,
553                           hw_prods->packets_prod, hw_prods->bds_prod);
554         }
555
556         /* Rings */
557         /* Rx */
558         for_each_rx_queue(bp, i) {
559                 struct bnx2x_fastpath *fp = &bp->fp[i];
560
561                 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
562                 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
563                 for (j = start; j != end; j = RX_BD(j + 1)) {
564                         u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
565                         struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
566
567                         BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
568                                   i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
569                 }
570
571                 start = RX_SGE(fp->rx_sge_prod);
572                 end = RX_SGE(fp->last_max_sge);
573                 for (j = start; j != end; j = RX_SGE(j + 1)) {
574                         u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
575                         struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
576
577                         BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
578                                   i, j, rx_sge[1], rx_sge[0], sw_page->page);
579                 }
580
581                 start = RCQ_BD(fp->rx_comp_cons - 10);
582                 end = RCQ_BD(fp->rx_comp_cons + 503);
583                 for (j = start; j != end; j = RCQ_BD(j + 1)) {
584                         u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
585
586                         BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
587                                   i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
588                 }
589         }
590
591         /* Tx */
592         for_each_tx_queue(bp, i) {
593                 struct bnx2x_fastpath *fp = &bp->fp[i];
594
595                 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
596                 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
597                 for (j = start; j != end; j = TX_BD(j + 1)) {
598                         struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
599
600                         BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
601                                   i, j, sw_bd->skb, sw_bd->first_bd);
602                 }
603
604                 start = TX_BD(fp->tx_bd_cons - 10);
605                 end = TX_BD(fp->tx_bd_cons + 254);
606                 for (j = start; j != end; j = TX_BD(j + 1)) {
607                         u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
608
609                         BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
610                                   i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
611                 }
612         }
613
614         bnx2x_fw_dump(bp);
615         bnx2x_mc_assert(bp);
616         BNX2X_ERR("end crash dump -----------------\n");
617 }
618
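/* Program the port's HC configuration register according to the active
 * interrupt mode (MSI-X, MSI or INTx).  On E1H also set the leading/trailing
 * edge attention masks: per-VN in multi-function mode, with NIG and GPIO3
 * attentions enabled only on the PMF.
 */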
619 static void bnx2x_int_enable(struct bnx2x *bp)
620 {
621         int port = BP_PORT(bp);
622         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
623         u32 val = REG_RD(bp, addr);
624         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
625         int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
626
627         if (msix) {
628                 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
629                          HC_CONFIG_0_REG_INT_LINE_EN_0);
630                 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
631                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
632         } else if (msi) {
633                 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
634                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
635                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
636                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
637         } else {
638                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
639                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
640                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
641                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
642
643                 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
644                    val, port, addr);
645
646                 REG_WR(bp, addr, val);
647
648                 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
649         }
650
651         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
652            val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
653
654         REG_WR(bp, addr, val);
655
656         if (CHIP_IS_E1H(bp)) {
657                 /* init leading/trailing edge */
658                 if (IS_E1HMF(bp)) {
659                         val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
660                         if (bp->port.pmf)
661                                 /* enable nig and gpio3 attention */
662                                 val |= 0x1100;
663                 } else
664                         val = 0xffff;
665
666                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
667                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
668         }
669 }
670
671 static void bnx2x_int_disable(struct bnx2x *bp)
672 {
673         int port = BP_PORT(bp);
674         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
675         u32 val = REG_RD(bp, addr);
676
677         val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
678                  HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
679                  HC_CONFIG_0_REG_INT_LINE_EN_0 |
680                  HC_CONFIG_0_REG_ATTN_BIT_EN_0);
681
682         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
683            val, port, addr);
684
685         /* flush all outstanding writes */
686         mmiowb();
687
688         REG_WR(bp, addr, val);
689         if (REG_RD(bp, addr) != val)
690                 BNX2X_ERR("BUG! proper val not read from IGU!\n");
691
692 }
693
694 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
695 {
696         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
697         int i, offset;
698
699         /* disable interrupt handling */
700         atomic_inc(&bp->intr_sem);
701         if (disable_hw)
702                 /* prevent the HW from sending interrupts */
703                 bnx2x_int_disable(bp);
704
705         /* make sure all ISRs are done */
706         if (msix) {
707                 synchronize_irq(bp->msix_table[0].vector);
708                 offset = 1;
709                 for_each_queue(bp, i)
710                         synchronize_irq(bp->msix_table[i + offset].vector);
711         } else
712                 synchronize_irq(bp->pdev->irq);
713
714         /* make sure sp_task is not running */
715         cancel_delayed_work(&bp->sp_task);
716         flush_workqueue(bnx2x_wq);
717 }
718
719 /* fast path */
720
721 /*
722  * General service functions
723  */
724
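/* Acknowledge a status block: build an igu_ack_register word (status block
 * id, storm id, new index, interrupt mode and update flag) and write it to
 * the port's HC INT_ACK command register.
 */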
725 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
726                                 u8 storm, u16 index, u8 op, u8 update)
727 {
728         u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
729                        COMMAND_REG_INT_ACK);
730         struct igu_ack_register igu_ack;
731
732         igu_ack.status_block_index = index;
733         igu_ack.sb_id_and_flags =
734                         ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
735                          (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
736                          (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
737                          (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
738
739         DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
740            (*(u32 *)&igu_ack), hc_addr);
741         REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
742 }
743
744 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
745 {
746         struct host_status_block *fpsb = fp->status_blk;
747         u16 rc = 0;
748
749         barrier(); /* status block is written to by the chip */
750         if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
751                 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
752                 rc |= 1;
753         }
754         if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
755                 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
756                 rc |= 2;
757         }
758         return rc;
759 }
760
761 static u16 bnx2x_ack_int(struct bnx2x *bp)
762 {
763         u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
764                        COMMAND_REG_SIMD_MASK);
765         u32 result = REG_RD(bp, hc_addr);
766
767         DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
768            result, hc_addr);
769
770         return result;
771 }
772
773
774 /*
775  * fast path service functions
776  */
777
778 static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
779 {
780         u16 tx_cons_sb;
781
782         /* Tell compiler that status block fields can change */
783         barrier();
784         tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
785         return (fp->tx_pkt_cons != tx_cons_sb);
786 }
787
788 static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
789 {
790         /* Tell compiler that consumer and producer can change */
791         barrier();
792         return (fp->tx_pkt_prod != fp->tx_pkt_cons);
793 }
794
795 /* free skb in the packet ring at pos idx
796  * return idx of last bd freed
797  */
798 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
799                              u16 idx)
800 {
801         struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
802         struct eth_tx_bd *tx_bd;
803         struct sk_buff *skb = tx_buf->skb;
804         u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
805         int nbd;
806
807         DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
808            idx, tx_buf, skb);
809
810         /* unmap first bd */
811         DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
812         tx_bd = &fp->tx_desc_ring[bd_idx];
813         pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
814                          BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
815
816         nbd = le16_to_cpu(tx_bd->nbd) - 1;
817         new_cons = nbd + tx_buf->first_bd;
818 #ifdef BNX2X_STOP_ON_ERROR
819         if (nbd > (MAX_SKB_FRAGS + 2)) {
820                 BNX2X_ERR("BAD nbd!\n");
821                 bnx2x_panic();
822         }
823 #endif
824
825         /* Skip a parse bd and the TSO split header bd
826            since they have no mapping */
827         if (nbd)
828                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
829
830         if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
831                                            ETH_TX_BD_FLAGS_TCP_CSUM |
832                                            ETH_TX_BD_FLAGS_SW_LSO)) {
833                 if (--nbd)
834                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
835                 tx_bd = &fp->tx_desc_ring[bd_idx];
836                 /* is this a TSO split header bd? */
837                 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
838                         if (--nbd)
839                                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
840                 }
841         }
842
843         /* now free frags */
844         while (nbd > 0) {
845
846                 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
847                 tx_bd = &fp->tx_desc_ring[bd_idx];
848                 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
849                                BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
850                 if (--nbd)
851                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
852         }
853
854         /* release skb */
855         WARN_ON(!skb);
856         dev_kfree_skb(skb);
857         tx_buf->first_bd = 0;
858         tx_buf->skb = NULL;
859
860         return new_cons;
861 }
862
863 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
864 {
865         s16 used;
866         u16 prod;
867         u16 cons;
868
869         barrier(); /* Tell compiler that prod and cons can change */
870         prod = fp->tx_bd_prod;
871         cons = fp->tx_bd_cons;
872
873         /* NUM_TX_RINGS = number of "next-page" entries
874            It will be used as a threshold */
875         used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
876
877 #ifdef BNX2X_STOP_ON_ERROR
878         WARN_ON(used < 0);
879         WARN_ON(used > fp->bp->tx_ring_size);
880         WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
881 #endif
882
883         return (s16)(fp->bp->tx_ring_size) - used;
884 }
885
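/* Reclaim completed Tx packets: walk from the driver's packet consumer up to
 * the consumer index reported in the status block, freeing each packet's BDs
 * and skb.  If the netdev queue was stopped and enough BDs became available,
 * wake it under the tx queue lock (with a barrier against start_xmit()).
 */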
886 static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
887 {
888         struct bnx2x *bp = fp->bp;
889         struct netdev_queue *txq;
890         u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
891         int done = 0;
892
893 #ifdef BNX2X_STOP_ON_ERROR
894         if (unlikely(bp->panic))
895                 return;
896 #endif
897
898         txq = netdev_get_tx_queue(bp->dev, fp->index);
899         hw_cons = le16_to_cpu(*fp->tx_cons_sb);
900         sw_cons = fp->tx_pkt_cons;
901
902         while (sw_cons != hw_cons) {
903                 u16 pkt_cons;
904
905                 pkt_cons = TX_BD(sw_cons);
906
907                 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
908
909                 DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
910                    hw_cons, sw_cons, pkt_cons);
911
912 /*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
913                         rmb();
914                         prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
915                 }
916 */
917                 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
918                 sw_cons++;
919                 done++;
920         }
921
922         fp->tx_pkt_cons = sw_cons;
923         fp->tx_bd_cons = bd_cons;
924
925         /* TBD need a thresh? */
926         if (unlikely(netif_tx_queue_stopped(txq))) {
927
928                 __netif_tx_lock(txq, smp_processor_id());
929
930                 /* Need to make the tx_bd_cons update visible to start_xmit()
931                  * before checking for netif_tx_queue_stopped().  Without the
932                  * memory barrier, there is a small possibility that
933                  * start_xmit() will miss it and cause the queue to be stopped
934                  * forever.
935                  */
936                 smp_mb();
937
938                 if ((netif_tx_queue_stopped(txq)) &&
939                     (bp->state == BNX2X_STATE_OPEN) &&
940                     (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
941                         netif_tx_wake_queue(txq);
942
943                 __netif_tx_unlock(txq);
944         }
945 }
946
947
948 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
949                            union eth_rx_cqe *rr_cqe)
950 {
951         struct bnx2x *bp = fp->bp;
952         int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
953         int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
954
955         DP(BNX2X_MSG_SP,
956            "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
957            fp->index, cid, command, bp->state,
958            rr_cqe->ramrod_cqe.ramrod_type);
959
960         bp->spq_left++;
961
962         if (fp->index) {
963                 switch (command | fp->state) {
964                 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
965                                                 BNX2X_FP_STATE_OPENING):
966                         DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
967                            cid);
968                         fp->state = BNX2X_FP_STATE_OPEN;
969                         break;
970
971                 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
972                         DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
973                            cid);
974                         fp->state = BNX2X_FP_STATE_HALTED;
975                         break;
976
977                 default:
978                         BNX2X_ERR("unexpected MC reply (%d)  "
979                                   "fp->state is %x\n", command, fp->state);
980                         break;
981                 }
982                 mb(); /* force bnx2x_wait_ramrod() to see the change */
983                 return;
984         }
985
986         switch (command | bp->state) {
987         case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
988                 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
989                 bp->state = BNX2X_STATE_OPEN;
990                 break;
991
992         case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
993                 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
994                 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
995                 fp->state = BNX2X_FP_STATE_HALTED;
996                 break;
997
998         case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
999                 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
1000                 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
1001                 break;
1002
1003
1004         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
1005         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
1006                 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
1007                 bp->set_mac_pending = 0;
1008                 break;
1009
1010         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
1011                 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
1012                 break;
1013
1014         default:
1015                 BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
1016                           command, bp->state);
1017                 break;
1018         }
1019         mb(); /* force bnx2x_wait_ramrod() to see the change */
1020 }
1021
1022 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1023                                      struct bnx2x_fastpath *fp, u16 index)
1024 {
1025         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1026         struct page *page = sw_buf->page;
1027         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1028
1029         /* Skip "next page" elements */
1030         if (!page)
1031                 return;
1032
1033         pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
1034                        SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1035         __free_pages(page, PAGES_PER_SGE_SHIFT);
1036
1037         sw_buf->page = NULL;
1038         sge->addr_hi = 0;
1039         sge->addr_lo = 0;
1040 }
1041
1042 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1043                                            struct bnx2x_fastpath *fp, int last)
1044 {
1045         int i;
1046
1047         for (i = 0; i < last; i++)
1048                 bnx2x_free_rx_sge(bp, fp, i);
1049 }
1050
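/* Allocate a PAGES_PER_SGE-page buffer for an SGE entry, map it for DMA and
 * record the mapping in both the software ring (sw_rx_page) and the hardware
 * SGE descriptor.
 */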
1051 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1052                                      struct bnx2x_fastpath *fp, u16 index)
1053 {
1054         struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1055         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1056         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1057         dma_addr_t mapping;
1058
1059         if (unlikely(page == NULL))
1060                 return -ENOMEM;
1061
1062         mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
1063                                PCI_DMA_FROMDEVICE);
1064         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1065                 __free_pages(page, PAGES_PER_SGE_SHIFT);
1066                 return -ENOMEM;
1067         }
1068
1069         sw_buf->page = page;
1070         pci_unmap_addr_set(sw_buf, mapping, mapping);
1071
1072         sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1073         sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1074
1075         return 0;
1076 }
1077
1078 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1079                                      struct bnx2x_fastpath *fp, u16 index)
1080 {
1081         struct sk_buff *skb;
1082         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1083         struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1084         dma_addr_t mapping;
1085
1086         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1087         if (unlikely(skb == NULL))
1088                 return -ENOMEM;
1089
1090         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
1091                                  PCI_DMA_FROMDEVICE);
1092         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1093                 dev_kfree_skb(skb);
1094                 return -ENOMEM;
1095         }
1096
1097         rx_buf->skb = skb;
1098         pci_unmap_addr_set(rx_buf, mapping, mapping);
1099
1100         rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1101         rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1102
1103         return 0;
1104 }
1105
1106 /* note that we are not allocating a new skb,
1107  * we are just moving one from cons to prod
1108  * we are not creating a new mapping,
1109  * so there is no need to check for dma_mapping_error().
1110  */
1111 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1112                                struct sk_buff *skb, u16 cons, u16 prod)
1113 {
1114         struct bnx2x *bp = fp->bp;
1115         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1116         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1117         struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1118         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1119
1120         pci_dma_sync_single_for_device(bp->pdev,
1121                                        pci_unmap_addr(cons_rx_buf, mapping),
1122                                        RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1123
1124         prod_rx_buf->skb = cons_rx_buf->skb;
1125         pci_unmap_addr_set(prod_rx_buf, mapping,
1126                            pci_unmap_addr(cons_rx_buf, mapping));
1127         *prod_bd = *cons_bd;
1128 }
1129
1130 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1131                                              u16 idx)
1132 {
1133         u16 last_max = fp->last_max_sge;
1134
1135         if (SUB_S16(idx, last_max) > 0)
1136                 fp->last_max_sge = idx;
1137 }
1138
1139 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1140 {
1141         int i, j;
1142
1143         for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1144                 int idx = RX_SGE_CNT * i - 1;
1145
1146                 for (j = 0; j < 2; j++) {
1147                         SGE_MASK_CLEAR_BIT(fp, idx);
1148                         idx--;
1149                 }
1150         }
1151 }
1152
1153 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1154                                   struct eth_fast_path_rx_cqe *fp_cqe)
1155 {
1156         struct bnx2x *bp = fp->bp;
1157         u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1158                                      le16_to_cpu(fp_cqe->len_on_bd)) >>
1159                       SGE_PAGE_SHIFT;
1160         u16 last_max, last_elem, first_elem;
1161         u16 delta = 0;
1162         u16 i;
1163
1164         if (!sge_len)
1165                 return;
1166
1167         /* First mark all used pages */
1168         for (i = 0; i < sge_len; i++)
1169                 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1170
1171         DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1172            sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1173
1174         /* Here we assume that the last SGE index is the biggest */
1175         prefetch((void *)(fp->sge_mask));
1176         bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1177
1178         last_max = RX_SGE(fp->last_max_sge);
1179         last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1180         first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1181
1182         /* If ring is not full */
1183         if (last_elem + 1 != first_elem)
1184                 last_elem++;
1185
1186         /* Now update the prod */
1187         for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1188                 if (likely(fp->sge_mask[i]))
1189                         break;
1190
1191                 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1192                 delta += RX_SGE_MASK_ELEM_SZ;
1193         }
1194
1195         if (delta > 0) {
1196                 fp->rx_sge_prod += delta;
1197                 /* clear page-end entries */
1198                 bnx2x_clear_sge_mask_next_elems(fp);
1199         }
1200
1201         DP(NETIF_MSG_RX_STATUS,
1202            "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
1203            fp->last_max_sge, fp->rx_sge_prod);
1204 }
1205
1206 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1207 {
1208         /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1209         memset(fp->sge_mask, 0xff,
1210                (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1211
1212         /* Clear the last two indices in the page to 1:
1213            these are the indices that correspond to the "next" element,
1214            hence will never be indicated and should be removed from
1215            the calculations. */
1216         bnx2x_clear_sge_mask_next_elems(fp);
1217 }
1218
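/* Start a TPA (LRO) aggregation: the spare skb kept in tpa_pool[queue] is
 * mapped and placed at the producer slot, while the partially-filled skb at
 * the consumer slot is parked in the pool until bnx2x_tpa_stop() completes
 * or drops the aggregation.
 */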
1219 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1220                             struct sk_buff *skb, u16 cons, u16 prod)
1221 {
1222         struct bnx2x *bp = fp->bp;
1223         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1224         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1225         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1226         dma_addr_t mapping;
1227
1228         /* move empty skb from pool to prod and map it */
1229         prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1230         mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1231                                  bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1232         pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1233
1234         /* move partial skb from cons to pool (don't unmap yet) */
1235         fp->tpa_pool[queue] = *cons_rx_buf;
1236
1237         /* mark bin state as start - print error if current state != stop */
1238         if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1239                 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1240
1241         fp->tpa_state[queue] = BNX2X_TPA_START;
1242
1243         /* point prod_bd to new skb */
1244         prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1245         prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1246
1247 #ifdef BNX2X_STOP_ON_ERROR
1248         fp->tpa_queue_used |= (1 << queue);
1249 #ifdef __powerpc64__
1250         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1251 #else
1252         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1253 #endif
1254            fp->tpa_queue_used);
1255 #endif
1256 }
1257
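/* Attach the SGE pages of an aggregated packet to the skb: walk the SGL
 * reported in the fast-path CQE, replace each used page with a freshly
 * allocated one and add the old page to the skb as a fragment.  If a
 * replacement page cannot be allocated the caller drops the packet.
 */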
1258 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1259                                struct sk_buff *skb,
1260                                struct eth_fast_path_rx_cqe *fp_cqe,
1261                                u16 cqe_idx)
1262 {
1263         struct sw_rx_page *rx_pg, old_rx_pg;
1264         u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1265         u32 i, frag_len, frag_size, pages;
1266         int err;
1267         int j;
1268
1269         frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1270         pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1271
1272         /* This is needed in order to enable forwarding support */
1273         if (frag_size)
1274                 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1275                                                max(frag_size, (u32)len_on_bd));
1276
1277 #ifdef BNX2X_STOP_ON_ERROR
1278         if (pages >
1279             min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
1280                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1281                           pages, cqe_idx);
1282                 BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
1283                           fp_cqe->pkt_len, len_on_bd);
1284                 bnx2x_panic();
1285                 return -EINVAL;
1286         }
1287 #endif
1288
1289         /* Run through the SGL and compose the fragmented skb */
1290         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1291                 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1292
1293                 /* FW gives the indices of the SGE as if the ring is an array
1294                    (meaning that "next" element will consume 2 indices) */
1295                 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1296                 rx_pg = &fp->rx_page_ring[sge_idx];
1297                 old_rx_pg = *rx_pg;
1298
1299                 /* If we fail to allocate a substitute page, we simply stop
1300                    where we are and drop the whole packet */
1301                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1302                 if (unlikely(err)) {
1303                         fp->eth_q_stats.rx_skb_alloc_failed++;
1304                         return err;
1305                 }
1306
1307                 /* Unmap the page as we are going to pass it to the stack */
1308                 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1309                               SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1310
1311                 /* Add one frag and update the appropriate fields in the skb */
1312                 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1313
1314                 skb->data_len += frag_len;
1315                 skb->truesize += frag_len;
1316                 skb->len += frag_len;
1317
1318                 frag_size -= frag_len;
1319         }
1320
1321         return 0;
1322 }
1323
1324 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1325                            u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1326                            u16 cqe_idx)
1327 {
1328         struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1329         struct sk_buff *skb = rx_buf->skb;
1330         /* alloc new skb */
1331         struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1332
1333         /* Unmap skb in the pool anyway, as we are going to change
1334            pool entry status to BNX2X_TPA_STOP even if new skb allocation
1335            fails. */
1336         pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1337                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1338
1339         if (likely(new_skb)) {
1340                 /* fix ip xsum and give it to the stack */
1341                 /* (no need to map the new skb) */
1342 #ifdef BCM_VLAN
1343                 int is_vlan_cqe =
1344                         (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1345                          PARSING_FLAGS_VLAN);
1346                 int is_not_hwaccel_vlan_cqe =
1347                         (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1348 #endif
1349
1350                 prefetch(skb);
1351                 prefetch(((char *)(skb)) + 128);
1352
1353 #ifdef BNX2X_STOP_ON_ERROR
1354                 if (pad + len > bp->rx_buf_size) {
1355                         BNX2X_ERR("skb_put is about to fail...  "
1356                                   "pad %d  len %d  rx_buf_size %d\n",
1357                                   pad, len, bp->rx_buf_size);
1358                         bnx2x_panic();
1359                         return;
1360                 }
1361 #endif
1362
1363                 skb_reserve(skb, pad);
1364                 skb_put(skb, len);
1365
1366                 skb->protocol = eth_type_trans(skb, bp->dev);
1367                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1368
1369                 {
1370                         struct iphdr *iph;
1371
1372                         iph = (struct iphdr *)skb->data;
1373 #ifdef BCM_VLAN
1374                         /* If there is no Rx VLAN offloading -
1375                            take VLAN tag into an account */
1376                         if (unlikely(is_not_hwaccel_vlan_cqe))
1377                                 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1378 #endif
1379                         iph->check = 0;
1380                         iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1381                 }
1382
1383                 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1384                                          &cqe->fast_path_cqe, cqe_idx)) {
1385 #ifdef BCM_VLAN
1386                         if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1387                             (!is_not_hwaccel_vlan_cqe))
1388                                 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1389                                                 le16_to_cpu(cqe->fast_path_cqe.
1390                                                             vlan_tag));
1391                         else
1392 #endif
1393                                 netif_receive_skb(skb);
1394                 } else {
1395                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1396                            " - dropping packet!\n");
1397                         dev_kfree_skb(skb);
1398                 }
1399
1400
1401                 /* put new skb in bin */
1402                 fp->tpa_pool[queue].skb = new_skb;
1403
1404         } else {
1405                 /* else drop the packet and keep the buffer in the bin */
1406                 DP(NETIF_MSG_RX_STATUS,
1407                    "Failed to allocate new skb - dropping packet!\n");
1408                 fp->eth_q_stats.rx_skb_alloc_failed++;
1409         }
1410
1411         fp->tpa_state[queue] = BNX2X_TPA_STOP;
1412 }
1413
1414 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1415                                         struct bnx2x_fastpath *fp,
1416                                         u16 bd_prod, u16 rx_comp_prod,
1417                                         u16 rx_sge_prod)
1418 {
1419         struct ustorm_eth_rx_producers rx_prods = {0};
1420         int i;
1421
1422         /* Update producers */
1423         rx_prods.bd_prod = bd_prod;
1424         rx_prods.cqe_prod = rx_comp_prod;
1425         rx_prods.sge_prod = rx_sge_prod;
1426
1427         /*
1428          * Make sure that the BD and SGE data is updated before updating the
1429          * producers since FW might read the BD/SGE right after the producer
1430          * is updated.
1431          * This is only applicable for weak-ordered memory model archs such
1432          * as IA-64. The following barrier is also mandatory since the FW
1433          * assumes BDs must have buffers.
1434          */
1435         wmb();
1436
1437         for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1438                 REG_WR(bp, BAR_USTRORM_INTMEM +
1439                        USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1440                        ((u32 *)&rx_prods)[i]);
1441
1442         mmiowb(); /* keep prod updates ordered */
1443
1444         DP(NETIF_MSG_RX_STATUS,
1445            "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
1446            fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1447 }
1448
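/* Rx fast path: process completions from the RCQ up to the NAPI budget,
 * handling slowpath events, TPA aggregations and regular packets, then
 * update the producers for the FW */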
1449 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1450 {
1451         struct bnx2x *bp = fp->bp;
1452         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1453         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1454         int rx_pkt = 0;
1455
1456 #ifdef BNX2X_STOP_ON_ERROR
1457         if (unlikely(bp->panic))
1458                 return 0;
1459 #endif
1460
1461         /* The CQ "next element" is the same size as a regular element,
1462            which is why it is OK here */
1463         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1464         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1465                 hw_comp_cons++;
1466
1467         bd_cons = fp->rx_bd_cons;
1468         bd_prod = fp->rx_bd_prod;
1469         bd_prod_fw = bd_prod;
1470         sw_comp_cons = fp->rx_comp_cons;
1471         sw_comp_prod = fp->rx_comp_prod;
1472
1473         /* Memory barrier necessary as speculative reads of the rx
1474          * buffer can be ahead of the index in the status block
1475          */
1476         rmb();
1477
1478         DP(NETIF_MSG_RX_STATUS,
1479            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1480            fp->index, hw_comp_cons, sw_comp_cons);
1481
1482         while (sw_comp_cons != hw_comp_cons) {
1483                 struct sw_rx_bd *rx_buf = NULL;
1484                 struct sk_buff *skb;
1485                 union eth_rx_cqe *cqe;
1486                 u8 cqe_fp_flags;
1487                 u16 len, pad;
1488
1489                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1490                 bd_prod = RX_BD(bd_prod);
1491                 bd_cons = RX_BD(bd_cons);
1492
1493                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1494                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1495
1496                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1497                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1498                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1499                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1500                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1501                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1502
1503                 /* is this a slowpath msg? */
1504                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1505                         bnx2x_sp_event(fp, cqe);
1506                         goto next_cqe;
1507
1508                 /* this is an rx packet */
1509                 } else {
1510                         rx_buf = &fp->rx_buf_ring[bd_cons];
1511                         skb = rx_buf->skb;
1512                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1513                         pad = cqe->fast_path_cqe.placement_offset;
1514
1515                         /* If CQE is marked both TPA_START and TPA_END
1516                            it is a non-TPA CQE */
1517                         if ((!fp->disable_tpa) &&
1518                             (TPA_TYPE(cqe_fp_flags) !=
1519                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1520                                 u16 queue = cqe->fast_path_cqe.queue_index;
1521
1522                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1523                                         DP(NETIF_MSG_RX_STATUS,
1524                                            "calling tpa_start on queue %d\n",
1525                                            queue);
1526
1527                                         bnx2x_tpa_start(fp, queue, skb,
1528                                                         bd_cons, bd_prod);
1529                                         goto next_rx;
1530                                 }
1531
1532                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1533                                         DP(NETIF_MSG_RX_STATUS,
1534                                            "calling tpa_stop on queue %d\n",
1535                                            queue);
1536
1537                                         if (!BNX2X_RX_SUM_FIX(cqe))
1538                                                 BNX2X_ERR("STOP on non-TCP "
1539                                                           "data\n");
1540
1541                                         /* This is the size of the linear data
1542                                            on this skb */
1543                                         len = le16_to_cpu(cqe->fast_path_cqe.
1544                                                                 len_on_bd);
1545                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1546                                                     len, cqe, comp_ring_cons);
1547 #ifdef BNX2X_STOP_ON_ERROR
1548                                         if (bp->panic)
1549                                                 return 0;
1550 #endif
1551
1552                                         bnx2x_update_sge_prod(fp,
1553                                                         &cqe->fast_path_cqe);
1554                                         goto next_cqe;
1555                                 }
1556                         }
1557
1558                         pci_dma_sync_single_for_device(bp->pdev,
1559                                         pci_unmap_addr(rx_buf, mapping),
1560                                                        pad + RX_COPY_THRESH,
1561                                                        PCI_DMA_FROMDEVICE);
1562                         prefetch(skb);
1563                         prefetch(((char *)(skb)) + 128);
1564
1565                         /* is this an error packet? */
1566                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1567                                 DP(NETIF_MSG_RX_ERR,
1568                                    "ERROR  flags %x  rx packet %u\n",
1569                                    cqe_fp_flags, sw_comp_cons);
1570                                 fp->eth_q_stats.rx_err_discard_pkt++;
1571                                 goto reuse_rx;
1572                         }
1573
1574                         /* Since we don't have a jumbo ring
1575                          * copy small packets if mtu > 1500
1576                          */
1577                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1578                             (len <= RX_COPY_THRESH)) {
1579                                 struct sk_buff *new_skb;
1580
1581                                 new_skb = netdev_alloc_skb(bp->dev,
1582                                                            len + pad);
1583                                 if (new_skb == NULL) {
1584                                         DP(NETIF_MSG_RX_ERR,
1585                                            "ERROR  packet dropped "
1586                                            "because of alloc failure\n");
1587                                         fp->eth_q_stats.rx_skb_alloc_failed++;
1588                                         goto reuse_rx;
1589                                 }
1590
1591                                 /* aligned copy */
1592                                 skb_copy_from_linear_data_offset(skb, pad,
1593                                                     new_skb->data + pad, len);
1594                                 skb_reserve(new_skb, pad);
1595                                 skb_put(new_skb, len);
1596
1597                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1598
1599                                 skb = new_skb;
1600
1601                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1602                                 pci_unmap_single(bp->pdev,
1603                                         pci_unmap_addr(rx_buf, mapping),
1604                                                  bp->rx_buf_size,
1605                                                  PCI_DMA_FROMDEVICE);
1606                                 skb_reserve(skb, pad);
1607                                 skb_put(skb, len);
1608
1609                         } else {
1610                                 DP(NETIF_MSG_RX_ERR,
1611                                    "ERROR  packet dropped because "
1612                                    "of alloc failure\n");
1613                                 fp->eth_q_stats.rx_skb_alloc_failed++;
1614 reuse_rx:
1615                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1616                                 goto next_rx;
1617                         }
1618
1619                         skb->protocol = eth_type_trans(skb, bp->dev);
1620
1621                         skb->ip_summed = CHECKSUM_NONE;
1622                         if (bp->rx_csum) {
1623                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1624                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1625                                 else
1626                                         fp->eth_q_stats.hw_csum_err++;
1627                         }
1628                 }
1629
1630                 skb_record_rx_queue(skb, fp->index);
1631 #ifdef BCM_VLAN
1632                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1633                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1634                      PARSING_FLAGS_VLAN))
1635                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1636                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1637                 else
1638 #endif
1639                         netif_receive_skb(skb);
1640
1641
1642 next_rx:
1643                 rx_buf->skb = NULL;
1644
1645                 bd_cons = NEXT_RX_IDX(bd_cons);
1646                 bd_prod = NEXT_RX_IDX(bd_prod);
1647                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1648                 rx_pkt++;
1649 next_cqe:
1650                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1651                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1652
1653                 if (rx_pkt == budget)
1654                         break;
1655         } /* while */
1656
1657         fp->rx_bd_cons = bd_cons;
1658         fp->rx_bd_prod = bd_prod_fw;
1659         fp->rx_comp_cons = sw_comp_cons;
1660         fp->rx_comp_prod = sw_comp_prod;
1661
1662         /* Update producers */
1663         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1664                              fp->rx_sge_prod);
1665
1666         fp->rx_pkt += rx_pkt;
1667         fp->rx_calls++;
1668
1669         return rx_pkt;
1670 }
1671
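/* MSI-X fastpath interrupt handler - ack the status block and schedule
 * NAPI for this queue */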
1672 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1673 {
1674         struct bnx2x_fastpath *fp = fp_cookie;
1675         struct bnx2x *bp = fp->bp;
1676         int index = fp->index;
1677
1678         /* Return here if interrupt is disabled */
1679         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1680                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1681                 return IRQ_HANDLED;
1682         }
1683
1684         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1685            index, fp->sb_id);
1686         bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1687
1688 #ifdef BNX2X_STOP_ON_ERROR
1689         if (unlikely(bp->panic))
1690                 return IRQ_HANDLED;
1691 #endif
1692
1693         prefetch(fp->rx_cons_sb);
1694         prefetch(fp->tx_cons_sb);
1695         prefetch(&fp->status_blk->c_status_block.status_block_index);
1696         prefetch(&fp->status_blk->u_status_block.status_block_index);
1697
1698         napi_schedule(&bnx2x_fp(bp, index, napi));
1699
1700         return IRQ_HANDLED;
1701 }
1702
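/* INT#x/MSI interrupt handler - ack the interrupt, schedule NAPI for the
 * default queue and kick the slowpath task when the slowpath bit is set */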
1703 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1704 {
1705         struct bnx2x *bp = netdev_priv(dev_instance);
1706         u16 status = bnx2x_ack_int(bp);
1707         u16 mask;
1708
1709         /* Return here if interrupt is shared and it's not for us */
1710         if (unlikely(status == 0)) {
1711                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1712                 return IRQ_NONE;
1713         }
1714         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1715
1716         /* Return here if interrupt is disabled */
1717         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1718                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1719                 return IRQ_HANDLED;
1720         }
1721
1722 #ifdef BNX2X_STOP_ON_ERROR
1723         if (unlikely(bp->panic))
1724                 return IRQ_HANDLED;
1725 #endif
1726
1727         mask = 0x2 << bp->fp[0].sb_id;
1728         if (status & mask) {
1729                 struct bnx2x_fastpath *fp = &bp->fp[0];
1730
1731                 prefetch(fp->rx_cons_sb);
1732                 prefetch(fp->tx_cons_sb);
1733                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1734                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1735
1736                 napi_schedule(&bnx2x_fp(bp, 0, napi));
1737
1738                 status &= ~mask;
1739         }
1740
1741
1742         if (unlikely(status & 0x1)) {
1743                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1744
1745                 status &= ~0x1;
1746                 if (!status)
1747                         return IRQ_HANDLED;
1748         }
1749
1750         if (status)
1751                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1752                    status);
1753
1754         return IRQ_HANDLED;
1755 }
1756
1757 /* end of fast path */
1758
1759 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1760
1761 /* Link */
1762
1763 /*
1764  * General service functions
1765  */
1766
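/* Acquire a HW resource lock: write the resource bit to the per-function
 * lock register and poll for up to 5 seconds until the lock is granted */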
1767 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1768 {
1769         u32 lock_status;
1770         u32 resource_bit = (1 << resource);
1771         int func = BP_FUNC(bp);
1772         u32 hw_lock_control_reg;
1773         int cnt;
1774
1775         /* Validating that the resource is within range */
1776         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1777                 DP(NETIF_MSG_HW,
1778                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1779                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1780                 return -EINVAL;
1781         }
1782
1783         if (func <= 5) {
1784                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1785         } else {
1786                 hw_lock_control_reg =
1787                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1788         }
1789
1790         /* Validating that the resource is not already taken */
1791         lock_status = REG_RD(bp, hw_lock_control_reg);
1792         if (lock_status & resource_bit) {
1793                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1794                    lock_status, resource_bit);
1795                 return -EEXIST;
1796         }
1797
1798         /* Try for 5 seconds, polling every 5ms */
1799         for (cnt = 0; cnt < 1000; cnt++) {
1800                 /* Try to acquire the lock */
1801                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1802                 lock_status = REG_RD(bp, hw_lock_control_reg);
1803                 if (lock_status & resource_bit)
1804                         return 0;
1805
1806                 msleep(5);
1807         }
1808         DP(NETIF_MSG_HW, "Timeout\n");
1809         return -EAGAIN;
1810 }
1811
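/* Release a HW resource lock previously acquired by bnx2x_acquire_hw_lock() */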
1812 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1813 {
1814         u32 lock_status;
1815         u32 resource_bit = (1 << resource);
1816         int func = BP_FUNC(bp);
1817         u32 hw_lock_control_reg;
1818
1819         /* Validating that the resource is within range */
1820         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1821                 DP(NETIF_MSG_HW,
1822                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1823                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1824                 return -EINVAL;
1825         }
1826
1827         if (func <= 5) {
1828                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1829         } else {
1830                 hw_lock_control_reg =
1831                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1832         }
1833
1834         /* Validating that the resource is currently taken */
1835         lock_status = REG_RD(bp, hw_lock_control_reg);
1836         if (!(lock_status & resource_bit)) {
1837                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1838                    lock_status, resource_bit);
1839                 return -EFAULT;
1840         }
1841
1842         REG_WR(bp, hw_lock_control_reg, resource_bit);
1843         return 0;
1844 }
1845
1846 /* HW Lock for shared dual port PHYs */
1847 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1848 {
1849         mutex_lock(&bp->port.phy_mutex);
1850
1851         if (bp->port.need_hw_lock)
1852                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1853 }
1854
1855 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1856 {
1857         if (bp->port.need_hw_lock)
1858                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1859
1860         mutex_unlock(&bp->port.phy_mutex);
1861 }
1862
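/* Read the current value of a single GPIO pin, taking the port swap
 * configuration into account */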
1863 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1864 {
1865         /* The GPIO should be swapped if swap register is set and active */
1866         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1867                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1868         int gpio_shift = gpio_num +
1869                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1870         u32 gpio_mask = (1 << gpio_shift);
1871         u32 gpio_reg;
1872         int value;
1873
1874         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1875                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1876                 return -EINVAL;
1877         }
1878
1879         /* read GPIO value */
1880         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1881
1882         /* get the requested pin value */
1883         if ((gpio_reg & gpio_mask) == gpio_mask)
1884                 value = 1;
1885         else
1886                 value = 0;
1887
1888         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
1889
1890         return value;
1891 }
1892
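/* Drive a single GPIO pin low or high, or float it, under the GPIO HW lock */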
1893 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1894 {
1895         /* The GPIO should be swapped if swap register is set and active */
1896         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1897                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1898         int gpio_shift = gpio_num +
1899                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1900         u32 gpio_mask = (1 << gpio_shift);
1901         u32 gpio_reg;
1902
1903         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1904                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1905                 return -EINVAL;
1906         }
1907
1908         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1909         /* read GPIO and mask off everything except the float bits */
1910         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1911
1912         switch (mode) {
1913         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1914                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1915                    gpio_num, gpio_shift);
1916                 /* clear FLOAT and set CLR */
1917                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1918                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1919                 break;
1920
1921         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1922                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1923                    gpio_num, gpio_shift);
1924                 /* clear FLOAT and set SET */
1925                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1926                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1927                 break;
1928
1929         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1930                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1931                    gpio_num, gpio_shift);
1932                 /* set FLOAT */
1933                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1934                 break;
1935
1936         default:
1937                 break;
1938         }
1939
1940         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1941         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1942
1943         return 0;
1944 }
1945
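/* Set or clear the interrupt output of a single GPIO pin under the GPIO
 * HW lock */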
1946 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1947 {
1948         /* The GPIO should be swapped if swap register is set and active */
1949         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1950                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1951         int gpio_shift = gpio_num +
1952                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1953         u32 gpio_mask = (1 << gpio_shift);
1954         u32 gpio_reg;
1955
1956         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1957                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1958                 return -EINVAL;
1959         }
1960
1961         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1962         /* read GPIO int */
1963         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1964
1965         switch (mode) {
1966         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1967                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1968                                    "output low\n", gpio_num, gpio_shift);
1969                 /* clear SET and set CLR */
1970                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1971                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1972                 break;
1973
1974         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1975                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1976                                    "output high\n", gpio_num, gpio_shift);
1977                 /* clear CLR and set SET */
1978                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1979                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1980                 break;
1981
1982         default:
1983                 break;
1984         }
1985
1986         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1987         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1988
1989         return 0;
1990 }
1991
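/* Drive a single SPIO pin low or high, or float it, under the SPIO HW lock */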
1992 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1993 {
1994         u32 spio_mask = (1 << spio_num);
1995         u32 spio_reg;
1996
1997         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1998             (spio_num > MISC_REGISTERS_SPIO_7)) {
1999                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2000                 return -EINVAL;
2001         }
2002
2003         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2004         /* read SPIO and mask off everything except the float bits */
2005         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2006
2007         switch (mode) {
2008         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2009                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2010                 /* clear FLOAT and set CLR */
2011                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2012                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2013                 break;
2014
2015         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2016                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2017                 /* clear FLOAT and set SET */
2018                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2019                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2020                 break;
2021
2022         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2023                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2024                 /* set FLOAT */
2025                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2026                 break;
2027
2028         default:
2029                 break;
2030         }
2031
2032         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2033         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2034
2035         return 0;
2036 }
2037
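/* Update the port advertising flags (Pause/Asym_Pause) to match the IEEE
 * pause bits in link_vars */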
2038 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2039 {
2040         switch (bp->link_vars.ieee_fc &
2041                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2042         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2043                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2044                                           ADVERTISED_Pause);
2045                 break;
2046
2047         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2048                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2049                                          ADVERTISED_Pause);
2050                 break;
2051
2052         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2053                 bp->port.advertising |= ADVERTISED_Asym_Pause;
2054                 break;
2055
2056         default:
2057                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2058                                           ADVERTISED_Pause);
2059                 break;
2060         }
2061 }
2062
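/* Report the link state: update the carrier and print speed, duplex and
 * flow control when the link is up */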
2063 static void bnx2x_link_report(struct bnx2x *bp)
2064 {
2065         if (bp->link_vars.link_up) {
2066                 if (bp->state == BNX2X_STATE_OPEN)
2067                         netif_carrier_on(bp->dev);
2068                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2069
2070                 printk("%d Mbps ", bp->link_vars.line_speed);
2071
2072                 if (bp->link_vars.duplex == DUPLEX_FULL)
2073                         printk("full duplex");
2074                 else
2075                         printk("half duplex");
2076
2077                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2078                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2079                                 printk(", receive ");
2080                                 if (bp->link_vars.flow_ctrl &
2081                                     BNX2X_FLOW_CTRL_TX)
2082                                         printk("& transmit ");
2083                         } else {
2084                                 printk(", transmit ");
2085                         }
2086                         printk("flow control ON");
2087                 }
2088                 printk("\n");
2089
2090         } else { /* link_down */
2091                 netif_carrier_off(bp->dev);
2092                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2093         }
2094 }
2095
2096 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2097 {
2098         if (!BP_NOMCP(bp)) {
2099                 u8 rc;
2100
2101                 /* Initialize link parameters structure variables */
2102                 /* It is recommended to turn off RX FC for jumbo frames
2103                    for better performance */
2104                 if (IS_E1HMF(bp))
2105                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2106                 else if (bp->dev->mtu > 5000)
2107                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2108                 else
2109                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2110
2111                 bnx2x_acquire_phy_lock(bp);
2112
2113                 if (load_mode == LOAD_DIAG)
2114                         bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2115
2116                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2117
2118                 bnx2x_release_phy_lock(bp);
2119
2120                 bnx2x_calc_fc_adv(bp);
2121
2122                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2123                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2124                         bnx2x_link_report(bp);
2125                 }
2126
2127                 return rc;
2128         }
2129         BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2130         return -EINVAL;
2131 }
2132
2133 static void bnx2x_link_set(struct bnx2x *bp)
2134 {
2135         if (!BP_NOMCP(bp)) {
2136                 bnx2x_acquire_phy_lock(bp);
2137                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2138                 bnx2x_release_phy_lock(bp);
2139
2140                 bnx2x_calc_fc_adv(bp);
2141         } else
2142                 BNX2X_ERR("Bootcode is missing - can not set link\n");
2143 }
2144
2145 static void bnx2x__link_reset(struct bnx2x *bp)
2146 {
2147         if (!BP_NOMCP(bp)) {
2148                 bnx2x_acquire_phy_lock(bp);
2149                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2150                 bnx2x_release_phy_lock(bp);
2151         } else
2152                 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2153 }
2154
2155 static u8 bnx2x_link_test(struct bnx2x *bp)
2156 {
2157         u8 rc;
2158
2159         bnx2x_acquire_phy_lock(bp);
2160         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2161         bnx2x_release_phy_lock(bp);
2162
2163         return rc;
2164 }
2165
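/* Initialize the per-port rate shaping and fairness parameters according to
 * the current line speed */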
2166 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2167 {
2168         u32 r_param = bp->link_vars.line_speed / 8;
2169         u32 fair_periodic_timeout_usec;
2170         u32 t_fair;
2171
2172         memset(&(bp->cmng.rs_vars), 0,
2173                sizeof(struct rate_shaping_vars_per_port));
2174         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2175
2176         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2177         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2178
2179         /* this is the threshold below which no timer arming will occur;
2180            the 1.25 coefficient makes the threshold a little bigger than
2181            the real time, to compensate for timer inaccuracy */
2182         bp->cmng.rs_vars.rs_threshold =
2183                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2184
2185         /* resolution of fairness timer */
2186         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2187         /* for 10G it is 1000usec. for 1G it is 10000usec. */
2188         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2189
2190         /* this is the threshold below which we won't arm the timer anymore */
2191         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2192
2193         /* we multiply by 1e3/8 to get bytes/msec.
2194            We don't want the credits to pass a credit
2195            of the t_fair*FAIR_MEM (algorithm resolution) */
2196         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2197         /* since each tick is 4 usec */
2198         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2199 }
2200
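/* Compute the per-VN min/max rate configuration from the MF config in shared
 * memory and write it to XSTORM internal memory */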
2201 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2202 {
2203         struct rate_shaping_vars_per_vn m_rs_vn;
2204         struct fairness_vars_per_vn m_fair_vn;
2205         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2206         u16 vn_min_rate, vn_max_rate;
2207         int i;
2208
2209         /* If function is hidden - set min and max to zeroes */
2210         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2211                 vn_min_rate = 0;
2212                 vn_max_rate = 0;
2213
2214         } else {
2215                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2216                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2217                 /* If fairness is enabled (not all min rates are zeroes) and
2218                    if current min rate is zero - set it to 1.
2219                    This is a requirement of the algorithm. */
2220                 if (bp->vn_weight_sum && (vn_min_rate == 0))
2221                         vn_min_rate = DEF_MIN_RATE;
2222                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2223                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2224         }
2225
2226         DP(NETIF_MSG_IFUP,
2227            "func %d: vn_min_rate=%d  vn_max_rate=%d  vn_weight_sum=%d\n",
2228            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2229
2230         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2231         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2232
2233         /* global vn counter - maximal Mbps for this vn */
2234         m_rs_vn.vn_counter.rate = vn_max_rate;
2235
2236         /* quota - number of bytes transmitted in this period */
2237         m_rs_vn.vn_counter.quota =
2238                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2239
2240         if (bp->vn_weight_sum) {
2241                 /* credit for each period of the fairness algorithm:
2242                    number of bytes in T_FAIR (the vns share the port rate).
2243                    vn_weight_sum should not be larger than 10000, thus
2244                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2245                    than zero */
2246                 m_fair_vn.vn_credit_delta =
2247                         max((u32)(vn_min_rate * (T_FAIR_COEF /
2248                                                  (8 * bp->vn_weight_sum))),
2249                             (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2250                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2251                    m_fair_vn.vn_credit_delta);
2252         }
2253
2254         /* Store it to internal memory */
2255         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2256                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2257                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2258                        ((u32 *)(&m_rs_vn))[i]);
2259
2260         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2261                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2262                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2263                        ((u32 *)(&m_fair_vn))[i]);
2264 }
2265
2266
2267 /* This function is called upon link interrupt */
2268 static void bnx2x_link_attn(struct bnx2x *bp)
2269 {
2270         /* Make sure that we are synced with the current statistics */
2271         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2272
2273         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2274
2275         if (bp->link_vars.link_up) {
2276
2277                 /* dropless flow control */
2278                 if (CHIP_IS_E1H(bp)) {
2279                         int port = BP_PORT(bp);
2280                         u32 pause_enabled = 0;
2281
2282                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2283                                 pause_enabled = 1;
2284
2285                         REG_WR(bp, BAR_USTRORM_INTMEM +
2286                                USTORM_PAUSE_ENABLED_OFFSET(port),
2287                                pause_enabled);
2288                 }
2289
2290                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2291                         struct host_port_stats *pstats;
2292
2293                         pstats = bnx2x_sp(bp, port_stats);
2294                         /* reset old bmac stats */
2295                         memset(&(pstats->mac_stx[0]), 0,
2296                                sizeof(struct mac_stx));
2297                 }
2298                 if ((bp->state == BNX2X_STATE_OPEN) ||
2299                     (bp->state == BNX2X_STATE_DISABLED))
2300                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2301         }
2302
2303         /* indicate link status */
2304         bnx2x_link_report(bp);
2305
2306         if (IS_E1HMF(bp)) {
2307                 int port = BP_PORT(bp);
2308                 int func;
2309                 int vn;
2310
2311                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2312                         if (vn == BP_E1HVN(bp))
2313                                 continue;
2314
2315                         func = ((vn << 1) | port);
2316
2317                         /* Set the attention towards other drivers
2318                            on the same port */
2319                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2320                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2321                 }
2322
2323                 if (bp->link_vars.link_up) {
2324                         int i;
2325
2326                         /* Init rate shaping and fairness contexts */
2327                         bnx2x_init_port_minmax(bp);
2328
2329                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2330                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2331
2332                         /* Store it to internal memory */
2333                         for (i = 0;
2334                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
2335                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2336                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2337                                        ((u32 *)(&bp->cmng))[i]);
2338                 }
2339         }
2340 }
2341
2342 static void bnx2x__link_status_update(struct bnx2x *bp)
2343 {
2344         if (bp->state != BNX2X_STATE_OPEN)
2345                 return;
2346
2347         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2348
2349         if (bp->link_vars.link_up)
2350                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2351         else
2352                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2353
2354         /* indicate link status */
2355         bnx2x_link_report(bp);
2356 }
2357
2358 static void bnx2x_pmf_update(struct bnx2x *bp)
2359 {
2360         int port = BP_PORT(bp);
2361         u32 val;
2362
2363         bp->port.pmf = 1;
2364         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2365
2366         /* enable nig attention */
2367         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2368         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2369         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2370
2371         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2372 }
2373
2374 /* end of Link */
2375
2376 /* slow path */
2377
2378 /*
2379  * General service functions
2380  */
2381
2382 /* the slow path queue is odd since completions arrive on the fastpath ring */
2383 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2384                          u32 data_hi, u32 data_lo, int common)
2385 {
2386         int func = BP_FUNC(bp);
2387
2388         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2389            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2390            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2391            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2392            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2393
2394 #ifdef BNX2X_STOP_ON_ERROR
2395         if (unlikely(bp->panic))
2396                 return -EIO;
2397 #endif
2398
2399         spin_lock_bh(&bp->spq_lock);
2400
2401         if (!bp->spq_left) {
2402                 BNX2X_ERR("BUG! SPQ ring full!\n");
2403                 spin_unlock_bh(&bp->spq_lock);
2404                 bnx2x_panic();
2405                 return -EBUSY;
2406         }
2407
2408         /* CID needs the port number to be encoded in it */
2409         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2410                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2411                                      HW_CID(bp, cid)));
2412         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2413         if (common)
2414                 bp->spq_prod_bd->hdr.type |=
2415                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2416
2417         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2418         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2419
2420         bp->spq_left--;
2421
2422         if (bp->spq_prod_bd == bp->spq_last_bd) {
2423                 bp->spq_prod_bd = bp->spq;
2424                 bp->spq_prod_idx = 0;
2425                 DP(NETIF_MSG_TIMER, "end of spq\n");
2426
2427         } else {
2428                 bp->spq_prod_bd++;
2429                 bp->spq_prod_idx++;
2430         }
2431
2432         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2433                bp->spq_prod_idx);
2434
2435         spin_unlock_bh(&bp->spq_lock);
2436         return 0;
2437 }
2438
2439 /* acquire split MCP access lock register */
2440 static int bnx2x_acquire_alr(struct bnx2x *bp)
2441 {
2442         u32 i, j, val;
2443         int rc = 0;
2444
2445         might_sleep();
2446         i = 100;
2447         for (j = 0; j < i*10; j++) {
2448                 val = (1UL << 31);
2449                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2450                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2451                 if (val & (1L << 31))
2452                         break;
2453
2454                 msleep(5);
2455         }
2456         if (!(val & (1L << 31))) {
2457                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2458                 rc = -EBUSY;
2459         }
2460
2461         return rc;
2462 }
2463
2464 /* release split MCP access lock register */
2465 static void bnx2x_release_alr(struct bnx2x *bp)
2466 {
2467         u32 val = 0;
2468
2469         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2470 }
2471
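/* Check which indices in the default status block have changed and return
 * them as a bitmask */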
2472 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2473 {
2474         struct host_def_status_block *def_sb = bp->def_status_blk;
2475         u16 rc = 0;
2476
2477         barrier(); /* status block is written to by the chip */
2478         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2479                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2480                 rc |= 1;
2481         }
2482         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2483                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2484                 rc |= 2;
2485         }
2486         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2487                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2488                 rc |= 4;
2489         }
2490         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2491                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2492                 rc |= 8;
2493         }
2494         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2495                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2496                 rc |= 16;
2497         }
2498         return rc;
2499 }
2500
2501 /*
2502  * slow path service functions
2503  */
2504
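/* Handle newly asserted attention bits: mask them in the AEU, update the
 * attention state and service the hard-wired (NIG/GPIO/timer) sources */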
2505 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2506 {
2507         int port = BP_PORT(bp);
2508         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2509                        COMMAND_REG_ATTN_BITS_SET);
2510         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2511                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2512         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2513                                        NIG_REG_MASK_INTERRUPT_PORT0;
2514         u32 aeu_mask;
2515         u32 nig_mask = 0;
2516
2517         if (bp->attn_state & asserted)
2518                 BNX2X_ERR("IGU ERROR\n");
2519
2520         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2521         aeu_mask = REG_RD(bp, aeu_addr);
2522
2523         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2524            aeu_mask, asserted);
2525         aeu_mask &= ~(asserted & 0xff);
2526         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2527
2528         REG_WR(bp, aeu_addr, aeu_mask);
2529         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2530
2531         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2532         bp->attn_state |= asserted;
2533         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2534
2535         if (asserted & ATTN_HARD_WIRED_MASK) {
2536                 if (asserted & ATTN_NIG_FOR_FUNC) {
2537
2538                         bnx2x_acquire_phy_lock(bp);
2539
2540                         /* save nig interrupt mask */
2541                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2542                         REG_WR(bp, nig_int_mask_addr, 0);
2543
2544                         bnx2x_link_attn(bp);
2545
2546                         /* handle unicore attn? */
2547                 }
2548                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2549                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2550
2551                 if (asserted & GPIO_2_FUNC)
2552                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2553
2554                 if (asserted & GPIO_3_FUNC)
2555                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2556
2557                 if (asserted & GPIO_4_FUNC)
2558                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2559
2560                 if (port == 0) {
2561                         if (asserted & ATTN_GENERAL_ATTN_1) {
2562                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2563                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2564                         }
2565                         if (asserted & ATTN_GENERAL_ATTN_2) {
2566                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2567                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2568                         }
2569                         if (asserted & ATTN_GENERAL_ATTN_3) {
2570                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2571                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2572                         }
2573                 } else {
2574                         if (asserted & ATTN_GENERAL_ATTN_4) {
2575                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2576                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2577                         }
2578                         if (asserted & ATTN_GENERAL_ATTN_5) {
2579                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2580                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2581                         }
2582                         if (asserted & ATTN_GENERAL_ATTN_6) {
2583                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2584                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2585                         }
2586                 }
2587
2588         } /* if hardwired */
2589
2590         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2591            asserted, hc_addr);
2592         REG_WR(bp, hc_addr, asserted);
2593
2594         /* now set back the mask */
2595         if (asserted & ATTN_NIG_FOR_FUNC) {
2596                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2597                 bnx2x_release_phy_lock(bp);
2598         }
2599 }
2600
2601 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2602 {
2603         int port = BP_PORT(bp);
2604         int reg_offset;
2605         u32 val;
2606
2607         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2608                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2609
2610         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2611
2612                 val = REG_RD(bp, reg_offset);
2613                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2614                 REG_WR(bp, reg_offset, val);
2615
2616                 BNX2X_ERR("SPIO5 hw attention\n");
2617
2618                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2619                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2620                         /* Fan failure attention */
2621
2622                         /* The PHY reset is controlled by GPIO 1 */
2623                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2624                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2625                         /* Low power mode is controlled by GPIO 2 */
2626                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2627                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2628                         /* mark the failure */
2629                         bp->link_params.ext_phy_config &=
2630                                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2631                         bp->link_params.ext_phy_config |=
2632                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2633                         SHMEM_WR(bp,
2634                                  dev_info.port_hw_config[port].
2635                                                         external_phy_config,
2636                                  bp->link_params.ext_phy_config);
2637                         /* log the failure */
2638                         printk(KERN_ERR PFX "Fan Failure on Network"
2639                                " Controller %s has caused the driver to"
2640                                " shutdown the card to prevent permanent"
2641                                " damage.  Please contact Dell Support for"
2642                                " assistance\n", bp->dev->name);
2643                         break;
2644
2645                 default:
2646                         break;
2647                 }
2648         }
2649
2650         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2651                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2652                 bnx2x_acquire_phy_lock(bp);
2653                 bnx2x_handle_module_detect_int(&bp->link_params);
2654                 bnx2x_release_phy_lock(bp);
2655         }
2656
2657         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2658
2659                 val = REG_RD(bp, reg_offset);
2660                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2661                 REG_WR(bp, reg_offset, val);
2662
2663                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2664                           (attn & HW_INTERRUT_ASSERT_SET_0));
2665                 bnx2x_panic();
2666         }
2667 }
2668
2669 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2670 {
2671         u32 val;
2672
2673         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2674
2675                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2676                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2677                 /* DORQ discard attention */
2678                 if (val & 0x2)
2679                         BNX2X_ERR("FATAL error from DORQ\n");
2680         }
2681
2682         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2683
2684                 int port = BP_PORT(bp);
2685                 int reg_offset;
2686
2687                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2688                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2689
2690                 val = REG_RD(bp, reg_offset);
2691                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2692                 REG_WR(bp, reg_offset, val);
2693
2694                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2695                           (attn & HW_INTERRUT_ASSERT_SET_1));
2696                 bnx2x_panic();
2697         }
2698 }
2699
2700 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2701 {
2702         u32 val;
2703
2704         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2705
2706                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2707                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2708                 /* CFC error attention */
2709                 if (val & 0x2)
2710                         BNX2X_ERR("FATAL error from CFC\n");
2711         }
2712
2713         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2714
2715                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2716                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2717                 /* RQ_USDMDP_FIFO_OVERFLOW */
2718                 if (val & 0x18000)
2719                         BNX2X_ERR("FATAL error from PXP\n");
2720         }
2721
2722         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2723
2724                 int port = BP_PORT(bp);
2725                 int reg_offset;
2726
2727                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2728                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2729
2730                 val = REG_RD(bp, reg_offset);
2731                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2732                 REG_WR(bp, reg_offset, val);
2733
2734                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2735                           (attn & HW_INTERRUT_ASSERT_SET_2));
2736                 bnx2x_panic();
2737         }
2738 }
2739
2740 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2741 {
2742         u32 val;
2743
2744         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2745
2746                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2747                         int func = BP_FUNC(bp);
2748
2749                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2750                         bnx2x__link_status_update(bp);
2751                         if (SHMEM_RD(bp, func_mb[func].drv_status) &
2752                                                         DRV_STATUS_PMF)
2753                                 bnx2x_pmf_update(bp);
2754
2755                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2756
2757                         BNX2X_ERR("MC assert!\n");
2758                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2759                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2760                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2761                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2762                         bnx2x_panic();
2763
2764                 } else if (attn & BNX2X_MCP_ASSERT) {
2765
2766                         BNX2X_ERR("MCP assert!\n");
2767                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2768                         bnx2x_fw_dump(bp);
2769
2770                 } else
2771                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2772         }
2773
2774         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2775                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2776                 if (attn & BNX2X_GRC_TIMEOUT) {
2777                         val = CHIP_IS_E1H(bp) ?
2778                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2779                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2780                 }
2781                 if (attn & BNX2X_GRC_RSV) {
2782                         val = CHIP_IS_E1H(bp) ?
2783                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2784                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2785                 }
2786                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2787         }
2788 }
2789
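/*
 * Handle attention bits that have just deasserted: read the per-port AEU
 * "after invert" registers under the ALR lock, dispatch the signals of each
 * dynamic attention group whose bit deasserted to the per-register handlers
 * above, then acknowledge the bits toward the HC, re-enable the lines in the
 * per-port AEU mask register and clear them from bp->attn_state.
 */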
2790 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2791 {
2792         struct attn_route attn;
2793         struct attn_route group_mask;
2794         int port = BP_PORT(bp);
2795         int index;
2796         u32 reg_addr;
2797         u32 val;
2798         u32 aeu_mask;
2799
2800         /* need to take the HW lock because the MCP or the other port
2801            might also try to handle this event */
2802         bnx2x_acquire_alr(bp);
2803
2804         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2805         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2806         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2807         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2808         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2809            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2810
2811         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2812                 if (deasserted & (1 << index)) {
2813                         group_mask = bp->attn_group[index];
2814
2815                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2816                            index, group_mask.sig[0], group_mask.sig[1],
2817                            group_mask.sig[2], group_mask.sig[3]);
2818
2819                         bnx2x_attn_int_deasserted3(bp,
2820                                         attn.sig[3] & group_mask.sig[3]);
2821                         bnx2x_attn_int_deasserted1(bp,
2822                                         attn.sig[1] & group_mask.sig[1]);
2823                         bnx2x_attn_int_deasserted2(bp,
2824                                         attn.sig[2] & group_mask.sig[2]);
2825                         bnx2x_attn_int_deasserted0(bp,
2826                                         attn.sig[0] & group_mask.sig[0]);
2827
2828                         if ((attn.sig[0] & group_mask.sig[0] &
2829                                                 HW_PRTY_ASSERT_SET_0) ||
2830                             (attn.sig[1] & group_mask.sig[1] &
2831                                                 HW_PRTY_ASSERT_SET_1) ||
2832                             (attn.sig[2] & group_mask.sig[2] &
2833                                                 HW_PRTY_ASSERT_SET_2))
2834                                 BNX2X_ERR("FATAL HW block parity attention\n");
2835                 }
2836         }
2837
2838         bnx2x_release_alr(bp);
2839
2840         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2841
2842         val = ~deasserted;
2843         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2844            val, reg_addr);
2845         REG_WR(bp, reg_addr, val);
2846
2847         if (~bp->attn_state & deasserted)
2848                 BNX2X_ERR("IGU ERROR\n");
2849
2850         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2851                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2852
2853         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2854         aeu_mask = REG_RD(bp, reg_addr);
2855
2856         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2857            aeu_mask, deasserted);
2858         aeu_mask |= (deasserted & 0xff);
2859         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2860
2861         REG_WR(bp, reg_addr, aeu_mask);
2862         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2863
2864         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2865         bp->attn_state &= ~deasserted;
2866         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2867 }
2868
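/*
 * Derive the attention edges from the default status block: "asserted" are
 * bits that are set in attn_bits but not yet acknowledged nor recorded in
 * bp->attn_state, while "deasserted" are bits that have cleared in attn_bits
 * but are still acknowledged and recorded.  Each edge is handled by its own
 * routine.
 */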
2869 static void bnx2x_attn_int(struct bnx2x *bp)
2870 {
2871         /* read local copy of bits */
2872         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2873                                                                 attn_bits);
2874         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2875                                                                 attn_bits_ack);
2876         u32 attn_state = bp->attn_state;
2877
2878         /* look for changed bits */
2879         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2880         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
2881
2882         DP(NETIF_MSG_HW,
2883            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2884            attn_bits, attn_ack, asserted, deasserted);
2885
2886         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2887                 BNX2X_ERR("BAD attention state\n");
2888
2889         /* handle bits that were raised */
2890         if (asserted)
2891                 bnx2x_attn_int_asserted(bp, asserted);
2892
2893         if (deasserted)
2894                 bnx2x_attn_int_deasserted(bp, deasserted);
2895 }
2896
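/*
 * Slow path work item (runs on bnx2x_wq): bail out while interrupts are
 * disabled (intr_sem raised), handle HW attentions when the default status
 * block indicates a change, and acknowledge every storm index, re-enabling
 * the IGU interrupt only with the final ack.
 */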
2897 static void bnx2x_sp_task(struct work_struct *work)
2898 {
2899         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2900         u16 status;
2901
2902
2903         /* Return here if interrupt is disabled */
2904         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2905                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2906                 return;
2907         }
2908
2909         status = bnx2x_update_dsb_idx(bp);
2910 /*      if (status == 0)                                     */
2911 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2912
2913         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2914
2915         /* HW attentions */
2916         if (status & 0x1)
2917                 bnx2x_attn_int(bp);
2918
2919         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2920                      IGU_INT_NOP, 1);
2921         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2922                      IGU_INT_NOP, 1);
2923         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2924                      IGU_INT_NOP, 1);
2925         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2926                      IGU_INT_NOP, 1);
2927         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2928                      IGU_INT_ENABLE, 1);
2929
2930 }
2931
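/*
 * MSI-X vector dedicated to the slow path: acknowledge the default status
 * block with interrupts disabled in the IGU and defer the actual work to
 * sp_task on bnx2x_wq.
 */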
2932 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2933 {
2934         struct net_device *dev = dev_instance;
2935         struct bnx2x *bp = netdev_priv(dev);
2936
2937         /* Return here if interrupt is disabled */
2938         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2939                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2940                 return IRQ_HANDLED;
2941         }
2942
2943         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2944
2945 #ifdef BNX2X_STOP_ON_ERROR
2946         if (unlikely(bp->panic))
2947                 return IRQ_HANDLED;
2948 #endif
2949
2950         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2951
2952         return IRQ_HANDLED;
2953 }
2954
2955 /* end of slow path */
2956
2957 /* Statistics */
2958
2959 /****************************************************************************
2960 * Macros
2961 ****************************************************************************/
2962
2963 /* sum[hi:lo] += add[hi:lo] */
2964 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2965         do { \
2966                 s_lo += a_lo; \
2967                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2968         } while (0)
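/*
 * For example, adding a = 0x00000000:00000002 to s = 0x00000000:ffffffff
 * wraps s_lo to 0x00000001; the (s_lo < a_lo) test detects the wrap and
 * carries 1 into s_hi, giving 0x00000001:00000001.
 */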
2969
2970 /* difference = minuend - subtrahend */
2971 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2972         do { \
2973                 if (m_lo < s_lo) { \
2974                         /* underflow */ \
2975                         d_hi = m_hi - s_hi; \
2976                         if (d_hi > 0) { \
2977                                 /* we can borrow 1 from the high dword */
2978                                 d_hi--; \
2979                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2980                         } else { \
2981                                 /* m_hi <= s_hi */ \
2982                                 d_hi = 0; \
2983                                 d_lo = 0; \
2984                         } \
2985                 } else { \
2986                         /* m_lo >= s_lo */ \
2987                         if (m_hi < s_hi) { \
2988                                 d_hi = 0; \
2989                                 d_lo = 0; \
2990                         } else { \
2991                                 /* m_hi >= s_hi */ \
2992                                 d_hi = m_hi - s_hi; \
2993                                 d_lo = m_lo - s_lo; \
2994                         } \
2995                 } \
2996         } while (0)
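/*
 * Note that DIFF_64 clamps the result to 0:0 whenever the subtrahend exceeds
 * the minuend instead of wrapping; a borrow from the high dword is taken only
 * when m_hi > s_hi, e.g. 0x00000001:00000000 - 0x00000000:00000001 yields
 * 0x00000000:ffffffff.
 */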
2997
2998 #define UPDATE_STAT64(s, t) \
2999         do { \
3000                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3001                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3002                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3003                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3004                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3005                        pstats->mac_stx[1].t##_lo, diff.lo); \
3006         } while (0)
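/*
 * UPDATE_STAT64 keeps two copies of each counter: mac_stx[0] caches the
 * latest raw MAC snapshot (new->s), while mac_stx[1] accumulates the running
 * total by adding the delta between consecutive snapshots.
 */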
3007
3008 #define UPDATE_STAT64_NIG(s, t) \
3009         do { \
3010                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3011                         diff.lo, new->s##_lo, old->s##_lo); \
3012                 ADD_64(estats->t##_hi, diff.hi, \
3013                        estats->t##_lo, diff.lo); \
3014         } while (0)
3015
3016 /* sum[hi:lo] += add */
3017 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3018         do { \
3019                 s_lo += a; \
3020                 s_hi += (s_lo < a) ? 1 : 0; \
3021         } while (0)
3022
3023 #define UPDATE_EXTEND_STAT(s) \
3024         do { \
3025                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3026                               pstats->mac_stx[1].s##_lo, \
3027                               new->s); \
3028         } while (0)
3029
3030 #define UPDATE_EXTEND_TSTAT(s, t) \
3031         do { \
3032                 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3033                 old_tclient->s = tclient->s; \
3034                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3035         } while (0)
3036
3037 #define UPDATE_EXTEND_USTAT(s, t) \
3038         do { \
3039                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3040                 old_uclient->s = uclient->s; \
3041                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3042         } while (0)
3043
3044 #define UPDATE_EXTEND_XSTAT(s, t) \
3045         do { \
3046                 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3047                 old_xclient->s = xclient->s; \
3048                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3049         } while (0)
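/*
 * The UPDATE_EXTEND_{T,U,X}STAT macros share one pattern: compute the 32-bit
 * delta between the storm's per-client counter and the previously cached
 * value (unsigned arithmetic keeps the delta correct across a counter wrap),
 * refresh the cache and extend the delta into the 64-bit qstats counter.
 */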
3050
3051 /* minuend -= subtrahend */
3052 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3053         do { \
3054                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3055         } while (0)
3056
3057 /* minuend[hi:lo] -= subtrahend */
3058 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3059         do { \
3060                 SUB_64(m_hi, 0, m_lo, s); \
3061         } while (0)
3062
3063 #define SUB_EXTEND_USTAT(s, t) \
3064         do { \
3065                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3066                 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3067         } while (0)
3068
3069 /*
3070  * General service functions
3071  */
3072
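/*
 * hiref points at a {hi, lo} pair of u32s; on 64-bit kernels the full value
 * is returned, on 32-bit kernels only the low 32 bits fit in a long.
 */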
3073 static inline long bnx2x_hilo(u32 *hiref)
3074 {
3075         u32 lo = *(hiref + 1);
3076 #if (BITS_PER_LONG == 64)
3077         u32 hi = *hiref;
3078
3079         return HILO_U64(hi, lo);
3080 #else
3081         return lo;
3082 #endif
3083 }
3084
3085 /*
3086  * Init service functions
3087  */
3088
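/*
 * Ask the storms for a fresh statistics snapshot: post an ETH_STAT_QUERY
 * ramrod carrying the bitmap of client IDs to collect.  stats_pending blocks
 * a new query until bnx2x_storm_stats_update() consumes the previous one,
 * and spq_left is given back because the statistics ramrod uses its own
 * dedicated SPQ slot.
 */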
3089 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3090 {
3091         if (!bp->stats_pending) {
3092                 struct eth_query_ramrod_data ramrod_data = {0};
3093                 int i, rc;
3094
3095                 ramrod_data.drv_counter = bp->stats_counter++;
3096                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3097                 for_each_queue(bp, i)
3098                         ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3099
3100                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3101                                    ((u32 *)&ramrod_data)[1],
3102                                    ((u32 *)&ramrod_data)[0], 0);
3103                 if (rc == 0) {
3104                         /* stats ramrod has its own slot on the spq */
3105                         bp->spq_left++;
3106                         bp->stats_pending = 1;
3107                 }
3108         }
3109 }
3110
3111 static void bnx2x_stats_init(struct bnx2x *bp)
3112 {
3113         int port = BP_PORT(bp);
3114         int i;
3115
3116         bp->stats_pending = 0;
3117         bp->executer_idx = 0;
3118         bp->stats_counter = 0;
3119
3120         /* port stats */
3121         if (!BP_NOMCP(bp))
3122                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3123         else
3124                 bp->port.port_stx = 0;
3125         DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3126
3127         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3128         bp->port.old_nig_stats.brb_discard =
3129                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3130         bp->port.old_nig_stats.brb_truncate =
3131                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3132         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3133                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3134         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3135                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3136
3137         /* function stats */
3138         for_each_queue(bp, i) {
3139                 struct bnx2x_fastpath *fp = &bp->fp[i];
3140
3141                 memset(&fp->old_tclient, 0,
3142                        sizeof(struct tstorm_per_client_stats));
3143                 memset(&fp->old_uclient, 0,
3144                        sizeof(struct ustorm_per_client_stats));
3145                 memset(&fp->old_xclient, 0,
3146                        sizeof(struct xstorm_per_client_stats));
3147                 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3148         }
3149
3150         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3151         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3152
3153         bp->stats_state = STATS_STATE_DISABLED;
3154         if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3155                 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3156 }
3157
3158 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3159 {
3160         struct dmae_command *dmae = &bp->stats_dmae;
3161         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3162
3163         *stats_comp = DMAE_COMP_VAL;
3164         if (CHIP_REV_IS_SLOW(bp))
3165                 return;
3166
3167         /* loader */
3168         if (bp->executer_idx) {
3169                 int loader_idx = PMF_DMAE_C(bp);
3170
3171                 memset(dmae, 0, sizeof(struct dmae_command));
3172
3173                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3174                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3175                                 DMAE_CMD_DST_RESET |
3176 #ifdef __BIG_ENDIAN
3177                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3178 #else
3179                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3180 #endif
3181                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3182                                                DMAE_CMD_PORT_0) |
3183                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3184                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3185                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3186                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3187                                      sizeof(struct dmae_command) *
3188                                      (loader_idx + 1)) >> 2;
3189                 dmae->dst_addr_hi = 0;
3190                 dmae->len = sizeof(struct dmae_command) >> 2;
3191                 if (CHIP_IS_E1(bp))
3192                         dmae->len--;
3193                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3194                 dmae->comp_addr_hi = 0;
3195                 dmae->comp_val = 1;
3196
3197                 *stats_comp = 0;
3198                 bnx2x_post_dmae(bp, dmae, loader_idx);
3199
3200         } else if (bp->func_stx) {
3201                 *stats_comp = 0;
3202                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3203         }
3204 }
3205
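/*
 * Wait for the pending DMAE chain to complete by polling the stats_comp word
 * for DMAE_COMP_VAL (up to 10 x msleep(1)); a timeout is only logged and the
 * function always returns 1 so the caller carries on.
 */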
3206 static int bnx2x_stats_comp(struct bnx2x *bp)
3207 {
3208         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3209         int cnt = 10;
3210
3211         might_sleep();
3212         while (*stats_comp != DMAE_COMP_VAL) {
3213                 if (!cnt) {
3214                         BNX2X_ERR("timeout waiting for stats to finish\n");
3215                         break;
3216                 }
3217                 cnt--;
3218                 msleep(1);
3219         }
3220         return 1;
3221 }
3222
3223 /*
3224  * Statistics service functions
3225  */
3226
3227 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3228 {
3229         struct dmae_command *dmae;
3230         u32 opcode;
3231         int loader_idx = PMF_DMAE_C(bp);
3232         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3233
3234         /* sanity */
3235         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3236                 BNX2X_ERR("BUG!\n");
3237                 return;
3238         }
3239
3240         bp->executer_idx = 0;
3241
3242         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3243                   DMAE_CMD_C_ENABLE |
3244                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3245 #ifdef __BIG_ENDIAN
3246                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3247 #else
3248                   DMAE_CMD_ENDIANITY_DW_SWAP |
3249 #endif
3250                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3251                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3252
3253         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3254         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3255         dmae->src_addr_lo = bp->port.port_stx >> 2;
3256         dmae->src_addr_hi = 0;
3257         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3258         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3259         dmae->len = DMAE_LEN32_RD_MAX;
3260         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3261         dmae->comp_addr_hi = 0;
3262         dmae->comp_val = 1;
3263
3264         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3265         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3266         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3267         dmae->src_addr_hi = 0;
3268         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3269                                    DMAE_LEN32_RD_MAX * 4);
3270         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3271                                    DMAE_LEN32_RD_MAX * 4);
3272         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3273         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3274         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3275         dmae->comp_val = DMAE_COMP_VAL;
3276
3277         *stats_comp = 0;
3278         bnx2x_hw_stats_post(bp);
3279         bnx2x_stats_comp(bp);
3280 }
3281
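/*
 * Build the PMF's DMAE command chain for one statistics cycle: copy the host
 * port (and, if present, function) stats to the shmem area read by the MCP,
 * then read back the active MAC counters (BMAC or EMAC register blocks) and
 * the NIG counters.  Every command but the last completes through the DMAE
 * loader GO register; the last one writes DMAE_COMP_VAL into stats_comp.
 */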
3282 static void bnx2x_port_stats_init(struct bnx2x *bp)
3283 {
3284         struct dmae_command *dmae;
3285         int port = BP_PORT(bp);
3286         int vn = BP_E1HVN(bp);
3287         u32 opcode;
3288         int loader_idx = PMF_DMAE_C(bp);
3289         u32 mac_addr;
3290         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3291
3292         /* sanity */
3293         if (!bp->link_vars.link_up || !bp->port.pmf) {
3294                 BNX2X_ERR("BUG!\n");
3295                 return;
3296         }
3297
3298         bp->executer_idx = 0;
3299
3300         /* MCP */
3301         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3302                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3303                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3304 #ifdef __BIG_ENDIAN
3305                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3306 #else
3307                   DMAE_CMD_ENDIANITY_DW_SWAP |
3308 #endif
3309                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3310                   (vn << DMAE_CMD_E1HVN_SHIFT));
3311
3312         if (bp->port.port_stx) {
3313
3314                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3315                 dmae->opcode = opcode;
3316                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3317                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3318                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3319                 dmae->dst_addr_hi = 0;
3320                 dmae->len = sizeof(struct host_port_stats) >> 2;
3321                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3322                 dmae->comp_addr_hi = 0;
3323                 dmae->comp_val = 1;
3324         }
3325
3326         if (bp->func_stx) {
3327
3328                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3329                 dmae->opcode = opcode;
3330                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3331                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3332                 dmae->dst_addr_lo = bp->func_stx >> 2;
3333                 dmae->dst_addr_hi = 0;
3334                 dmae->len = sizeof(struct host_func_stats) >> 2;
3335                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3336                 dmae->comp_addr_hi = 0;
3337                 dmae->comp_val = 1;
3338         }
3339
3340         /* MAC */
3341         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3342                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3343                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3344 #ifdef __BIG_ENDIAN
3345                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3346 #else
3347                   DMAE_CMD_ENDIANITY_DW_SWAP |
3348 #endif
3349                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3350                   (vn << DMAE_CMD_E1HVN_SHIFT));
3351
3352         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3353
3354                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3355                                    NIG_REG_INGRESS_BMAC0_MEM);
3356
3357                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3358                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3359                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3360                 dmae->opcode = opcode;
3361                 dmae->src_addr_lo = (mac_addr +
3362                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3363                 dmae->src_addr_hi = 0;
3364                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3365                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3366                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3367                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3368                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3369                 dmae->comp_addr_hi = 0;
3370                 dmae->comp_val = 1;
3371
3372                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3373                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3374                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3375                 dmae->opcode = opcode;
3376                 dmae->src_addr_lo = (mac_addr +
3377                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3378                 dmae->src_addr_hi = 0;
3379                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3380                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3381                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3382                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3383                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3384                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3385                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3386                 dmae->comp_addr_hi = 0;
3387                 dmae->comp_val = 1;
3388
3389         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3390
3391                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3392
3393                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3394                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3395                 dmae->opcode = opcode;
3396                 dmae->src_addr_lo = (mac_addr +
3397                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3398                 dmae->src_addr_hi = 0;
3399                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3400                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3401                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3402                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3403                 dmae->comp_addr_hi = 0;
3404                 dmae->comp_val = 1;
3405
3406                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3407                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3408                 dmae->opcode = opcode;
3409                 dmae->src_addr_lo = (mac_addr +
3410                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3411                 dmae->src_addr_hi = 0;
3412                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3413                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3414                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3415                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3416                 dmae->len = 1;
3417                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3418                 dmae->comp_addr_hi = 0;
3419                 dmae->comp_val = 1;
3420
3421                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3422                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3423                 dmae->opcode = opcode;
3424                 dmae->src_addr_lo = (mac_addr +
3425                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3426                 dmae->src_addr_hi = 0;
3427                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3428                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3429                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3430                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3431                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3432                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3433                 dmae->comp_addr_hi = 0;
3434                 dmae->comp_val = 1;
3435         }
3436
3437         /* NIG */
3438         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3439         dmae->opcode = opcode;
3440         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3441                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3442         dmae->src_addr_hi = 0;
3443         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3444         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3445         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3446         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3447         dmae->comp_addr_hi = 0;
3448         dmae->comp_val = 1;
3449
3450         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3451         dmae->opcode = opcode;
3452         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3453                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3454         dmae->src_addr_hi = 0;
3455         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3456                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3457         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3458                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3459         dmae->len = (2*sizeof(u32)) >> 2;
3460         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3461         dmae->comp_addr_hi = 0;
3462         dmae->comp_val = 1;
3463
3464         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3465         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3466                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3467                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3468 #ifdef __BIG_ENDIAN
3469                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3470 #else
3471                         DMAE_CMD_ENDIANITY_DW_SWAP |
3472 #endif
3473                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3474                         (vn << DMAE_CMD_E1HVN_SHIFT));
3475         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3476                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3477         dmae->src_addr_hi = 0;
3478         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3479                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3480         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3481                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3482         dmae->len = (2*sizeof(u32)) >> 2;
3483         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3484         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3485         dmae->comp_val = DMAE_COMP_VAL;
3486
3487         *stats_comp = 0;
3488 }
3489
3490 static void bnx2x_func_stats_init(struct bnx2x *bp)
3491 {
3492         struct dmae_command *dmae = &bp->stats_dmae;
3493         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3494
3495         /* sanity */
3496         if (!bp->func_stx) {
3497                 BNX2X_ERR("BUG!\n");
3498                 return;
3499         }
3500
3501         bp->executer_idx = 0;
3502         memset(dmae, 0, sizeof(struct dmae_command));
3503
3504         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3505                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3506                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3507 #ifdef __BIG_ENDIAN
3508                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3509 #else
3510                         DMAE_CMD_ENDIANITY_DW_SWAP |
3511 #endif
3512                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3513                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3514         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3515         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3516         dmae->dst_addr_lo = bp->func_stx >> 2;
3517         dmae->dst_addr_hi = 0;
3518         dmae->len = sizeof(struct host_func_stats) >> 2;
3519         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3520         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3521         dmae->comp_val = DMAE_COMP_VAL;
3522
3523         *stats_comp = 0;
3524 }
3525
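/*
 * (Re)arm a statistics cycle: the PMF rebuilds the full port DMAE chain,
 * a non-PMF function with a valid func_stx programs only its function stats
 * command, and then both the DMAE transfer and the storm query are kicked off.
 */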
3526 static void bnx2x_stats_start(struct bnx2x *bp)
3527 {
3528         if (bp->port.pmf)
3529                 bnx2x_port_stats_init(bp);
3530
3531         else if (bp->func_stx)
3532                 bnx2x_func_stats_init(bp);
3533
3534         bnx2x_hw_stats_post(bp);
3535         bnx2x_storm_stats_post(bp);
3536 }
3537
3538 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3539 {
3540         bnx2x_stats_comp(bp);
3541         bnx2x_stats_pmf_update(bp);
3542         bnx2x_stats_start(bp);
3543 }
3544
3545 static void bnx2x_stats_restart(struct bnx2x *bp)
3546 {
3547         bnx2x_stats_comp(bp);
3548         bnx2x_stats_start(bp);
3549 }
3550
3551 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3552 {
3553         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3554         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3555         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3556         struct {
3557                 u32 lo;
3558                 u32 hi;
3559         } diff;
3560
3561         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3562         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3563         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3564         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3565         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3566         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3567         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3568         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3569         UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3570         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3571         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3572         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3573         UPDATE_STAT64(tx_stat_gt127,
3574                                 tx_stat_etherstatspkts65octetsto127octets);
3575         UPDATE_STAT64(tx_stat_gt255,
3576                                 tx_stat_etherstatspkts128octetsto255octets);
3577         UPDATE_STAT64(tx_stat_gt511,
3578                                 tx_stat_etherstatspkts256octetsto511octets);
3579         UPDATE_STAT64(tx_stat_gt1023,
3580                                 tx_stat_etherstatspkts512octetsto1023octets);
3581         UPDATE_STAT64(tx_stat_gt1518,
3582                                 tx_stat_etherstatspkts1024octetsto1522octets);
3583         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3584         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3585         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3586         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3587         UPDATE_STAT64(tx_stat_gterr,
3588                                 tx_stat_dot3statsinternalmactransmiterrors);
3589         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3590
3591         estats->pause_frames_received_hi =
3592                                 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3593         estats->pause_frames_received_lo =
3594                                 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3595
3596         estats->pause_frames_sent_hi =
3597                                 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3598         estats->pause_frames_sent_lo =
3599                                 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3600 }
3601
3602 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3603 {
3604         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3605         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3606         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3607
3608         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3609         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3610         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3611         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3612         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3613         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3614         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3615         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3616         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3617         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3618         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3619         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3620         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3621         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3622         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3623         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3624         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3625         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3626         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3627         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3628         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3629         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3630         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3631         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3632         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3633         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3634         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3635         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3636         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3637         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3638         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3639
3640         estats->pause_frames_received_hi =
3641                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3642         estats->pause_frames_received_lo =
3643                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3644         ADD_64(estats->pause_frames_received_hi,
3645                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3646                estats->pause_frames_received_lo,
3647                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3648
3649         estats->pause_frames_sent_hi =
3650                         pstats->mac_stx[1].tx_stat_outxonsent_hi;
3651         estats->pause_frames_sent_lo =
3652                         pstats->mac_stx[1].tx_stat_outxonsent_lo;
3653         ADD_64(estats->pause_frames_sent_hi,
3654                pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3655                estats->pause_frames_sent_lo,
3656                pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3657 }
3658
3659 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3660 {
3661         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3662         struct nig_stats *old = &(bp->port.old_nig_stats);
3663         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3664         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3665         struct {
3666                 u32 lo;
3667                 u32 hi;
3668         } diff;
3669         u32 nig_timer_max;
3670
3671         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3672                 bnx2x_bmac_stats_update(bp);
3673
3674         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3675                 bnx2x_emac_stats_update(bp);
3676
3677         else { /* unreached */
3678                 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3679                 return -1;
3680         }
3681
3682         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3683                       new->brb_discard - old->brb_discard);
3684         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3685                       new->brb_truncate - old->brb_truncate);
3686
3687         UPDATE_STAT64_NIG(egress_mac_pkt0,
3688                                         etherstatspkts1024octetsto1522octets);
3689         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3690
3691         memcpy(old, new, sizeof(struct nig_stats));
3692
3693         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3694                sizeof(struct mac_stx));
3695         estats->brb_drop_hi = pstats->brb_drop_hi;
3696         estats->brb_drop_lo = pstats->brb_drop_lo;
3697
3698         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3699
3700         nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3701         if (nig_timer_max != estats->nig_timer_max) {
3702                 estats->nig_timer_max = nig_timer_max;
3703                 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3704         }
3705
3706         return 0;
3707 }
3708
3709 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3710 {
3711         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3712         struct tstorm_per_port_stats *tport =
3713                                         &stats->tstorm_common.port_statistics;
3714         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3715         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3716         int i;
3717
3718         memset(&(fstats->total_bytes_received_hi), 0,
3719                sizeof(struct host_func_stats) - 2*sizeof(u32));
3720         estats->error_bytes_received_hi = 0;
3721         estats->error_bytes_received_lo = 0;
3722         estats->etherstatsoverrsizepkts_hi = 0;
3723         estats->etherstatsoverrsizepkts_lo = 0;
3724         estats->no_buff_discard_hi = 0;
3725         estats->no_buff_discard_lo = 0;
3726
3727         for_each_queue(bp, i) {
3728                 struct bnx2x_fastpath *fp = &bp->fp[i];
3729                 int cl_id = fp->cl_id;
3730                 struct tstorm_per_client_stats *tclient =
3731                                 &stats->tstorm_common.client_statistics[cl_id];
3732                 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3733                 struct ustorm_per_client_stats *uclient =
3734                                 &stats->ustorm_common.client_statistics[cl_id];
3735                 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3736                 struct xstorm_per_client_stats *xclient =
3737                                 &stats->xstorm_common.client_statistics[cl_id];
3738                 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3739                 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3740                 u32 diff;
3741
3742                 /* are storm stats valid? */
3743                 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3744                                                         bp->stats_counter) {
3745                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3746                            "  xstorm counter (%d) != stats_counter (%d)\n",
3747                            i, xclient->stats_counter, bp->stats_counter);
3748                         return -1;
3749                 }
3750                 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3751                                                         bp->stats_counter) {
3752                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3753                            "  tstorm counter (%d) != stats_counter (%d)\n",
3754                            i, tclient->stats_counter, bp->stats_counter);
3755                         return -2;
3756                 }
3757                 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3758                                                         bp->stats_counter) {
3759                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3760                            "  ustorm counter (%d) != stats_counter (%d)\n",
3761                            i, uclient->stats_counter, bp->stats_counter);
3762                         return -4;
3763                 }
3764
3765                 qstats->total_bytes_received_hi =
3766                 qstats->valid_bytes_received_hi =
3767                                 le32_to_cpu(tclient->total_rcv_bytes.hi);
3768                 qstats->total_bytes_received_lo =
3769                 qstats->valid_bytes_received_lo =
3770                                 le32_to_cpu(tclient->total_rcv_bytes.lo);
3771
3772                 qstats->error_bytes_received_hi =
3773                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
3774                 qstats->error_bytes_received_lo =
3775                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
3776
3777                 ADD_64(qstats->total_bytes_received_hi,
3778                        qstats->error_bytes_received_hi,
3779                        qstats->total_bytes_received_lo,
3780                        qstats->error_bytes_received_lo);
3781
3782                 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3783                                         total_unicast_packets_received);
3784                 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3785                                         total_multicast_packets_received);
3786                 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3787                                         total_broadcast_packets_received);
3788                 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3789                                         etherstatsoverrsizepkts);
3790                 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3791
3792                 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3793                                         total_unicast_packets_received);
3794                 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3795                                         total_multicast_packets_received);
3796                 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3797                                         total_broadcast_packets_received);
3798                 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3799                 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3800                 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3801
3802                 qstats->total_bytes_transmitted_hi =
3803                                 le32_to_cpu(xclient->total_sent_bytes.hi);
3804                 qstats->total_bytes_transmitted_lo =
3805                                 le32_to_cpu(xclient->total_sent_bytes.lo);
3806
3807                 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3808                                         total_unicast_packets_transmitted);
3809                 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3810                                         total_multicast_packets_transmitted);
3811                 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3812                                         total_broadcast_packets_transmitted);
3813
3814                 old_tclient->checksum_discard = tclient->checksum_discard;
3815                 old_tclient->ttl0_discard = tclient->ttl0_discard;
3816
3817                 ADD_64(fstats->total_bytes_received_hi,
3818                        qstats->total_bytes_received_hi,
3819                        fstats->total_bytes_received_lo,
3820                        qstats->total_bytes_received_lo);
3821                 ADD_64(fstats->total_bytes_transmitted_hi,
3822                        qstats->total_bytes_transmitted_hi,
3823                        fstats->total_bytes_transmitted_lo,
3824                        qstats->total_bytes_transmitted_lo);
3825                 ADD_64(fstats->total_unicast_packets_received_hi,
3826                        qstats->total_unicast_packets_received_hi,
3827                        fstats->total_unicast_packets_received_lo,
3828                        qstats->total_unicast_packets_received_lo);
3829                 ADD_64(fstats->total_multicast_packets_received_hi,
3830                        qstats->total_multicast_packets_received_hi,
3831                        fstats->total_multicast_packets_received_lo,
3832                        qstats->total_multicast_packets_received_lo);
3833                 ADD_64(fstats->total_broadcast_packets_received_hi,
3834                        qstats->total_broadcast_packets_received_hi,
3835                        fstats->total_broadcast_packets_received_lo,
3836                        qstats->total_broadcast_packets_received_lo);
3837                 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3838                        qstats->total_unicast_packets_transmitted_hi,
3839                        fstats->total_unicast_packets_transmitted_lo,
3840                        qstats->total_unicast_packets_transmitted_lo);
3841                 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3842                        qstats->total_multicast_packets_transmitted_hi,
3843                        fstats->total_multicast_packets_transmitted_lo,
3844                        qstats->total_multicast_packets_transmitted_lo);
3845                 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3846                        qstats->total_broadcast_packets_transmitted_hi,
3847                        fstats->total_broadcast_packets_transmitted_lo,
3848                        qstats->total_broadcast_packets_transmitted_lo);
3849                 ADD_64(fstats->valid_bytes_received_hi,
3850                        qstats->valid_bytes_received_hi,
3851                        fstats->valid_bytes_received_lo,
3852                        qstats->valid_bytes_received_lo);
3853
3854                 ADD_64(estats->error_bytes_received_hi,
3855                        qstats->error_bytes_received_hi,
3856                        estats->error_bytes_received_lo,
3857                        qstats->error_bytes_received_lo);
3858                 ADD_64(estats->etherstatsoverrsizepkts_hi,
3859                        qstats->etherstatsoverrsizepkts_hi,
3860                        estats->etherstatsoverrsizepkts_lo,
3861                        qstats->etherstatsoverrsizepkts_lo);
3862                 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3863                        estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3864         }
3865
3866         ADD_64(fstats->total_bytes_received_hi,
3867                estats->rx_stat_ifhcinbadoctets_hi,
3868                fstats->total_bytes_received_lo,
3869                estats->rx_stat_ifhcinbadoctets_lo);
3870
3871         memcpy(estats, &(fstats->total_bytes_received_hi),
3872                sizeof(struct host_func_stats) - 2*sizeof(u32));
3873
3874         ADD_64(estats->etherstatsoverrsizepkts_hi,
3875                estats->rx_stat_dot3statsframestoolong_hi,
3876                estats->etherstatsoverrsizepkts_lo,
3877                estats->rx_stat_dot3statsframestoolong_lo);
3878         ADD_64(estats->error_bytes_received_hi,
3879                estats->rx_stat_ifhcinbadoctets_hi,
3880                estats->error_bytes_received_lo,
3881                estats->rx_stat_ifhcinbadoctets_lo);
3882
3883         if (bp->port.pmf) {
3884                 estats->mac_filter_discard =
3885                                 le32_to_cpu(tport->mac_filter_discard);
3886                 estats->xxoverflow_discard =
3887                                 le32_to_cpu(tport->xxoverflow_discard);
3888                 estats->brb_truncate_discard =
3889                                 le32_to_cpu(tport->brb_truncate_discard);
3890                 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3891         }
3892
3893         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3894
3895         bp->stats_pending = 0;
3896
3897         return 0;
3898 }
3899
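/*
 * Fold the accumulated 64-bit driver statistics into the standard
 * net_device_stats fields; bnx2x_hilo() truncates each value to the low
 * 32 bits on 32-bit kernels.
 */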
3900 static void bnx2x_net_stats_update(struct bnx2x *bp)
3901 {
3902         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3903         struct net_device_stats *nstats = &bp->dev->stats;
3904         int i;
3905
3906         nstats->rx_packets =
3907                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3908                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3909                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3910
3911         nstats->tx_packets =
3912                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3913                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3914                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3915
3916         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
3917
3918         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3919
3920         nstats->rx_dropped = estats->mac_discard;
3921         for_each_queue(bp, i)
3922                 nstats->rx_dropped +=
3923                         le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3924
3925         nstats->tx_dropped = 0;
3926
3927         nstats->multicast =
3928                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
3929
3930         nstats->collisions =
3931                 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
3932
3933         nstats->rx_length_errors =
3934                 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3935                 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3936         nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3937                                  bnx2x_hilo(&estats->brb_truncate_hi);
3938         nstats->rx_crc_errors =
3939                 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3940         nstats->rx_frame_errors =
3941                 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3942         nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
3943         nstats->rx_missed_errors = estats->xxoverflow_discard;
3944
3945         nstats->rx_errors = nstats->rx_length_errors +
3946                             nstats->rx_over_errors +
3947                             nstats->rx_crc_errors +
3948                             nstats->rx_frame_errors +
3949                             nstats->rx_fifo_errors +
3950                             nstats->rx_missed_errors;
3951
3952         nstats->tx_aborted_errors =
3953                 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3954                 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3955         nstats->tx_carrier_errors =
3956                 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
3957         nstats->tx_fifo_errors = 0;
3958         nstats->tx_heartbeat_errors = 0;
3959         nstats->tx_window_errors = 0;
3960
3961         nstats->tx_errors = nstats->tx_aborted_errors +
3962                             nstats->tx_carrier_errors +
3963             bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3964 }
3965
3966 static void bnx2x_drv_stats_update(struct bnx2x *bp)
3967 {
3968         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3969         int i;
3970
3971         estats->driver_xoff = 0;
3972         estats->rx_err_discard_pkt = 0;
3973         estats->rx_skb_alloc_failed = 0;
3974         estats->hw_csum_err = 0;
3975         for_each_queue(bp, i) {
3976                 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
3977
3978                 estats->driver_xoff += qstats->driver_xoff;
3979                 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
3980                 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
3981                 estats->hw_csum_err += qstats->hw_csum_err;
3982         }
3983 }
3984
3985 static void bnx2x_stats_update(struct bnx2x *bp)
3986 {
3987         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3988
3989         if (*stats_comp != DMAE_COMP_VAL)
3990                 return;
3991
3992         if (bp->port.pmf)
3993                 bnx2x_hw_stats_update(bp);
3994
3995         if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
3996                 BNX2X_ERR("storm stats were not updated 3 times in a row\n");
3997                 bnx2x_panic();
3998                 return;
3999         }
4000
4001         bnx2x_net_stats_update(bp);
4002         bnx2x_drv_stats_update(bp);
4003
4004         if (bp->msglevel & NETIF_MSG_TIMER) {
4005                 struct tstorm_per_client_stats *old_tclient =
4006                                                         &bp->fp->old_tclient;
4007                 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4008                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4009                 struct net_device_stats *nstats = &bp->dev->stats;
4010                 int i;
4011
4012                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4013                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
4014                                   "  tx pkt (%lx)\n",
4015                        bnx2x_tx_avail(bp->fp),
4016                        le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
4017                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
4018                                   "  rx pkt (%lx)\n",
4019                        (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
4020                              bp->fp->rx_comp_cons),
4021                        le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
4022                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
4023                                   "brb truncate %u\n",
4024                        (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4025                        qstats->driver_xoff,
4026                        estats->brb_drop_lo, estats->brb_truncate_lo);
4027                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
4028                         "packets_too_big_discard %lu  no_buff_discard %lu  "
4029                         "mac_discard %u  mac_filter_discard %u  "
4030                         "xxoverflow_discard %u  brb_truncate_discard %u  "
4031                         "ttl0_discard %u\n",
4032                        le32_to_cpu(old_tclient->checksum_discard),
4033                        bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4034                        bnx2x_hilo(&qstats->no_buff_discard_hi),
4035                        estats->mac_discard, estats->mac_filter_discard,
4036                        estats->xxoverflow_discard, estats->brb_truncate_discard,
4037                        le32_to_cpu(old_tclient->ttl0_discard));
4038
4039                 for_each_queue(bp, i) {
4040                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4041                                bnx2x_fp(bp, i, tx_pkt),
4042                                bnx2x_fp(bp, i, rx_pkt),
4043                                bnx2x_fp(bp, i, rx_calls));
4044                 }
4045         }
4046
4047         bnx2x_hw_stats_post(bp);
4048         bnx2x_storm_stats_post(bp);
4049 }
4050
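/* Prepare the DMAE commands that copy the final port and function statistics
 * from host memory back to their GRC locations (port_stx/func_stx) when
 * statistics collection is being stopped.
 */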
4051 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4052 {
4053         struct dmae_command *dmae;
4054         u32 opcode;
4055         int loader_idx = PMF_DMAE_C(bp);
4056         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4057
4058         bp->executer_idx = 0;
4059
4060         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4061                   DMAE_CMD_C_ENABLE |
4062                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4063 #ifdef __BIG_ENDIAN
4064                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
4065 #else
4066                   DMAE_CMD_ENDIANITY_DW_SWAP |
4067 #endif
4068                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4069                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4070
4071         if (bp->port.port_stx) {
4072
4073                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4074                 if (bp->func_stx)
4075                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4076                 else
4077                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4078                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4079                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4080                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4081                 dmae->dst_addr_hi = 0;
4082                 dmae->len = sizeof(struct host_port_stats) >> 2;
4083                 if (bp->func_stx) {
4084                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4085                         dmae->comp_addr_hi = 0;
4086                         dmae->comp_val = 1;
4087                 } else {
4088                         dmae->comp_addr_lo =
4089                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4090                         dmae->comp_addr_hi =
4091                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4092                         dmae->comp_val = DMAE_COMP_VAL;
4093
4094                         *stats_comp = 0;
4095                 }
4096         }
4097
4098         if (bp->func_stx) {
4099
4100                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4101                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4102                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4103                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4104                 dmae->dst_addr_lo = bp->func_stx >> 2;
4105                 dmae->dst_addr_hi = 0;
4106                 dmae->len = sizeof(struct host_func_stats) >> 2;
4107                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4108                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4109                 dmae->comp_val = DMAE_COMP_VAL;
4110
4111                 *stats_comp = 0;
4112         }
4113 }
4114
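/* Stop statistics collection: wait for the pending DMAE completion, take one
 * last HW (PMF only) and storm update and, if anything was refreshed, update
 * the netdev counters and write the final port stats back to the chip.
 */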
4115 static void bnx2x_stats_stop(struct bnx2x *bp)
4116 {
4117         int update = 0;
4118
4119         bnx2x_stats_comp(bp);
4120
4121         if (bp->port.pmf)
4122                 update = (bnx2x_hw_stats_update(bp) == 0);
4123
4124         update |= (bnx2x_storm_stats_update(bp) == 0);
4125
4126         if (update) {
4127                 bnx2x_net_stats_update(bp);
4128
4129                 if (bp->port.pmf)
4130                         bnx2x_port_stats_stop(bp);
4131
4132                 bnx2x_hw_stats_post(bp);
4133                 bnx2x_stats_comp(bp);
4134         }
4135 }
4136
4137 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4138 {
4139 }
4140
4141 static const struct {
4142         void (*action)(struct bnx2x *bp);
4143         enum bnx2x_stats_state next_state;
4144 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4145 /* state        event   */
4146 {
4147 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4148 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
4149 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4150 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4151 },
4152 {
4153 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
4154 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
4155 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
4156 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
4157 }
4158 };
4159
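/* Dispatch a statistics event: look up the (current state, event) cell in
 * bnx2x_stats_stm, run its action callback and switch to the next state.
 */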
4160 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4161 {
4162         enum bnx2x_stats_state state = bp->stats_state;
4163
4164         bnx2x_stats_stm[state][event].action(bp);
4165         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4166
4167         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4168                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4169                    state, event, bp->stats_state);
4170 }
4171
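/* Periodic driver timer: in poll mode it services the Tx/Rx rings of queue 0,
 * maintains the driver<->MCP heartbeat (pulse sequence) and, while the device
 * is up, triggers a statistics update.
 */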
4172 static void bnx2x_timer(unsigned long data)
4173 {
4174         struct bnx2x *bp = (struct bnx2x *) data;
4175
4176         if (!netif_running(bp->dev))
4177                 return;
4178
4179         if (atomic_read(&bp->intr_sem) != 0)
4180                 goto timer_restart;
4181
4182         if (poll) {
4183                 struct bnx2x_fastpath *fp = &bp->fp[0];
4184                 int rc;
4185
4186                 bnx2x_tx_int(fp);
4187                 rc = bnx2x_rx_int(fp, 1000);
4188         }
4189
4190         if (!BP_NOMCP(bp)) {
4191                 int func = BP_FUNC(bp);
4192                 u32 drv_pulse;
4193                 u32 mcp_pulse;
4194
4195                 ++bp->fw_drv_pulse_wr_seq;
4196                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4197                 /* TBD - add SYSTEM_TIME */
4198                 drv_pulse = bp->fw_drv_pulse_wr_seq;
4199                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4200
4201                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4202                              MCP_PULSE_SEQ_MASK);
4203                 /* The delta between driver pulse and mcp response
4204                  * should be 1 (before mcp response) or 0 (after mcp response)
4205                  */
4206                 if ((drv_pulse != mcp_pulse) &&
4207                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4208                         /* someone lost a heartbeat... */
4209                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4210                                   drv_pulse, mcp_pulse);
4211                 }
4212         }
4213
4214         if ((bp->state == BNX2X_STATE_OPEN) ||
4215             (bp->state == BNX2X_STATE_DISABLED))
4216                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4217
4218 timer_restart:
4219         mod_timer(&bp->timer, jiffies + bp->current_interval);
4220 }
4221
4222 /* end of Statistics */
4223
4224 /* nic init */
4225
4226 /*
4227  * nic init service functions
4228  */
4229
4230 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4231 {
4232         int port = BP_PORT(bp);
4233
4234         bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
4235                         USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4236                         sizeof(struct ustorm_status_block)/4);
4237         bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
4238                         CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4239                         sizeof(struct cstorm_status_block)/4);
4240 }
4241
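/* Set up a per-queue (non-default) status block: write its host DMA address
 * and owning function to USTORM/CSTORM internal memory, disable host
 * coalescing on all of its indices and ACK it towards the IGU with
 * interrupts enabled.
 */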
4242 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4243                           dma_addr_t mapping, int sb_id)
4244 {
4245         int port = BP_PORT(bp);
4246         int func = BP_FUNC(bp);
4247         int index;
4248         u64 section;
4249
4250         /* USTORM */
4251         section = ((u64)mapping) + offsetof(struct host_status_block,
4252                                             u_status_block);
4253         sb->u_status_block.status_block_id = sb_id;
4254
4255         REG_WR(bp, BAR_USTRORM_INTMEM +
4256                USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4257         REG_WR(bp, BAR_USTRORM_INTMEM +
4258                ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4259                U64_HI(section));
4260         REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4261                 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4262
4263         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4264                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4265                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4266
4267         /* CSTORM */
4268         section = ((u64)mapping) + offsetof(struct host_status_block,
4269                                             c_status_block);
4270         sb->c_status_block.status_block_id = sb_id;
4271
4272         REG_WR(bp, BAR_CSTRORM_INTMEM +
4273                CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4274         REG_WR(bp, BAR_CSTRORM_INTMEM +
4275                ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4276                U64_HI(section));
4277         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4278                 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4279
4280         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4281                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4282                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4283
4284         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4285 }
4286
4287 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4288 {
4289         int func = BP_FUNC(bp);
4290
4291         bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR +
4292                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4293                         sizeof(struct tstorm_def_status_block)/4);
4294         bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
4295                         USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4296                         sizeof(struct ustorm_def_status_block)/4);
4297         bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
4298                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4299                         sizeof(struct cstorm_def_status_block)/4);
4300         bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR +
4301                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4302                         sizeof(struct xstorm_def_status_block)/4);
4303 }
4304
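/* Set up the default status block: program the attention groups and the
 * attention message address for this port, then write the U/C/T/X-storm
 * default status block addresses for this function and disable host
 * coalescing on their indices.
 */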
4305 static void bnx2x_init_def_sb(struct bnx2x *bp,
4306                               struct host_def_status_block *def_sb,
4307                               dma_addr_t mapping, int sb_id)
4308 {
4309         int port = BP_PORT(bp);
4310         int func = BP_FUNC(bp);
4311         int index, val, reg_offset;
4312         u64 section;
4313
4314         /* ATTN */
4315         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4316                                             atten_status_block);
4317         def_sb->atten_status_block.status_block_id = sb_id;
4318
4319         bp->attn_state = 0;
4320
4321         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4322                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4323
4324         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4325                 bp->attn_group[index].sig[0] = REG_RD(bp,
4326                                                      reg_offset + 0x10*index);
4327                 bp->attn_group[index].sig[1] = REG_RD(bp,
4328                                                reg_offset + 0x4 + 0x10*index);
4329                 bp->attn_group[index].sig[2] = REG_RD(bp,
4330                                                reg_offset + 0x8 + 0x10*index);
4331                 bp->attn_group[index].sig[3] = REG_RD(bp,
4332                                                reg_offset + 0xc + 0x10*index);
4333         }
4334
4335         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4336                              HC_REG_ATTN_MSG0_ADDR_L);
4337
4338         REG_WR(bp, reg_offset, U64_LO(section));
4339         REG_WR(bp, reg_offset + 4, U64_HI(section));
4340
4341         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4342
4343         val = REG_RD(bp, reg_offset);
4344         val |= sb_id;
4345         REG_WR(bp, reg_offset, val);
4346
4347         /* USTORM */
4348         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4349                                             u_def_status_block);
4350         def_sb->u_def_status_block.status_block_id = sb_id;
4351
4352         REG_WR(bp, BAR_USTRORM_INTMEM +
4353                USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4354         REG_WR(bp, BAR_USTRORM_INTMEM +
4355                ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4356                U64_HI(section));
4357         REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4358                 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4359
4360         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4361                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4362                          USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4363
4364         /* CSTORM */
4365         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4366                                             c_def_status_block);
4367         def_sb->c_def_status_block.status_block_id = sb_id;
4368
4369         REG_WR(bp, BAR_CSTRORM_INTMEM +
4370                CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4371         REG_WR(bp, BAR_CSTRORM_INTMEM +
4372                ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4373                U64_HI(section));
4374         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4375                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4376
4377         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4378                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4379                          CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4380
4381         /* TSTORM */
4382         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4383                                             t_def_status_block);
4384         def_sb->t_def_status_block.status_block_id = sb_id;
4385
4386         REG_WR(bp, BAR_TSTRORM_INTMEM +
4387                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4388         REG_WR(bp, BAR_TSTRORM_INTMEM +
4389                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4390                U64_HI(section));
4391         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4392                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4393
4394         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4395                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4396                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4397
4398         /* XSTORM */
4399         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4400                                             x_def_status_block);
4401         def_sb->x_def_status_block.status_block_id = sb_id;
4402
4403         REG_WR(bp, BAR_XSTRORM_INTMEM +
4404                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4405         REG_WR(bp, BAR_XSTRORM_INTMEM +
4406                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4407                U64_HI(section));
4408         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4409                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4410
4411         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4412                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4413                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4414
4415         bp->stats_pending = 0;
4416         bp->set_mac_pending = 0;
4417
4418         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4419 }
4420
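/* Write the Rx/Tx interrupt coalescing timeouts of every queue to the status
 * block timeout registers (the tick values are scaled down by 12 to the HW
 * timeout units); a value of 0 disables host coalescing for that index.
 */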
4421 static void bnx2x_update_coalesce(struct bnx2x *bp)
4422 {
4423         int port = BP_PORT(bp);
4424         int i;
4425
4426         for_each_queue(bp, i) {
4427                 int sb_id = bp->fp[i].sb_id;
4428
4429                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4430                 REG_WR8(bp, BAR_USTRORM_INTMEM +
4431                         USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4432                                                     U_SB_ETH_RX_CQ_INDEX),
4433                         bp->rx_ticks/12);
4434                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4435                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4436                                                      U_SB_ETH_RX_CQ_INDEX),
4437                          bp->rx_ticks ? 0 : 1);
4438
4439                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4440                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4441                         CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4442                                                     C_SB_ETH_TX_CQ_INDEX),
4443                         bp->tx_ticks/12);
4444                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4445                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4446                                                      C_SB_ETH_TX_CQ_INDEX),
4447                          bp->tx_ticks ? 0 : 1);
4448         }
4449 }
4450
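/* Free the first 'last' entries of a queue's TPA (aggregation) skb pool;
 * buffers still in the TPA_START state are DMA-unmapped before being freed.
 */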
4451 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4452                                        struct bnx2x_fastpath *fp, int last)
4453 {
4454         int i;
4455
4456         for (i = 0; i < last; i++) {
4457                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4458                 struct sk_buff *skb = rx_buf->skb;
4459
4460                 if (skb == NULL) {
4461                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4462                         continue;
4463                 }
4464
4465                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4466                         pci_unmap_single(bp->pdev,
4467                                          pci_unmap_addr(rx_buf, mapping),
4468                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4469
4470                 dev_kfree_skb(skb);
4471                 rx_buf->skb = NULL;
4472         }
4473 }
4474
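/* Set up all Rx rings: allocate the per-queue TPA skb pools (when TPA is
 * enabled), chain the "next page" elements of the SGE, BD and CQ rings,
 * pre-allocate SGE pages and Rx skbs and publish the initial producers to
 * the chip.
 */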
4475 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4476 {
4477         int func = BP_FUNC(bp);
4478         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4479                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4480         u16 ring_prod, cqe_ring_prod;
4481         int i, j;
4482
4483         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4484         DP(NETIF_MSG_IFUP,
4485            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4486
4487         if (bp->flags & TPA_ENABLE_FLAG) {
4488
4489                 for_each_rx_queue(bp, j) {
4490                         struct bnx2x_fastpath *fp = &bp->fp[j];
4491
4492                         for (i = 0; i < max_agg_queues; i++) {
4493                                 fp->tpa_pool[i].skb =
4494                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4495                                 if (!fp->tpa_pool[i].skb) {
4496                                         BNX2X_ERR("Failed to allocate TPA "
4497                                                   "skb pool for queue[%d] - "
4498                                                   "disabling TPA on this "
4499                                                   "queue!\n", j);
4500                                         bnx2x_free_tpa_pool(bp, fp, i);
4501                                         fp->disable_tpa = 1;
4502                                         break;
4503                                 }
4504                                 pci_unmap_addr_set((struct sw_rx_bd *)
4505                                                         &fp->tpa_pool[i],
4506                                                    mapping, 0);
4507                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4508                         }
4509                 }
4510         }
4511
4512         for_each_rx_queue(bp, j) {
4513                 struct bnx2x_fastpath *fp = &bp->fp[j];
4514
4515                 fp->rx_bd_cons = 0;
4516                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4517                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4518
4519                 /* "next page" elements initialization */
4520                 /* SGE ring */
4521                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4522                         struct eth_rx_sge *sge;
4523
4524                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4525                         sge->addr_hi =
4526                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4527                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4528                         sge->addr_lo =
4529                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4530                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4531                 }
4532
4533                 bnx2x_init_sge_ring_bit_mask(fp);
4534
4535                 /* RX BD ring */
4536                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4537                         struct eth_rx_bd *rx_bd;
4538
4539                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4540                         rx_bd->addr_hi =
4541                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4542                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4543                         rx_bd->addr_lo =
4544                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4545                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4546                 }
4547
4548                 /* CQ ring */
4549                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4550                         struct eth_rx_cqe_next_page *nextpg;
4551
4552                         nextpg = (struct eth_rx_cqe_next_page *)
4553                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4554                         nextpg->addr_hi =
4555                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4556                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4557                         nextpg->addr_lo =
4558                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4559                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4560                 }
4561
4562                 /* Allocate SGEs and initialize the ring elements */
4563                 for (i = 0, ring_prod = 0;
4564                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4565
4566                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4567                                 BNX2X_ERR("was only able to allocate "
4568                                           "%d rx sges\n", i);
4569                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4570                                 /* Cleanup already allocated elements */
4571                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4572                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4573                                 fp->disable_tpa = 1;
4574                                 ring_prod = 0;
4575                                 break;
4576                         }
4577                         ring_prod = NEXT_SGE_IDX(ring_prod);
4578                 }
4579                 fp->rx_sge_prod = ring_prod;
4580
4581                 /* Allocate BDs and initialize BD ring */
4582                 fp->rx_comp_cons = 0;
4583                 cqe_ring_prod = ring_prod = 0;
4584                 for (i = 0; i < bp->rx_ring_size; i++) {
4585                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4586                                 BNX2X_ERR("was only able to allocate "
4587                                           "%d rx skbs on queue[%d]\n", i, j);
4588                                 fp->eth_q_stats.rx_skb_alloc_failed++;
4589                                 break;
4590                         }
4591                         ring_prod = NEXT_RX_IDX(ring_prod);
4592                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4593                         WARN_ON(ring_prod <= i);
4594                 }
4595
4596                 fp->rx_bd_prod = ring_prod;
4597                 /* must not have more available CQEs than BDs */
4598                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4599                                        cqe_ring_prod);
4600                 fp->rx_pkt = fp->rx_calls = 0;
4601
4602                 /* Warning!
4603                  * This will generate an interrupt (to the TSTORM);
4604                  * it must only be done after the chip is initialized.
4605                  */
4606                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4607                                      fp->rx_sge_prod);
4608                 if (j != 0)
4609                         continue;
4610
4611                 REG_WR(bp, BAR_USTRORM_INTMEM +
4612                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4613                        U64_LO(fp->rx_comp_mapping));
4614                 REG_WR(bp, BAR_USTRORM_INTMEM +
4615                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4616                        U64_HI(fp->rx_comp_mapping));
4617         }
4618 }
4619
4620 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4621 {
4622         int i, j;
4623
4624         for_each_tx_queue(bp, j) {
4625                 struct bnx2x_fastpath *fp = &bp->fp[j];
4626
4627                 for (i = 1; i <= NUM_TX_RINGS; i++) {
4628                         struct eth_tx_bd *tx_bd =
4629                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4630
4631                         tx_bd->addr_hi =
4632                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4633                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4634                         tx_bd->addr_lo =
4635                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4636                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4637                 }
4638
4639                 fp->tx_pkt_prod = 0;
4640                 fp->tx_pkt_cons = 0;
4641                 fp->tx_bd_prod = 0;
4642                 fp->tx_bd_cons = 0;
4643                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4644                 fp->tx_pkt = 0;
4645         }
4646 }
4647
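/* Initialize the slow path (SPQ) ring: reset the producer index and write
 * the ring base address and producer to XSTORM fast memory.
 */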
4648 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4649 {
4650         int func = BP_FUNC(bp);
4651
4652         spin_lock_init(&bp->spq_lock);
4653
4654         bp->spq_left = MAX_SPQ_PENDING;
4655         bp->spq_prod_idx = 0;
4656         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4657         bp->spq_prod_bd = bp->spq;
4658         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4659
4660         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4661                U64_LO(bp->spq_mapping));
4662         REG_WR(bp,
4663                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4664                U64_HI(bp->spq_mapping));
4665
4666         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4667                bp->spq_prod_idx);
4668 }
4669
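/* Fill the per-connection ETH context of every queue: Rx BD/SGE ring bases
 * and buffer sizes on the USTORM side, Tx BD ring base and doorbell data on
 * the XSTORM side, plus the CDU validation values.
 */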
4670 static void bnx2x_init_context(struct bnx2x *bp)
4671 {
4672         int i;
4673
4674         for_each_queue(bp, i) {
4675                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4676                 struct bnx2x_fastpath *fp = &bp->fp[i];
4677                 u8 cl_id = fp->cl_id;
4678                 u8 sb_id = fp->sb_id;
4679
4680                 context->ustorm_st_context.common.sb_index_numbers =
4681                                                 BNX2X_RX_SB_INDEX_NUM;
4682                 context->ustorm_st_context.common.clientId = cl_id;
4683                 context->ustorm_st_context.common.status_block_id = sb_id;
4684                 context->ustorm_st_context.common.flags =
4685                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4686                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4687                 context->ustorm_st_context.common.statistics_counter_id =
4688                                                 cl_id;
4689                 context->ustorm_st_context.common.mc_alignment_log_size =
4690                                                 BNX2X_RX_ALIGN_SHIFT;
4691                 context->ustorm_st_context.common.bd_buff_size =
4692                                                 bp->rx_buf_size;
4693                 context->ustorm_st_context.common.bd_page_base_hi =
4694                                                 U64_HI(fp->rx_desc_mapping);
4695                 context->ustorm_st_context.common.bd_page_base_lo =
4696                                                 U64_LO(fp->rx_desc_mapping);
4697                 if (!fp->disable_tpa) {
4698                         context->ustorm_st_context.common.flags |=
4699                                 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4700                                  USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4701                         context->ustorm_st_context.common.sge_buff_size =
4702                                 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4703                                          (u32)0xffff);
4704                         context->ustorm_st_context.common.sge_page_base_hi =
4705                                                 U64_HI(fp->rx_sge_mapping);
4706                         context->ustorm_st_context.common.sge_page_base_lo =
4707                                                 U64_LO(fp->rx_sge_mapping);
4708                 }
4709
4710                 context->ustorm_ag_context.cdu_usage =
4711                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4712                                                CDU_REGION_NUMBER_UCM_AG,
4713                                                ETH_CONNECTION_TYPE);
4714
4715                 context->xstorm_st_context.tx_bd_page_base_hi =
4716                                                 U64_HI(fp->tx_desc_mapping);
4717                 context->xstorm_st_context.tx_bd_page_base_lo =
4718                                                 U64_LO(fp->tx_desc_mapping);
4719                 context->xstorm_st_context.db_data_addr_hi =
4720                                                 U64_HI(fp->tx_prods_mapping);
4721                 context->xstorm_st_context.db_data_addr_lo =
4722                                                 U64_LO(fp->tx_prods_mapping);
4723                 context->xstorm_st_context.statistics_data = (cl_id |
4724                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4725                 context->cstorm_st_context.sb_index_number =
4726                                                 C_SB_ETH_TX_CQ_INDEX;
4727                 context->cstorm_st_context.status_block_id = sb_id;
4728
4729                 context->xstorm_ag_context.cdu_reserved =
4730                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4731                                                CDU_REGION_NUMBER_XCM_AG,
4732                                                ETH_CONNECTION_TYPE);
4733         }
4734 }
4735
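/* Populate the RSS indirection table with the client ids of the Rx queues
 * (round-robin); nothing to do when RSS is disabled.
 */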
4736 static void bnx2x_init_ind_table(struct bnx2x *bp)
4737 {
4738         int func = BP_FUNC(bp);
4739         int i;
4740
4741         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4742                 return;
4743
4744         DP(NETIF_MSG_IFUP,
4745            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
4746         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4747                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4748                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4749                         bp->fp->cl_id + (i % bp->num_rx_queues));
4750 }
4751
4752 static void bnx2x_set_client_config(struct bnx2x *bp)
4753 {
4754         struct tstorm_eth_client_config tstorm_client = {0};
4755         int port = BP_PORT(bp);
4756         int i;
4757
4758         tstorm_client.mtu = bp->dev->mtu;
4759         tstorm_client.config_flags =
4760                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4761                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
4762 #ifdef BCM_VLAN
4763         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4764                 tstorm_client.config_flags |=
4765                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
4766                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4767         }
4768 #endif
4769
4770         if (bp->flags & TPA_ENABLE_FLAG) {
4771                 tstorm_client.max_sges_for_packet =
4772                         SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4773                 tstorm_client.max_sges_for_packet =
4774                         ((tstorm_client.max_sges_for_packet +
4775                           PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4776                         PAGES_PER_SGE_SHIFT;
4777
4778                 tstorm_client.config_flags |=
4779                                 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4780         }
4781
4782         for_each_queue(bp, i) {
4783                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4784
4785                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4786                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4787                        ((u32 *)&tstorm_client)[0]);
4788                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4789                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4790                        ((u32 *)&tstorm_client)[1]);
4791         }
4792
4793         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4794            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4795 }
4796
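/* Translate the driver Rx mode (none/normal/all-multi/promisc) into TSTORM
 * MAC filtering masks and write them to internal memory; for any mode other
 * than "none" the per-client configuration is refreshed as well.
 */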
4797 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4798 {
4799         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4800         int mode = bp->rx_mode;
4801         int mask = (1 << BP_L_ID(bp));
4802         int func = BP_FUNC(bp);
4803         int i;
4804
4805         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
4806
4807         switch (mode) {
4808         case BNX2X_RX_MODE_NONE: /* no Rx */
4809                 tstorm_mac_filter.ucast_drop_all = mask;
4810                 tstorm_mac_filter.mcast_drop_all = mask;
4811                 tstorm_mac_filter.bcast_drop_all = mask;
4812                 break;
4813
4814         case BNX2X_RX_MODE_NORMAL:
4815                 tstorm_mac_filter.bcast_accept_all = mask;
4816                 break;
4817
4818         case BNX2X_RX_MODE_ALLMULTI:
4819                 tstorm_mac_filter.mcast_accept_all = mask;
4820                 tstorm_mac_filter.bcast_accept_all = mask;
4821                 break;
4822
4823         case BNX2X_RX_MODE_PROMISC:
4824                 tstorm_mac_filter.ucast_accept_all = mask;
4825                 tstorm_mac_filter.mcast_accept_all = mask;
4826                 tstorm_mac_filter.bcast_accept_all = mask;
4827                 break;
4828
4829         default:
4830                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4831                 break;
4832         }
4833
4834         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4835                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4836                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4837                        ((u32 *)&tstorm_mac_filter)[i]);
4838
4839 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4840                    ((u32 *)&tstorm_mac_filter)[i]); */
4841         }
4842
4843         if (mode != BNX2X_RX_MODE_NONE)
4844                 bnx2x_set_client_config(bp);
4845 }
4846
4847 static void bnx2x_init_internal_common(struct bnx2x *bp)
4848 {
4849         int i;
4850
4851         if (bp->flags & TPA_ENABLE_FLAG) {
4852                 struct tstorm_eth_tpa_exist tpa = {0};
4853
4854                 tpa.tpa_exist = 1;
4855
4856                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4857                        ((u32 *)&tpa)[0]);
4858                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4859                        ((u32 *)&tpa)[1]);
4860         }
4861
4862         /* Zero this manually as its initialization is
4863            currently missing in the initTool */
4864         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4865                 REG_WR(bp, BAR_USTRORM_INTMEM +
4866                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4867 }
4868
4869 static void bnx2x_init_internal_port(struct bnx2x *bp)
4870 {
4871         int port = BP_PORT(bp);
4872
4873         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4874         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4875         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4876         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4877 }
4878
4879 /* Calculates the sum of vn_min_rates.
4880    It's needed for further normalizing of the min_rates.
4881    Returns:
4882      sum of vn_min_rates.
4883        or
4884      0 - if all the min_rates are 0.
4885      In the latter case the fairness algorithm should be deactivated.
4886      If not all min_rates are zero then those that are zero will be set to 1.
4887  */
4888 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4889 {
4890         int all_zero = 1;
4891         int port = BP_PORT(bp);
4892         int vn;
4893
4894         bp->vn_weight_sum = 0;
4895         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4896                 int func = 2*vn + port;
4897                 u32 vn_cfg =
4898                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4899                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4900                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4901
4902                 /* Skip hidden vns */
4903                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4904                         continue;
4905
4906                 /* If min rate is zero - set it to 1 */
4907                 if (!vn_min_rate)
4908                         vn_min_rate = DEF_MIN_RATE;
4909                 else
4910                         all_zero = 0;
4911
4912                 bp->vn_weight_sum += vn_min_rate;
4913         }
4914
4915         /* ... only if all min rates are zero - disable fairness */
4916         if (all_zero)
4917                 bp->vn_weight_sum = 0;
4918 }
4919
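/* Per-function internal memory init: RSS and E1HOV configuration, per-client
 * statistics reset, statistics query addresses, maximal TPA aggregation size,
 * dropless flow control thresholds (E1H) and the rate shaping/fairness
 * context in E1H multi-function mode.
 */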
4920 static void bnx2x_init_internal_func(struct bnx2x *bp)
4921 {
4922         struct tstorm_eth_function_common_config tstorm_config = {0};
4923         struct stats_indication_flags stats_flags = {0};
4924         int port = BP_PORT(bp);
4925         int func = BP_FUNC(bp);
4926         int i, j;
4927         u32 offset;
4928         u16 max_agg_size;
4929
4930         if (is_multi(bp)) {
4931                 tstorm_config.config_flags = MULTI_FLAGS(bp);
4932                 tstorm_config.rss_result_mask = MULTI_MASK;
4933         }
4934         if (IS_E1HMF(bp))
4935                 tstorm_config.config_flags |=
4936                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
4937
4938         tstorm_config.leading_client_id = BP_L_ID(bp);
4939
4940         REG_WR(bp, BAR_TSTRORM_INTMEM +
4941                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4942                (*(u32 *)&tstorm_config));
4943
4944         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4945         bnx2x_set_storm_rx_mode(bp);
4946
4947         for_each_queue(bp, i) {
4948                 u8 cl_id = bp->fp[i].cl_id;
4949
4950                 /* reset xstorm per client statistics */
4951                 offset = BAR_XSTRORM_INTMEM +
4952                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4953                 for (j = 0;
4954                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4955                         REG_WR(bp, offset + j*4, 0);
4956
4957                 /* reset tstorm per client statistics */
4958                 offset = BAR_TSTRORM_INTMEM +
4959                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4960                 for (j = 0;
4961                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4962                         REG_WR(bp, offset + j*4, 0);
4963
4964                 /* reset ustorm per client statistics */
4965                 offset = BAR_USTRORM_INTMEM +
4966                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4967                 for (j = 0;
4968                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4969                         REG_WR(bp, offset + j*4, 0);
4970         }
4971
4972         /* Init statistics related context */
4973         stats_flags.collect_eth = 1;
4974
4975         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4976                ((u32 *)&stats_flags)[0]);
4977         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4978                ((u32 *)&stats_flags)[1]);
4979
4980         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4981                ((u32 *)&stats_flags)[0]);
4982         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4983                ((u32 *)&stats_flags)[1]);
4984
4985         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
4986                ((u32 *)&stats_flags)[0]);
4987         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
4988                ((u32 *)&stats_flags)[1]);
4989
4990         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4991                ((u32 *)&stats_flags)[0]);
4992         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4993                ((u32 *)&stats_flags)[1]);
4994
4995         REG_WR(bp, BAR_XSTRORM_INTMEM +
4996                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4997                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4998         REG_WR(bp, BAR_XSTRORM_INTMEM +
4999                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5000                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5001
5002         REG_WR(bp, BAR_TSTRORM_INTMEM +
5003                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5004                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5005         REG_WR(bp, BAR_TSTRORM_INTMEM +
5006                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5007                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5008
5009         REG_WR(bp, BAR_USTRORM_INTMEM +
5010                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5011                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5012         REG_WR(bp, BAR_USTRORM_INTMEM +
5013                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5014                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5015
5016         if (CHIP_IS_E1H(bp)) {
5017                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5018                         IS_E1HMF(bp));
5019                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5020                         IS_E1HMF(bp));
5021                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5022                         IS_E1HMF(bp));
5023                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5024                         IS_E1HMF(bp));
5025
5026                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5027                          bp->e1hov);
5028         }
5029
5030         /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
5031         max_agg_size =
5032                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5033                           SGE_PAGE_SIZE * PAGES_PER_SGE),
5034                     (u32)0xffff);
5035         for_each_rx_queue(bp, i) {
5036                 struct bnx2x_fastpath *fp = &bp->fp[i];
5037
5038                 REG_WR(bp, BAR_USTRORM_INTMEM +
5039                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5040                        U64_LO(fp->rx_comp_mapping));
5041                 REG_WR(bp, BAR_USTRORM_INTMEM +
5042                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5043                        U64_HI(fp->rx_comp_mapping));
5044
5045                 REG_WR16(bp, BAR_USTRORM_INTMEM +
5046                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5047                          max_agg_size);
5048         }
5049
5050         /* dropless flow control */
5051         if (CHIP_IS_E1H(bp)) {
5052                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5053
5054                 rx_pause.bd_thr_low = 250;
5055                 rx_pause.cqe_thr_low = 250;
5056                 rx_pause.cos = 1;
5057                 rx_pause.sge_thr_low = 0;
5058                 rx_pause.bd_thr_high = 350;
5059                 rx_pause.cqe_thr_high = 350;
5060                 rx_pause.sge_thr_high = 0;
5061
5062                 for_each_rx_queue(bp, i) {
5063                         struct bnx2x_fastpath *fp = &bp->fp[i];
5064
5065                         if (!fp->disable_tpa) {
5066                                 rx_pause.sge_thr_low = 150;
5067                                 rx_pause.sge_thr_high = 250;
5068                         }
5069
5070
5071                         offset = BAR_USTRORM_INTMEM +
5072                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5073                                                                    fp->cl_id);
5074                         for (j = 0;
5075                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5076                              j++)
5077                                 REG_WR(bp, offset + j*4,
5078                                        ((u32 *)&rx_pause)[j]);
5079                 }
5080         }
5081
5082         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5083
5084         /* Init rate shaping and fairness contexts */
5085         if (IS_E1HMF(bp)) {
5086                 int vn;
5087
5088                 /* During init there is no active link.
5089                    Until link is up, set link rate to 10Gbps */
5090                 bp->link_vars.line_speed = SPEED_10000;
5091                 bnx2x_init_port_minmax(bp);
5092
5093                 bnx2x_calc_vn_weight_sum(bp);
5094
5095                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5096                         bnx2x_init_vn_minmax(bp, 2*vn + port);
5097
5098                 /* Enable rate shaping and fairness */
5099                 bp->cmng.flags.cmng_enables =
5100                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5101                 if (bp->vn_weight_sum)
5102                         bp->cmng.flags.cmng_enables |=
5103                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5104                 else
5105                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes;"
5106                            "  fairness will be disabled\n");
5107         } else {
5108                 /* rate shaping and fairness are disabled */
5109                 DP(NETIF_MSG_IFUP,
5110                    "single function mode  minmax will be disabled\n");
5111         }
5112
5113
5114         /* Store it to internal memory */
5115         if (bp->port.pmf)
5116                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5117                         REG_WR(bp, BAR_XSTRORM_INTMEM +
5118                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5119                                ((u32 *)(&bp->cmng))[i]);
5120 }
5121
5122 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5123 {
5124         switch (load_code) {
5125         case FW_MSG_CODE_DRV_LOAD_COMMON:
5126                 bnx2x_init_internal_common(bp);
5127                 /* no break */
5128
5129         case FW_MSG_CODE_DRV_LOAD_PORT:
5130                 bnx2x_init_internal_port(bp);
5131                 /* no break */
5132
5133         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5134                 bnx2x_init_internal_func(bp);
5135                 break;
5136
5137         default:
5138                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5139                 break;
5140         }
5141 }
5142
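/* Top-level NIC init: set up the per-queue and default status blocks,
 * coalescing, Rx/Tx/slow-path rings, connection contexts, internal memory
 * and the indirection table, then enable interrupts.
 */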
5143 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5144 {
5145         int i;
5146
5147         for_each_queue(bp, i) {
5148                 struct bnx2x_fastpath *fp = &bp->fp[i];
5149
5150                 fp->bp = bp;
5151                 fp->state = BNX2X_FP_STATE_CLOSED;
5152                 fp->index = i;
5153                 fp->cl_id = BP_L_ID(bp) + i;
5154                 fp->sb_id = fp->cl_id;
5155                 DP(NETIF_MSG_IFUP,
5156                    "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
5157                    i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5158                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5159                               fp->sb_id);
5160                 bnx2x_update_fpsb_idx(fp);
5161         }
5162
5163         /* ensure status block indices were read */
5164         rmb();
5165
5166
5167         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5168                           DEF_SB_ID);
5169         bnx2x_update_dsb_idx(bp);
5170         bnx2x_update_coalesce(bp);
5171         bnx2x_init_rx_rings(bp);
5172         bnx2x_init_tx_ring(bp);
5173         bnx2x_init_sp_ring(bp);
5174         bnx2x_init_context(bp);
5175         bnx2x_init_internal(bp, load_code);
5176         bnx2x_init_ind_table(bp);
5177         bnx2x_stats_init(bp);
5178
5179         /* At this point, we are ready for interrupts */
5180         atomic_set(&bp->intr_sem, 0);
5181
5182         /* flush all before enabling interrupts */
5183         mb();
5184         mmiowb();
5185
5186         bnx2x_int_enable(bp);
5187 }
5188
5189 /* end of nic init */
5190
5191 /*
5192  * gzip service functions
5193  */
5194
5195 static int bnx2x_gunzip_init(struct bnx2x *bp)
5196 {
5197         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5198                                               &bp->gunzip_mapping);
5199         if (bp->gunzip_buf  == NULL)
5200                 goto gunzip_nomem1;
5201
5202         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5203         if (bp->strm  == NULL)
5204                 goto gunzip_nomem2;
5205
5206         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5207                                       GFP_KERNEL);
5208         if (bp->strm->workspace == NULL)
5209                 goto gunzip_nomem3;
5210
5211         return 0;
5212
5213 gunzip_nomem3:
5214         kfree(bp->strm);
5215         bp->strm = NULL;
5216
5217 gunzip_nomem2:
5218         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5219                             bp->gunzip_mapping);
5220         bp->gunzip_buf = NULL;
5221
5222 gunzip_nomem1:
5223         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5224                " decompression\n", bp->dev->name);
5225         return -ENOMEM;
5226 }
5227
5228 static void bnx2x_gunzip_end(struct bnx2x *bp)
5229 {
5230         kfree(bp->strm->workspace);
5231
5232         kfree(bp->strm);
5233         bp->strm = NULL;
5234
5235         if (bp->gunzip_buf) {
5236                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5237                                     bp->gunzip_mapping);
5238                 bp->gunzip_buf = NULL;
5239         }
5240 }
5241
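/* Decompress a gzip-wrapped blob into bp->gunzip_buf: validate the gzip
 * header, skip the optional file name field and raw-inflate the payload;
 * bp->gunzip_outlen is left in units of 32-bit words.
 */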
5242 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5243 {
5244         int n, rc;
5245
5246         /* check gzip header */
5247         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5248                 BNX2X_ERR("Bad gzip header\n");
5249                 return -EINVAL;
5250         }
5251
5252         n = 10;
5253
5254 #define FNAME                           0x8
5255
5256         if (zbuf[3] & FNAME)
5257                 while ((zbuf[n++] != 0) && (n < len));
5258
5259         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5260         bp->strm->avail_in = len - n;
5261         bp->strm->next_out = bp->gunzip_buf;
5262         bp->strm->avail_out = FW_BUF_SIZE;
5263
5264         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5265         if (rc != Z_OK)
5266                 return rc;
5267
5268         rc = zlib_inflate(bp->strm, Z_FINISH);
5269         if ((rc != Z_OK) && (rc != Z_STREAM_END))
5270                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5271                        bp->dev->name, bp->strm->msg);
5272
5273         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5274         if (bp->gunzip_outlen & 0x3)
5275                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5276                                     " gunzip_outlen (%d) not aligned\n",
5277                        bp->dev->name, bp->gunzip_outlen);
5278         bp->gunzip_outlen >>= 2;
5279
5280         zlib_inflateEnd(bp->strm);
5281
5282         if (rc == Z_STREAM_END)
5283                 return 0;
5284
5285         return rc;
5286 }
5287
5288 /* nic load/unload */
5289
5290 /*
5291  * General service functions
5292  */
5293
5294 /* send a NIG loopback debug packet */
5295 static void bnx2x_lb_pckt(struct bnx2x *bp)
5296 {
5297         u32 wb_write[3];
5298
5299         /* Ethernet source and destination addresses */
5300         wb_write[0] = 0x55555555;
5301         wb_write[1] = 0x55555555;
5302         wb_write[2] = 0x20;             /* SOP */
5303         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5304
5305         /* NON-IP protocol */
5306         wb_write[0] = 0x09000000;
5307         wb_write[1] = 0x55555555;
5308         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
5309         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5310 }
5311
5312 /* Some of the internal memories
5313  * are not directly readable from the driver,
5314  * so to test them we send debug packets.
5315  */
5316 static int bnx2x_int_mem_test(struct bnx2x *bp)
5317 {
5318         int factor;
5319         int count, i;
5320         u32 val = 0;
5321
5322         if (CHIP_REV_IS_FPGA(bp))
5323                 factor = 120;
5324         else if (CHIP_REV_IS_EMUL(bp))
5325                 factor = 200;
5326         else
5327                 factor = 1;
5328
5329         DP(NETIF_MSG_HW, "start part1\n");
5330
5331         /* Disable inputs of parser neighbor blocks */
5332         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5333         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5334         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5335         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5336
5337         /*  Write 0 to parser credits for CFC search request */
5338         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5339
5340         /* send Ethernet packet */
5341         bnx2x_lb_pckt(bp);
5342
5343         /* TODO: do I reset the NIG statistics? */
5344         /* Wait until NIG register shows 1 packet of size 0x10 */
5345         count = 1000 * factor;
5346         while (count) {
5347
5348                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5349                 val = *bnx2x_sp(bp, wb_data[0]);
5350                 if (val == 0x10)
5351                         break;
5352
5353                 msleep(10);
5354                 count--;
5355         }
5356         if (val != 0x10) {
5357                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5358                 return -1;
5359         }
5360
5361         /* Wait until PRS register shows 1 packet */
5362         count = 1000 * factor;
5363         while (count) {
5364                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5365                 if (val == 1)
5366                         break;
5367
5368                 msleep(10);
5369                 count--;
5370         }
5371         if (val != 0x1) {
5372                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5373                 return -2;
5374         }
5375
5376         /* Reset and init BRB, PRS */
5377         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5378         msleep(50);
5379         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5380         msleep(50);
5381         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5382         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5383
5384         DP(NETIF_MSG_HW, "part2\n");
5385
5386         /* Disable inputs of parser neighbor blocks */
5387         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5388         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5389         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5390         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5391
5392         /* Write 0 to parser credits for CFC search request */
5393         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5394
5395         /* send 10 Ethernet packets */
5396         for (i = 0; i < 10; i++)
5397                 bnx2x_lb_pckt(bp);
5398
5399         /* Wait until NIG register shows 10 + 1
5400            packets of size 11*0x10 = 0xb0 */
5401         count = 1000 * factor;
5402         while (count) {
5403
5404                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5405                 val = *bnx2x_sp(bp, wb_data[0]);
5406                 if (val == 0xb0)
5407                         break;
5408
5409                 msleep(10);
5410                 count--;
5411         }
5412         if (val != 0xb0) {
5413                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5414                 return -3;
5415         }
5416
5417         /* Wait until PRS register shows 2 packets */
5418         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5419         if (val != 2)
5420                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5421
5422         /* Write 1 to parser credits for CFC search request */
5423         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5424
5425         /* Wait until PRS register shows 3 packets */
5426         msleep(10 * factor);
5427         /* Check that the PRS register now shows 3 packets */
5428         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5429         if (val != 3)
5430                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5431
5432         /* clear NIG EOP FIFO */
5433         for (i = 0; i < 11; i++)
5434                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5435         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5436         if (val != 1) {
5437                 BNX2X_ERR("clear of NIG failed\n");
5438                 return -4;
5439         }
5440
5441         /* Reset and init BRB, PRS, NIG */
5442         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5443         msleep(50);
5444         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5445         msleep(50);
5446         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5447         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5448 #ifndef BCM_ISCSI
5449         /* set NIC mode */
5450         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5451 #endif
5452
5453         /* Enable inputs of parser neighbor blocks */
5454         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5455         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5456         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5457         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5458
5459         DP(NETIF_MSG_HW, "done\n");
5460
5461         return 0; /* OK */
5462 }
5463
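/* Enable attentions from the HW blocks by clearing their interrupt mask
 * registers (the PBF mask keeps bits 3 and 4 set, i.e. masked).
 */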
5464 static void enable_blocks_attention(struct bnx2x *bp)
5465 {
5466         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5467         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5468         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5469         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5470         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5471         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5472         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5473         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5474         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5475 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5476 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5477         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5478         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5479         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5480 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5481 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5482         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5483         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5484         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5485         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5486 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5487 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5488         if (CHIP_REV_IS_FPGA(bp))
5489                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5490         else
5491                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5492         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5493         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5494         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5495 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5496 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5497         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5498         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5499 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5500         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3 and 4 masked */
5501 }
5502
5503
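/* Assert the reset of the blocks shared by both ports by clearing bits in
 * the MISC reset registers; bnx2x_init_common() releases them again through
 * the corresponding SET registers.
 */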
5504 static void bnx2x_reset_common(struct bnx2x *bp)
5505 {
5506         /* reset_common */
5507         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5508                0xd3ffff7f);
5509         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5510 }
5511
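/* One-time initialization of the HW blocks that are common to both ports
 * (COMMON_STAGE).
 */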
5512 static int bnx2x_init_common(struct bnx2x *bp)
5513 {
5514         u32 val, i;
5515
5516         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
5517
5518         bnx2x_reset_common(bp);
5519         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5520         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5521
5522         bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
5523         if (CHIP_IS_E1H(bp))
5524                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5525
5526         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5527         msleep(30);
5528         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5529
5530         bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
5531         if (CHIP_IS_E1(bp)) {
5532                 /* enable HW interrupt from PXP on USDM overflow
5533                    bit 16 on INT_MASK_0 */
5534                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5535         }
5536
5537         bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
5538         bnx2x_init_pxp(bp);
5539
5540 #ifdef __BIG_ENDIAN
5541         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5542         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5543         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5544         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5545         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5546         /* make sure this value is 0 */
5547         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5548
5549 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5550         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5551         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5552         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5553         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5554 #endif
5555
5556         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5557 #ifdef BCM_ISCSI
5558         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5559         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5560         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5561 #endif
5562
5563         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5564                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5565
5566         /* let the HW do its magic ... */
5567         msleep(100);
5568         /* finish PXP init */
5569         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5570         if (val != 1) {
5571                 BNX2X_ERR("PXP2 CFG failed\n");
5572                 return -EBUSY;
5573         }
5574         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5575         if (val != 1) {
5576                 BNX2X_ERR("PXP2 RD_INIT failed\n");
5577                 return -EBUSY;
5578         }
5579
5580         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5581         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5582
5583         bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
5584
5585         /* clean the DMAE memory */
5586         bp->dmae_ready = 1;
5587         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5588
5589         bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
5590         bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
5591         bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
5592         bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
5593
5594         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5595         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5596         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5597         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5598
5599         bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
5600         /* soft reset pulse */
5601         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5602         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5603
5604 #ifdef BCM_ISCSI
5605         bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
5606 #endif
5607
5608         bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
5609         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5610         if (!CHIP_REV_IS_SLOW(bp)) {
5611                 /* enable hw interrupt from doorbell Q */
5612                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5613         }
5614
5615         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5616         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5617         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5618         /* set NIC mode */
5619         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5620         if (CHIP_IS_E1H(bp))
5621                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5622
5623         bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5624         bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5625         bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5626         bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
5627
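        /* zero the internal memory of each of the four STORM processors */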
5628         bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5629         bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5630         bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5631         bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5632
5633         bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5634         bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5635         bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5636         bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
5637
5638         /* sync semi rtc */
5639         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5640                0x80000000);
5641         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5642                0x80000000);
5643
5644         bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5645         bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5646         bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
5647
5648         REG_WR(bp, SRC_REG_SOFT_RST, 1);
5649         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5650                 REG_WR(bp, i, 0xc0cac01a);
5651                 /* TODO: replace with something meaningful */
5652         }
5653         bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
5654         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5655
5656         if (sizeof(union cdu_context) != 1024)
5657                 /* we currently assume that a context is 1024 bytes */
5658                 printk(KERN_ALERT PFX "please adjust the size of"
5659                        " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5660
5661         bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
5662         val = (4 << 24) + (0 << 12) + 1024;
5663         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5664         if (CHIP_IS_E1(bp)) {
5665                 /* !!! fix pxp client credit until excel update */
5666                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5667                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5668         }
5669
5670         bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
5671         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5672         /* enable context validation interrupt from CFC */
5673         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5674
5675         /* set the thresholds to prevent CFC/CDU race */
5676         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5677
5678         bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
5679         bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
5680
5681         /* PXPCS COMMON comes here */
5682         bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
5683         /* Reset PCIE errors for debug */
5684         REG_WR(bp, 0x2814, 0xffffffff);
5685         REG_WR(bp, 0x3820, 0xffffffff);
5686
5687         /* EMAC0 COMMON comes here */
5688         bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
5689         /* EMAC1 COMMON comes here */
5690         bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
5691         /* DBU COMMON comes here */
5692         bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
5693         /* DBG COMMON comes here */
5694         bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
5695
5696         bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
5697         if (CHIP_IS_E1H(bp)) {
5698                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5699                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5700         }
5701
5702         if (CHIP_REV_IS_SLOW(bp))
5703                 msleep(200);
5704
5705         /* finish CFC init */
5706         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5707         if (val != 1) {
5708                 BNX2X_ERR("CFC LL_INIT failed\n");
5709                 return -EBUSY;
5710         }
5711         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5712         if (val != 1) {
5713                 BNX2X_ERR("CFC AC_INIT failed\n");
5714                 return -EBUSY;
5715         }
5716         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5717         if (val != 1) {
5718                 BNX2X_ERR("CFC CAM_INIT failed\n");
5719                 return -EBUSY;
5720         }
5721         REG_WR(bp, CFC_REG_DEBUG0, 0);
5722
5723         /* read NIG statistic
5724            to see if this is our first up since powerup */
5725         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5726         val = *bnx2x_sp(bp, wb_data[0]);
5727
5728         /* do internal memory self test */
5729         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5730                 BNX2X_ERR("internal mem self test failed\n");
5731                 return -EBUSY;
5732         }
5733
5734         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5735         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5736         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5737         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5738                 bp->port.need_hw_lock = 1;
5739                 break;
5740
5741         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5742                 /* Fan failure is indicated by SPIO 5 */
5743                 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5744                                MISC_REGISTERS_SPIO_INPUT_HI_Z);
5745
5746                 /* set to active low mode */
5747                 val = REG_RD(bp, MISC_REG_SPIO_INT);
5748                 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5749                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5750                 REG_WR(bp, MISC_REG_SPIO_INT, val);
5751
5752                 /* enable interrupt to signal the IGU */
5753                 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5754                 val |= (1 << MISC_REGISTERS_SPIO_5);
5755                 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5756                 break;
5757
5758         default:
5759                 break;
5760         }
5761
5762         /* clear PXP2 attentions */
5763         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5764
5765         enable_blocks_attention(bp);
5766
5767         if (!BP_NOMCP(bp)) {
5768                 bnx2x_acquire_phy_lock(bp);
5769                 bnx2x_common_init_phy(bp, bp->common.shmem_base);