665ed36a0d4883a1d5baa754820dd6aea00bacb5
[linux-2.6.git] / drivers / net / bnx2x_main.c
1 /* bnx2x_main.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2009 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h>  /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
51 #include <linux/io.h>
52
53
54 #include "bnx2x.h"
55 #include "bnx2x_init.h"
56 #include "bnx2x_init_ops.h"
57 #include "bnx2x_dump.h"
58
59 #define DRV_MODULE_VERSION      "1.48.114-1"
60 #define DRV_MODULE_RELDATE      "2009/07/29"
61 #define BNX2X_BC_VER            0x040200
62
63 #include <linux/firmware.h>
64 #include "bnx2x_fw_file_hdr.h"
65 /* FW files */
66 #define FW_FILE_PREFIX_E1               "bnx2x-e1-"
67 #define FW_FILE_PREFIX_E1H              "bnx2x-e1h-"
68
69 /* Time in jiffies before concluding the transmitter is hung */
70 #define TX_TIMEOUT              (5*HZ)
71
72 static char version[] __devinitdata =
73         "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
74         DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
75
76 MODULE_AUTHOR("Eliezer Tamir");
77 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
78 MODULE_LICENSE("GPL");
79 MODULE_VERSION(DRV_MODULE_VERSION);
80
81 static int multi_mode = 1;
82 module_param(multi_mode, int, 0);
83 MODULE_PARM_DESC(multi_mode, " Use per-CPU queues");
84
85 static int disable_tpa;
86 module_param(disable_tpa, int, 0);
87 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
88
89 static int int_mode;
90 module_param(int_mode, int, 0);
91 MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
92
93 static int poll;
94 module_param(poll, int, 0);
95 MODULE_PARM_DESC(poll, " Use polling (for debug)");
96
97 static int mrrs = -1;
98 module_param(mrrs, int, 0);
99 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
100
101 static int debug;
102 module_param(debug, int, 0);
103 MODULE_PARM_DESC(debug, " Default debug msglevel");
104
105 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
106
107 static struct workqueue_struct *bnx2x_wq;
108
109 enum bnx2x_board_type {
110         BCM57710 = 0,
111         BCM57711 = 1,
112         BCM57711E = 2,
113 };
114
115 /* indexed by board_type, above */
116 static struct {
117         char *name;
118 } board_info[] __devinitdata = {
119         { "Broadcom NetXtreme II BCM57710 XGb" },
120         { "Broadcom NetXtreme II BCM57711 XGb" },
121         { "Broadcom NetXtreme II BCM57711E XGb" }
122 };
123
124
125 static const struct pci_device_id bnx2x_pci_tbl[] = {
126         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
127                 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
128         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
129                 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
130         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
131                 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
132         { 0 }
133 };
134
135 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
136
137 /****************************************************************************
138 * General service functions
139 ****************************************************************************/
140
141 /* used only at init
142  * locking is done by mcp
143  */
144 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
145 {
146         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
147         pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
148         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
149                                PCICFG_VENDOR_ID_OFFSET);
150 }
151
152 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
153 {
154         u32 val;
155
156         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
157         pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
158         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
159                                PCICFG_VENDOR_ID_OFFSET);
160
161         return val;
162 }
163
164 static const u32 dmae_reg_go_c[] = {
165         DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
166         DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
167         DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
168         DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
169 };
170
171 /* copy command into DMAE command memory and set DMAE command go */
172 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
173                             int idx)
174 {
175         u32 cmd_offset;
176         int i;
177
178         cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
179         for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
180                 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
181
182                 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
183                    idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
184         }
185         REG_WR(bp, dmae_reg_go_c[idx], 1);
186 }
187
188 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
189                       u32 len32)
190 {
191         struct dmae_command *dmae = &bp->init_dmae;
192         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
193         int cnt = 200;
194
195         if (!bp->dmae_ready) {
196                 u32 *data = bnx2x_sp(bp, wb_data[0]);
197
198                 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
199                    "  using indirect\n", dst_addr, len32);
200                 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
201                 return;
202         }
203
204         mutex_lock(&bp->dmae_mutex);
205
206         memset(dmae, 0, sizeof(struct dmae_command));
207
208         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
209                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
210                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
211 #ifdef __BIG_ENDIAN
212                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
213 #else
214                         DMAE_CMD_ENDIANITY_DW_SWAP |
215 #endif
216                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
217                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
218         dmae->src_addr_lo = U64_LO(dma_addr);
219         dmae->src_addr_hi = U64_HI(dma_addr);
220         dmae->dst_addr_lo = dst_addr >> 2;
221         dmae->dst_addr_hi = 0;
222         dmae->len = len32;
223         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
224         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
225         dmae->comp_val = DMAE_COMP_VAL;
226
227         DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
228            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
229                     "dst_addr [%x:%08x (%08x)]\n"
230            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
231            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
232            dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
233            dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
234         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
235            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
236            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
237
238         *wb_comp = 0;
239
240         bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
241
242         udelay(5);
243
244         while (*wb_comp != DMAE_COMP_VAL) {
245                 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
246
247                 if (!cnt) {
248                         BNX2X_ERR("DMAE timeout!\n");
249                         break;
250                 }
251                 cnt--;
252                 /* adjust delay for emulation/FPGA */
253                 if (CHIP_REV_IS_SLOW(bp))
254                         msleep(100);
255                 else
256                         udelay(5);
257         }
258
259         mutex_unlock(&bp->dmae_mutex);
260 }
261
262 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
263 {
264         struct dmae_command *dmae = &bp->init_dmae;
265         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
266         int cnt = 200;
267
268         if (!bp->dmae_ready) {
269                 u32 *data = bnx2x_sp(bp, wb_data[0]);
270                 int i;
271
272                 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
273                    "  using indirect\n", src_addr, len32);
274                 for (i = 0; i < len32; i++)
275                         data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
276                 return;
277         }
278
279         mutex_lock(&bp->dmae_mutex);
280
281         memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
282         memset(dmae, 0, sizeof(struct dmae_command));
283
284         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
285                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
286                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
287 #ifdef __BIG_ENDIAN
288                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
289 #else
290                         DMAE_CMD_ENDIANITY_DW_SWAP |
291 #endif
292                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
293                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
294         dmae->src_addr_lo = src_addr >> 2;
295         dmae->src_addr_hi = 0;
296         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
297         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
298         dmae->len = len32;
299         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
300         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
301         dmae->comp_val = DMAE_COMP_VAL;
302
303         DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
304            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
305                     "dst_addr [%x:%08x (%08x)]\n"
306            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
307            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
308            dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
309            dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
310
311         *wb_comp = 0;
312
313         bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
314
315         udelay(5);
316
317         while (*wb_comp != DMAE_COMP_VAL) {
318
319                 if (!cnt) {
320                         BNX2X_ERR("DMAE timeout!\n");
321                         break;
322                 }
323                 cnt--;
324                 /* adjust delay for emulation/FPGA */
325                 if (CHIP_REV_IS_SLOW(bp))
326                         msleep(100);
327                 else
328                         udelay(5);
329         }
330         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
331            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
332            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
333
334         mutex_unlock(&bp->dmae_mutex);
335 }
336
337 /* used only for slowpath so not inlined */
338 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
339 {
340         u32 wb_write[2];
341
342         wb_write[0] = val_hi;
343         wb_write[1] = val_lo;
344         REG_WR_DMAE(bp, reg, wb_write, 2);
345 }
346
347 #ifdef USE_WB_RD
348 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
349 {
350         u32 wb_data[2];
351
352         REG_RD_DMAE(bp, reg, wb_data, 2);
353
354         return HILO_U64(wb_data[0], wb_data[1]);
355 }
356 #endif
357
358 static int bnx2x_mc_assert(struct bnx2x *bp)
359 {
360         char last_idx;
361         int i, rc = 0;
362         u32 row0, row1, row2, row3;
363
364         /* XSTORM */
365         last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
366                            XSTORM_ASSERT_LIST_INDEX_OFFSET);
367         if (last_idx)
368                 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
369
370         /* print the asserts */
371         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
372
373                 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
374                               XSTORM_ASSERT_LIST_OFFSET(i));
375                 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
376                               XSTORM_ASSERT_LIST_OFFSET(i) + 4);
377                 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
378                               XSTORM_ASSERT_LIST_OFFSET(i) + 8);
379                 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
380                               XSTORM_ASSERT_LIST_OFFSET(i) + 12);
381
382                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
383                         BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
384                                   " 0x%08x 0x%08x 0x%08x\n",
385                                   i, row3, row2, row1, row0);
386                         rc++;
387                 } else {
388                         break;
389                 }
390         }
391
392         /* TSTORM */
393         last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
394                            TSTORM_ASSERT_LIST_INDEX_OFFSET);
395         if (last_idx)
396                 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
397
398         /* print the asserts */
399         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
400
401                 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
402                               TSTORM_ASSERT_LIST_OFFSET(i));
403                 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
404                               TSTORM_ASSERT_LIST_OFFSET(i) + 4);
405                 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
406                               TSTORM_ASSERT_LIST_OFFSET(i) + 8);
407                 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
408                               TSTORM_ASSERT_LIST_OFFSET(i) + 12);
409
410                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
411                         BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
412                                   " 0x%08x 0x%08x 0x%08x\n",
413                                   i, row3, row2, row1, row0);
414                         rc++;
415                 } else {
416                         break;
417                 }
418         }
419
420         /* CSTORM */
421         last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
422                            CSTORM_ASSERT_LIST_INDEX_OFFSET);
423         if (last_idx)
424                 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
425
426         /* print the asserts */
427         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
428
429                 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
430                               CSTORM_ASSERT_LIST_OFFSET(i));
431                 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
432                               CSTORM_ASSERT_LIST_OFFSET(i) + 4);
433                 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
434                               CSTORM_ASSERT_LIST_OFFSET(i) + 8);
435                 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
436                               CSTORM_ASSERT_LIST_OFFSET(i) + 12);
437
438                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
439                         BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
440                                   " 0x%08x 0x%08x 0x%08x\n",
441                                   i, row3, row2, row1, row0);
442                         rc++;
443                 } else {
444                         break;
445                 }
446         }
447
448         /* USTORM */
449         last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
450                            USTORM_ASSERT_LIST_INDEX_OFFSET);
451         if (last_idx)
452                 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
453
454         /* print the asserts */
455         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
456
457                 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
458                               USTORM_ASSERT_LIST_OFFSET(i));
459                 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
460                               USTORM_ASSERT_LIST_OFFSET(i) + 4);
461                 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
462                               USTORM_ASSERT_LIST_OFFSET(i) + 8);
463                 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
464                               USTORM_ASSERT_LIST_OFFSET(i) + 12);
465
466                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
467                         BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
468                                   " 0x%08x 0x%08x 0x%08x\n",
469                                   i, row3, row2, row1, row0);
470                         rc++;
471                 } else {
472                         break;
473                 }
474         }
475
476         return rc;
477 }
478
479 static void bnx2x_fw_dump(struct bnx2x *bp)
480 {
481         u32 mark, offset;
482         __be32 data[9];
483         int word;
484
485         mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
486         mark = ((mark + 0x3) & ~0x3);
487         printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);
488
489         printk(KERN_ERR PFX);
490         for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
491                 for (word = 0; word < 8; word++)
492                         data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
493                                                   offset + 4*word));
494                 data[8] = 0x0;
495                 printk(KERN_CONT "%s", (char *)data);
496         }
497         for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
498                 for (word = 0; word < 8; word++)
499                         data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
500                                                   offset + 4*word));
501                 data[8] = 0x0;
502                 printk(KERN_CONT "%s", (char *)data);
503         }
504         printk(KERN_ERR PFX "end of fw dump\n");
505 }
506
507 static void bnx2x_panic_dump(struct bnx2x *bp)
508 {
509         int i;
510         u16 j, start, end;
511
512         bp->stats_state = STATS_STATE_DISABLED;
513         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
514
515         BNX2X_ERR("begin crash dump -----------------\n");
516
517         /* Indices */
518         /* Common */
519         BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
520                   "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
521                   "  spq_prod_idx(%u)\n",
522                   bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
523                   bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
524
525         /* Rx */
526         for_each_rx_queue(bp, i) {
527                 struct bnx2x_fastpath *fp = &bp->fp[i];
528
529                 BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
530                           "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
531                           "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
532                           i, fp->rx_bd_prod, fp->rx_bd_cons,
533                           le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
534                           fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
535                 BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
536                           "  fp_u_idx(%x) *sb_u_idx(%x)\n",
537                           fp->rx_sge_prod, fp->last_max_sge,
538                           le16_to_cpu(fp->fp_u_idx),
539                           fp->status_blk->u_status_block.status_block_index);
540         }
541
542         /* Tx */
543         for_each_tx_queue(bp, i) {
544                 struct bnx2x_fastpath *fp = &bp->fp[i];
545                 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
546
547                 BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
548                           "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
549                           i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
550                           fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
551                 BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
552                           "  bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
553                           fp->status_blk->c_status_block.status_block_index,
554                           hw_prods->packets_prod, hw_prods->bds_prod);
555         }
556
557         /* Rings */
558         /* Rx */
559         for_each_rx_queue(bp, i) {
560                 struct bnx2x_fastpath *fp = &bp->fp[i];
561
562                 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
563                 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
564                 for (j = start; j != end; j = RX_BD(j + 1)) {
565                         u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
566                         struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
567
568                         BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
569                                   i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
570                 }
571
572                 start = RX_SGE(fp->rx_sge_prod);
573                 end = RX_SGE(fp->last_max_sge);
574                 for (j = start; j != end; j = RX_SGE(j + 1)) {
575                         u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
576                         struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
577
578                         BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
579                                   i, j, rx_sge[1], rx_sge[0], sw_page->page);
580                 }
581
582                 start = RCQ_BD(fp->rx_comp_cons - 10);
583                 end = RCQ_BD(fp->rx_comp_cons + 503);
584                 for (j = start; j != end; j = RCQ_BD(j + 1)) {
585                         u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
586
587                         BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
588                                   i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
589                 }
590         }
591
592         /* Tx */
593         for_each_tx_queue(bp, i) {
594                 struct bnx2x_fastpath *fp = &bp->fp[i];
595
596                 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
597                 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
598                 for (j = start; j != end; j = TX_BD(j + 1)) {
599                         struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
600
601                         BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
602                                   i, j, sw_bd->skb, sw_bd->first_bd);
603                 }
604
605                 start = TX_BD(fp->tx_bd_cons - 10);
606                 end = TX_BD(fp->tx_bd_cons + 254);
607                 for (j = start; j != end; j = TX_BD(j + 1)) {
608                         u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
609
610                         BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
611                                   i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
612                 }
613         }
614
615         bnx2x_fw_dump(bp);
616         bnx2x_mc_assert(bp);
617         BNX2X_ERR("end crash dump -----------------\n");
618 }
619
620 static void bnx2x_int_enable(struct bnx2x *bp)
621 {
622         int port = BP_PORT(bp);
623         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
624         u32 val = REG_RD(bp, addr);
625         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
626         int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
627
628         if (msix) {
629                 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
630                          HC_CONFIG_0_REG_INT_LINE_EN_0);
631                 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
632                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
633         } else if (msi) {
634                 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
635                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
636                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
637                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
638         } else {
639                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
640                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
641                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
642                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
643
644                 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
645                    val, port, addr);
646
647                 REG_WR(bp, addr, val);
648
649                 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
650         }
651
652         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
653            val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
654
655         REG_WR(bp, addr, val);
656         /*
657          * Ensure that HC_CONFIG is written before leading/trailing edge config
658          */
659         mmiowb();
660         barrier();
661
662         if (CHIP_IS_E1H(bp)) {
663                 /* init leading/trailing edge */
664                 if (IS_E1HMF(bp)) {
665                         val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
666                         if (bp->port.pmf)
667                                 /* enable nig and gpio3 attention */
668                                 val |= 0x1100;
669                 } else
670                         val = 0xffff;
671
672                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
673                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
674         }
675
676         /* Make sure that interrupts are indeed enabled from here on */
677         mmiowb();
678 }
679
680 static void bnx2x_int_disable(struct bnx2x *bp)
681 {
682         int port = BP_PORT(bp);
683         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
684         u32 val = REG_RD(bp, addr);
685
686         val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
687                  HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
688                  HC_CONFIG_0_REG_INT_LINE_EN_0 |
689                  HC_CONFIG_0_REG_ATTN_BIT_EN_0);
690
691         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
692            val, port, addr);
693
694         /* flush all outstanding writes */
695         mmiowb();
696
697         REG_WR(bp, addr, val);
698         if (REG_RD(bp, addr) != val)
699                 BNX2X_ERR("BUG! proper val not read from IGU!\n");
700
701 }
702
703 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
704 {
705         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
706         int i, offset;
707
708         /* disable interrupt handling */
709         atomic_inc(&bp->intr_sem);
710         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
711
712         if (disable_hw)
713                 /* prevent the HW from sending interrupts */
714                 bnx2x_int_disable(bp);
715
716         /* make sure all ISRs are done */
717         if (msix) {
718                 synchronize_irq(bp->msix_table[0].vector);
719                 offset = 1;
720                 for_each_queue(bp, i)
721                         synchronize_irq(bp->msix_table[i + offset].vector);
722         } else
723                 synchronize_irq(bp->pdev->irq);
724
725         /* make sure sp_task is not running */
726         cancel_delayed_work(&bp->sp_task);
727         flush_workqueue(bnx2x_wq);
728 }
729
730 /* fast path */
731
732 /*
733  * General service functions
734  */
735
736 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
737                                 u8 storm, u16 index, u8 op, u8 update)
738 {
739         u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
740                        COMMAND_REG_INT_ACK);
741         struct igu_ack_register igu_ack;
742
743         igu_ack.status_block_index = index;
744         igu_ack.sb_id_and_flags =
745                         ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
746                          (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
747                          (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
748                          (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
749
750         DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
751            (*(u32 *)&igu_ack), hc_addr);
752         REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
753
754         /* Make sure that ACK is written */
755         mmiowb();
756         barrier();
757 }
758
759 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
760 {
761         struct host_status_block *fpsb = fp->status_blk;
762         u16 rc = 0;
763
764         barrier(); /* status block is written to by the chip */
765         if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
766                 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
767                 rc |= 1;
768         }
769         if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
770                 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
771                 rc |= 2;
772         }
773         return rc;
774 }
775
776 static u16 bnx2x_ack_int(struct bnx2x *bp)
777 {
778         u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
779                        COMMAND_REG_SIMD_MASK);
780         u32 result = REG_RD(bp, hc_addr);
781
782         DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
783            result, hc_addr);
784
785         return result;
786 }
787
788
789 /*
790  * fast path service functions
791  */
792
793 static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
794 {
795         u16 tx_cons_sb;
796
797         /* Tell compiler that status block fields can change */
798         barrier();
799         tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
800         return (fp->tx_pkt_cons != tx_cons_sb);
801 }
802
803 static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
804 {
805         /* Tell compiler that consumer and producer can change */
806         barrier();
807         return (fp->tx_pkt_prod != fp->tx_pkt_cons);
808 }
809
810 /* free skb in the packet ring at pos idx
811  * return idx of last bd freed
812  */
813 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
814                              u16 idx)
815 {
816         struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
817         struct eth_tx_bd *tx_bd;
818         struct sk_buff *skb = tx_buf->skb;
819         u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
820         int nbd;
821
822         DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
823            idx, tx_buf, skb);
824
825         /* unmap first bd */
826         DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
827         tx_bd = &fp->tx_desc_ring[bd_idx];
828         pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
829                          BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
830
831         nbd = le16_to_cpu(tx_bd->nbd) - 1;
832         new_cons = nbd + tx_buf->first_bd;
833 #ifdef BNX2X_STOP_ON_ERROR
834         if (nbd > (MAX_SKB_FRAGS + 2)) {
835                 BNX2X_ERR("BAD nbd!\n");
836                 bnx2x_panic();
837         }
838 #endif
839
840         /* Skip a parse bd and the TSO split header bd
841            since they have no mapping */
842         if (nbd)
843                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
844
845         if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
846                                            ETH_TX_BD_FLAGS_TCP_CSUM |
847                                            ETH_TX_BD_FLAGS_SW_LSO)) {
848                 if (--nbd)
849                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
850                 tx_bd = &fp->tx_desc_ring[bd_idx];
851                 /* is this a TSO split header bd? */
852                 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
853                         if (--nbd)
854                                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
855                 }
856         }
857
858         /* now free frags */
859         while (nbd > 0) {
860
861                 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
862                 tx_bd = &fp->tx_desc_ring[bd_idx];
863                 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
864                                BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
865                 if (--nbd)
866                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
867         }
868
869         /* release skb */
870         WARN_ON(!skb);
871         dev_kfree_skb(skb);
872         tx_buf->first_bd = 0;
873         tx_buf->skb = NULL;
874
875         return new_cons;
876 }
877
878 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
879 {
880         s16 used;
881         u16 prod;
882         u16 cons;
883
884         barrier(); /* Tell compiler that prod and cons can change */
885         prod = fp->tx_bd_prod;
886         cons = fp->tx_bd_cons;
887
888         /* NUM_TX_RINGS = number of "next-page" entries
889            It will be used as a threshold */
890         used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
891
892 #ifdef BNX2X_STOP_ON_ERROR
893         WARN_ON(used < 0);
894         WARN_ON(used > fp->bp->tx_ring_size);
895         WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
896 #endif
897
898         return (s16)(fp->bp->tx_ring_size) - used;
899 }
900
901 static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
902 {
903         struct bnx2x *bp = fp->bp;
904         struct netdev_queue *txq;
905         u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
906         int done = 0;
907
908 #ifdef BNX2X_STOP_ON_ERROR
909         if (unlikely(bp->panic))
910                 return;
911 #endif
912
913         txq = netdev_get_tx_queue(bp->dev, fp->index);
914         hw_cons = le16_to_cpu(*fp->tx_cons_sb);
915         sw_cons = fp->tx_pkt_cons;
916
917         while (sw_cons != hw_cons) {
918                 u16 pkt_cons;
919
920                 pkt_cons = TX_BD(sw_cons);
921
922                 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
923
924                 DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
925                    hw_cons, sw_cons, pkt_cons);
926
927 /*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
928                         rmb();
929                         prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
930                 }
931 */
932                 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
933                 sw_cons++;
934                 done++;
935         }
936
937         fp->tx_pkt_cons = sw_cons;
938         fp->tx_bd_cons = bd_cons;
939
940         /* TBD need a thresh? */
941         if (unlikely(netif_tx_queue_stopped(txq))) {
942
943                 __netif_tx_lock(txq, smp_processor_id());
944
945                 /* Need to make the tx_bd_cons update visible to start_xmit()
946                  * before checking for netif_tx_queue_stopped().  Without the
947                  * memory barrier, there is a small possibility that
948                  * start_xmit() will miss it and cause the queue to be stopped
949                  * forever.
950                  */
951                 smp_mb();
952
953                 if ((netif_tx_queue_stopped(txq)) &&
954                     (bp->state == BNX2X_STATE_OPEN) &&
955                     (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
956                         netif_tx_wake_queue(txq);
957
958                 __netif_tx_unlock(txq);
959         }
960 }
961
962
963 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
964                            union eth_rx_cqe *rr_cqe)
965 {
966         struct bnx2x *bp = fp->bp;
967         int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
968         int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
969
970         DP(BNX2X_MSG_SP,
971            "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
972            fp->index, cid, command, bp->state,
973            rr_cqe->ramrod_cqe.ramrod_type);
974
975         bp->spq_left++;
976
977         if (fp->index) {
978                 switch (command | fp->state) {
979                 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
980                                                 BNX2X_FP_STATE_OPENING):
981                         DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
982                            cid);
983                         fp->state = BNX2X_FP_STATE_OPEN;
984                         break;
985
986                 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
987                         DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
988                            cid);
989                         fp->state = BNX2X_FP_STATE_HALTED;
990                         break;
991
992                 default:
993                         BNX2X_ERR("unexpected MC reply (%d)  "
994                                   "fp->state is %x\n", command, fp->state);
995                         break;
996                 }
997                 mb(); /* force bnx2x_wait_ramrod() to see the change */
998                 return;
999         }
1000
1001         switch (command | bp->state) {
1002         case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
1003                 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
1004                 bp->state = BNX2X_STATE_OPEN;
1005                 break;
1006
1007         case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
1008                 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
1009                 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
1010                 fp->state = BNX2X_FP_STATE_HALTED;
1011                 break;
1012
1013         case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
1014                 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
1015                 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
1016                 break;
1017
1018
1019         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
1020         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
1021                 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
1022                 bp->set_mac_pending = 0;
1023                 break;
1024
1025         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
1026                 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
1027                 break;
1028
1029         default:
1030                 BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
1031                           command, bp->state);
1032                 break;
1033         }
1034         mb(); /* force bnx2x_wait_ramrod() to see the change */
1035 }
1036
1037 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1038                                      struct bnx2x_fastpath *fp, u16 index)
1039 {
1040         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1041         struct page *page = sw_buf->page;
1042         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1043
1044         /* Skip "next page" elements */
1045         if (!page)
1046                 return;
1047
1048         pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
1049                        SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1050         __free_pages(page, PAGES_PER_SGE_SHIFT);
1051
1052         sw_buf->page = NULL;
1053         sge->addr_hi = 0;
1054         sge->addr_lo = 0;
1055 }
1056
1057 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1058                                            struct bnx2x_fastpath *fp, int last)
1059 {
1060         int i;
1061
1062         for (i = 0; i < last; i++)
1063                 bnx2x_free_rx_sge(bp, fp, i);
1064 }
1065
1066 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1067                                      struct bnx2x_fastpath *fp, u16 index)
1068 {
1069         struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1070         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1071         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1072         dma_addr_t mapping;
1073
1074         if (unlikely(page == NULL))
1075                 return -ENOMEM;
1076
1077         mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
1078                                PCI_DMA_FROMDEVICE);
1079         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1080                 __free_pages(page, PAGES_PER_SGE_SHIFT);
1081                 return -ENOMEM;
1082         }
1083
1084         sw_buf->page = page;
1085         pci_unmap_addr_set(sw_buf, mapping, mapping);
1086
1087         sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1088         sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1089
1090         return 0;
1091 }
1092
1093 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1094                                      struct bnx2x_fastpath *fp, u16 index)
1095 {
1096         struct sk_buff *skb;
1097         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1098         struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1099         dma_addr_t mapping;
1100
1101         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1102         if (unlikely(skb == NULL))
1103                 return -ENOMEM;
1104
1105         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
1106                                  PCI_DMA_FROMDEVICE);
1107         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1108                 dev_kfree_skb(skb);
1109                 return -ENOMEM;
1110         }
1111
1112         rx_buf->skb = skb;
1113         pci_unmap_addr_set(rx_buf, mapping, mapping);
1114
1115         rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1116         rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1117
1118         return 0;
1119 }
1120
1121 /* note that we are not allocating a new skb,
1122  * we are just moving one from cons to prod
1123  * we are not creating a new mapping,
1124  * so there is no need to check for dma_mapping_error().
1125  */
1126 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1127                                struct sk_buff *skb, u16 cons, u16 prod)
1128 {
1129         struct bnx2x *bp = fp->bp;
1130         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1131         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1132         struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1133         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1134
1135         pci_dma_sync_single_for_device(bp->pdev,
1136                                        pci_unmap_addr(cons_rx_buf, mapping),
1137                                        RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1138
1139         prod_rx_buf->skb = cons_rx_buf->skb;
1140         pci_unmap_addr_set(prod_rx_buf, mapping,
1141                            pci_unmap_addr(cons_rx_buf, mapping));
1142         *prod_bd = *cons_bd;
1143 }
1144
1145 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1146                                              u16 idx)
1147 {
1148         u16 last_max = fp->last_max_sge;
1149
1150         if (SUB_S16(idx, last_max) > 0)
1151                 fp->last_max_sge = idx;
1152 }
1153
1154 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1155 {
1156         int i, j;
1157
1158         for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1159                 int idx = RX_SGE_CNT * i - 1;
1160
1161                 for (j = 0; j < 2; j++) {
1162                         SGE_MASK_CLEAR_BIT(fp, idx);
1163                         idx--;
1164                 }
1165         }
1166 }
1167
1168 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1169                                   struct eth_fast_path_rx_cqe *fp_cqe)
1170 {
1171         struct bnx2x *bp = fp->bp;
1172         u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1173                                      le16_to_cpu(fp_cqe->len_on_bd)) >>
1174                       SGE_PAGE_SHIFT;
1175         u16 last_max, last_elem, first_elem;
1176         u16 delta = 0;
1177         u16 i;
1178
1179         if (!sge_len)
1180                 return;
1181
1182         /* First mark all used pages */
1183         for (i = 0; i < sge_len; i++)
1184                 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1185
1186         DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1187            sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1188
1189         /* Here we assume that the last SGE index is the biggest */
1190         prefetch((void *)(fp->sge_mask));
1191         bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1192
1193         last_max = RX_SGE(fp->last_max_sge);
1194         last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1195         first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1196
1197         /* If ring is not full */
1198         if (last_elem + 1 != first_elem)
1199                 last_elem++;
1200
1201         /* Now update the prod */
1202         for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1203                 if (likely(fp->sge_mask[i]))
1204                         break;
1205
1206                 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1207                 delta += RX_SGE_MASK_ELEM_SZ;
1208         }
1209
1210         if (delta > 0) {
1211                 fp->rx_sge_prod += delta;
1212                 /* clear page-end entries */
1213                 bnx2x_clear_sge_mask_next_elems(fp);
1214         }
1215
1216         DP(NETIF_MSG_RX_STATUS,
1217            "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
1218            fp->last_max_sge, fp->rx_sge_prod);
1219 }
1220
1221 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1222 {
1223         /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1224         memset(fp->sge_mask, 0xff,
1225                (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1226
1227         /* Clear the two last indices in the page to 1:
1228            these are the indices that correspond to the "next" element,
1229            hence will never be indicated and should be removed from
1230            the calculations. */
1231         bnx2x_clear_sge_mask_next_elems(fp);
1232 }
1233
1234 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1235                             struct sk_buff *skb, u16 cons, u16 prod)
1236 {
1237         struct bnx2x *bp = fp->bp;
1238         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1239         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1240         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1241         dma_addr_t mapping;
1242
1243         /* move empty skb from pool to prod and map it */
1244         prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1245         mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1246                                  bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1247         pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1248
1249         /* move partial skb from cons to pool (don't unmap yet) */
1250         fp->tpa_pool[queue] = *cons_rx_buf;
1251
1252         /* mark bin state as start - print error if current state != stop */
1253         if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1254                 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1255
1256         fp->tpa_state[queue] = BNX2X_TPA_START;
1257
1258         /* point prod_bd to new skb */
1259         prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1260         prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1261
1262 #ifdef BNX2X_STOP_ON_ERROR
1263         fp->tpa_queue_used |= (1 << queue);
1264 #ifdef __powerpc64__
1265         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1266 #else
1267         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1268 #endif
1269            fp->tpa_queue_used);
1270 #endif
1271 }
1272
1273 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1274                                struct sk_buff *skb,
1275                                struct eth_fast_path_rx_cqe *fp_cqe,
1276                                u16 cqe_idx)
1277 {
1278         struct sw_rx_page *rx_pg, old_rx_pg;
1279         u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1280         u32 i, frag_len, frag_size, pages;
1281         int err;
1282         int j;
1283
1284         frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1285         pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1286
1287         /* This is needed in order to enable forwarding support */
1288         if (frag_size)
1289                 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1290                                                max(frag_size, (u32)len_on_bd));
1291
1292 #ifdef BNX2X_STOP_ON_ERROR
1293         if (pages >
1294             min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
1295                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1296                           pages, cqe_idx);
1297                 BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
1298                           fp_cqe->pkt_len, len_on_bd);
1299                 bnx2x_panic();
1300                 return -EINVAL;
1301         }
1302 #endif
1303
1304         /* Run through the SGL and compose the fragmented skb */
1305         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1306                 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1307
1308                 /* FW gives the indices of the SGE as if the ring is an array
1309                    (meaning that "next" element will consume 2 indices) */
1310                 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1311                 rx_pg = &fp->rx_page_ring[sge_idx];
1312                 old_rx_pg = *rx_pg;
1313
1314                 /* If we fail to allocate a substitute page, we simply stop
1315                    where we are and drop the whole packet */
1316                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1317                 if (unlikely(err)) {
1318                         fp->eth_q_stats.rx_skb_alloc_failed++;
1319                         return err;
1320                 }
1321
1322                 /* Unmap the page as we r going to pass it to the stack */
1323                 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1324                               SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1325
1326                 /* Add one frag and update the appropriate fields in the skb */
1327                 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1328
1329                 skb->data_len += frag_len;
1330                 skb->truesize += frag_len;
1331                 skb->len += frag_len;
1332
1333                 frag_size -= frag_len;
1334         }
1335
1336         return 0;
1337 }
1338
1339 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1340                            u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1341                            u16 cqe_idx)
1342 {
1343         struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1344         struct sk_buff *skb = rx_buf->skb;
1345         /* alloc new skb */
1346         struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1347
1348         /* Unmap skb in the pool anyway, as we are going to change
1349            pool entry status to BNX2X_TPA_STOP even if new skb allocation
1350            fails. */
1351         pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1352                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1353
1354         if (likely(new_skb)) {
1355                 /* fix ip xsum and give it to the stack */
1356                 /* (no need to map the new skb) */
1357 #ifdef BCM_VLAN
1358                 int is_vlan_cqe =
1359                         (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1360                          PARSING_FLAGS_VLAN);
1361                 int is_not_hwaccel_vlan_cqe =
1362                         (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1363 #endif
1364
1365                 prefetch(skb);
1366                 prefetch(((char *)(skb)) + 128);
1367
1368 #ifdef BNX2X_STOP_ON_ERROR
1369                 if (pad + len > bp->rx_buf_size) {
1370                         BNX2X_ERR("skb_put is about to fail...  "
1371                                   "pad %d  len %d  rx_buf_size %d\n",
1372                                   pad, len, bp->rx_buf_size);
1373                         bnx2x_panic();
1374                         return;
1375                 }
1376 #endif
1377
1378                 skb_reserve(skb, pad);
1379                 skb_put(skb, len);
1380
1381                 skb->protocol = eth_type_trans(skb, bp->dev);
1382                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1383
1384                 {
1385                         struct iphdr *iph;
1386
1387                         iph = (struct iphdr *)skb->data;
1388 #ifdef BCM_VLAN
1389                         /* If there is no Rx VLAN offloading -
1390                            take VLAN tag into an account */
1391                         if (unlikely(is_not_hwaccel_vlan_cqe))
1392                                 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1393 #endif
1394                         iph->check = 0;
1395                         iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1396                 }
1397
1398                 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1399                                          &cqe->fast_path_cqe, cqe_idx)) {
1400 #ifdef BCM_VLAN
1401                         if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1402                             (!is_not_hwaccel_vlan_cqe))
1403                                 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1404                                                 le16_to_cpu(cqe->fast_path_cqe.
1405                                                             vlan_tag));
1406                         else
1407 #endif
1408                                 netif_receive_skb(skb);
1409                 } else {
1410                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1411                            " - dropping packet!\n");
1412                         dev_kfree_skb(skb);
1413                 }
1414
1415
1416                 /* put new skb in bin */
1417                 fp->tpa_pool[queue].skb = new_skb;
1418
1419         } else {
1420                 /* else drop the packet and keep the buffer in the bin */
1421                 DP(NETIF_MSG_RX_STATUS,
1422                    "Failed to allocate new skb - dropping packet!\n");
1423                 fp->eth_q_stats.rx_skb_alloc_failed++;
1424         }
1425
1426         fp->tpa_state[queue] = BNX2X_TPA_STOP;
1427 }
1428
1429 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1430                                         struct bnx2x_fastpath *fp,
1431                                         u16 bd_prod, u16 rx_comp_prod,
1432                                         u16 rx_sge_prod)
1433 {
1434         struct ustorm_eth_rx_producers rx_prods = {0};
1435         int i;
1436
1437         /* Update producers */
1438         rx_prods.bd_prod = bd_prod;
1439         rx_prods.cqe_prod = rx_comp_prod;
1440         rx_prods.sge_prod = rx_sge_prod;
1441
1442         /*
1443          * Make sure that the BD and SGE data is updated before updating the
1444          * producers since FW might read the BD/SGE right after the producer
1445          * is updated.
1446          * This is only applicable for weak-ordered memory model archs such
1447          * as IA-64. The following barrier is also mandatory since FW will
1448          * assumes BDs must have buffers.
1449          */
1450         wmb();
1451
1452         for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1453                 REG_WR(bp, BAR_USTRORM_INTMEM +
1454                        USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1455                        ((u32 *)&rx_prods)[i]);
1456
1457         mmiowb(); /* keep prod updates ordered */
1458
1459         DP(NETIF_MSG_RX_STATUS,
1460            "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
1461            fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1462 }
1463
1464 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1465 {
1466         struct bnx2x *bp = fp->bp;
1467         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1468         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1469         int rx_pkt = 0;
1470
1471 #ifdef BNX2X_STOP_ON_ERROR
1472         if (unlikely(bp->panic))
1473                 return 0;
1474 #endif
1475
1476         /* CQ "next element" is of the size of the regular element,
1477            that's why it's ok here */
1478         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1479         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1480                 hw_comp_cons++;
1481
1482         bd_cons = fp->rx_bd_cons;
1483         bd_prod = fp->rx_bd_prod;
1484         bd_prod_fw = bd_prod;
1485         sw_comp_cons = fp->rx_comp_cons;
1486         sw_comp_prod = fp->rx_comp_prod;
1487
1488         /* Memory barrier necessary as speculative reads of the rx
1489          * buffer can be ahead of the index in the status block
1490          */
1491         rmb();
1492
1493         DP(NETIF_MSG_RX_STATUS,
1494            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1495            fp->index, hw_comp_cons, sw_comp_cons);
1496
1497         while (sw_comp_cons != hw_comp_cons) {
1498                 struct sw_rx_bd *rx_buf = NULL;
1499                 struct sk_buff *skb;
1500                 union eth_rx_cqe *cqe;
1501                 u8 cqe_fp_flags;
1502                 u16 len, pad;
1503
1504                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1505                 bd_prod = RX_BD(bd_prod);
1506                 bd_cons = RX_BD(bd_cons);
1507
1508                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1509                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1510
1511                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1512                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1513                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1514                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1515                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1516                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1517
1518                 /* is this a slowpath msg? */
1519                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1520                         bnx2x_sp_event(fp, cqe);
1521                         goto next_cqe;
1522
1523                 /* this is an rx packet */
1524                 } else {
1525                         rx_buf = &fp->rx_buf_ring[bd_cons];
1526                         skb = rx_buf->skb;
1527                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1528                         pad = cqe->fast_path_cqe.placement_offset;
1529
1530                         /* If CQE is marked both TPA_START and TPA_END
1531                            it is a non-TPA CQE */
1532                         if ((!fp->disable_tpa) &&
1533                             (TPA_TYPE(cqe_fp_flags) !=
1534                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1535                                 u16 queue = cqe->fast_path_cqe.queue_index;
1536
1537                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1538                                         DP(NETIF_MSG_RX_STATUS,
1539                                            "calling tpa_start on queue %d\n",
1540                                            queue);
1541
1542                                         bnx2x_tpa_start(fp, queue, skb,
1543                                                         bd_cons, bd_prod);
1544                                         goto next_rx;
1545                                 }
1546
1547                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1548                                         DP(NETIF_MSG_RX_STATUS,
1549                                            "calling tpa_stop on queue %d\n",
1550                                            queue);
1551
1552                                         if (!BNX2X_RX_SUM_FIX(cqe))
1553                                                 BNX2X_ERR("STOP on none TCP "
1554                                                           "data\n");
1555
1556                                         /* This is a size of the linear data
1557                                            on this skb */
1558                                         len = le16_to_cpu(cqe->fast_path_cqe.
1559                                                                 len_on_bd);
1560                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1561                                                     len, cqe, comp_ring_cons);
1562 #ifdef BNX2X_STOP_ON_ERROR
1563                                         if (bp->panic)
1564                                                 return 0;
1565 #endif
1566
1567                                         bnx2x_update_sge_prod(fp,
1568                                                         &cqe->fast_path_cqe);
1569                                         goto next_cqe;
1570                                 }
1571                         }
1572
1573                         pci_dma_sync_single_for_device(bp->pdev,
1574                                         pci_unmap_addr(rx_buf, mapping),
1575                                                        pad + RX_COPY_THRESH,
1576                                                        PCI_DMA_FROMDEVICE);
1577                         prefetch(skb);
1578                         prefetch(((char *)(skb)) + 128);
1579
1580                         /* is this an error packet? */
1581                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1582                                 DP(NETIF_MSG_RX_ERR,
1583                                    "ERROR  flags %x  rx packet %u\n",
1584                                    cqe_fp_flags, sw_comp_cons);
1585                                 fp->eth_q_stats.rx_err_discard_pkt++;
1586                                 goto reuse_rx;
1587                         }
1588
1589                         /* Since we don't have a jumbo ring
1590                          * copy small packets if mtu > 1500
1591                          */
1592                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1593                             (len <= RX_COPY_THRESH)) {
1594                                 struct sk_buff *new_skb;
1595
1596                                 new_skb = netdev_alloc_skb(bp->dev,
1597                                                            len + pad);
1598                                 if (new_skb == NULL) {
1599                                         DP(NETIF_MSG_RX_ERR,
1600                                            "ERROR  packet dropped "
1601                                            "because of alloc failure\n");
1602                                         fp->eth_q_stats.rx_skb_alloc_failed++;
1603                                         goto reuse_rx;
1604                                 }
1605
1606                                 /* aligned copy */
1607                                 skb_copy_from_linear_data_offset(skb, pad,
1608                                                     new_skb->data + pad, len);
1609                                 skb_reserve(new_skb, pad);
1610                                 skb_put(new_skb, len);
1611
1612                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1613
1614                                 skb = new_skb;
1615
1616                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1617                                 pci_unmap_single(bp->pdev,
1618                                         pci_unmap_addr(rx_buf, mapping),
1619                                                  bp->rx_buf_size,
1620                                                  PCI_DMA_FROMDEVICE);
1621                                 skb_reserve(skb, pad);
1622                                 skb_put(skb, len);
1623
1624                         } else {
1625                                 DP(NETIF_MSG_RX_ERR,
1626                                    "ERROR  packet dropped because "
1627                                    "of alloc failure\n");
1628                                 fp->eth_q_stats.rx_skb_alloc_failed++;
1629 reuse_rx:
1630                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1631                                 goto next_rx;
1632                         }
1633
1634                         skb->protocol = eth_type_trans(skb, bp->dev);
1635
1636                         skb->ip_summed = CHECKSUM_NONE;
1637                         if (bp->rx_csum) {
1638                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1639                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1640                                 else
1641                                         fp->eth_q_stats.hw_csum_err++;
1642                         }
1643                 }
1644
1645                 skb_record_rx_queue(skb, fp->index);
1646 #ifdef BCM_VLAN
1647                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1648                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1649                      PARSING_FLAGS_VLAN))
1650                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1651                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1652                 else
1653 #endif
1654                         netif_receive_skb(skb);
1655
1656
1657 next_rx:
1658                 rx_buf->skb = NULL;
1659
1660                 bd_cons = NEXT_RX_IDX(bd_cons);
1661                 bd_prod = NEXT_RX_IDX(bd_prod);
1662                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1663                 rx_pkt++;
1664 next_cqe:
1665                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1666                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1667
1668                 if (rx_pkt == budget)
1669                         break;
1670         } /* while */
1671
1672         fp->rx_bd_cons = bd_cons;
1673         fp->rx_bd_prod = bd_prod_fw;
1674         fp->rx_comp_cons = sw_comp_cons;
1675         fp->rx_comp_prod = sw_comp_prod;
1676
1677         /* Update producers */
1678         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1679                              fp->rx_sge_prod);
1680
1681         fp->rx_pkt += rx_pkt;
1682         fp->rx_calls++;
1683
1684         return rx_pkt;
1685 }
1686
1687 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1688 {
1689         struct bnx2x_fastpath *fp = fp_cookie;
1690         struct bnx2x *bp = fp->bp;
1691         int index = fp->index;
1692
1693         /* Return here if interrupt is disabled */
1694         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1695                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1696                 return IRQ_HANDLED;
1697         }
1698
1699         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1700            index, fp->sb_id);
1701         bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1702
1703 #ifdef BNX2X_STOP_ON_ERROR
1704         if (unlikely(bp->panic))
1705                 return IRQ_HANDLED;
1706 #endif
1707
1708         prefetch(fp->rx_cons_sb);
1709         prefetch(fp->tx_cons_sb);
1710         prefetch(&fp->status_blk->c_status_block.status_block_index);
1711         prefetch(&fp->status_blk->u_status_block.status_block_index);
1712
1713         napi_schedule(&bnx2x_fp(bp, index, napi));
1714
1715         return IRQ_HANDLED;
1716 }
1717
1718 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1719 {
1720         struct bnx2x *bp = netdev_priv(dev_instance);
1721         u16 status = bnx2x_ack_int(bp);
1722         u16 mask;
1723
1724         /* Return here if interrupt is shared and it's not for us */
1725         if (unlikely(status == 0)) {
1726                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1727                 return IRQ_NONE;
1728         }
1729         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1730
1731         /* Return here if interrupt is disabled */
1732         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1733                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1734                 return IRQ_HANDLED;
1735         }
1736
1737 #ifdef BNX2X_STOP_ON_ERROR
1738         if (unlikely(bp->panic))
1739                 return IRQ_HANDLED;
1740 #endif
1741
1742         mask = 0x2 << bp->fp[0].sb_id;
1743         if (status & mask) {
1744                 struct bnx2x_fastpath *fp = &bp->fp[0];
1745
1746                 prefetch(fp->rx_cons_sb);
1747                 prefetch(fp->tx_cons_sb);
1748                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1749                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1750
1751                 napi_schedule(&bnx2x_fp(bp, 0, napi));
1752
1753                 status &= ~mask;
1754         }
1755
1756
1757         if (unlikely(status & 0x1)) {
1758                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1759
1760                 status &= ~0x1;
1761                 if (!status)
1762                         return IRQ_HANDLED;
1763         }
1764
1765         if (status)
1766                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1767                    status);
1768
1769         return IRQ_HANDLED;
1770 }
1771
1772 /* end of fast path */
1773
1774 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1775
1776 /* Link */
1777
1778 /*
1779  * General service functions
1780  */
1781
1782 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1783 {
1784         u32 lock_status;
1785         u32 resource_bit = (1 << resource);
1786         int func = BP_FUNC(bp);
1787         u32 hw_lock_control_reg;
1788         int cnt;
1789
1790         /* Validating that the resource is within range */
1791         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1792                 DP(NETIF_MSG_HW,
1793                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1794                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1795                 return -EINVAL;
1796         }
1797
1798         if (func <= 5) {
1799                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1800         } else {
1801                 hw_lock_control_reg =
1802                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1803         }
1804
1805         /* Validating that the resource is not already taken */
1806         lock_status = REG_RD(bp, hw_lock_control_reg);
1807         if (lock_status & resource_bit) {
1808                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1809                    lock_status, resource_bit);
1810                 return -EEXIST;
1811         }
1812
1813         /* Try for 5 second every 5ms */
1814         for (cnt = 0; cnt < 1000; cnt++) {
1815                 /* Try to acquire the lock */
1816                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1817                 lock_status = REG_RD(bp, hw_lock_control_reg);
1818                 if (lock_status & resource_bit)
1819                         return 0;
1820
1821                 msleep(5);
1822         }
1823         DP(NETIF_MSG_HW, "Timeout\n");
1824         return -EAGAIN;
1825 }
1826
1827 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1828 {
1829         u32 lock_status;
1830         u32 resource_bit = (1 << resource);
1831         int func = BP_FUNC(bp);
1832         u32 hw_lock_control_reg;
1833
1834         /* Validating that the resource is within range */
1835         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1836                 DP(NETIF_MSG_HW,
1837                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1838                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1839                 return -EINVAL;
1840         }
1841
1842         if (func <= 5) {
1843                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1844         } else {
1845                 hw_lock_control_reg =
1846                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1847         }
1848
1849         /* Validating that the resource is currently taken */
1850         lock_status = REG_RD(bp, hw_lock_control_reg);
1851         if (!(lock_status & resource_bit)) {
1852                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1853                    lock_status, resource_bit);
1854                 return -EFAULT;
1855         }
1856
1857         REG_WR(bp, hw_lock_control_reg, resource_bit);
1858         return 0;
1859 }
1860
1861 /* HW Lock for shared dual port PHYs */
1862 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1863 {
1864         mutex_lock(&bp->port.phy_mutex);
1865
1866         if (bp->port.need_hw_lock)
1867                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1868 }
1869
1870 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1871 {
1872         if (bp->port.need_hw_lock)
1873                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1874
1875         mutex_unlock(&bp->port.phy_mutex);
1876 }
1877
1878 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1879 {
1880         /* The GPIO should be swapped if swap register is set and active */
1881         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1882                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1883         int gpio_shift = gpio_num +
1884                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1885         u32 gpio_mask = (1 << gpio_shift);
1886         u32 gpio_reg;
1887         int value;
1888
1889         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1890                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1891                 return -EINVAL;
1892         }
1893
1894         /* read GPIO value */
1895         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1896
1897         /* get the requested pin value */
1898         if ((gpio_reg & gpio_mask) == gpio_mask)
1899                 value = 1;
1900         else
1901                 value = 0;
1902
1903         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
1904
1905         return value;
1906 }
1907
1908 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1909 {
1910         /* The GPIO should be swapped if swap register is set and active */
1911         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1912                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1913         int gpio_shift = gpio_num +
1914                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1915         u32 gpio_mask = (1 << gpio_shift);
1916         u32 gpio_reg;
1917
1918         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1919                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1920                 return -EINVAL;
1921         }
1922
1923         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1924         /* read GPIO and mask except the float bits */
1925         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1926
1927         switch (mode) {
1928         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1929                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1930                    gpio_num, gpio_shift);
1931                 /* clear FLOAT and set CLR */
1932                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1933                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1934                 break;
1935
1936         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1937                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1938                    gpio_num, gpio_shift);
1939                 /* clear FLOAT and set SET */
1940                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1941                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1942                 break;
1943
1944         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1945                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1946                    gpio_num, gpio_shift);
1947                 /* set FLOAT */
1948                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1949                 break;
1950
1951         default:
1952                 break;
1953         }
1954
1955         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1956         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1957
1958         return 0;
1959 }
1960
1961 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1962 {
1963         /* The GPIO should be swapped if swap register is set and active */
1964         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1965                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1966         int gpio_shift = gpio_num +
1967                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1968         u32 gpio_mask = (1 << gpio_shift);
1969         u32 gpio_reg;
1970
1971         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1972                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1973                 return -EINVAL;
1974         }
1975
1976         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1977         /* read GPIO int */
1978         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1979
1980         switch (mode) {
1981         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1982                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1983                                    "output low\n", gpio_num, gpio_shift);
1984                 /* clear SET and set CLR */
1985                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1986                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1987                 break;
1988
1989         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1990                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1991                                    "output high\n", gpio_num, gpio_shift);
1992                 /* clear CLR and set SET */
1993                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1994                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1995                 break;
1996
1997         default:
1998                 break;
1999         }
2000
2001         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2002         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2003
2004         return 0;
2005 }
2006
2007 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2008 {
2009         u32 spio_mask = (1 << spio_num);
2010         u32 spio_reg;
2011
2012         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2013             (spio_num > MISC_REGISTERS_SPIO_7)) {
2014                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2015                 return -EINVAL;
2016         }
2017
2018         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2019         /* read SPIO and mask except the float bits */
2020         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2021
2022         switch (mode) {
2023         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2024                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2025                 /* clear FLOAT and set CLR */
2026                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2027                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2028                 break;
2029
2030         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2031                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2032                 /* clear FLOAT and set SET */
2033                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2034                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2035                 break;
2036
2037         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2038                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2039                 /* set FLOAT */
2040                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2041                 break;
2042
2043         default:
2044                 break;
2045         }
2046
2047         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2048         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2049
2050         return 0;
2051 }
2052
2053 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2054 {
2055         switch (bp->link_vars.ieee_fc &
2056                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2057         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2058                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2059                                           ADVERTISED_Pause);
2060                 break;
2061
2062         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2063                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2064                                          ADVERTISED_Pause);
2065                 break;
2066
2067         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2068                 bp->port.advertising |= ADVERTISED_Asym_Pause;
2069                 break;
2070
2071         default:
2072                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2073                                           ADVERTISED_Pause);
2074                 break;
2075         }
2076 }
2077
2078 static void bnx2x_link_report(struct bnx2x *bp)
2079 {
2080         if (bp->link_vars.link_up) {
2081                 if (bp->state == BNX2X_STATE_OPEN)
2082                         netif_carrier_on(bp->dev);
2083                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2084
2085                 printk("%d Mbps ", bp->link_vars.line_speed);
2086
2087                 if (bp->link_vars.duplex == DUPLEX_FULL)
2088                         printk("full duplex");
2089                 else
2090                         printk("half duplex");
2091
2092                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2093                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2094                                 printk(", receive ");
2095                                 if (bp->link_vars.flow_ctrl &
2096                                     BNX2X_FLOW_CTRL_TX)
2097                                         printk("& transmit ");
2098                         } else {
2099                                 printk(", transmit ");
2100                         }
2101                         printk("flow control ON");
2102                 }
2103                 printk("\n");
2104
2105         } else { /* link_down */
2106                 netif_carrier_off(bp->dev);
2107                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2108         }
2109 }
2110
2111 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2112 {
2113         if (!BP_NOMCP(bp)) {
2114                 u8 rc;
2115
2116                 /* Initialize link parameters structure variables */
2117                 /* It is recommended to turn off RX FC for jumbo frames
2118                    for better performance */
2119                 if (IS_E1HMF(bp))
2120                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2121                 else if (bp->dev->mtu > 5000)
2122                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2123                 else
2124                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2125
2126                 bnx2x_acquire_phy_lock(bp);
2127
2128                 if (load_mode == LOAD_DIAG)
2129                         bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2130
2131                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2132
2133                 bnx2x_release_phy_lock(bp);
2134
2135                 bnx2x_calc_fc_adv(bp);
2136
2137                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2138                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2139                         bnx2x_link_report(bp);
2140                 }
2141
2142                 return rc;
2143         }
2144         BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2145         return -EINVAL;
2146 }
2147
2148 static void bnx2x_link_set(struct bnx2x *bp)
2149 {
2150         if (!BP_NOMCP(bp)) {
2151                 bnx2x_acquire_phy_lock(bp);
2152                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2153                 bnx2x_release_phy_lock(bp);
2154
2155                 bnx2x_calc_fc_adv(bp);
2156         } else
2157                 BNX2X_ERR("Bootcode is missing - can not set link\n");
2158 }
2159
2160 static void bnx2x__link_reset(struct bnx2x *bp)
2161 {
2162         if (!BP_NOMCP(bp)) {
2163                 bnx2x_acquire_phy_lock(bp);
2164                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2165                 bnx2x_release_phy_lock(bp);
2166         } else
2167                 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2168 }
2169
2170 static u8 bnx2x_link_test(struct bnx2x *bp)
2171 {
2172         u8 rc;
2173
2174         bnx2x_acquire_phy_lock(bp);
2175         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2176         bnx2x_release_phy_lock(bp);
2177
2178         return rc;
2179 }
2180
2181 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2182 {
2183         u32 r_param = bp->link_vars.line_speed / 8;
2184         u32 fair_periodic_timeout_usec;
2185         u32 t_fair;
2186
2187         memset(&(bp->cmng.rs_vars), 0,
2188                sizeof(struct rate_shaping_vars_per_port));
2189         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2190
2191         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2192         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2193
2194         /* this is the threshold below which no timer arming will occur
2195            1.25 coefficient is for the threshold to be a little bigger
2196            than the real time, to compensate for timer in-accuracy */
2197         bp->cmng.rs_vars.rs_threshold =
2198                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2199
2200         /* resolution of fairness timer */
2201         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2202         /* for 10G it is 1000usec. for 1G it is 10000usec. */
2203         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2204
2205         /* this is the threshold below which we won't arm the timer anymore */
2206         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2207
2208         /* we multiply by 1e3/8 to get bytes/msec.
2209            We don't want the credits to pass a credit
2210            of the t_fair*FAIR_MEM (algorithm resolution) */
2211         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2212         /* since each tick is 4 usec */
2213         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2214 }
2215
2216 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2217 {
2218         struct rate_shaping_vars_per_vn m_rs_vn;
2219         struct fairness_vars_per_vn m_fair_vn;
2220         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2221         u16 vn_min_rate, vn_max_rate;
2222         int i;
2223
2224         /* If function is hidden - set min and max to zeroes */
2225         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2226                 vn_min_rate = 0;
2227                 vn_max_rate = 0;
2228
2229         } else {
2230                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2231                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2232                 /* If fairness is enabled (not all min rates are zeroes) and
2233                    if current min rate is zero - set it to 1.
2234                    This is a requirement of the algorithm. */
2235                 if (bp->vn_weight_sum && (vn_min_rate == 0))
2236                         vn_min_rate = DEF_MIN_RATE;
2237                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2238                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2239         }
2240
2241         DP(NETIF_MSG_IFUP,
2242            "func %d: vn_min_rate=%d  vn_max_rate=%d  vn_weight_sum=%d\n",
2243            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2244
2245         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2246         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2247
2248         /* global vn counter - maximal Mbps for this vn */
2249         m_rs_vn.vn_counter.rate = vn_max_rate;
2250
2251         /* quota - number of bytes transmitted in this period */
2252         m_rs_vn.vn_counter.quota =
2253                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2254
2255         if (bp->vn_weight_sum) {
2256                 /* credit for each period of the fairness algorithm:
2257                    number of bytes in T_FAIR (the vn share the port rate).
2258                    vn_weight_sum should not be larger than 10000, thus
2259                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2260                    than zero */
2261                 m_fair_vn.vn_credit_delta =
2262                         max((u32)(vn_min_rate * (T_FAIR_COEF /
2263                                                  (8 * bp->vn_weight_sum))),
2264                             (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2265                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2266                    m_fair_vn.vn_credit_delta);
2267         }
2268
2269         /* Store it to internal memory */
2270         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2271                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2272                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2273                        ((u32 *)(&m_rs_vn))[i]);
2274
2275         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2276                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2277                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2278                        ((u32 *)(&m_fair_vn))[i]);
2279 }
2280
2281
2282 /* This function is called upon link interrupt */
2283 static void bnx2x_link_attn(struct bnx2x *bp)
2284 {
2285         /* Make sure that we are synced with the current statistics */
2286         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2287
2288         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2289
2290         if (bp->link_vars.link_up) {
2291
2292                 /* dropless flow control */
2293                 if (CHIP_IS_E1H(bp)) {
2294                         int port = BP_PORT(bp);
2295                         u32 pause_enabled = 0;
2296
2297                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2298                                 pause_enabled = 1;
2299
2300                         REG_WR(bp, BAR_USTRORM_INTMEM +
2301                                USTORM_PAUSE_ENABLED_OFFSET(port),
2302                                pause_enabled);
2303                 }
2304
2305                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2306                         struct host_port_stats *pstats;
2307
2308                         pstats = bnx2x_sp(bp, port_stats);
2309                         /* reset old bmac stats */
2310                         memset(&(pstats->mac_stx[0]), 0,
2311                                sizeof(struct mac_stx));
2312                 }
2313                 if ((bp->state == BNX2X_STATE_OPEN) ||
2314                     (bp->state == BNX2X_STATE_DISABLED))
2315                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2316         }
2317
2318         /* indicate link status */
2319         bnx2x_link_report(bp);
2320
2321         if (IS_E1HMF(bp)) {
2322                 int port = BP_PORT(bp);
2323                 int func;
2324                 int vn;
2325
2326                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2327                         if (vn == BP_E1HVN(bp))
2328                                 continue;
2329
2330                         func = ((vn << 1) | port);
2331
2332                         /* Set the attention towards other drivers
2333                            on the same port */
2334                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2335                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2336                 }
2337
2338                 if (bp->link_vars.link_up) {
2339                         int i;
2340
2341                         /* Init rate shaping and fairness contexts */
2342                         bnx2x_init_port_minmax(bp);
2343
2344                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2345                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2346
2347                         /* Store it to internal memory */
2348                         for (i = 0;
2349                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
2350                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2351                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2352                                        ((u32 *)(&bp->cmng))[i]);
2353                 }
2354         }
2355 }
2356
2357 static void bnx2x__link_status_update(struct bnx2x *bp)
2358 {
2359         if (bp->state != BNX2X_STATE_OPEN)
2360                 return;
2361
2362         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2363
2364         if (bp->link_vars.link_up)
2365                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2366         else
2367                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2368
2369         /* indicate link status */
2370         bnx2x_link_report(bp);
2371 }
2372
2373 static void bnx2x_pmf_update(struct bnx2x *bp)
2374 {
2375         int port = BP_PORT(bp);
2376         u32 val;
2377
2378         bp->port.pmf = 1;
2379         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2380
2381         /* enable nig attention */
2382         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2383         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2384         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2385
2386         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2387 }
2388
2389 /* end of Link */
2390
2391 /* slow path */
2392
2393 /*
2394  * General service functions
2395  */
2396
2397 /* the slow path queue is odd since completions arrive on the fastpath ring */
2398 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2399                          u32 data_hi, u32 data_lo, int common)
2400 {
2401         int func = BP_FUNC(bp);
2402
2403         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2404            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2405            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2406            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2407            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2408
2409 #ifdef BNX2X_STOP_ON_ERROR
2410         if (unlikely(bp->panic))
2411                 return -EIO;
2412 #endif
2413
2414         spin_lock_bh(&bp->spq_lock);
2415
2416         if (!bp->spq_left) {
2417                 BNX2X_ERR("BUG! SPQ ring full!\n");
2418                 spin_unlock_bh(&bp->spq_lock);
2419                 bnx2x_panic();
2420                 return -EBUSY;
2421         }
2422
2423         /* CID needs port number to be encoded int it */
2424         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2425                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2426                                      HW_CID(bp, cid)));
2427         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2428         if (common)
2429                 bp->spq_prod_bd->hdr.type |=
2430                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2431
2432         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2433         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2434
2435         bp->spq_left--;
2436
2437         if (bp->spq_prod_bd == bp->spq_last_bd) {
2438                 bp->spq_prod_bd = bp->spq;
2439                 bp->spq_prod_idx = 0;
2440                 DP(NETIF_MSG_TIMER, "end of spq\n");
2441
2442         } else {
2443                 bp->spq_prod_bd++;
2444                 bp->spq_prod_idx++;
2445         }
2446
2447         /* Make sure that BD data is updated before writing the producer */
2448         wmb();
2449
2450         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2451                bp->spq_prod_idx);
2452
2453         mmiowb();
2454
2455         spin_unlock_bh(&bp->spq_lock);
2456         return 0;
2457 }
2458
2459 /* acquire split MCP access lock register */
2460 static int bnx2x_acquire_alr(struct bnx2x *bp)
2461 {
2462         u32 i, j, val;
2463         int rc = 0;
2464
2465         might_sleep();
2466         i = 100;
2467         for (j = 0; j < i*10; j++) {
2468                 val = (1UL << 31);
2469                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2470                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2471                 if (val & (1L << 31))
2472                         break;
2473
2474                 msleep(5);
2475         }
2476         if (!(val & (1L << 31))) {
2477                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2478                 rc = -EBUSY;
2479         }
2480
2481         return rc;
2482 }
2483
2484 /* release split MCP access lock register */
2485 static void bnx2x_release_alr(struct bnx2x *bp)
2486 {
2487         u32 val = 0;
2488
2489         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2490 }
2491
2492 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2493 {
2494         struct host_def_status_block *def_sb = bp->def_status_blk;
2495         u16 rc = 0;
2496
2497         barrier(); /* status block is written to by the chip */
2498         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2499                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2500                 rc |= 1;
2501         }
2502         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2503                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2504                 rc |= 2;
2505         }
2506         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2507                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2508                 rc |= 4;
2509         }
2510         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2511                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2512                 rc |= 8;
2513         }
2514         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2515                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2516                 rc |= 16;
2517         }
2518         return rc;
2519 }
2520
2521 /*
2522  * slow path service functions
2523  */
2524
2525 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2526 {
2527         int port = BP_PORT(bp);
2528         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2529                        COMMAND_REG_ATTN_BITS_SET);
2530         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2531                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2532         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2533                                        NIG_REG_MASK_INTERRUPT_PORT0;
2534         u32 aeu_mask;
2535         u32 nig_mask = 0;
2536
2537         if (bp->attn_state & asserted)
2538                 BNX2X_ERR("IGU ERROR\n");
2539
2540         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2541         aeu_mask = REG_RD(bp, aeu_addr);
2542
2543         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2544            aeu_mask, asserted);
2545         aeu_mask &= ~(asserted & 0xff);
2546         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2547
2548         REG_WR(bp, aeu_addr, aeu_mask);
2549         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2550
2551         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2552         bp->attn_state |= asserted;
2553         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2554
2555         if (asserted & ATTN_HARD_WIRED_MASK) {
2556                 if (asserted & ATTN_NIG_FOR_FUNC) {
2557
2558                         bnx2x_acquire_phy_lock(bp);
2559
2560                         /* save nig interrupt mask */
2561                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2562                         REG_WR(bp, nig_int_mask_addr, 0);
2563
2564                         bnx2x_link_attn(bp);
2565
2566                         /* handle unicore attn? */
2567                 }
2568                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2569                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2570
2571                 if (asserted & GPIO_2_FUNC)
2572                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2573
2574                 if (asserted & GPIO_3_FUNC)
2575                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2576
2577                 if (asserted & GPIO_4_FUNC)
2578                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2579
2580                 if (port == 0) {
2581                         if (asserted & ATTN_GENERAL_ATTN_1) {
2582                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2583                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2584                         }
2585                         if (asserted & ATTN_GENERAL_ATTN_2) {
2586                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2587                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2588                         }
2589                         if (asserted & ATTN_GENERAL_ATTN_3) {
2590                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2591                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2592                         }
2593                 } else {
2594                         if (asserted & ATTN_GENERAL_ATTN_4) {
2595                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2596                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2597                         }
2598                         if (asserted & ATTN_GENERAL_ATTN_5) {
2599                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2600                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2601                         }
2602                         if (asserted & ATTN_GENERAL_ATTN_6) {
2603                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2604                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2605                         }
2606                 }
2607
2608         } /* if hardwired */
2609
2610         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2611            asserted, hc_addr);
2612         REG_WR(bp, hc_addr, asserted);
2613
2614         /* now set back the mask */
2615         if (asserted & ATTN_NIG_FOR_FUNC) {
2616                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2617                 bnx2x_release_phy_lock(bp);
2618         }
2619 }
2620
2621 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2622 {
2623         int port = BP_PORT(bp);
2624
2625         /* mark the failure */
2626         bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2627         bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2628         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2629                  bp->link_params.ext_phy_config);
2630
2631         /* log the failure */
2632         printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2633                " the driver to shutdown the card to prevent permanent"
2634                " damage.  Please contact Dell Support for assistance\n",
2635                bp->dev->name);
2636 }
2637 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2638 {
2639         int port = BP_PORT(bp);
2640         int reg_offset;
2641         u32 val, swap_val, swap_override;
2642
2643         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2644                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2645
2646         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2647
2648                 val = REG_RD(bp, reg_offset);
2649                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2650                 REG_WR(bp, reg_offset, val);
2651
2652                 BNX2X_ERR("SPIO5 hw attention\n");
2653
2654                 /* Fan failure attention */
2655                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2656                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2657                         /* Low power mode is controlled by GPIO 2 */
2658                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2659                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2660                         /* The PHY reset is controlled by GPIO 1 */
2661                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2662                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2663                         break;
2664
2665                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2666                         /* The PHY reset is controlled by GPIO 1 */
2667                         /* fake the port number to cancel the swap done in
2668                            set_gpio() */
2669                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2670                         swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2671                         port = (swap_val && swap_override) ^ 1;
2672                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2673                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2674                         break;
2675
2676                 default:
2677                         break;
2678                 }
2679                 bnx2x_fan_failure(bp);
2680         }
2681
2682         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2683                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2684                 bnx2x_acquire_phy_lock(bp);
2685                 bnx2x_handle_module_detect_int(&bp->link_params);
2686                 bnx2x_release_phy_lock(bp);
2687         }
2688
2689         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2690
2691                 val = REG_RD(bp, reg_offset);
2692                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2693                 REG_WR(bp, reg_offset, val);
2694
2695                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2696                           (attn & HW_INTERRUT_ASSERT_SET_0));
2697                 bnx2x_panic();
2698         }
2699 }
2700
2701 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2702 {
2703         u32 val;
2704
2705         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2706
2707                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2708                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2709                 /* DORQ discard attention */
2710                 if (val & 0x2)
2711                         BNX2X_ERR("FATAL error from DORQ\n");
2712         }
2713
2714         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2715
2716                 int port = BP_PORT(bp);
2717                 int reg_offset;
2718
2719                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2720                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2721
2722                 val = REG_RD(bp, reg_offset);
2723                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2724                 REG_WR(bp, reg_offset, val);
2725
2726                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2727                           (attn & HW_INTERRUT_ASSERT_SET_1));
2728                 bnx2x_panic();
2729         }
2730 }
2731
2732 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2733 {
2734         u32 val;
2735
2736         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2737
2738                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2739                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2740                 /* CFC error attention */
2741                 if (val & 0x2)
2742                         BNX2X_ERR("FATAL error from CFC\n");
2743         }
2744
2745         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2746
2747                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2748                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2749                 /* RQ_USDMDP_FIFO_OVERFLOW */
2750                 if (val & 0x18000)
2751                         BNX2X_ERR("FATAL error from PXP\n");
2752         }
2753
2754         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2755
2756                 int port = BP_PORT(bp);
2757                 int reg_offset;
2758
2759                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2760                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2761
2762                 val = REG_RD(bp, reg_offset);
2763                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2764                 REG_WR(bp, reg_offset, val);
2765
2766                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2767                           (attn & HW_INTERRUT_ASSERT_SET_2));
2768                 bnx2x_panic();
2769         }
2770 }
2771
2772 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2773 {
2774         u32 val;
2775
2776         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2777
2778                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2779                         int func = BP_FUNC(bp);
2780
2781                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2782                         bnx2x__link_status_update(bp);
2783                         if (SHMEM_RD(bp, func_mb[func].drv_status) &
2784                                                         DRV_STATUS_PMF)
2785                                 bnx2x_pmf_update(bp);
2786
2787                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2788
2789                         BNX2X_ERR("MC assert!\n");
2790                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2791                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2792                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2793                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2794                         bnx2x_panic();
2795
2796                 } else if (attn & BNX2X_MCP_ASSERT) {
2797
2798                         BNX2X_ERR("MCP assert!\n");
2799                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2800                         bnx2x_fw_dump(bp);
2801
2802                 } else
2803                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2804         }
2805
2806         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2807                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2808                 if (attn & BNX2X_GRC_TIMEOUT) {
2809                         val = CHIP_IS_E1H(bp) ?
2810                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2811                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2812                 }
2813                 if (attn & BNX2X_GRC_RSV) {
2814                         val = CHIP_IS_E1H(bp) ?
2815                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2816                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2817                 }
2818                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2819         }
2820 }
2821
2822 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2823 {
2824         struct attn_route attn;
2825         struct attn_route group_mask;
2826         int port = BP_PORT(bp);
2827         int index;
2828         u32 reg_addr;
2829         u32 val;
2830         u32 aeu_mask;
2831
2832         /* need to take HW lock because MCP or other port might also
2833            try to handle this event */
2834         bnx2x_acquire_alr(bp);
2835
2836         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2837         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2838         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2839         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2840         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2841            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2842
2843         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2844                 if (deasserted & (1 << index)) {
2845                         group_mask = bp->attn_group[index];
2846
2847                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2848                            index, group_mask.sig[0], group_mask.sig[1],
2849                            group_mask.sig[2], group_mask.sig[3]);
2850
2851                         bnx2x_attn_int_deasserted3(bp,
2852                                         attn.sig[3] & group_mask.sig[3]);
2853                         bnx2x_attn_int_deasserted1(bp,
2854                                         attn.sig[1] & group_mask.sig[1]);
2855                         bnx2x_attn_int_deasserted2(bp,
2856                                         attn.sig[2] & group_mask.sig[2]);
2857                         bnx2x_attn_int_deasserted0(bp,
2858                                         attn.sig[0] & group_mask.sig[0]);
2859
2860                         if ((attn.sig[0] & group_mask.sig[0] &
2861                                                 HW_PRTY_ASSERT_SET_0) ||
2862                             (attn.sig[1] & group_mask.sig[1] &
2863                                                 HW_PRTY_ASSERT_SET_1) ||
2864                             (attn.sig[2] & group_mask.sig[2] &
2865                                                 HW_PRTY_ASSERT_SET_2))
2866                                 BNX2X_ERR("FATAL HW block parity attention\n");
2867                 }
2868         }
2869
2870         bnx2x_release_alr(bp);
2871
2872         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2873
2874         val = ~deasserted;
2875         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2876            val, reg_addr);
2877         REG_WR(bp, reg_addr, val);
2878
2879         if (~bp->attn_state & deasserted)
2880                 BNX2X_ERR("IGU ERROR\n");
2881
2882         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2883                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2884
2885         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2886         aeu_mask = REG_RD(bp, reg_addr);
2887
2888         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2889            aeu_mask, deasserted);
2890         aeu_mask |= (deasserted & 0xff);
2891         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2892
2893         REG_WR(bp, reg_addr, aeu_mask);
2894         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2895
2896         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2897         bp->attn_state &= ~deasserted;
2898         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2899 }
2900
2901 static void bnx2x_attn_int(struct bnx2x *bp)
2902 {
2903         /* read local copy of bits */
2904         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2905                                                                 attn_bits);
2906         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2907                                                                 attn_bits_ack);
2908         u32 attn_state = bp->attn_state;
2909
2910         /* look for changed bits */
2911         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2912         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
2913
2914         DP(NETIF_MSG_HW,
2915            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2916            attn_bits, attn_ack, asserted, deasserted);
2917
2918         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2919                 BNX2X_ERR("BAD attention state\n");
2920
2921         /* handle bits that were raised */
2922         if (asserted)
2923                 bnx2x_attn_int_asserted(bp, asserted);
2924
2925         if (deasserted)
2926                 bnx2x_attn_int_deasserted(bp, deasserted);
2927 }
2928
2929 static void bnx2x_sp_task(struct work_struct *work)
2930 {
2931         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2932         u16 status;
2933
2934
2935         /* Return here if interrupt is disabled */
2936         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2937                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2938                 return;
2939         }
2940
2941         status = bnx2x_update_dsb_idx(bp);
2942 /*      if (status == 0)                                     */
2943 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2944
2945         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2946
2947         /* HW attentions */
2948         if (status & 0x1)
2949                 bnx2x_attn_int(bp);
2950
2951         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2952                      IGU_INT_NOP, 1);
2953         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2954                      IGU_INT_NOP, 1);
2955         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2956                      IGU_INT_NOP, 1);
2957         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2958                      IGU_INT_NOP, 1);
2959         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2960                      IGU_INT_ENABLE, 1);
2961
2962 }
2963
2964 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2965 {
2966         struct net_device *dev = dev_instance;
2967         struct bnx2x *bp = netdev_priv(dev);
2968
2969         /* Return here if interrupt is disabled */
2970         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2971                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2972                 return IRQ_HANDLED;
2973         }
2974
2975         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2976
2977 #ifdef BNX2X_STOP_ON_ERROR
2978         if (unlikely(bp->panic))
2979                 return IRQ_HANDLED;
2980 #endif
2981
2982         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2983
2984         return IRQ_HANDLED;
2985 }
2986
2987 /* end of slow path */
2988
2989 /* Statistics */
2990
2991 /****************************************************************************
2992 * Macros
2993 ****************************************************************************/
2994
2995 /* sum[hi:lo] += add[hi:lo] */
2996 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2997         do { \
2998                 s_lo += a_lo; \
2999                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3000         } while (0)
3001
3002 /* difference = minuend - subtrahend */
3003 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3004         do { \
3005                 if (m_lo < s_lo) { \
3006                         /* underflow */ \
3007                         d_hi = m_hi - s_hi; \
3008                         if (d_hi > 0) { \
3009                                 /* we can 'loan' 1 */ \
3010                                 d_hi--; \
3011                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3012                         } else { \
3013                                 /* m_hi <= s_hi */ \
3014                                 d_hi = 0; \
3015                                 d_lo = 0; \
3016                         } \
3017                 } else { \
3018                         /* m_lo >= s_lo */ \
3019                         if (m_hi < s_hi) { \
3020                                 d_hi = 0; \
3021                                 d_lo = 0; \
3022                         } else { \
3023                                 /* m_hi >= s_hi */ \
3024                                 d_hi = m_hi - s_hi; \
3025                                 d_lo = m_lo - s_lo; \
3026                         } \
3027                 } \
3028         } while (0)
3029
3030 #define UPDATE_STAT64(s, t) \
3031         do { \
3032                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3033                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3034                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3035                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3036                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3037                        pstats->mac_stx[1].t##_lo, diff.lo); \
3038         } while (0)
3039
3040 #define UPDATE_STAT64_NIG(s, t) \
3041         do { \
3042                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3043                         diff.lo, new->s##_lo, old->s##_lo); \
3044                 ADD_64(estats->t##_hi, diff.hi, \
3045                        estats->t##_lo, diff.lo); \
3046         } while (0)
3047
3048 /* sum[hi:lo] += add */
3049 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3050         do { \
3051                 s_lo += a; \
3052                 s_hi += (s_lo < a) ? 1 : 0; \
3053         } while (0)
3054
3055 #define UPDATE_EXTEND_STAT(s) \
3056         do { \
3057                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3058                               pstats->mac_stx[1].s##_lo, \
3059                               new->s); \
3060         } while (0)
3061
3062 #define UPDATE_EXTEND_TSTAT(s, t) \
3063         do { \
3064                 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3065                 old_tclient->s = tclient->s; \
3066                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3067         } while (0)
3068
3069 #define UPDATE_EXTEND_USTAT(s, t) \
3070         do { \
3071                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3072                 old_uclient->s = uclient->s; \
3073                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3074         } while (0)
3075
3076 #define UPDATE_EXTEND_XSTAT(s, t) \
3077         do { \
3078                 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3079                 old_xclient->s = xclient->s; \
3080                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3081         } while (0)
3082
3083 /* minuend -= subtrahend */
3084 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3085         do { \
3086                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3087         } while (0)
3088
3089 /* minuend[hi:lo] -= subtrahend */
3090 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3091         do { \
3092                 SUB_64(m_hi, 0, m_lo, s); \
3093         } while (0)
3094
3095 #define SUB_EXTEND_USTAT(s, t) \
3096         do { \
3097                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3098                 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3099         } while (0)
3100
3101 /*
3102  * General service functions
3103  */
3104
3105 static inline long bnx2x_hilo(u32 *hiref)
3106 {
3107         u32 lo = *(hiref + 1);
3108 #if (BITS_PER_LONG == 64)
3109         u32 hi = *hiref;
3110
3111         return HILO_U64(hi, lo);
3112 #else
3113         return lo;
3114 #endif
3115 }
3116
3117 /*
3118  * Init service functions
3119  */
3120
3121 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3122 {
3123         if (!bp->stats_pending) {
3124                 struct eth_query_ramrod_data ramrod_data = {0};
3125                 int i, rc;
3126
3127                 ramrod_data.drv_counter = bp->stats_counter++;
3128                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3129                 for_each_queue(bp, i)
3130                         ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3131
3132                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3133                                    ((u32 *)&ramrod_data)[1],
3134                                    ((u32 *)&ramrod_data)[0], 0);
3135                 if (rc == 0) {
3136                         /* stats ramrod has it's own slot on the spq */
3137                         bp->spq_left++;
3138                         bp->stats_pending = 1;
3139                 }
3140         }
3141 }
3142
3143 static void bnx2x_stats_init(struct bnx2x *bp)
3144 {
3145         int port = BP_PORT(bp);
3146         int i;
3147
3148         bp->stats_pending = 0;
3149         bp->executer_idx = 0;
3150         bp->stats_counter = 0;
3151
3152         /* port stats */
3153         if (!BP_NOMCP(bp))
3154                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3155         else
3156                 bp->port.port_stx = 0;
3157         DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3158
3159         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3160         bp->port.old_nig_stats.brb_discard =
3161                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3162         bp->port.old_nig_stats.brb_truncate =
3163                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3164         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3165                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3166         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3167                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3168
3169         /* function stats */
3170         for_each_queue(bp, i) {
3171                 struct bnx2x_fastpath *fp = &bp->fp[i];
3172
3173                 memset(&fp->old_tclient, 0,
3174                        sizeof(struct tstorm_per_client_stats));
3175                 memset(&fp->old_uclient, 0,
3176                        sizeof(struct ustorm_per_client_stats));
3177                 memset(&fp->old_xclient, 0,
3178                        sizeof(struct xstorm_per_client_stats));
3179                 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3180         }
3181
3182         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3183         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3184
3185         bp->stats_state = STATS_STATE_DISABLED;
3186         if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3187                 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3188 }
3189
3190 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3191 {
3192         struct dmae_command *dmae = &bp->stats_dmae;
3193         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3194
3195         *stats_comp = DMAE_COMP_VAL;
3196         if (CHIP_REV_IS_SLOW(bp))
3197                 return;
3198
3199         /* loader */
3200         if (bp->executer_idx) {
3201                 int loader_idx = PMF_DMAE_C(bp);
3202
3203                 memset(dmae, 0, sizeof(struct dmae_command));
3204
3205                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3206                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3207                                 DMAE_CMD_DST_RESET |
3208 #ifdef __BIG_ENDIAN
3209                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3210 #else
3211                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3212 #endif
3213                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3214                                                DMAE_CMD_PORT_0) |
3215                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3216                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3217                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3218                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3219                                      sizeof(struct dmae_command) *
3220                                      (loader_idx + 1)) >> 2;
3221                 dmae->dst_addr_hi = 0;
3222                 dmae->len = sizeof(struct dmae_command) >> 2;
3223                 if (CHIP_IS_E1(bp))
3224                         dmae->len--;
3225                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3226                 dmae->comp_addr_hi = 0;
3227                 dmae->comp_val = 1;
3228
3229                 *stats_comp = 0;
3230                 bnx2x_post_dmae(bp, dmae, loader_idx);
3231
3232         } else if (bp->func_stx) {
3233                 *stats_comp = 0;
3234                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3235         }
3236 }
3237
3238 static int bnx2x_stats_comp(struct bnx2x *bp)
3239 {
3240         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3241         int cnt = 10;
3242
3243         might_sleep();
3244         while (*stats_comp != DMAE_COMP_VAL) {
3245                 if (!cnt) {
3246                         BNX2X_ERR("timeout waiting for stats finished\n");
3247                         break;
3248                 }
3249                 cnt--;
3250                 msleep(1);
3251         }
3252         return 1;
3253 }
3254
3255 /*
3256  * Statistics service functions
3257  */
3258
3259 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3260 {
3261         struct dmae_command *dmae;
3262         u32 opcode;
3263         int loader_idx = PMF_DMAE_C(bp);
3264         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3265
3266         /* sanity */
3267         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3268                 BNX2X_ERR("BUG!\n");
3269                 return;
3270         }
3271
3272         bp->executer_idx = 0;
3273
3274         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3275                   DMAE_CMD_C_ENABLE |
3276                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3277 #ifdef __BIG_ENDIAN
3278                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3279 #else
3280                   DMAE_CMD_ENDIANITY_DW_SWAP |
3281 #endif
3282                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3283                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3284
3285         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3286         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3287         dmae->src_addr_lo = bp->port.port_stx >> 2;
3288         dmae->src_addr_hi = 0;
3289         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3290         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3291         dmae->len = DMAE_LEN32_RD_MAX;
3292         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3293         dmae->comp_addr_hi = 0;
3294         dmae->comp_val = 1;
3295
3296         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3297         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3298         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3299         dmae->src_addr_hi = 0;
3300         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3301                                    DMAE_LEN32_RD_MAX * 4);
3302         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3303                                    DMAE_LEN32_RD_MAX * 4);
3304         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3305         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3306         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3307         dmae->comp_val = DMAE_COMP_VAL;
3308
3309         *stats_comp = 0;
3310         bnx2x_hw_stats_post(bp);
3311         bnx2x_stats_comp(bp);
3312 }
3313
3314 static void bnx2x_port_stats_init(struct bnx2x *bp)
3315 {
3316         struct dmae_command *dmae;
3317         int port = BP_PORT(bp);
3318         int vn = BP_E1HVN(bp);
3319         u32 opcode;
3320         int loader_idx = PMF_DMAE_C(bp);
3321         u32 mac_addr;
3322         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3323
3324         /* sanity */
3325         if (!bp->link_vars.link_up || !bp->port.pmf) {
3326                 BNX2X_ERR("BUG!\n");
3327                 return;
3328         }
3329
3330         bp->executer_idx = 0;
3331
3332         /* MCP */
3333         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3334                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3335                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3336 #ifdef __BIG_ENDIAN
3337                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3338 #else
3339                   DMAE_CMD_ENDIANITY_DW_SWAP |
3340 #endif
3341                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3342                   (vn << DMAE_CMD_E1HVN_SHIFT));
3343
3344         if (bp->port.port_stx) {
3345
3346                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3347                 dmae->opcode = opcode;
3348                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3349                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3350                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3351                 dmae->dst_addr_hi = 0;
3352                 dmae->len = sizeof(struct host_port_stats) >> 2;
3353                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3354                 dmae->comp_addr_hi = 0;
3355                 dmae->comp_val = 1;
3356         }
3357
3358         if (bp->func_stx) {
3359
3360                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3361                 dmae->opcode = opcode;
3362                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3363                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3364                 dmae->dst_addr_lo = bp->func_stx >> 2;
3365                 dmae->dst_addr_hi = 0;
3366                 dmae->len = sizeof(struct host_func_stats) >> 2;
3367                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3368                 dmae->comp_addr_hi = 0;
3369                 dmae->comp_val = 1;
3370         }
3371
3372         /* MAC */
3373         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3374                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3375                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3376 #ifdef __BIG_ENDIAN
3377                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3378 #else
3379                   DMAE_CMD_ENDIANITY_DW_SWAP |
3380 #endif
3381                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3382                   (vn << DMAE_CMD_E1HVN_SHIFT));
3383
3384         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3385
3386                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3387                                    NIG_REG_INGRESS_BMAC0_MEM);
3388
3389                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3390                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3391                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3392                 dmae->opcode = opcode;
3393                 dmae->src_addr_lo = (mac_addr +
3394                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3395                 dmae->src_addr_hi = 0;
3396                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3397                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3398                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3399                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3400                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3401                 dmae->comp_addr_hi = 0;
3402                 dmae->comp_val = 1;
3403
3404                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3405                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3406                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3407                 dmae->opcode = opcode;
3408                 dmae->src_addr_lo = (mac_addr +
3409                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3410                 dmae->src_addr_hi = 0;
3411                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3412                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3413                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3414                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3415                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3416                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3417                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3418                 dmae->comp_addr_hi = 0;
3419                 dmae->comp_val = 1;
3420
3421         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3422
3423                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3424
3425                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3426                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3427                 dmae->opcode = opcode;
3428                 dmae->src_addr_lo = (mac_addr +
3429                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3430                 dmae->src_addr_hi = 0;
3431                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3432                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3433                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3434                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3435                 dmae->comp_addr_hi = 0;
3436                 dmae->comp_val = 1;
3437
3438                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3439                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3440                 dmae->opcode = opcode;
3441                 dmae->src_addr_lo = (mac_addr +
3442                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3443                 dmae->src_addr_hi = 0;
3444                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3445                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3446                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3447                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3448                 dmae->len = 1;
3449                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3450                 dmae->comp_addr_hi = 0;
3451                 dmae->comp_val = 1;
3452
3453                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3454                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3455                 dmae->opcode = opcode;
3456                 dmae->src_addr_lo = (mac_addr +
3457                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3458                 dmae->src_addr_hi = 0;
3459                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3460                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3461                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3462                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3463                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3464                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3465                 dmae->comp_addr_hi = 0;
3466                 dmae->comp_val = 1;
3467         }
3468
3469         /* NIG */
3470         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3471         dmae->opcode = opcode;
3472         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3473                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3474         dmae->src_addr_hi = 0;
3475         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3476         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3477         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3478         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3479         dmae->comp_addr_hi = 0;
3480         dmae->comp_val = 1;
3481
3482         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3483         dmae->opcode = opcode;
3484         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3485                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3486         dmae->src_addr_hi = 0;
3487         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3488                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3489         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3490                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3491         dmae->len = (2*sizeof(u32)) >> 2;
3492         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3493         dmae->comp_addr_hi = 0;
3494         dmae->comp_val = 1;
3495
3496         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3497         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3498                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3499                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3500 #ifdef __BIG_ENDIAN
3501                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3502 #else
3503                         DMAE_CMD_ENDIANITY_DW_SWAP |
3504 #endif
3505                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3506                         (vn << DMAE_CMD_E1HVN_SHIFT));
3507         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3508                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3509         dmae->src_addr_hi = 0;
3510         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3511                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3512         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3513                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3514         dmae->len = (2*sizeof(u32)) >> 2;
3515         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3516         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3517         dmae->comp_val = DMAE_COMP_VAL;
3518
3519         *stats_comp = 0;
3520 }
3521
3522 static void bnx2x_func_stats_init(struct bnx2x *bp)
3523 {
3524         struct dmae_command *dmae = &bp->stats_dmae;
3525         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3526
3527         /* sanity */
3528         if (!bp->func_stx) {
3529                 BNX2X_ERR("BUG!\n");
3530                 return;
3531         }
3532
3533         bp->executer_idx = 0;
3534         memset(dmae, 0, sizeof(struct dmae_command));
3535
3536         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3537                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3538                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3539 #ifdef __BIG_ENDIAN
3540                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3541 #else
3542                         DMAE_CMD_ENDIANITY_DW_SWAP |
3543 #endif
3544                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3545                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3546         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3547         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3548         dmae->dst_addr_lo = bp->func_stx >> 2;
3549         dmae->dst_addr_hi = 0;
3550         dmae->len = sizeof(struct host_func_stats) >> 2;
3551         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3552         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3553         dmae->comp_val = DMAE_COMP_VAL;
3554
3555         *stats_comp = 0;
3556 }
3557
3558 static void bnx2x_stats_start(struct bnx2x *bp)
3559 {
3560         if (bp->port.pmf)
3561                 bnx2x_port_stats_init(bp);
3562
3563         else if (bp->func_stx)
3564                 bnx2x_func_stats_init(bp);
3565
3566         bnx2x_hw_stats_post(bp);
3567         bnx2x_storm_stats_post(bp);
3568 }
3569
3570 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3571 {
3572         bnx2x_stats_comp(bp);
3573         bnx2x_stats_pmf_update(bp);
3574         bnx2x_stats_start(bp);
3575 }
3576
3577 static void bnx2x_stats_restart(struct bnx2x *bp)
3578 {
3579         bnx2x_stats_comp(bp);
3580         bnx2x_stats_start(bp);
3581 }
3582
3583 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3584 {
3585         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3586         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3587         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3588         struct {
3589                 u32 lo;
3590                 u32 hi;
3591         } diff;
3592
3593         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3594         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3595         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3596         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3597         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3598         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3599         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3600         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3601         UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3602         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3603         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3604         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3605         UPDATE_STAT64(tx_stat_gt127,
3606                                 tx_stat_etherstatspkts65octetsto127octets);
3607         UPDATE_STAT64(tx_stat_gt255,
3608                                 tx_stat_etherstatspkts128octetsto255octets);
3609         UPDATE_STAT64(tx_stat_gt511,
3610                                 tx_stat_etherstatspkts256octetsto511octets);
3611         UPDATE_STAT64(tx_stat_gt1023,
3612                                 tx_stat_etherstatspkts512octetsto1023octets);
3613         UPDATE_STAT64(tx_stat_gt1518,
3614                                 tx_stat_etherstatspkts1024octetsto1522octets);
3615         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3616         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3617         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3618         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3619         UPDATE_STAT64(tx_stat_gterr,
3620                                 tx_stat_dot3statsinternalmactransmiterrors);
3621         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3622
3623         estats->pause_frames_received_hi =
3624                                 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3625         estats->pause_frames_received_lo =
3626                                 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3627
3628         estats->pause_frames_sent_hi =
3629                                 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3630         estats->pause_frames_sent_lo =
3631                                 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3632 }
3633
3634 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3635 {
3636         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3637         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3638         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3639
3640         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3641         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3642         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3643         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3644         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3645         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3646         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3647         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3648         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3649         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3650         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3651         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3652         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3653         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3654         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3655         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3656         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3657         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3658         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3659         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3660         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3661         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3662         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3663         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3664         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3665         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3666         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3667         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3668         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3669         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3670         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3671
3672         estats->pause_frames_received_hi =
3673                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3674         estats->pause_frames_received_lo =
3675                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3676         ADD_64(estats->pause_frames_received_hi,
3677                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3678                estats->pause_frames_received_lo,
3679                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3680
3681         estats->pause_frames_sent_hi =
3682                         pstats->mac_stx[1].tx_stat_outxonsent_hi;
3683         estats->pause_frames_sent_lo =
3684                         pstats->mac_stx[1].tx_stat_outxonsent_lo;
3685         ADD_64(estats->pause_frames_sent_hi,
3686                pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3687                estats->pause_frames_sent_lo,
3688                pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3689 }
3690
3691 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3692 {
3693         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3694         struct nig_stats *old = &(bp->port.old_nig_stats);
3695         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3696         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3697         struct {
3698                 u32 lo;
3699                 u32 hi;
3700         } diff;
3701         u32 nig_timer_max;
3702
3703         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3704                 bnx2x_bmac_stats_update(bp);
3705
3706         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3707                 bnx2x_emac_stats_update(bp);
3708
3709         else { /* unreached */
3710                 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3711                 return -1;
3712         }
3713
3714         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3715                       new->brb_discard - old->brb_discard);
3716         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3717                       new->brb_truncate - old->brb_truncate);
3718
3719         UPDATE_STAT64_NIG(egress_mac_pkt0,
3720                                         etherstatspkts1024octetsto1522octets);
3721         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3722
3723         memcpy(old, new, sizeof(struct nig_stats));
3724
3725         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3726                sizeof(struct mac_stx));
3727         estats->brb_drop_hi = pstats->brb_drop_hi;
3728         estats->brb_drop_lo = pstats->brb_drop_lo;
3729
3730         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3731
3732         nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3733         if (nig_timer_max != estats->nig_timer_max) {
3734                 estats->nig_timer_max = nig_timer_max;
3735                 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3736         }
3737
3738         return 0;
3739 }
3740
3741 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3742 {
3743         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3744         struct tstorm_per_port_stats *tport =
3745                                         &stats->tstorm_common.port_statistics;
3746         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3747         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3748         int i;
3749
3750         memset(&(fstats->total_bytes_received_hi), 0,
3751                sizeof(struct host_func_stats) - 2*sizeof(u32));
3752         estats->error_bytes_received_hi = 0;
3753         estats->error_bytes_received_lo = 0;
3754         estats->etherstatsoverrsizepkts_hi = 0;
3755         estats->etherstatsoverrsizepkts_lo = 0;
3756         estats->no_buff_discard_hi = 0;
3757         estats->no_buff_discard_lo = 0;
3758
3759         for_each_queue(bp, i) {
3760                 struct bnx2x_fastpath *fp = &bp->fp[i];
3761                 int cl_id = fp->cl_id;
3762                 struct tstorm_per_client_stats *tclient =
3763                                 &stats->tstorm_common.client_statistics[cl_id];
3764                 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3765                 struct ustorm_per_client_stats *uclient =
3766                                 &stats->ustorm_common.client_statistics[cl_id];
3767                 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3768                 struct xstorm_per_client_stats *xclient =
3769                                 &stats->xstorm_common.client_statistics[cl_id];
3770                 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3771                 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3772                 u32 diff;
3773
3774                 /* are storm stats valid? */
3775                 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3776                                                         bp->stats_counter) {
3777                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3778                            "  xstorm counter (%d) != stats_counter (%d)\n",
3779                            i, xclient->stats_counter, bp->stats_counter);
3780                         return -1;
3781                 }
3782                 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3783                                                         bp->stats_counter) {
3784                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3785                            "  tstorm counter (%d) != stats_counter (%d)\n",
3786                            i, tclient->stats_counter, bp->stats_counter);
3787                         return -2;
3788                 }
3789                 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3790                                                         bp->stats_counter) {
3791                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3792                            "  ustorm counter (%d) != stats_counter (%d)\n",
3793                            i, uclient->stats_counter, bp->stats_counter);
3794                         return -4;
3795                 }
3796
3797                 qstats->total_bytes_received_hi =
3798                 qstats->valid_bytes_received_hi =
3799                                 le32_to_cpu(tclient->total_rcv_bytes.hi);
3800                 qstats->total_bytes_received_lo =
3801                 qstats->valid_bytes_received_lo =
3802                                 le32_to_cpu(tclient->total_rcv_bytes.lo);
3803
3804                 qstats->error_bytes_received_hi =
3805                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
3806                 qstats->error_bytes_received_lo =
3807                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
3808
3809                 ADD_64(qstats->total_bytes_received_hi,
3810                        qstats->error_bytes_received_hi,
3811                        qstats->total_bytes_received_lo,
3812                        qstats->error_bytes_received_lo);
3813
3814                 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3815                                         total_unicast_packets_received);
3816                 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3817                                         total_multicast_packets_received);
3818                 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3819                                         total_broadcast_packets_received);
3820                 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3821                                         etherstatsoverrsizepkts);
3822                 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3823
3824                 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3825                                         total_unicast_packets_received);
3826                 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3827                                         total_multicast_packets_received);
3828                 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3829                                         total_broadcast_packets_received);
3830                 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3831                 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3832                 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3833
3834                 qstats->total_bytes_transmitted_hi =
3835                                 le32_to_cpu(xclient->total_sent_bytes.hi);
3836                 qstats->total_bytes_transmitted_lo =
3837                                 le32_to_cpu(xclient->total_sent_bytes.lo);
3838
3839                 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3840                                         total_unicast_packets_transmitted);
3841                 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3842                                         total_multicast_packets_transmitted);
3843                 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3844                                         total_broadcast_packets_transmitted);
3845
3846                 old_tclient->checksum_discard = tclient->checksum_discard;
3847                 old_tclient->ttl0_discard = tclient->ttl0_discard;
3848
3849                 ADD_64(fstats->total_bytes_received_hi,
3850                        qstats->total_bytes_received_hi,
3851                        fstats->total_bytes_received_lo,
3852                        qstats->total_bytes_received_lo);
3853                 ADD_64(fstats->total_bytes_transmitted_hi,
3854                        qstats->total_bytes_transmitted_hi,
3855                        fstats->total_bytes_transmitted_lo,
3856                        qstats->total_bytes_transmitted_lo);
3857                 ADD_64(fstats->total_unicast_packets_received_hi,
3858                        qstats->total_unicast_packets_received_hi,
3859                        fstats->total_unicast_packets_received_lo,
3860                        qstats->total_unicast_packets_received_lo);
3861                 ADD_64(fstats->total_multicast_packets_received_hi,
3862                        qstats->total_multicast_packets_received_hi,
3863                        fstats->total_multicast_packets_received_lo,
3864                        qstats->total_multicast_packets_received_lo);
3865                 ADD_64(fstats->total_broadcast_packets_received_hi,
3866                        qstats->total_broadcast_packets_received_hi,
3867                        fstats->total_broadcast_packets_received_lo,
3868                        qstats->total_broadcast_packets_received_lo);
3869                 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3870                        qstats->total_unicast_packets_transmitted_hi,
3871                        fstats->total_unicast_packets_transmitted_lo,
3872                        qstats->total_unicast_packets_transmitted_lo);
3873                 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3874                        qstats->total_multicast_packets_transmitted_hi,
3875                        fstats->total_multicast_packets_transmitted_lo,
3876                        qstats->total_multicast_packets_transmitted_lo);
3877                 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3878                        qstats->total_broadcast_packets_transmitted_hi,
3879                        fstats->total_broadcast_packets_transmitted_lo,
3880                        qstats->total_broadcast_packets_transmitted_lo);
3881                 ADD_64(fstats->valid_bytes_received_hi,
3882                        qstats->valid_bytes_received_hi,
3883                        fstats->valid_bytes_received_lo,
3884                        qstats->valid_bytes_received_lo);
3885
3886                 ADD_64(estats->error_bytes_received_hi,
3887                        qstats->error_bytes_received_hi,
3888                        estats->error_bytes_received_lo,
3889                        qstats->error_bytes_received_lo);
3890                 ADD_64(estats->etherstatsoverrsizepkts_hi,
3891                        qstats->etherstatsoverrsizepkts_hi,
3892                        estats->etherstatsoverrsizepkts_lo,
3893                        qstats->etherstatsoverrsizepkts_lo);
3894                 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3895                        estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3896         }
3897
3898         ADD_64(fstats->total_bytes_received_hi,
3899                estats->rx_stat_ifhcinbadoctets_hi,
3900                fstats->total_bytes_received_lo,
3901                estats->rx_stat_ifhcinbadoctets_lo);
3902
3903         memcpy(estats, &(fstats->total_bytes_received_hi),
3904                sizeof(struct host_func_stats) - 2*sizeof(u32));
3905
3906         ADD_64(estats->etherstatsoverrsizepkts_hi,
3907                estats->rx_stat_dot3statsframestoolong_hi,
3908                estats->etherstatsoverrsizepkts_lo,
3909                estats->rx_stat_dot3statsframestoolong_lo);
3910         ADD_64(estats->error_bytes_received_hi,
3911                estats->rx_stat_ifhcinbadoctets_hi,
3912                estats->error_bytes_received_lo,
3913                estats->rx_stat_ifhcinbadoctets_lo);
3914
3915         if (bp->port.pmf) {
3916                 estats->mac_filter_discard =
3917                                 le32_to_cpu(tport->mac_filter_discard);
3918                 estats->xxoverflow_discard =
3919                                 le32_to_cpu(tport->xxoverflow_discard);
3920                 estats->brb_truncate_discard =
3921                                 le32_to_cpu(tport->brb_truncate_discard);
3922                 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3923         }
3924
3925         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3926
3927         bp->stats_pending = 0;
3928
3929         return 0;
3930 }
3931
3932 static void bnx2x_net_stats_update(struct bnx2x *bp)
3933 {
3934         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3935         struct net_device_stats *nstats = &bp->dev->stats;
3936         int i;
3937
3938         nstats->rx_packets =
3939                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3940                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3941                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3942
3943         nstats->tx_packets =
3944                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3945                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3946                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3947
3948         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
3949
3950         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3951
3952         nstats->rx_dropped = estats->mac_discard;
3953         for_each_queue(bp, i)
3954                 nstats->rx_dropped +=
3955                         le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3956
3957         nstats->tx_dropped = 0;
3958
3959         nstats->multicast =
3960                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
3961
3962         nstats->collisions =
3963                 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
3964
3965         nstats->rx_length_errors =
3966                 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3967                 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3968         nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3969                                  bnx2x_hilo(&estats->brb_truncate_hi);
3970         nstats->rx_crc_errors =
3971                 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3972         nstats->rx_frame_errors =
3973                 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3974         nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
3975         nstats->rx_missed_errors = estats->xxoverflow_discard;
3976
3977         nstats->rx_errors = nstats->rx_length_errors +
3978                             nstats->rx_over_errors +
3979                             nstats->rx_crc_errors +
3980                             nstats->rx_frame_errors +
3981                             nstats->rx_fifo_errors +
3982                             nstats->rx_missed_errors;
3983
3984         nstats->tx_aborted_errors =
3985                 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3986                 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3987         nstats->tx_carrier_errors =
3988                 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
3989         nstats->tx_fifo_errors = 0;
3990         nstats->tx_heartbeat_errors = 0;
3991         nstats->tx_window_errors = 0;
3992
3993         nstats->tx_errors = nstats->tx_aborted_errors +
3994                             nstats->tx_carrier_errors +
3995             bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3996 }
3997
3998 static void bnx2x_drv_stats_update(struct bnx2x *bp)
3999 {
4000         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4001         int i;
4002
4003         estats->driver_xoff = 0;
4004         estats->rx_err_discard_pkt = 0;
4005         estats->rx_skb_alloc_failed = 0;
4006         estats->hw_csum_err = 0;
4007         for_each_queue(bp, i) {
4008                 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4009
4010                 estats->driver_xoff += qstats->driver_xoff;
4011                 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4012                 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4013                 estats->hw_csum_err += qstats->hw_csum_err;
4014         }
4015 }
4016
4017 static void bnx2x_stats_update(struct bnx2x *bp)
4018 {
4019         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4020
4021         if (*stats_comp != DMAE_COMP_VAL)
4022                 return;
4023
4024         if (bp->port.pmf)
4025                 bnx2x_hw_stats_update(bp);
4026
4027         if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4028                 BNX2X_ERR("storm stats were not updated for 3 times\n");
4029                 bnx2x_panic();
4030                 return;
4031         }
4032
4033         bnx2x_net_stats_update(bp);
4034         bnx2x_drv_stats_update(bp);
4035
4036         if (bp->msglevel & NETIF_MSG_TIMER) {
4037                 struct tstorm_per_client_stats *old_tclient =
4038                                                         &bp->fp->old_tclient;
4039                 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4040                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4041                 struct net_device_stats *nstats = &bp->dev->stats;
4042                 int i;
4043
4044                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4045                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
4046                                   "  tx pkt (%lx)\n",
4047                        bnx2x_tx_avail(bp->fp),
4048                        le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
4049                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
4050                                   "  rx pkt (%lx)\n",
4051                        (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
4052                              bp->fp->rx_comp_cons),
4053                        le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
4054                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
4055                                   "brb truncate %u\n",
4056                        (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4057                        qstats->driver_xoff,
4058                        estats->brb_drop_lo, estats->brb_truncate_lo);
4059                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
4060                         "packets_too_big_discard %lu  no_buff_discard %lu  "
4061                         "mac_discard %u  mac_filter_discard %u  "
4062                         "xxovrflow_discard %u  brb_truncate_discard %u  "
4063                         "ttl0_discard %u\n",
4064                        le32_to_cpu(old_tclient->checksum_discard),
4065                        bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4066                        bnx2x_hilo(&qstats->no_buff_discard_hi),
4067                        estats->mac_discard, estats->mac_filter_discard,
4068                        estats->xxoverflow_discard, estats->brb_truncate_discard,
4069                        le32_to_cpu(old_tclient->ttl0_discard));
4070
4071                 for_each_queue(bp, i) {
4072                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4073                                bnx2x_fp(bp, i, tx_pkt),
4074                                bnx2x_fp(bp, i, rx_pkt),
4075                                bnx2x_fp(bp, i, rx_calls));
4076                 }
4077         }
4078
4079         bnx2x_hw_stats_post(bp);
4080         bnx2x_storm_stats_post(bp);
4081 }
4082
4083 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4084 {
4085         struct dmae_command *dmae;
4086         u32 opcode;
4087         int loader_idx = PMF_DMAE_C(bp);
4088         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4089
4090         bp->executer_idx = 0;
4091
4092         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4093                   DMAE_CMD_C_ENABLE |
4094                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4095 #ifdef __BIG_ENDIAN
4096                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
4097 #else
4098                   DMAE_CMD_ENDIANITY_DW_SWAP |
4099 #endif
4100                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4101                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4102
4103         if (bp->port.port_stx) {
4104
4105                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4106                 if (bp->func_stx)
4107                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4108                 else
4109                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4110                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4111                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4112                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4113                 dmae->dst_addr_hi = 0;
4114                 dmae->len = sizeof(struct host_port_stats) >> 2;
4115                 if (bp->func_stx) {
4116                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4117                         dmae->comp_addr_hi = 0;
4118                         dmae->comp_val = 1;
4119                 } else {
4120                         dmae->comp_addr_lo =
4121                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4122                         dmae->comp_addr_hi =
4123                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4124                         dmae->comp_val = DMAE_COMP_VAL;
4125
4126                         *stats_comp = 0;
4127                 }
4128         }
4129
4130         if (bp->func_stx) {
4131
4132                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4133                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4134                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4135                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4136                 dmae->dst_addr_lo = bp->func_stx >> 2;
4137                 dmae->dst_addr_hi = 0;
4138                 dmae->len = sizeof(struct host_func_stats) >> 2;
4139                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4140                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4141                 dmae->comp_val = DMAE_COMP_VAL;
4142
4143                 *stats_comp = 0;
4144         }
4145 }
4146
4147 static void bnx2x_stats_stop(struct bnx2x *bp)
4148 {
4149         int update = 0;
4150
4151         bnx2x_stats_comp(bp);
4152
4153         if (bp->port.pmf)
4154                 update = (bnx2x_hw_stats_update(bp) == 0);
4155
4156         update |= (bnx2x_storm_stats_update(bp) == 0);
4157
4158         if (update) {
4159                 bnx2x_net_stats_update(bp);
4160
4161                 if (bp->port.pmf)
4162                         bnx2x_port_stats_stop(bp);
4163
4164                 bnx2x_hw_stats_post(bp);
4165                 bnx2x_stats_comp(bp);
4166         }
4167 }
4168
4169 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4170 {
4171 }
4172
4173 static const struct {
4174         void (*action)(struct bnx2x *bp);
4175         enum bnx2x_stats_state next_state;
4176 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4177 /* state        event   */
4178 {
4179 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4180 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
4181 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4182 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4183 },
4184 {
4185 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
4186 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
4187 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
4188 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
4189 }
4190 };
4191
4192 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4193 {
4194         enum bnx2x_stats_state state = bp->stats_state;
4195
4196         bnx2x_stats_stm[state][event].action(bp);
4197         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4198
4199         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4200                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4201                    state, event, bp->stats_state);
4202 }
4203
4204 static void bnx2x_timer(unsigned long data)
4205 {
4206         struct bnx2x *bp = (struct bnx2x *) data;
4207
4208         if (!netif_running(bp->dev))
4209                 return;
4210
4211         if (atomic_read(&bp->intr_sem) != 0)
4212                 goto timer_restart;
4213
4214         if (poll) {
4215                 struct bnx2x_fastpath *fp = &bp->fp[0];
4216                 int rc;
4217
4218                 bnx2x_tx_int(fp);
4219                 rc = bnx2x_rx_int(fp, 1000);
4220         }
4221
4222         if (!BP_NOMCP(bp)) {
4223                 int func = BP_FUNC(bp);
4224                 u32 drv_pulse;
4225                 u32 mcp_pulse;
4226
4227                 ++bp->fw_drv_pulse_wr_seq;
4228                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4229                 /* TBD - add SYSTEM_TIME */
4230                 drv_pulse = bp->fw_drv_pulse_wr_seq;
4231                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4232
4233                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4234                              MCP_PULSE_SEQ_MASK);
4235                 /* The delta between driver pulse and mcp response
4236                  * should be 1 (before mcp response) or 0 (after mcp response)
4237                  */
4238                 if ((drv_pulse != mcp_pulse) &&
4239                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4240                         /* someone lost a heartbeat... */
4241                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4242                                   drv_pulse, mcp_pulse);
4243                 }
4244         }
4245
4246         if ((bp->state == BNX2X_STATE_OPEN) ||
4247             (bp->state == BNX2X_STATE_DISABLED))
4248                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4249
4250 timer_restart:
4251         mod_timer(&bp->timer, jiffies + bp->current_interval);
4252 }
4253
4254 /* end of Statistics */
4255
4256 /* nic init */
4257
4258 /*
4259  * nic init service functions
4260  */
4261
4262 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4263 {
4264         int port = BP_PORT(bp);
4265
4266         bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
4267                         USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4268                         sizeof(struct ustorm_status_block)/4);
4269         bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
4270                         CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4271                         sizeof(struct cstorm_status_block)/4);
4272 }
4273
4274 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4275                           dma_addr_t mapping, int sb_id)
4276 {
4277         int port = BP_PORT(bp);
4278         int func = BP_FUNC(bp);
4279         int index;
4280         u64 section;
4281
4282         /* USTORM */
4283         section = ((u64)mapping) + offsetof(struct host_status_block,
4284                                             u_status_block);
4285         sb->u_status_block.status_block_id = sb_id;
4286
4287         REG_WR(bp, BAR_USTRORM_INTMEM +
4288                USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4289         REG_WR(bp, BAR_USTRORM_INTMEM +
4290                ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4291                U64_HI(section));
4292         REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4293                 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4294
4295         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4296                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4297                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4298
4299         /* CSTORM */
4300         section = ((u64)mapping) + offsetof(struct host_status_block,
4301                                             c_status_block);
4302         sb->c_status_block.status_block_id = sb_id;
4303
4304         REG_WR(bp, BAR_CSTRORM_INTMEM +
4305                CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4306         REG_WR(bp, BAR_CSTRORM_INTMEM +
4307                ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4308                U64_HI(section));
4309         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4310                 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4311
4312         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4313                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4314                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4315
4316         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4317 }
4318
4319 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4320 {
4321         int func = BP_FUNC(bp);
4322
4323         bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR +
4324                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4325                         sizeof(struct tstorm_def_status_block)/4);
4326         bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
4327                         USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4328                         sizeof(struct ustorm_def_status_block)/4);
4329         bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
4330                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4331                         sizeof(struct cstorm_def_status_block)/4);
4332         bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR +
4333                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4334                         sizeof(struct xstorm_def_status_block)/4);
4335 }
4336
4337 static void bnx2x_init_def_sb(struct bnx2x *bp,
4338                               struct host_def_status_block *def_sb,
4339                               dma_addr_t mapping, int sb_id)
4340 {
4341         int port = BP_PORT(bp);
4342         int func = BP_FUNC(bp);
4343         int index, val, reg_offset;
4344         u64 section;
4345
4346         /* ATTN */
4347         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4348                                             atten_status_block);
4349         def_sb->atten_status_block.status_block_id = sb_id;
4350
4351         bp->attn_state = 0;
4352
4353         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4354                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4355
4356         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4357                 bp->attn_group[index].sig[0] = REG_RD(bp,
4358                                                      reg_offset + 0x10*index);
4359                 bp->attn_group[index].sig[1] = REG_RD(bp,
4360                                                reg_offset + 0x4 + 0x10*index);
4361                 bp->attn_group[index].sig[2] = REG_RD(bp,
4362                                                reg_offset + 0x8 + 0x10*index);
4363                 bp->attn_group[index].sig[3] = REG_RD(bp,
4364                                                reg_offset + 0xc + 0x10*index);
4365         }
4366
4367         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4368                              HC_REG_ATTN_MSG0_ADDR_L);
4369
4370         REG_WR(bp, reg_offset, U64_LO(section));
4371         REG_WR(bp, reg_offset + 4, U64_HI(section));
4372
4373         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4374
4375         val = REG_RD(bp, reg_offset);
4376         val |= sb_id;
4377         REG_WR(bp, reg_offset, val);
4378
4379         /* USTORM */
4380         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4381                                             u_def_status_block);
4382         def_sb->u_def_status_block.status_block_id = sb_id;
4383
4384         REG_WR(bp, BAR_USTRORM_INTMEM +
4385                USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4386         REG_WR(bp, BAR_USTRORM_INTMEM +
4387                ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4388                U64_HI(section));
4389         REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4390                 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4391
4392         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4393                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4394                          USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4395
4396         /* CSTORM */
4397         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4398                                             c_def_status_block);
4399         def_sb->c_def_status_block.status_block_id = sb_id;
4400
4401         REG_WR(bp, BAR_CSTRORM_INTMEM +
4402                CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4403         REG_WR(bp, BAR_CSTRORM_INTMEM +
4404                ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4405                U64_HI(section));
4406         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4407                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4408
4409         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4410                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4411                          CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4412
4413         /* TSTORM */
4414         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4415                                             t_def_status_block);
4416         def_sb->t_def_status_block.status_block_id = sb_id;
4417
4418         REG_WR(bp, BAR_TSTRORM_INTMEM +
4419                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4420         REG_WR(bp, BAR_TSTRORM_INTMEM +
4421                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4422                U64_HI(section));
4423         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4424                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4425
4426         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4427                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4428                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4429
4430         /* XSTORM */
4431         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4432                                             x_def_status_block);
4433         def_sb->x_def_status_block.status_block_id = sb_id;
4434
4435         REG_WR(bp, BAR_XSTRORM_INTMEM +
4436                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4437         REG_WR(bp, BAR_XSTRORM_INTMEM +
4438                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4439                U64_HI(section));
4440         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4441                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4442
4443         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4444                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4445                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4446
4447         bp->stats_pending = 0;
4448         bp->set_mac_pending = 0;
4449
4450         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4451 }
4452
4453 static void bnx2x_update_coalesce(struct bnx2x *bp)
4454 {
4455         int port = BP_PORT(bp);
4456         int i;
4457
4458         for_each_queue(bp, i) {
4459                 int sb_id = bp->fp[i].sb_id;
4460
4461                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4462                 REG_WR8(bp, BAR_USTRORM_INTMEM +
4463                         USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4464                                                     U_SB_ETH_RX_CQ_INDEX),
4465                         bp->rx_ticks/12);
4466                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4467                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4468                                                      U_SB_ETH_RX_CQ_INDEX),
4469                          (bp->rx_ticks/12) ? 0 : 1);
4470
4471                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4472                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4473                         CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4474                                                     C_SB_ETH_TX_CQ_INDEX),
4475                         bp->tx_ticks/12);
4476                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4477                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4478                                                      C_SB_ETH_TX_CQ_INDEX),
4479                          (bp->tx_ticks/12) ? 0 : 1);
4480         }
4481 }
4482
4483 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4484                                        struct bnx2x_fastpath *fp, int last)
4485 {
4486         int i;
4487
4488         for (i = 0; i < last; i++) {
4489                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4490                 struct sk_buff *skb = rx_buf->skb;
4491
4492                 if (skb == NULL) {
4493                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4494                         continue;
4495                 }
4496
4497                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4498                         pci_unmap_single(bp->pdev,
4499                                          pci_unmap_addr(rx_buf, mapping),
4500                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4501
4502                 dev_kfree_skb(skb);
4503                 rx_buf->skb = NULL;
4504         }
4505 }
4506
4507 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4508 {
4509         int func = BP_FUNC(bp);
4510         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4511                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4512         u16 ring_prod, cqe_ring_prod;
4513         int i, j;
4514
4515         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4516         DP(NETIF_MSG_IFUP,
4517            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4518
4519         if (bp->flags & TPA_ENABLE_FLAG) {
4520
4521                 for_each_rx_queue(bp, j) {
4522                         struct bnx2x_fastpath *fp = &bp->fp[j];
4523
4524                         for (i = 0; i < max_agg_queues; i++) {
4525                                 fp->tpa_pool[i].skb =
4526                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4527                                 if (!fp->tpa_pool[i].skb) {
4528                                         BNX2X_ERR("Failed to allocate TPA "
4529                                                   "skb pool for queue[%d] - "
4530                                                   "disabling TPA on this "
4531                                                   "queue!\n", j);
4532                                         bnx2x_free_tpa_pool(bp, fp, i);
4533                                         fp->disable_tpa = 1;
4534                                         break;
4535                                 }
4536                                 pci_unmap_addr_set((struct sw_rx_bd *)
4537                                                         &bp->fp->tpa_pool[i],
4538                                                    mapping, 0);
4539                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4540                         }
4541                 }
4542         }
4543
4544         for_each_rx_queue(bp, j) {
4545                 struct bnx2x_fastpath *fp = &bp->fp[j];
4546
4547                 fp->rx_bd_cons = 0;
4548                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4549                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4550
4551                 /* "next page" elements initialization */
4552                 /* SGE ring */
4553                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4554                         struct eth_rx_sge *sge;
4555
4556                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4557                         sge->addr_hi =
4558                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4559                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4560                         sge->addr_lo =
4561                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4562                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4563                 }
4564
4565                 bnx2x_init_sge_ring_bit_mask(fp);
4566
4567                 /* RX BD ring */
4568                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4569                         struct eth_rx_bd *rx_bd;
4570
4571                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4572                         rx_bd->addr_hi =
4573                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4574                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4575                         rx_bd->addr_lo =
4576                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4577                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4578                 }
4579
4580                 /* CQ ring */
4581                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4582                         struct eth_rx_cqe_next_page *nextpg;
4583
4584                         nextpg = (struct eth_rx_cqe_next_page *)
4585                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4586                         nextpg->addr_hi =
4587                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4588                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4589                         nextpg->addr_lo =
4590                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4591                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4592                 }
4593
4594                 /* Allocate SGEs and initialize the ring elements */
4595                 for (i = 0, ring_prod = 0;
4596                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4597
4598                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4599                                 BNX2X_ERR("was only able to allocate "
4600                                           "%d rx sges\n", i);
4601                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4602                                 /* Cleanup already allocated elements */
4603                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4604                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4605                                 fp->disable_tpa = 1;
4606                                 ring_prod = 0;
4607                                 break;
4608                         }
4609                         ring_prod = NEXT_SGE_IDX(ring_prod);
4610                 }
4611                 fp->rx_sge_prod = ring_prod;
4612
4613                 /* Allocate BDs and initialize BD ring */
4614                 fp->rx_comp_cons = 0;
4615                 cqe_ring_prod = ring_prod = 0;
4616                 for (i = 0; i < bp->rx_ring_size; i++) {
4617                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4618                                 BNX2X_ERR("was only able to allocate "
4619                                           "%d rx skbs on queue[%d]\n", i, j);
4620                                 fp->eth_q_stats.rx_skb_alloc_failed++;
4621                                 break;
4622                         }
4623                         ring_prod = NEXT_RX_IDX(ring_prod);
4624                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4625                         WARN_ON(ring_prod <= i);
4626                 }
4627
4628                 fp->rx_bd_prod = ring_prod;
4629                 /* must not have more available CQEs than BDs */
4630                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4631                                        cqe_ring_prod);
4632                 fp->rx_pkt = fp->rx_calls = 0;
4633
4634                 /* Warning!
4635                  * this will generate an interrupt (to the TSTORM)
4636                  * must only be done after chip is initialized
4637                  */
4638                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4639                                      fp->rx_sge_prod);
4640                 if (j != 0)
4641                         continue;
4642
4643                 REG_WR(bp, BAR_USTRORM_INTMEM +
4644                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4645                        U64_LO(fp->rx_comp_mapping));
4646                 REG_WR(bp, BAR_USTRORM_INTMEM +
4647                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4648                        U64_HI(fp->rx_comp_mapping));
4649         }
4650 }
4651
4652 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4653 {
4654         int i, j;
4655
4656         for_each_tx_queue(bp, j) {
4657                 struct bnx2x_fastpath *fp = &bp->fp[j];
4658
4659                 for (i = 1; i <= NUM_TX_RINGS; i++) {
4660                         struct eth_tx_bd *tx_bd =
4661                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4662
4663                         tx_bd->addr_hi =
4664                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4665                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4666                         tx_bd->addr_lo =
4667                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4668                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4669                 }
4670
4671                 fp->tx_pkt_prod = 0;
4672                 fp->tx_pkt_cons = 0;
4673                 fp->tx_bd_prod = 0;
4674                 fp->tx_bd_cons = 0;
4675                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4676                 fp->tx_pkt = 0;
4677         }
4678 }
4679
4680 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4681 {
4682         int func = BP_FUNC(bp);
4683
4684         spin_lock_init(&bp->spq_lock);
4685
4686         bp->spq_left = MAX_SPQ_PENDING;
4687         bp->spq_prod_idx = 0;
4688         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4689         bp->spq_prod_bd = bp->spq;
4690         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4691
4692         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4693                U64_LO(bp->spq_mapping));
4694         REG_WR(bp,
4695                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4696                U64_HI(bp->spq_mapping));
4697
4698         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4699                bp->spq_prod_idx);
4700 }
4701
4702 static void bnx2x_init_context(struct bnx2x *bp)
4703 {
4704         int i;
4705
4706         for_each_queue(bp, i) {
4707                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4708                 struct bnx2x_fastpath *fp = &bp->fp[i];
4709                 u8 cl_id = fp->cl_id;
4710                 u8 sb_id = fp->sb_id;
4711
4712                 context->ustorm_st_context.common.sb_index_numbers =
4713                                                 BNX2X_RX_SB_INDEX_NUM;
4714                 context->ustorm_st_context.common.clientId = cl_id;
4715                 context->ustorm_st_context.common.status_block_id = sb_id;
4716                 context->ustorm_st_context.common.flags =
4717                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4718                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4719                 context->ustorm_st_context.common.statistics_counter_id =
4720                                                 cl_id;
4721                 context->ustorm_st_context.common.mc_alignment_log_size =
4722                                                 BNX2X_RX_ALIGN_SHIFT;
4723                 context->ustorm_st_context.common.bd_buff_size =
4724                                                 bp->rx_buf_size;
4725                 context->ustorm_st_context.common.bd_page_base_hi =
4726                                                 U64_HI(fp->rx_desc_mapping);
4727                 context->ustorm_st_context.common.bd_page_base_lo =
4728                                                 U64_LO(fp->rx_desc_mapping);
4729                 if (!fp->disable_tpa) {
4730                         context->ustorm_st_context.common.flags |=
4731                                 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4732                                  USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4733                         context->ustorm_st_context.common.sge_buff_size =
4734                                 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4735                                          (u32)0xffff);
4736                         context->ustorm_st_context.common.sge_page_base_hi =
4737                                                 U64_HI(fp->rx_sge_mapping);
4738                         context->ustorm_st_context.common.sge_page_base_lo =
4739                                                 U64_LO(fp->rx_sge_mapping);
4740                 }
4741
4742                 context->ustorm_ag_context.cdu_usage =
4743                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4744                                                CDU_REGION_NUMBER_UCM_AG,
4745                                                ETH_CONNECTION_TYPE);
4746
4747                 context->xstorm_st_context.tx_bd_page_base_hi =
4748                                                 U64_HI(fp->tx_desc_mapping);
4749                 context->xstorm_st_context.tx_bd_page_base_lo =
4750                                                 U64_LO(fp->tx_desc_mapping);
4751                 context->xstorm_st_context.db_data_addr_hi =
4752                                                 U64_HI(fp->tx_prods_mapping);
4753                 context->xstorm_st_context.db_data_addr_lo =
4754                                                 U64_LO(fp->tx_prods_mapping);
4755                 context->xstorm_st_context.statistics_data = (cl_id |
4756                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4757                 context->cstorm_st_context.sb_index_number =
4758                                                 C_SB_ETH_TX_CQ_INDEX;
4759                 context->cstorm_st_context.status_block_id = sb_id;
4760
4761                 context->xstorm_ag_context.cdu_reserved =
4762                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4763                                                CDU_REGION_NUMBER_XCM_AG,
4764                                                ETH_CONNECTION_TYPE);
4765         }
4766 }
4767
4768 static void bnx2x_init_ind_table(struct bnx2x *bp)
4769 {
4770         int func = BP_FUNC(bp);
4771         int i;
4772
4773         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4774                 return;
4775
4776         DP(NETIF_MSG_IFUP,
4777            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
4778         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4779                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4780                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4781                         bp->fp->cl_id + (i % bp->num_rx_queues));
4782 }
4783
4784 static void bnx2x_set_client_config(struct bnx2x *bp)
4785 {
4786         struct tstorm_eth_client_config tstorm_client = {0};
4787         int port = BP_PORT(bp);
4788         int i;
4789
4790         tstorm_client.mtu = bp->dev->mtu;
4791         tstorm_client.config_flags =
4792                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4793                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
4794 #ifdef BCM_VLAN
4795         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4796                 tstorm_client.config_flags |=
4797                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
4798                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4799         }
4800 #endif
4801
4802         if (bp->flags & TPA_ENABLE_FLAG) {
4803                 tstorm_client.max_sges_for_packet =
4804                         SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4805                 tstorm_client.max_sges_for_packet =
4806                         ((tstorm_client.max_sges_for_packet +
4807                           PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4808                         PAGES_PER_SGE_SHIFT;
4809
4810                 tstorm_client.config_flags |=
4811                                 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4812         }
4813
4814         for_each_queue(bp, i) {
4815                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4816
4817                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4818                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4819                        ((u32 *)&tstorm_client)[0]);
4820                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4821                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4822                        ((u32 *)&tstorm_client)[1]);
4823         }
4824
4825         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4826            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4827 }
4828
4829 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4830 {
4831         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4832         int mode = bp->rx_mode;
4833         int mask = (1 << BP_L_ID(bp));
4834         int func = BP_FUNC(bp);
4835         int port = BP_PORT(bp);
4836         int i;
4837         /* All but management unicast packets should pass to the host as well */
4838         u32 llh_mask =
4839                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
4840                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
4841                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
4842                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
4843
4844         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
4845
4846         switch (mode) {
4847         case BNX2X_RX_MODE_NONE: /* no Rx */
4848                 tstorm_mac_filter.ucast_drop_all = mask;
4849                 tstorm_mac_filter.mcast_drop_all = mask;
4850                 tstorm_mac_filter.bcast_drop_all = mask;
4851                 break;
4852
4853         case BNX2X_RX_MODE_NORMAL:
4854                 tstorm_mac_filter.bcast_accept_all = mask;
4855                 break;
4856
4857         case BNX2X_RX_MODE_ALLMULTI:
4858                 tstorm_mac_filter.mcast_accept_all = mask;
4859                 tstorm_mac_filter.bcast_accept_all = mask;
4860                 break;
4861
4862         case BNX2X_RX_MODE_PROMISC:
4863                 tstorm_mac_filter.ucast_accept_all = mask;
4864                 tstorm_mac_filter.mcast_accept_all = mask;
4865                 tstorm_mac_filter.bcast_accept_all = mask;
4866                 /* pass management unicast packets as well */
4867                 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
4868                 break;
4869
4870         default:
4871                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4872                 break;
4873         }
4874
4875         REG_WR(bp,
4876                (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
4877                llh_mask);
4878
4879         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4880                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4881                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4882                        ((u32 *)&tstorm_mac_filter)[i]);
4883
4884 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4885                    ((u32 *)&tstorm_mac_filter)[i]); */
4886         }
4887
4888         if (mode != BNX2X_RX_MODE_NONE)
4889                 bnx2x_set_client_config(bp);
4890 }
4891
4892 static void bnx2x_init_internal_common(struct bnx2x *bp)
4893 {
4894         int i;
4895
4896         if (bp->flags & TPA_ENABLE_FLAG) {
4897                 struct tstorm_eth_tpa_exist tpa = {0};
4898
4899                 tpa.tpa_exist = 1;
4900
4901                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4902                        ((u32 *)&tpa)[0]);
4903                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4904                        ((u32 *)&tpa)[1]);
4905         }
4906
4907         /* Zero this manually as its initialization is
4908            currently missing in the initTool */
4909         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4910                 REG_WR(bp, BAR_USTRORM_INTMEM +
4911                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4912 }
4913
4914 static void bnx2x_init_internal_port(struct bnx2x *bp)
4915 {
4916         int port = BP_PORT(bp);
4917
4918         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4919         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4920         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4921         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4922 }
4923
4924 /* Calculates the sum of vn_min_rates.
4925    It's needed for further normalizing of the min_rates.
4926    Returns:
4927      sum of vn_min_rates.
4928        or
4929      0 - if all the min_rates are 0.
4930      In the later case fainess algorithm should be deactivated.
4931      If not all min_rates are zero then those that are zeroes will be set to 1.
4932  */
4933 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4934 {
4935         int all_zero = 1;
4936         int port = BP_PORT(bp);
4937         int vn;
4938
4939         bp->vn_weight_sum = 0;
4940         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4941                 int func = 2*vn + port;
4942                 u32 vn_cfg =
4943                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4944                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4945                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4946
4947                 /* Skip hidden vns */
4948                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4949                         continue;
4950
4951                 /* If min rate is zero - set it to 1 */
4952                 if (!vn_min_rate)
4953                         vn_min_rate = DEF_MIN_RATE;
4954                 else
4955                         all_zero = 0;
4956
4957                 bp->vn_weight_sum += vn_min_rate;
4958         }
4959
4960         /* ... only if all min rates are zeros - disable fairness */
4961         if (all_zero)
4962                 bp->vn_weight_sum = 0;
4963 }
4964
4965 static void bnx2x_init_internal_func(struct bnx2x *bp)
4966 {
4967         struct tstorm_eth_function_common_config tstorm_config = {0};
4968         struct stats_indication_flags stats_flags = {0};
4969         int port = BP_PORT(bp);
4970         int func = BP_FUNC(bp);
4971         int i, j;
4972         u32 offset;
4973         u16 max_agg_size;
4974
4975         if (is_multi(bp)) {
4976                 tstorm_config.config_flags = MULTI_FLAGS(bp);
4977                 tstorm_config.rss_result_mask = MULTI_MASK;
4978         }
4979         if (IS_E1HMF(bp))
4980                 tstorm_config.config_flags |=
4981                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
4982
4983         tstorm_config.leading_client_id = BP_L_ID(bp);
4984
4985         REG_WR(bp, BAR_TSTRORM_INTMEM +
4986                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4987                (*(u32 *)&tstorm_config));
4988
4989         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4990         bnx2x_set_storm_rx_mode(bp);
4991
4992         for_each_queue(bp, i) {
4993                 u8 cl_id = bp->fp[i].cl_id;
4994
4995                 /* reset xstorm per client statistics */
4996                 offset = BAR_XSTRORM_INTMEM +
4997                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4998                 for (j = 0;
4999                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5000                         REG_WR(bp, offset + j*4, 0);
5001
5002                 /* reset tstorm per client statistics */
5003                 offset = BAR_TSTRORM_INTMEM +
5004                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5005                 for (j = 0;
5006                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5007                         REG_WR(bp, offset + j*4, 0);
5008
5009                 /* reset ustorm per client statistics */
5010                 offset = BAR_USTRORM_INTMEM +
5011                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5012                 for (j = 0;
5013                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5014                         REG_WR(bp, offset + j*4, 0);
5015         }
5016
5017         /* Init statistics related context */
5018         stats_flags.collect_eth = 1;
5019
5020         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5021                ((u32 *)&stats_flags)[0]);
5022         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5023                ((u32 *)&stats_flags)[1]);
5024
5025         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5026                ((u32 *)&stats_flags)[0]);
5027         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5028                ((u32 *)&stats_flags)[1]);
5029
5030         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5031                ((u32 *)&stats_flags)[0]);
5032         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5033                ((u32 *)&stats_flags)[1]);
5034
5035         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5036                ((u32 *)&stats_flags)[0]);
5037         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5038                ((u32 *)&stats_flags)[1]);
5039
5040         REG_WR(bp, BAR_XSTRORM_INTMEM +
5041                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5042                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5043         REG_WR(bp, BAR_XSTRORM_INTMEM +
5044                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5045                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5046
5047         REG_WR(bp, BAR_TSTRORM_INTMEM +
5048                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5049                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5050         REG_WR(bp, BAR_TSTRORM_INTMEM +
5051                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5052                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5053
5054         REG_WR(bp, BAR_USTRORM_INTMEM +
5055                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5056                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5057         REG_WR(bp, BAR_USTRORM_INTMEM +
5058                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5059                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5060
5061         if (CHIP_IS_E1H(bp)) {
5062                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5063                         IS_E1HMF(bp));
5064                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5065                         IS_E1HMF(bp));
5066                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5067                         IS_E1HMF(bp));
5068                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5069                         IS_E1HMF(bp));
5070
5071                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5072                          bp->e1hov);
5073         }
5074
5075         /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5076         max_agg_size =
5077                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5078                           SGE_PAGE_SIZE * PAGES_PER_SGE),
5079                     (u32)0xffff);
5080         for_each_rx_queue(bp, i) {
5081                 struct bnx2x_fastpath *fp = &bp->fp[i];
5082
5083                 REG_WR(bp, BAR_USTRORM_INTMEM +
5084                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5085                        U64_LO(fp->rx_comp_mapping));
5086                 REG_WR(bp, BAR_USTRORM_INTMEM +
5087                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5088                        U64_HI(fp->rx_comp_mapping));
5089
5090                 REG_WR16(bp, BAR_USTRORM_INTMEM +
5091                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5092                          max_agg_size);
5093         }
5094
5095         /* dropless flow control */
5096         if (CHIP_IS_E1H(bp)) {
5097                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5098
5099                 rx_pause.bd_thr_low = 250;
5100                 rx_pause.cqe_thr_low = 250;
5101                 rx_pause.cos = 1;
5102                 rx_pause.sge_thr_low = 0;
5103                 rx_pause.bd_thr_high = 350;
5104                 rx_pause.cqe_thr_high = 350;
5105                 rx_pause.sge_thr_high = 0;
5106
5107                 for_each_rx_queue(bp, i) {
5108                         struct bnx2x_fastpath *fp = &bp->fp[i];
5109
5110                         if (!fp->disable_tpa) {
5111                                 rx_pause.sge_thr_low = 150;
5112                                 rx_pause.sge_thr_high = 250;
5113                         }
5114
5115
5116                         offset = BAR_USTRORM_INTMEM +
5117                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5118                                                                    fp->cl_id);
5119                         for (j = 0;
5120                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5121                              j++)
5122                                 REG_WR(bp, offset + j*4,
5123                                        ((u32 *)&rx_pause)[j]);
5124                 }
5125         }
5126
5127         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5128
5129         /* Init rate shaping and fairness contexts */
5130         if (IS_E1HMF(bp)) {
5131                 int vn;
5132
5133                 /* During init there is no active link
5134                    Until link is up, set link rate to 10Gbps */
5135                 bp->link_vars.line_speed = SPEED_10000;
5136                 bnx2x_init_port_minmax(bp);
5137
5138                 bnx2x_calc_vn_weight_sum(bp);
5139
5140                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5141                         bnx2x_init_vn_minmax(bp, 2*vn + port);
5142
5143                 /* Enable rate shaping and fairness */
5144                 bp->cmng.flags.cmng_enables =
5145                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5146                 if (bp->vn_weight_sum)
5147                         bp->cmng.flags.cmng_enables |=
5148                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5149                 else
5150                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5151                            "  fairness will be disabled\n");
5152         } else {
5153                 /* rate shaping and fairness are disabled */
5154                 DP(NETIF_MSG_IFUP,
5155                    "single function mode  minmax will be disabled\n");
5156         }
5157
5158
5159         /* Store it to internal memory */
5160         if (bp->port.pmf)
5161                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5162                         REG_WR(bp, BAR_XSTRORM_INTMEM +
5163                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5164                                ((u32 *)(&bp->cmng))[i]);
5165 }
5166
5167 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5168 {
5169         switch (load_code) {
5170         case FW_MSG_CODE_DRV_LOAD_COMMON:
5171                 bnx2x_init_internal_common(bp);
5172                 /* no break */
5173
5174         case FW_MSG_CODE_DRV_LOAD_PORT:
5175                 bnx2x_init_internal_port(bp);
5176                 /* no break */
5177
5178         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5179                 bnx2x_init_internal_func(bp);
5180                 break;
5181
5182         default:
5183                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5184                 break;
5185         }
5186 }
5187
5188 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5189 {
5190         int i;
5191
5192         for_each_queue(bp, i) {
5193                 struct bnx2x_fastpath *fp = &bp->fp[i];
5194
5195                 fp->bp = bp;
5196                 fp->state = BNX2X_FP_STATE_CLOSED;
5197                 fp->index = i;
5198                 fp->cl_id = BP_L_ID(bp) + i;
5199                 fp->sb_id = fp->cl_id;
5200                 DP(NETIF_MSG_IFUP,
5201                    "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
5202                    i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5203                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5204                               fp->sb_id);
5205                 bnx2x_update_fpsb_idx(fp);
5206         }
5207
5208         /* ensure status block indices were read */
5209         rmb();
5210
5211
5212         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5213                           DEF_SB_ID);
5214         bnx2x_update_dsb_idx(bp);
5215         bnx2x_update_coalesce(bp);
5216         bnx2x_init_rx_rings(bp);
5217         bnx2x_init_tx_ring(bp);
5218         bnx2x_init_sp_ring(bp);
5219         bnx2x_init_context(bp);
5220         bnx2x_init_internal(bp, load_code);
5221         bnx2x_init_ind_table(bp);
5222         bnx2x_stats_init(bp);
5223
5224         /* At this point, we are ready for interrupts */
5225         atomic_set(&bp->intr_sem, 0);
5226
5227         /* flush all before enabling interrupts */
5228         mb();
5229         mmiowb();
5230
5231         bnx2x_int_enable(bp);
5232
5233         /* Check for SPIO5 */
5234         bnx2x_attn_int_deasserted0(bp,
5235                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5236                                    AEU_INPUTS_ATTN_BITS_SPIO5);
5237 }
5238
5239 /* end of nic init */
5240
5241 /*
5242  * gzip service functions
5243  */
5244
5245 static int bnx2x_gunzip_init(struct bnx2x *bp)
5246 {
5247         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5248                                               &bp->gunzip_mapping);
5249         if (bp->gunzip_buf  == NULL)
5250                 goto gunzip_nomem1;
5251
5252         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5253         if (bp->strm  == NULL)
5254                 goto gunzip_nomem2;
5255
5256         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5257                                       GFP_KERNEL);
5258         if (bp->strm->workspace == NULL)
5259                 goto gunzip_nomem3;
5260
5261         return 0;
5262
5263 gunzip_nomem3:
5264         kfree(bp->strm);
5265         bp->strm = NULL;
5266
5267 gunzip_nomem2:
5268         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5269                             bp->gunzip_mapping);
5270         bp->gunzip_buf = NULL;
5271
5272 gunzip_nomem1:
5273         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5274                " un-compression\n", bp->dev->name);
5275         return -ENOMEM;
5276 }
5277
5278 static void bnx2x_gunzip_end(struct bnx2x *bp)
5279 {
5280         kfree(bp->strm->workspace);
5281
5282         kfree(bp->strm);
5283         bp->strm = NULL;
5284
5285         if (bp->gunzip_buf) {
5286                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5287                                     bp->gunzip_mapping);
5288                 bp->gunzip_buf = NULL;
5289         }
5290 }
5291
5292 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5293 {
5294         int n, rc;
5295
5296         /* check gzip header */
5297         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5298                 BNX2X_ERR("Bad gzip header\n");
5299                 return -EINVAL;
5300         }
5301
5302         n = 10;
5303
5304 #define FNAME                           0x8
5305
5306         if (zbuf[3] & FNAME)
5307                 while ((zbuf[n++] != 0) && (n < len));
5308
5309         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5310         bp->strm->avail_in = len - n;
5311         bp->strm->next_out = bp->gunzip_buf;
5312         bp->strm->avail_out = FW_BUF_SIZE;
5313
5314         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5315         if (rc != Z_OK)
5316                 return rc;
5317
5318         rc = zlib_inflate(bp->strm, Z_FINISH);
5319         if ((rc != Z_OK) && (rc != Z_STREAM_END))
5320                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5321                        bp->dev->name, bp->strm->msg);
5322
5323         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5324         if (bp->gunzip_outlen & 0x3)
5325                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5326                                     " gunzip_outlen (%d) not aligned\n",
5327                        bp->dev->name, bp->gunzip_outlen);
5328         bp->gunzip_outlen >>= 2;
5329
5330         zlib_inflateEnd(bp->strm);
5331
5332         if (rc == Z_STREAM_END)
5333                 return 0;
5334
5335         return rc;
5336 }
5337
5338 /* nic load/unload */
5339
5340 /*
5341  * General service functions
5342  */
5343
5344 /* send a NIG loopback debug packet */
5345 static void bnx2x_lb_pckt(struct bnx2x *bp)
5346 {
5347         u32 wb_write[3];
5348
5349         /* Ethernet source and destination addresses */
5350         wb_write[0] = 0x55555555;
5351         wb_write[1] = 0x55555555;
5352         wb_write[2] = 0x20;             /* SOP */
5353         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5354
5355         /* NON-IP protocol */
5356         wb_write[0] = 0x09000000;
5357         wb_write[1] = 0x55555555;
5358         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
5359         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5360 }
5361
5362 /* some of the internal memories
5363  * are not directly readable from the driver
5364  * to test them we send debug packets
5365  */
5366 static int bnx2x_int_mem_test(struct bnx2x *bp)
5367 {
5368         int factor;
5369         int count, i;
5370         u32 val = 0;
5371
5372         if (CHIP_REV_IS_FPGA(bp))
5373                 factor = 120;
5374         else if (CHIP_REV_IS_EMUL(bp))
5375                 factor = 200;
5376         else
5377                 factor = 1;
5378
5379         DP(NETIF_MSG_HW, "start part1\n");
5380
5381         /* Disable inputs of parser neighbor blocks */
5382         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5383         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5384         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5385         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5386
5387         /*  Write 0 to parser credits for CFC search request */
5388         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5389
5390         /* send Ethernet packet */
5391         bnx2x_lb_pckt(bp);
5392
5393         /* TODO do i reset NIG statistic? */
5394         /* Wait until NIG register shows 1 packet of size 0x10 */
5395         count = 1000 * factor;
5396         while (count) {
5397
5398                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5399                 val = *bnx2x_sp(bp, wb_data[0]);
5400                 if (val == 0x10)
5401                         break;
5402
5403                 msleep(10);
5404                 count--;
5405         }
5406         if (val != 0x10) {
5407                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5408                 return -1;
5409         }
5410
5411         /* Wait until PRS register shows 1 packet */
5412         count = 1000 * factor;
5413         while (count) {
5414                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5415                 if (val == 1)
5416                         break;
5417
5418                 msleep(10);
5419                 count--;
5420         }
5421         if (val != 0x1) {
5422                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5423                 return -2;
5424         }
5425
5426         /* Reset and init BRB, PRS */
5427         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5428         msleep(50);
5429         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5430         msleep(50);
5431         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5432         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5433
5434         DP(NETIF_MSG_HW, "part2\n");
5435
5436         /* Disable inputs of parser neighbor blocks */
5437         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5438         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5439         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5440         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5441
5442         /* Write 0 to parser credits for CFC search request */
5443         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5444
5445         /* send 10 Ethernet packets */
5446         for (i = 0; i < 10; i++)
5447                 bnx2x_lb_pckt(bp);
5448
5449         /* Wait until NIG register shows 10 + 1
5450            packets of size 11*0x10 = 0xb0 */
5451         count = 1000 * factor;
5452         while (count) {
5453
5454                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5455                 val = *bnx2x_sp(bp, wb_data[0]);
5456                 if (val == 0xb0)
5457                         break;
5458
5459                 msleep(10);
5460                 count--;
5461         }
5462         if (val != 0xb0) {
5463                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5464                 return -3;
5465         }
5466
5467         /* Wait until PRS register shows 2 packets */
5468         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5469         if (val != 2)
5470                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5471
5472         /* Write 1 to parser credits for CFC search request */
5473         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5474
5475         /* Wait until PRS register shows 3 packets */
5476         msleep(10 * factor);
5477         /* Wait until NIG register shows 1 packet of size 0x10 */
5478         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5479         if (val != 3)
5480                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5481
5482         /* clear NIG EOP FIFO */
5483         for (i = 0; i < 11; i++)
5484                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5485         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5486         if (val != 1) {
5487                 BNX2X_ERR("clear of NIG failed\n");
5488                 return -4;
5489         }
5490
5491         /* Reset and init BRB, PRS, NIG */
5492         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5493         msleep(50);
5494         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5495         msleep(50);
5496         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5497         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5498 #ifndef BCM_ISCSI
5499         /* set NIC mode */
5500         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5501 #endif
5502
5503         /* Enable inputs of parser neighbor blocks */
5504         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5505         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5506         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5507         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5508
5509         DP(NETIF_MSG_HW, "done\n");
5510
5511         return 0; /* OK */
5512 }
5513
5514 static void enable_blocks_attention(struct bnx2x *bp)
5515 {
5516         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5517         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5518         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5519         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5520         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5521         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5522         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5523         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5524         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5525 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5526 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5527         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5528         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5529         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5530 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5531 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5532         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5533         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5534         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5535         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5536 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5537 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5538         if (CHIP_REV_IS_FPGA(bp))
5539                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5540         else
5541                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5542         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5543         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5544         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5545 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5546 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5547         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5548         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5549 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5550         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);         /* bit 3,4 masked */
5551 }
5552
5553
5554 static void bnx2x_reset_common(struct bnx2x *bp)
5555 {
5556         /* reset_common */
5557         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5558                0xd3ffff7f);
5559         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5560 }
5561
5562
5563 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5564 {
5565         u32 val;
5566         u8 port;
5567         u8 is_required = 0;
5568
5569         val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5570               SHARED_HW_CFG_FAN_FAILURE_MASK;
5571
5572         if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5573                 is_required = 1;
5574
5575         /*
5576          * The fan failure mechanism is usually related to the PHY type since
5577          * the power consumption of the board is affected by the PHY. Currently,
5578          * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
5579          */
5580         else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
5581                 for (port = PORT_0; port < PORT_MAX; port++) {
5582                         u32 phy_type =
5583                                 SHMEM_RD(bp, dev_info.port_hw_config[port].
5584                                          external_phy_config) &
5585                                 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
5586                         is_required |=
5587                                 ((phy_type ==
5588                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
5589                                  (phy_type ==
5590                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
5591                                  (phy_type ==
5592                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
5593                 }
5594
5595         DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
5596
5597         if (is_required == 0)
5598                 return;
5599
5600         /* Fan failure is indicated by SPIO 5 */
5601         bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5602                        MISC_REGISTERS_SPIO_INPUT_HI_Z);
5603
5604         /* set to active low mode */
5605         val = REG_RD(bp, MISC_REG_SPIO_INT);
5606         val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5607                                 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5608         REG_WR(bp, MISC_REG_SPIO_INT, val);
5609
5610         /* enable interrupt to signal the IGU */
5611         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5612         val |= (1 << MISC_REGISTERS_SPIO_5);
5613         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5614 }
5615
5616 static int bnx2x_init_common(struct bnx2x *bp)
5617 {
5618         u32 val, i;
5619
5620         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
5621
5622         bnx2x_reset_common(bp);
5623         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5624         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5625
5626         bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
5627         if (CHIP_IS_E1H(bp))
5628                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5629
5630         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5631         msleep(30);
5632         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5633
5634         bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
5635         if (CHIP_IS_E1(bp)) {
5636                 /* enable HW interrupt from PXP on USDM overflow
5637                    bit 16 on INT_MASK_0 */
5638                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5639         }
5640
5641         bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
5642         bnx2x_init_pxp(bp);
5643
5644 #ifdef __BIG_ENDIAN
5645         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5646         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5647         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5648         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5649         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5650         /* make sure this value is 0 */
5651         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5652
5653 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5654         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5655         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5656         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5657         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5658 #endif
5659
5660         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5661 #ifdef BCM_ISCSI
5662         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5663         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5664         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5665 #endif
5666
5667         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5668                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5669
5670         /* let the HW do it's magic ... */
5671         msleep(100);
5672         /* finish PXP init */
5673         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5674         if (val != 1) {
5675                 BNX2X_ERR("PXP2 CFG failed\n");
5676                 return -EBUSY;
5677         }
5678         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5679         if (val != 1) {
5680                 BNX2X_ERR("PXP2 RD_INIT failed\n");
5681                 return -EBUSY;
5682         }
5683
5684         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5685         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5686
5687         bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
5688
5689         /* clean the DMAE memory */
5690         bp->dmae_ready = 1;
5691         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5692
5693         bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
5694         bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
5695         bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
5696         bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
5697
5698         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5699         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5700         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5701         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5702
5703         bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
5704         /* soft reset pulse */
5705         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5706         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5707
5708 #ifdef BCM_ISCSI
5709         bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
5710 #endif
5711
5712         bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
5713         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5714         if (!CHIP_REV_IS_SLOW(bp)) {
5715                 /* enable hw interrupt from doorbell Q */
5716                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5717         }
5718
5719         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5720         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5721         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5722         /* set NIC mode */
5723         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5724         if (CHIP_IS_E1H(bp))
5725                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5726
5727         bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5728         bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5729         bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5730         bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
5731
5732         bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5733         bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5734         bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5735         bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5736
5737         bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5738         bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5739         bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5740         bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
5741
5742         /* sync semi rtc */
5743         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5744                0x80000000);
5745         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5746                0x80000000);
5747
5748         bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5749         bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5750         bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
5751
5752         REG_WR(bp, SRC_REG_SOFT_RST, 1);
5753         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5754                 REG_WR(bp, i, 0xc0cac01a);
5755                 /* TODO: replace with something meaningful */
5756         }
5757         bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
5758         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5759
5760         if (sizeof(union cdu_context) != 1024)
5761                 /* we currently assume that a context is 1024 bytes */
5762                 printk(KERN_ALERT PFX "please adjust the size of"
5763                        " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5764
5765         bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
5766         val = (4 << 24) + (0 << 12) + 1024;
5767         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5768         if (CHIP_IS_E1(bp)) {
5769                 /* !!! fix pxp client crdit until excel update */
5770                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5771                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5772         }
5773
5774         bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
5775         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5776         /* enable context validation interrupt from CFC */
5777      &