bnx2x: Supporting BCM8727 PHY
[linux-2.6.git] drivers/net/bnx2x_main.c
1 /* bnx2x_main.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2009 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h>  /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
51 #include <linux/io.h>
52
53
54 #include "bnx2x.h"
55 #include "bnx2x_init.h"
56 #include "bnx2x_init_ops.h"
57 #include "bnx2x_dump.h"
58
59 #define DRV_MODULE_VERSION      "1.48.105-1"
60 #define DRV_MODULE_RELDATE      "2009/04/22"
61 #define BNX2X_BC_VER            0x040200
62
63 #include <linux/firmware.h>
64 #include "bnx2x_fw_file_hdr.h"
65 /* FW files */
66 #define FW_FILE_PREFIX_E1               "bnx2x-e1-"
67 #define FW_FILE_PREFIX_E1H              "bnx2x-e1h-"
68
69 /* Time in jiffies before concluding the transmitter is hung */
70 #define TX_TIMEOUT              (5*HZ)
71
72 static char version[] __devinitdata =
73         "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
74         DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
75
76 MODULE_AUTHOR("Eliezer Tamir");
77 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
78 MODULE_LICENSE("GPL");
79 MODULE_VERSION(DRV_MODULE_VERSION);
80
81 static int multi_mode = 1;
82 module_param(multi_mode, int, 0);
83 MODULE_PARM_DESC(multi_mode, " Use per-CPU queues");
84
85 static int disable_tpa;
86 module_param(disable_tpa, int, 0);
87 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
88
89 static int int_mode;
90 module_param(int_mode, int, 0);
91 MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
92
93 static int poll;
94 module_param(poll, int, 0);
95 MODULE_PARM_DESC(poll, " Use polling (for debug)");
96
97 static int mrrs = -1;
98 module_param(mrrs, int, 0);
99 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
100
101 static int debug;
102 module_param(debug, int, 0);
103 MODULE_PARM_DESC(debug, " Default debug msglevel");
104
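/* For reference, the parameters above can be given at module load time;
 * the values below are only an illustration, not recommended settings:
 *
 *   modprobe bnx2x multi_mode=1 disable_tpa=1 int_mode=2 mrrs=-1 debug=0
 */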
105 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
106
107 static struct workqueue_struct *bnx2x_wq;
108
109 enum bnx2x_board_type {
110         BCM57710 = 0,
111         BCM57711 = 1,
112         BCM57711E = 2,
113 };
114
115 /* indexed by board_type, above */
116 static struct {
117         char *name;
118 } board_info[] __devinitdata = {
119         { "Broadcom NetXtreme II BCM57710 XGb" },
120         { "Broadcom NetXtreme II BCM57711 XGb" },
121         { "Broadcom NetXtreme II BCM57711E XGb" }
122 };
123
124
125 static const struct pci_device_id bnx2x_pci_tbl[] = {
126         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
127                 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
128         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
129                 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
130         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
131                 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
132         { 0 }
133 };
134
135 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
136
137 /****************************************************************************
138 * General service functions
139 ****************************************************************************/
140
141 /* used only at init
142  * locking is done by mcp
143  */
144 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
145 {
146         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
147         pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
148         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
149                                PCICFG_VENDOR_ID_OFFSET);
150 }
151
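/* indirect register read through the PCI config space GRC window;
 * counterpart of bnx2x_reg_wr_ind() above
 */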
152 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
153 {
154         u32 val;
155
156         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
157         pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
158         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
159                                PCICFG_VENDOR_ID_OFFSET);
160
161         return val;
162 }
163
164 static const u32 dmae_reg_go_c[] = {
165         DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
166         DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
167         DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
168         DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
169 };
170
171 /* copy command into DMAE command memory and set DMAE command go */
172 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
173                             int idx)
174 {
175         u32 cmd_offset;
176         int i;
177
178         cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
179         for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
180                 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
181
182                 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
183                    idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
184         }
185         REG_WR(bp, dmae_reg_go_c[idx], 1);
186 }
187
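/* copy len32 dwords from host memory at dma_addr into device GRC space at
 * dst_addr using the DMAE engine; while DMAE is not yet ready the copy
 * falls back to indirect register writes.  Serialized by dmae_mutex,
 * completion is polled through the slowpath wb_comp word.
 */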
188 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
189                       u32 len32)
190 {
191         struct dmae_command *dmae = &bp->init_dmae;
192         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
193         int cnt = 200;
194
195         if (!bp->dmae_ready) {
196                 u32 *data = bnx2x_sp(bp, wb_data[0]);
197
198                 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
199                    "  using indirect\n", dst_addr, len32);
200                 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
201                 return;
202         }
203
204         mutex_lock(&bp->dmae_mutex);
205
206         memset(dmae, 0, sizeof(struct dmae_command));
207
208         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
209                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
210                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
211 #ifdef __BIG_ENDIAN
212                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
213 #else
214                         DMAE_CMD_ENDIANITY_DW_SWAP |
215 #endif
216                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
217                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
218         dmae->src_addr_lo = U64_LO(dma_addr);
219         dmae->src_addr_hi = U64_HI(dma_addr);
220         dmae->dst_addr_lo = dst_addr >> 2;
221         dmae->dst_addr_hi = 0;
222         dmae->len = len32;
223         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
224         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
225         dmae->comp_val = DMAE_COMP_VAL;
226
227         DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
228            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
229                     "dst_addr [%x:%08x (%08x)]\n"
230            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
231            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
232            dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
233            dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
234         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
235            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
236            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
237
238         *wb_comp = 0;
239
240         bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
241
242         udelay(5);
243
244         while (*wb_comp != DMAE_COMP_VAL) {
245                 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
246
247                 if (!cnt) {
248                         BNX2X_ERR("DMAE timeout!\n");
249                         break;
250                 }
251                 cnt--;
252                 /* adjust delay for emulation/FPGA */
253                 if (CHIP_REV_IS_SLOW(bp))
254                         msleep(100);
255                 else
256                         udelay(5);
257         }
258
259         mutex_unlock(&bp->dmae_mutex);
260 }
261
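/* read len32 dwords from GRC address src_addr into the slowpath wb_data
 * buffer using the DMAE engine; falls back to indirect register reads
 * while DMAE is not yet ready.  Serialized by dmae_mutex.
 */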
262 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
263 {
264         struct dmae_command *dmae = &bp->init_dmae;
265         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
266         int cnt = 200;
267
268         if (!bp->dmae_ready) {
269                 u32 *data = bnx2x_sp(bp, wb_data[0]);
270                 int i;
271
272                 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
273                    "  using indirect\n", src_addr, len32);
274                 for (i = 0; i < len32; i++)
275                         data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
276                 return;
277         }
278
279         mutex_lock(&bp->dmae_mutex);
280
281         memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
282         memset(dmae, 0, sizeof(struct dmae_command));
283
284         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
285                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
286                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
287 #ifdef __BIG_ENDIAN
288                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
289 #else
290                         DMAE_CMD_ENDIANITY_DW_SWAP |
291 #endif
292                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
293                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
294         dmae->src_addr_lo = src_addr >> 2;
295         dmae->src_addr_hi = 0;
296         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
297         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
298         dmae->len = len32;
299         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
300         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
301         dmae->comp_val = DMAE_COMP_VAL;
302
303         DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
304            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
305                     "dst_addr [%x:%08x (%08x)]\n"
306            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
307            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
308            dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
309            dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
310
311         *wb_comp = 0;
312
313         bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
314
315         udelay(5);
316
317         while (*wb_comp != DMAE_COMP_VAL) {
318
319                 if (!cnt) {
320                         BNX2X_ERR("DMAE timeout!\n");
321                         break;
322                 }
323                 cnt--;
324                 /* adjust delay for emulation/FPGA */
325                 if (CHIP_REV_IS_SLOW(bp))
326                         msleep(100);
327                 else
328                         udelay(5);
329         }
330         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
331            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
332            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
333
334         mutex_unlock(&bp->dmae_mutex);
335 }
336
337 /* used only for slowpath so not inlined */
338 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
339 {
340         u32 wb_write[2];
341
342         wb_write[0] = val_hi;
343         wb_write[1] = val_lo;
344         REG_WR_DMAE(bp, reg, wb_write, 2);
345 }
346
347 #ifdef USE_WB_RD
348 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
349 {
350         u32 wb_data[2];
351
352         REG_RD_DMAE(bp, reg, wb_data, 2);
353
354         return HILO_U64(wb_data[0], wb_data[1]);
355 }
356 #endif
357
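/* scan the assert lists of the X/T/C/U storm processors and print every
 * valid entry; returns the number of asserts found
 */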
358 static int bnx2x_mc_assert(struct bnx2x *bp)
359 {
360         char last_idx;
361         int i, rc = 0;
362         u32 row0, row1, row2, row3;
363
364         /* XSTORM */
365         last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
366                            XSTORM_ASSERT_LIST_INDEX_OFFSET);
367         if (last_idx)
368                 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
369
370         /* print the asserts */
371         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
372
373                 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
374                               XSTORM_ASSERT_LIST_OFFSET(i));
375                 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
376                               XSTORM_ASSERT_LIST_OFFSET(i) + 4);
377                 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
378                               XSTORM_ASSERT_LIST_OFFSET(i) + 8);
379                 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
380                               XSTORM_ASSERT_LIST_OFFSET(i) + 12);
381
382                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
383                         BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
384                                   " 0x%08x 0x%08x 0x%08x\n",
385                                   i, row3, row2, row1, row0);
386                         rc++;
387                 } else {
388                         break;
389                 }
390         }
391
392         /* TSTORM */
393         last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
394                            TSTORM_ASSERT_LIST_INDEX_OFFSET);
395         if (last_idx)
396                 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
397
398         /* print the asserts */
399         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
400
401                 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
402                               TSTORM_ASSERT_LIST_OFFSET(i));
403                 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
404                               TSTORM_ASSERT_LIST_OFFSET(i) + 4);
405                 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
406                               TSTORM_ASSERT_LIST_OFFSET(i) + 8);
407                 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
408                               TSTORM_ASSERT_LIST_OFFSET(i) + 12);
409
410                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
411                         BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
412                                   " 0x%08x 0x%08x 0x%08x\n",
413                                   i, row3, row2, row1, row0);
414                         rc++;
415                 } else {
416                         break;
417                 }
418         }
419
420         /* CSTORM */
421         last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
422                            CSTORM_ASSERT_LIST_INDEX_OFFSET);
423         if (last_idx)
424                 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
425
426         /* print the asserts */
427         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
428
429                 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
430                               CSTORM_ASSERT_LIST_OFFSET(i));
431                 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
432                               CSTORM_ASSERT_LIST_OFFSET(i) + 4);
433                 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
434                               CSTORM_ASSERT_LIST_OFFSET(i) + 8);
435                 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
436                               CSTORM_ASSERT_LIST_OFFSET(i) + 12);
437
438                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
439                         BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
440                                   " 0x%08x 0x%08x 0x%08x\n",
441                                   i, row3, row2, row1, row0);
442                         rc++;
443                 } else {
444                         break;
445                 }
446         }
447
448         /* USTORM */
449         last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
450                            USTORM_ASSERT_LIST_INDEX_OFFSET);
451         if (last_idx)
452                 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
453
454         /* print the asserts */
455         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
456
457                 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
458                               USTORM_ASSERT_LIST_OFFSET(i));
459                 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
460                               USTORM_ASSERT_LIST_OFFSET(i) + 4);
461                 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
462                               USTORM_ASSERT_LIST_OFFSET(i) + 8);
463                 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
464                               USTORM_ASSERT_LIST_OFFSET(i) + 12);
465
466                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
467                         BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
468                                   " 0x%08x 0x%08x 0x%08x\n",
469                                   i, row3, row2, row1, row0);
470                         rc++;
471                 } else {
472                         break;
473                 }
474         }
475
476         return rc;
477 }
478
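/* dump the MCP firmware trace from the MCP scratchpad to the log; the
 * trace buffer wraps, so print from the current mark to the end of the
 * buffer and then from the start of the buffer back to the mark
 */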
479 static void bnx2x_fw_dump(struct bnx2x *bp)
480 {
481         u32 mark, offset;
482         __be32 data[9];
483         int word;
484
485         mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
486         mark = ((mark + 0x3) & ~0x3);
487         printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
488
489         for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
490                 for (word = 0; word < 8; word++)
491                         data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
492                                                   offset + 4*word));
493                 data[8] = 0x0;
494                 printk(KERN_CONT "%s", (char *)data);
495         }
496         for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
497                 for (word = 0; word < 8; word++)
498                         data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
499                                                   offset + 4*word));
500                 data[8] = 0x0;
501                 printk(KERN_CONT "%s", (char *)data);
502         }
503         printk("\n" KERN_ERR PFX "end of fw dump\n");
504 }
505
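/* dump the driver state on a fatal error: status block indices, the Rx/Tx
 * ring pointers and descriptors of every queue, then the firmware trace
 * and the storm assert lists
 */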
506 static void bnx2x_panic_dump(struct bnx2x *bp)
507 {
508         int i;
509         u16 j, start, end;
510
511         bp->stats_state = STATS_STATE_DISABLED;
512         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
513
514         BNX2X_ERR("begin crash dump -----------------\n");
515
516         /* Indices */
517         /* Common */
518         BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
519                   "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
520                   "  spq_prod_idx(%u)\n",
521                   bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
522                   bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
523
524         /* Rx */
525         for_each_rx_queue(bp, i) {
526                 struct bnx2x_fastpath *fp = &bp->fp[i];
527
528                 BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
529                           "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
530                           "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
531                           i, fp->rx_bd_prod, fp->rx_bd_cons,
532                           le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
533                           fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
534                 BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
535                           "  fp_u_idx(%x) *sb_u_idx(%x)\n",
536                           fp->rx_sge_prod, fp->last_max_sge,
537                           le16_to_cpu(fp->fp_u_idx),
538                           fp->status_blk->u_status_block.status_block_index);
539         }
540
541         /* Tx */
542         for_each_tx_queue(bp, i) {
543                 struct bnx2x_fastpath *fp = &bp->fp[i];
544                 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
545
546                 BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
547                           "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
548                           i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
549                           fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
550                 BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
551                           "  bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
552                           fp->status_blk->c_status_block.status_block_index,
553                           hw_prods->packets_prod, hw_prods->bds_prod);
554         }
555
556         /* Rings */
557         /* Rx */
558         for_each_rx_queue(bp, i) {
559                 struct bnx2x_fastpath *fp = &bp->fp[i];
560
561                 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
562                 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
563                 for (j = start; j != end; j = RX_BD(j + 1)) {
564                         u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
565                         struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
566
567                         BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
568                                   i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
569                 }
570
571                 start = RX_SGE(fp->rx_sge_prod);
572                 end = RX_SGE(fp->last_max_sge);
573                 for (j = start; j != end; j = RX_SGE(j + 1)) {
574                         u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
575                         struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
576
577                         BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
578                                   i, j, rx_sge[1], rx_sge[0], sw_page->page);
579                 }
580
581                 start = RCQ_BD(fp->rx_comp_cons - 10);
582                 end = RCQ_BD(fp->rx_comp_cons + 503);
583                 for (j = start; j != end; j = RCQ_BD(j + 1)) {
584                         u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
585
586                         BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
587                                   i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
588                 }
589         }
590
591         /* Tx */
592         for_each_tx_queue(bp, i) {
593                 struct bnx2x_fastpath *fp = &bp->fp[i];
594
595                 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
596                 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
597                 for (j = start; j != end; j = TX_BD(j + 1)) {
598                         struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
599
600                         BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
601                                   i, j, sw_bd->skb, sw_bd->first_bd);
602                 }
603
604                 start = TX_BD(fp->tx_bd_cons - 10);
605                 end = TX_BD(fp->tx_bd_cons + 254);
606                 for (j = start; j != end; j = TX_BD(j + 1)) {
607                         u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
608
609                         BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
610                                   i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
611                 }
612         }
613
614         bnx2x_fw_dump(bp);
615         bnx2x_mc_assert(bp);
616         BNX2X_ERR("end crash dump -----------------\n");
617 }
618
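/* enable interrupts in the HC according to the active mode (MSI-X, MSI or
 * INTx) and, on E1H, program the leading/trailing edge attention registers
 */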
619 static void bnx2x_int_enable(struct bnx2x *bp)
620 {
621         int port = BP_PORT(bp);
622         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
623         u32 val = REG_RD(bp, addr);
624         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
625         int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
626
627         if (msix) {
628                 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
629                          HC_CONFIG_0_REG_INT_LINE_EN_0);
630                 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
631                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
632         } else if (msi) {
633                 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
634                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
635                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
636                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
637         } else {
638                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
639                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
640                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
641                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
642
643                 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
644                    val, port, addr);
645
646                 REG_WR(bp, addr, val);
647
648                 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
649         }
650
651         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
652            val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
653
654         REG_WR(bp, addr, val);
655         /*
656          * Ensure that HC_CONFIG is written before leading/trailing edge config
657          */
658         mmiowb();
659         barrier();
660
661         if (CHIP_IS_E1H(bp)) {
662                 /* init leading/trailing edge */
663                 if (IS_E1HMF(bp)) {
664                         val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
665                         if (bp->port.pmf)
666                                 /* enable nig and gpio3 attention */
667                                 val |= 0x1100;
668                 } else
669                         val = 0xffff;
670
671                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
672                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
673         }
674
675         /* Make sure that interrupts are indeed enabled from here on */
676         mmiowb();
677 }
678
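/* mask all interrupt sources in the HC config register and verify that the
 * write took effect
 */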
679 static void bnx2x_int_disable(struct bnx2x *bp)
680 {
681         int port = BP_PORT(bp);
682         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
683         u32 val = REG_RD(bp, addr);
684
685         val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
686                  HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
687                  HC_CONFIG_0_REG_INT_LINE_EN_0 |
688                  HC_CONFIG_0_REG_ATTN_BIT_EN_0);
689
690         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
691            val, port, addr);
692
693         /* flush all outstanding writes */
694         mmiowb();
695
696         REG_WR(bp, addr, val);
697         if (REG_RD(bp, addr) != val)
698                 BNX2X_ERR("BUG! proper val not read from IGU!\n");
699
700 }
701
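/* stop interrupt handling: raise intr_sem so ISRs return early, optionally
 * mask interrupts in HW, wait for in-flight ISRs with synchronize_irq() and
 * flush the slowpath workqueue
 */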
702 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
703 {
704         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
705         int i, offset;
706
707         /* disable interrupt handling */
708         atomic_inc(&bp->intr_sem);
709         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
710
711         if (disable_hw)
712                 /* prevent the HW from sending interrupts */
713                 bnx2x_int_disable(bp);
714
715         /* make sure all ISRs are done */
716         if (msix) {
717                 synchronize_irq(bp->msix_table[0].vector);
718                 offset = 1;
719                 for_each_queue(bp, i)
720                         synchronize_irq(bp->msix_table[i + offset].vector);
721         } else
722                 synchronize_irq(bp->pdev->irq);
723
724         /* make sure sp_task is not running */
725         cancel_delayed_work(&bp->sp_task);
726         flush_workqueue(bnx2x_wq);
727 }
728
729 /* fast path */
730
731 /*
732  * General service functions
733  */
734
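/* acknowledge a status block to the IGU through the HC command register,
 * updating the given storm index and the interrupt mode/update flags
 */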
735 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
736                                 u8 storm, u16 index, u8 op, u8 update)
737 {
738         u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
739                        COMMAND_REG_INT_ACK);
740         struct igu_ack_register igu_ack;
741
742         igu_ack.status_block_index = index;
743         igu_ack.sb_id_and_flags =
744                         ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
745                          (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
746                          (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
747                          (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
748
749         DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
750            (*(u32 *)&igu_ack), hc_addr);
751         REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
752
753         /* Make sure that ACK is written */
754         mmiowb();
755         barrier();
756 }
757
758 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
759 {
760         struct host_status_block *fpsb = fp->status_blk;
761         u16 rc = 0;
762
763         barrier(); /* status block is written to by the chip */
764         if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
765                 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
766                 rc |= 1;
767         }
768         if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
769                 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
770                 rc |= 2;
771         }
772         return rc;
773 }
774
775 static u16 bnx2x_ack_int(struct bnx2x *bp)
776 {
777         u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
778                        COMMAND_REG_SIMD_MASK);
779         u32 result = REG_RD(bp, hc_addr);
780
781         DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
782            result, hc_addr);
783
784         return result;
785 }
786
787
788 /*
789  * fast path service functions
790  */
791
792 static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
793 {
794         u16 tx_cons_sb;
795
796         /* Tell compiler that status block fields can change */
797         barrier();
798         tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
799         return (fp->tx_pkt_cons != tx_cons_sb);
800 }
801
802 static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
803 {
804         /* Tell compiler that consumer and producer can change */
805         barrier();
806         return (fp->tx_pkt_prod != fp->tx_pkt_cons);
807 }
808
809 /* free skb in the packet ring at pos idx
810  * return idx of last bd freed
811  */
812 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
813                              u16 idx)
814 {
815         struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
816         struct eth_tx_bd *tx_bd;
817         struct sk_buff *skb = tx_buf->skb;
818         u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
819         int nbd;
820
821         DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
822            idx, tx_buf, skb);
823
824         /* unmap first bd */
825         DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
826         tx_bd = &fp->tx_desc_ring[bd_idx];
827         pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
828                          BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
829
830         nbd = le16_to_cpu(tx_bd->nbd) - 1;
831         new_cons = nbd + tx_buf->first_bd;
832 #ifdef BNX2X_STOP_ON_ERROR
833         if (nbd > (MAX_SKB_FRAGS + 2)) {
834                 BNX2X_ERR("BAD nbd!\n");
835                 bnx2x_panic();
836         }
837 #endif
838
839         /* Skip a parse bd and the TSO split header bd
840            since they have no mapping */
841         if (nbd)
842                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
843
844         if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
845                                            ETH_TX_BD_FLAGS_TCP_CSUM |
846                                            ETH_TX_BD_FLAGS_SW_LSO)) {
847                 if (--nbd)
848                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
849                 tx_bd = &fp->tx_desc_ring[bd_idx];
850                 /* is this a TSO split header bd? */
851                 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
852                         if (--nbd)
853                                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
854                 }
855         }
856
857         /* now free frags */
858         while (nbd > 0) {
859
860                 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
861                 tx_bd = &fp->tx_desc_ring[bd_idx];
862                 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
863                                BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
864                 if (--nbd)
865                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
866         }
867
868         /* release skb */
869         WARN_ON(!skb);
870         dev_kfree_skb(skb);
871         tx_buf->first_bd = 0;
872         tx_buf->skb = NULL;
873
874         return new_cons;
875 }
876
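/* number of Tx BDs available to start_xmit(); the NUM_TX_RINGS "next page"
 * BDs are always counted as used
 */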
877 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
878 {
879         s16 used;
880         u16 prod;
881         u16 cons;
882
883         barrier(); /* Tell compiler that prod and cons can change */
884         prod = fp->tx_bd_prod;
885         cons = fp->tx_bd_cons;
886
887         /* NUM_TX_RINGS = number of "next-page" entries
888            It will be used as a threshold */
889         used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
890
891 #ifdef BNX2X_STOP_ON_ERROR
892         WARN_ON(used < 0);
893         WARN_ON(used > fp->bp->tx_ring_size);
894         WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
895 #endif
896
897         return (s16)(fp->bp->tx_ring_size) - used;
898 }
899
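/* Tx completion: walk from the software consumer to the hardware consumer
 * reported in the status block, free the completed skbs and wake the netdev
 * queue if it was stopped and enough BDs are free again
 */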
900 static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
901 {
902         struct bnx2x *bp = fp->bp;
903         struct netdev_queue *txq;
904         u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
905         int done = 0;
906
907 #ifdef BNX2X_STOP_ON_ERROR
908         if (unlikely(bp->panic))
909                 return;
910 #endif
911
912         txq = netdev_get_tx_queue(bp->dev, fp->index);
913         hw_cons = le16_to_cpu(*fp->tx_cons_sb);
914         sw_cons = fp->tx_pkt_cons;
915
916         while (sw_cons != hw_cons) {
917                 u16 pkt_cons;
918
919                 pkt_cons = TX_BD(sw_cons);
920
921                 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
922
923                 DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
924                    hw_cons, sw_cons, pkt_cons);
925
926 /*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
927                         rmb();
928                         prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
929                 }
930 */
931                 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
932                 sw_cons++;
933                 done++;
934         }
935
936         fp->tx_pkt_cons = sw_cons;
937         fp->tx_bd_cons = bd_cons;
938
939         /* TBD need a thresh? */
940         if (unlikely(netif_tx_queue_stopped(txq))) {
941
942                 __netif_tx_lock(txq, smp_processor_id());
943
944                 /* Need to make the tx_bd_cons update visible to start_xmit()
945                  * before checking for netif_tx_queue_stopped().  Without the
946                  * memory barrier, there is a small possibility that
947                  * start_xmit() will miss it and cause the queue to be stopped
948                  * forever.
949                  */
950                 smp_mb();
951
952                 if ((netif_tx_queue_stopped(txq)) &&
953                     (bp->state == BNX2X_STATE_OPEN) &&
954                     (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
955                         netif_tx_wake_queue(txq);
956
957                 __netif_tx_unlock(txq);
958         }
959 }
960
961
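/* handle a ramrod completion reported on the fastpath ring: advance the
 * fastpath or driver state machine according to the completed command
 */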
962 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
963                            union eth_rx_cqe *rr_cqe)
964 {
965         struct bnx2x *bp = fp->bp;
966         int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
967         int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
968
969         DP(BNX2X_MSG_SP,
970            "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
971            fp->index, cid, command, bp->state,
972            rr_cqe->ramrod_cqe.ramrod_type);
973
974         bp->spq_left++;
975
976         if (fp->index) {
977                 switch (command | fp->state) {
978                 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
979                                                 BNX2X_FP_STATE_OPENING):
980                         DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
981                            cid);
982                         fp->state = BNX2X_FP_STATE_OPEN;
983                         break;
984
985                 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
986                         DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
987                            cid);
988                         fp->state = BNX2X_FP_STATE_HALTED;
989                         break;
990
991                 default:
992                         BNX2X_ERR("unexpected MC reply (%d)  "
993                                   "fp->state is %x\n", command, fp->state);
994                         break;
995                 }
996                 mb(); /* force bnx2x_wait_ramrod() to see the change */
997                 return;
998         }
999
1000         switch (command | bp->state) {
1001         case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
1002                 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
1003                 bp->state = BNX2X_STATE_OPEN;
1004                 break;
1005
1006         case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
1007                 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
1008                 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
1009                 fp->state = BNX2X_FP_STATE_HALTED;
1010                 break;
1011
1012         case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
1013                 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
1014                 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
1015                 break;
1016
1017
1018         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
1019         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
1020                 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
1021                 bp->set_mac_pending = 0;
1022                 break;
1023
1024         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
1025                 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
1026                 break;
1027
1028         default:
1029                 BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
1030                           command, bp->state);
1031                 break;
1032         }
1033         mb(); /* force bnx2x_wait_ramrod() to see the change */
1034 }
1035
1036 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1037                                      struct bnx2x_fastpath *fp, u16 index)
1038 {
1039         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1040         struct page *page = sw_buf->page;
1041         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1042
1043         /* Skip "next page" elements */
1044         if (!page)
1045                 return;
1046
1047         pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
1048                        SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1049         __free_pages(page, PAGES_PER_SGE_SHIFT);
1050
1051         sw_buf->page = NULL;
1052         sge->addr_hi = 0;
1053         sge->addr_lo = 0;
1054 }
1055
1056 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1057                                            struct bnx2x_fastpath *fp, int last)
1058 {
1059         int i;
1060
1061         for (i = 0; i < last; i++)
1062                 bnx2x_free_rx_sge(bp, fp, i);
1063 }
1064
1065 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1066                                      struct bnx2x_fastpath *fp, u16 index)
1067 {
1068         struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1069         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1070         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1071         dma_addr_t mapping;
1072
1073         if (unlikely(page == NULL))
1074                 return -ENOMEM;
1075
1076         mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
1077                                PCI_DMA_FROMDEVICE);
1078         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1079                 __free_pages(page, PAGES_PER_SGE_SHIFT);
1080                 return -ENOMEM;
1081         }
1082
1083         sw_buf->page = page;
1084         pci_unmap_addr_set(sw_buf, mapping, mapping);
1085
1086         sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1087         sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1088
1089         return 0;
1090 }
1091
1092 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1093                                      struct bnx2x_fastpath *fp, u16 index)
1094 {
1095         struct sk_buff *skb;
1096         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1097         struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1098         dma_addr_t mapping;
1099
1100         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1101         if (unlikely(skb == NULL))
1102                 return -ENOMEM;
1103
1104         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
1105                                  PCI_DMA_FROMDEVICE);
1106         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1107                 dev_kfree_skb(skb);
1108                 return -ENOMEM;
1109         }
1110
1111         rx_buf->skb = skb;
1112         pci_unmap_addr_set(rx_buf, mapping, mapping);
1113
1114         rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1115         rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1116
1117         return 0;
1118 }
1119
1120 /* note that we are not allocating a new skb,
1121  * we are just moving one from cons to prod;
1122  * we are not creating a new mapping,
1123  * so there is no need to check for dma_mapping_error().
1124  */
1125 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1126                                struct sk_buff *skb, u16 cons, u16 prod)
1127 {
1128         struct bnx2x *bp = fp->bp;
1129         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1130         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1131         struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1132         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1133
1134         pci_dma_sync_single_for_device(bp->pdev,
1135                                        pci_unmap_addr(cons_rx_buf, mapping),
1136                                        RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1137
1138         prod_rx_buf->skb = cons_rx_buf->skb;
1139         pci_unmap_addr_set(prod_rx_buf, mapping,
1140                            pci_unmap_addr(cons_rx_buf, mapping));
1141         *prod_bd = *cons_bd;
1142 }
1143
1144 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1145                                              u16 idx)
1146 {
1147         u16 last_max = fp->last_max_sge;
1148
1149         if (SUB_S16(idx, last_max) > 0)
1150                 fp->last_max_sge = idx;
1151 }
1152
1153 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1154 {
1155         int i, j;
1156
1157         for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1158                 int idx = RX_SGE_CNT * i - 1;
1159
1160                 for (j = 0; j < 2; j++) {
1161                         SGE_MASK_CLEAR_BIT(fp, idx);
1162                         idx--;
1163                 }
1164         }
1165 }
1166
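/* mark the SGEs consumed by this aggregation in the sge_mask and advance
 * rx_sge_prod over mask elements whose SGEs have all been consumed,
 * re-setting those elements for reuse
 */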
1167 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1168                                   struct eth_fast_path_rx_cqe *fp_cqe)
1169 {
1170         struct bnx2x *bp = fp->bp;
1171         u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1172                                      le16_to_cpu(fp_cqe->len_on_bd)) >>
1173                       SGE_PAGE_SHIFT;
1174         u16 last_max, last_elem, first_elem;
1175         u16 delta = 0;
1176         u16 i;
1177
1178         if (!sge_len)
1179                 return;
1180
1181         /* First mark all used pages */
1182         for (i = 0; i < sge_len; i++)
1183                 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1184
1185         DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1186            sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1187
1188         /* Here we assume that the last SGE index is the biggest */
1189         prefetch((void *)(fp->sge_mask));
1190         bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1191
1192         last_max = RX_SGE(fp->last_max_sge);
1193         last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1194         first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1195
1196         /* If ring is not full */
1197         if (last_elem + 1 != first_elem)
1198                 last_elem++;
1199
1200         /* Now update the prod */
1201         for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1202                 if (likely(fp->sge_mask[i]))
1203                         break;
1204
1205                 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1206                 delta += RX_SGE_MASK_ELEM_SZ;
1207         }
1208
1209         if (delta > 0) {
1210                 fp->rx_sge_prod += delta;
1211                 /* clear page-end entries */
1212                 bnx2x_clear_sge_mask_next_elems(fp);
1213         }
1214
1215         DP(NETIF_MSG_RX_STATUS,
1216            "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
1217            fp->last_max_sge, fp->rx_sge_prod);
1218 }
1219
1220 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1221 {
1222         /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1223         memset(fp->sge_mask, 0xff,
1224                (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1225
1226         /* Clear the two last indices in the page to 0:
1227            these are the indices that correspond to the "next" element,
1228            hence will never be indicated and should be removed from
1229            the calculations. */
1230         bnx2x_clear_sge_mask_next_elems(fp);
1231 }
1232
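/* start of a TPA aggregation: park the skb that was posted at 'cons' in the
 * per-queue tpa_pool (still mapped) and put the pool's spare skb on the
 * 'prod' BD instead
 */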
1233 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1234                             struct sk_buff *skb, u16 cons, u16 prod)
1235 {
1236         struct bnx2x *bp = fp->bp;
1237         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1238         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1239         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1240         dma_addr_t mapping;
1241
1242         /* move empty skb from pool to prod and map it */
1243         prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1244         mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1245                                  bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1246         pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1247
1248         /* move partial skb from cons to pool (don't unmap yet) */
1249         fp->tpa_pool[queue] = *cons_rx_buf;
1250
1251         /* mark bin state as start - print error if current state != stop */
1252         if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1253                 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1254
1255         fp->tpa_state[queue] = BNX2X_TPA_START;
1256
1257         /* point prod_bd to new skb */
1258         prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1259         prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1260
1261 #ifdef BNX2X_STOP_ON_ERROR
1262         fp->tpa_queue_used |= (1 << queue);
1263 #ifdef __powerpc64__
1264         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1265 #else
1266         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1267 #endif
1268            fp->tpa_queue_used);
1269 #endif
1270 }
1271
1272 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1273                                struct sk_buff *skb,
1274                                struct eth_fast_path_rx_cqe *fp_cqe,
1275                                u16 cqe_idx)
1276 {
1277         struct sw_rx_page *rx_pg, old_rx_pg;
1278         u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1279         u32 i, frag_len, frag_size, pages;
1280         int err;
1281         int j;
1282
1283         frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1284         pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1285
1286         /* This is needed in order to enable forwarding support */
1287         if (frag_size)
1288                 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1289                                                max(frag_size, (u32)len_on_bd));
1290
1291 #ifdef BNX2X_STOP_ON_ERROR
1292         if (pages >
1293             min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
1294                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1295                           pages, cqe_idx);
1296                 BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
1297                           fp_cqe->pkt_len, len_on_bd);
1298                 bnx2x_panic();
1299                 return -EINVAL;
1300         }
1301 #endif
1302
1303         /* Run through the SGL and compose the fragmented skb */
1304         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1305                 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1306
1307                 /* FW gives the indices of the SGE as if the ring is an array
1308                    (meaning that "next" element will consume 2 indices) */
1309                 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1310                 rx_pg = &fp->rx_page_ring[sge_idx];
1311                 old_rx_pg = *rx_pg;
1312
1313                 /* If we fail to allocate a substitute page, we simply stop
1314                    where we are and drop the whole packet */
1315                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1316                 if (unlikely(err)) {
1317                         fp->eth_q_stats.rx_skb_alloc_failed++;
1318                         return err;
1319                 }
1320
1321                 /* Unmap the page as we are going to pass it to the stack */
1322                 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1323                               SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1324
1325                 /* Add one frag and update the appropriate fields in the skb */
1326                 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1327
1328                 skb->data_len += frag_len;
1329                 skb->truesize += frag_len;
1330                 skb->len += frag_len;
1331
1332                 frag_size -= frag_len;
1333         }
1334
1335         return 0;
1336 }
1337
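/* end of a TPA aggregation: unmap the pooled skb, fix up the IP checksum,
 * attach the SGE pages as fragments and pass the skb to the stack (with
 * VLAN acceleration when applicable); if a replacement skb cannot be
 * allocated the packet is dropped and the buffer stays in the bin
 */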
1338 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1339                            u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1340                            u16 cqe_idx)
1341 {
1342         struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1343         struct sk_buff *skb = rx_buf->skb;
1344         /* alloc new skb */
1345         struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1346
1347         /* Unmap skb in the pool anyway, as we are going to change
1348            pool entry status to BNX2X_TPA_STOP even if new skb allocation
1349            fails. */
1350         pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1351                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1352
1353         if (likely(new_skb)) {
1354                 /* fix ip xsum and give it to the stack */
1355                 /* (no need to map the new skb) */
1356 #ifdef BCM_VLAN
1357                 int is_vlan_cqe =
1358                         (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1359                          PARSING_FLAGS_VLAN);
1360                 int is_not_hwaccel_vlan_cqe =
1361                         (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1362 #endif
1363
1364                 prefetch(skb);
1365                 prefetch(((char *)(skb)) + 128);
1366
1367 #ifdef BNX2X_STOP_ON_ERROR
1368                 if (pad + len > bp->rx_buf_size) {
1369                         BNX2X_ERR("skb_put is about to fail...  "
1370                                   "pad %d  len %d  rx_buf_size %d\n",
1371                                   pad, len, bp->rx_buf_size);
1372                         bnx2x_panic();
1373                         return;
1374                 }
1375 #endif
1376
1377                 skb_reserve(skb, pad);
1378                 skb_put(skb, len);
1379
1380                 skb->protocol = eth_type_trans(skb, bp->dev);
1381                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1382
1383                 {
1384                         struct iphdr *iph;
1385
1386                         iph = (struct iphdr *)skb->data;
1387 #ifdef BCM_VLAN
1388                         /* If there is no Rx VLAN offloading -
1389                            take the VLAN tag into account */
1390                         if (unlikely(is_not_hwaccel_vlan_cqe))
1391                                 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1392 #endif
1393                         iph->check = 0;
1394                         iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1395                 }
1396
1397                 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1398                                          &cqe->fast_path_cqe, cqe_idx)) {
1399 #ifdef BCM_VLAN
1400                         if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1401                             (!is_not_hwaccel_vlan_cqe))
1402                                 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1403                                                 le16_to_cpu(cqe->fast_path_cqe.
1404                                                             vlan_tag));
1405                         else
1406 #endif
1407                                 netif_receive_skb(skb);
1408                 } else {
1409                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1410                            " - dropping packet!\n");
1411                         dev_kfree_skb(skb);
1412                 }
1413
1414
1415                 /* put new skb in bin */
1416                 fp->tpa_pool[queue].skb = new_skb;
1417
1418         } else {
1419                 /* else drop the packet and keep the buffer in the bin */
1420                 DP(NETIF_MSG_RX_STATUS,
1421                    "Failed to allocate new skb - dropping packet!\n");
1422                 fp->eth_q_stats.rx_skb_alloc_failed++;
1423         }
1424
1425         fp->tpa_state[queue] = BNX2X_TPA_STOP;
1426 }
1427
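/* publish the new Rx BD, CQE and SGE producer values to USTORM internal
 * memory for this queue's client
 */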
1428 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1429                                         struct bnx2x_fastpath *fp,
1430                                         u16 bd_prod, u16 rx_comp_prod,
1431                                         u16 rx_sge_prod)
1432 {
1433         struct ustorm_eth_rx_producers rx_prods = {0};
1434         int i;
1435
1436         /* Update producers */
1437         rx_prods.bd_prod = bd_prod;
1438         rx_prods.cqe_prod = rx_comp_prod;
1439         rx_prods.sge_prod = rx_sge_prod;
1440
1441         /*
1442          * Make sure that the BD and SGE data is updated before updating the
1443          * producers since FW might read the BD/SGE right after the producer
1444          * is updated.
1445          * This is only applicable for weak-ordered memory model archs such
1446          * as IA-64. The following barrier is also mandatory since the FW
1447          * assumes BDs must have buffers.
1448          */
1449         wmb();
1450
1451         for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1452                 REG_WR(bp, BAR_USTRORM_INTMEM +
1453                        USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1454                        ((u32 *)&rx_prods)[i]);
1455
1456         mmiowb(); /* keep prod updates ordered */
1457
1458         DP(NETIF_MSG_RX_STATUS,
1459            "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
1460            fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1461 }
1462
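/*
 * Main RX completion handler, called with a NAPI budget: walk the RX
 * completion queue from the software consumer up to the hardware consumer
 * taken from the status block, hand slow path CQEs to bnx2x_sp_event(),
 * start/stop TPA aggregations, reuse the buffer on error or allocation
 * failure, and pass good packets up to the stack.  Finally the new
 * BD/CQE/SGE producers are written back for the FW.
 */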
1463 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1464 {
1465         struct bnx2x *bp = fp->bp;
1466         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1467         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1468         int rx_pkt = 0;
1469
1470 #ifdef BNX2X_STOP_ON_ERROR
1471         if (unlikely(bp->panic))
1472                 return 0;
1473 #endif
1474
1475         /* CQ "next element" is the same size as a regular element,
1476            that's why it's ok here */
1477         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1478         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1479                 hw_comp_cons++;
1480
1481         bd_cons = fp->rx_bd_cons;
1482         bd_prod = fp->rx_bd_prod;
1483         bd_prod_fw = bd_prod;
1484         sw_comp_cons = fp->rx_comp_cons;
1485         sw_comp_prod = fp->rx_comp_prod;
1486
1487         /* Memory barrier necessary as speculative reads of the rx
1488          * buffer can be ahead of the index in the status block
1489          */
1490         rmb();
1491
1492         DP(NETIF_MSG_RX_STATUS,
1493            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1494            fp->index, hw_comp_cons, sw_comp_cons);
1495
1496         while (sw_comp_cons != hw_comp_cons) {
1497                 struct sw_rx_bd *rx_buf = NULL;
1498                 struct sk_buff *skb;
1499                 union eth_rx_cqe *cqe;
1500                 u8 cqe_fp_flags;
1501                 u16 len, pad;
1502
1503                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1504                 bd_prod = RX_BD(bd_prod);
1505                 bd_cons = RX_BD(bd_cons);
1506
1507                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1508                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1509
1510                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1511                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1512                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1513                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1514                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1515                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1516
1517                 /* is this a slowpath msg? */
1518                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1519                         bnx2x_sp_event(fp, cqe);
1520                         goto next_cqe;
1521
1522                 /* this is an rx packet */
1523                 } else {
1524                         rx_buf = &fp->rx_buf_ring[bd_cons];
1525                         skb = rx_buf->skb;
1526                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1527                         pad = cqe->fast_path_cqe.placement_offset;
1528
1529                         /* If CQE is marked both TPA_START and TPA_END
1530                            it is a non-TPA CQE */
1531                         if ((!fp->disable_tpa) &&
1532                             (TPA_TYPE(cqe_fp_flags) !=
1533                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1534                                 u16 queue = cqe->fast_path_cqe.queue_index;
1535
1536                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1537                                         DP(NETIF_MSG_RX_STATUS,
1538                                            "calling tpa_start on queue %d\n",
1539                                            queue);
1540
1541                                         bnx2x_tpa_start(fp, queue, skb,
1542                                                         bd_cons, bd_prod);
1543                                         goto next_rx;
1544                                 }
1545
1546                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1547                                         DP(NETIF_MSG_RX_STATUS,
1548                                            "calling tpa_stop on queue %d\n",
1549                                            queue);
1550
1551                                         if (!BNX2X_RX_SUM_FIX(cqe))
1552                                                 BNX2X_ERR("STOP on non-TCP "
1553                                                           "data\n");
1554
1555                                         /* This is the size of the linear data
1556                                            on this skb */
1557                                         len = le16_to_cpu(cqe->fast_path_cqe.
1558                                                                 len_on_bd);
1559                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1560                                                     len, cqe, comp_ring_cons);
1561 #ifdef BNX2X_STOP_ON_ERROR
1562                                         if (bp->panic)
1563                                                 return 0;
1564 #endif
1565
1566                                         bnx2x_update_sge_prod(fp,
1567                                                         &cqe->fast_path_cqe);
1568                                         goto next_cqe;
1569                                 }
1570                         }
1571
1572                         pci_dma_sync_single_for_device(bp->pdev,
1573                                         pci_unmap_addr(rx_buf, mapping),
1574                                                        pad + RX_COPY_THRESH,
1575                                                        PCI_DMA_FROMDEVICE);
1576                         prefetch(skb);
1577                         prefetch(((char *)(skb)) + 128);
1578
1579                         /* is this an error packet? */
1580                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1581                                 DP(NETIF_MSG_RX_ERR,
1582                                    "ERROR  flags %x  rx packet %u\n",
1583                                    cqe_fp_flags, sw_comp_cons);
1584                                 fp->eth_q_stats.rx_err_discard_pkt++;
1585                                 goto reuse_rx;
1586                         }
1587
1588                         /* Since we don't have a jumbo ring,
1589                          * copy small packets if the MTU > 1500
1590                          */
1591                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1592                             (len <= RX_COPY_THRESH)) {
1593                                 struct sk_buff *new_skb;
1594
1595                                 new_skb = netdev_alloc_skb(bp->dev,
1596                                                            len + pad);
1597                                 if (new_skb == NULL) {
1598                                         DP(NETIF_MSG_RX_ERR,
1599                                            "ERROR  packet dropped "
1600                                            "because of alloc failure\n");
1601                                         fp->eth_q_stats.rx_skb_alloc_failed++;
1602                                         goto reuse_rx;
1603                                 }
1604
1605                                 /* aligned copy */
1606                                 skb_copy_from_linear_data_offset(skb, pad,
1607                                                     new_skb->data + pad, len);
1608                                 skb_reserve(new_skb, pad);
1609                                 skb_put(new_skb, len);
1610
1611                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1612
1613                                 skb = new_skb;
1614
1615                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1616                                 pci_unmap_single(bp->pdev,
1617                                         pci_unmap_addr(rx_buf, mapping),
1618                                                  bp->rx_buf_size,
1619                                                  PCI_DMA_FROMDEVICE);
1620                                 skb_reserve(skb, pad);
1621                                 skb_put(skb, len);
1622
1623                         } else {
1624                                 DP(NETIF_MSG_RX_ERR,
1625                                    "ERROR  packet dropped because "
1626                                    "of alloc failure\n");
1627                                 fp->eth_q_stats.rx_skb_alloc_failed++;
1628 reuse_rx:
1629                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1630                                 goto next_rx;
1631                         }
1632
1633                         skb->protocol = eth_type_trans(skb, bp->dev);
1634
1635                         skb->ip_summed = CHECKSUM_NONE;
1636                         if (bp->rx_csum) {
1637                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1638                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1639                                 else
1640                                         fp->eth_q_stats.hw_csum_err++;
1641                         }
1642                 }
1643
1644                 skb_record_rx_queue(skb, fp->index);
1645 #ifdef BCM_VLAN
1646                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1647                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1648                      PARSING_FLAGS_VLAN))
1649                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1650                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1651                 else
1652 #endif
1653                         netif_receive_skb(skb);
1654
1655
1656 next_rx:
1657                 rx_buf->skb = NULL;
1658
1659                 bd_cons = NEXT_RX_IDX(bd_cons);
1660                 bd_prod = NEXT_RX_IDX(bd_prod);
1661                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1662                 rx_pkt++;
1663 next_cqe:
1664                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1665                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1666
1667                 if (rx_pkt == budget)
1668                         break;
1669         } /* while */
1670
1671         fp->rx_bd_cons = bd_cons;
1672         fp->rx_bd_prod = bd_prod_fw;
1673         fp->rx_comp_cons = sw_comp_cons;
1674         fp->rx_comp_prod = sw_comp_prod;
1675
1676         /* Update producers */
1677         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1678                              fp->rx_sge_prod);
1679
1680         fp->rx_pkt += rx_pkt;
1681         fp->rx_calls++;
1682
1683         return rx_pkt;
1684 }
1685
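/*
 * Per-queue MSI-X handler: acknowledge the fastpath status block with
 * further interrupts disabled and let the NAPI poll routine do the actual
 * RX/TX work for this queue.
 */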
1686 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1687 {
1688         struct bnx2x_fastpath *fp = fp_cookie;
1689         struct bnx2x *bp = fp->bp;
1690         int index = fp->index;
1691
1692         /* Return here if interrupt is disabled */
1693         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1694                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1695                 return IRQ_HANDLED;
1696         }
1697
1698         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1699            index, fp->sb_id);
1700         bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1701
1702 #ifdef BNX2X_STOP_ON_ERROR
1703         if (unlikely(bp->panic))
1704                 return IRQ_HANDLED;
1705 #endif
1706
1707         prefetch(fp->rx_cons_sb);
1708         prefetch(fp->tx_cons_sb);
1709         prefetch(&fp->status_blk->c_status_block.status_block_index);
1710         prefetch(&fp->status_blk->u_status_block.status_block_index);
1711
1712         napi_schedule(&bnx2x_fp(bp, index, napi));
1713
1714         return IRQ_HANDLED;
1715 }
1716
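/*
 * INTx/MSI interrupt handler shared by all queues: bnx2x_ack_int() returns a
 * mask of pending status blocks - bit 0 stands for the default (slow path)
 * status block and bit (sb_id + 1) for a fastpath status block.  Fastpath
 * work is deferred to NAPI, slow path work to the sp_task workqueue.
 */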
1717 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1718 {
1719         struct bnx2x *bp = netdev_priv(dev_instance);
1720         u16 status = bnx2x_ack_int(bp);
1721         u16 mask;
1722
1723         /* Return here if interrupt is shared and it's not for us */
1724         if (unlikely(status == 0)) {
1725                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1726                 return IRQ_NONE;
1727         }
1728         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1729
1730         /* Return here if interrupt is disabled */
1731         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1732                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1733                 return IRQ_HANDLED;
1734         }
1735
1736 #ifdef BNX2X_STOP_ON_ERROR
1737         if (unlikely(bp->panic))
1738                 return IRQ_HANDLED;
1739 #endif
1740
1741         mask = 0x2 << bp->fp[0].sb_id;
1742         if (status & mask) {
1743                 struct bnx2x_fastpath *fp = &bp->fp[0];
1744
1745                 prefetch(fp->rx_cons_sb);
1746                 prefetch(fp->tx_cons_sb);
1747                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1748                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1749
1750                 napi_schedule(&bnx2x_fp(bp, 0, napi));
1751
1752                 status &= ~mask;
1753         }
1754
1755
1756         if (unlikely(status & 0x1)) {
1757                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1758
1759                 status &= ~0x1;
1760                 if (!status)
1761                         return IRQ_HANDLED;
1762         }
1763
1764         if (status)
1765                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1766                    status);
1767
1768         return IRQ_HANDLED;
1769 }
1770
1771 /* end of fast path */
1772
1773 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1774
1775 /* Link */
1776
1777 /*
1778  * General service functions
1779  */
1780
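/*
 * Acquire one of the HW resource locks that arbitrate access to shared
 * resources between the different functions: request the lock by writing
 * the resource bit (at control register + 4) and poll the control register
 * until the bit reads back set, for up to 5 seconds.
 */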
1781 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1782 {
1783         u32 lock_status;
1784         u32 resource_bit = (1 << resource);
1785         int func = BP_FUNC(bp);
1786         u32 hw_lock_control_reg;
1787         int cnt;
1788
1789         /* Validating that the resource is within range */
1790         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1791                 DP(NETIF_MSG_HW,
1792                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1793                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1794                 return -EINVAL;
1795         }
1796
1797         if (func <= 5) {
1798                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1799         } else {
1800                 hw_lock_control_reg =
1801                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1802         }
1803
1804         /* Validating that the resource is not already taken */
1805         lock_status = REG_RD(bp, hw_lock_control_reg);
1806         if (lock_status & resource_bit) {
1807                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1808                    lock_status, resource_bit);
1809                 return -EEXIST;
1810         }
1811
1812         /* Try for 5 seconds, polling every 5ms */
1813         for (cnt = 0; cnt < 1000; cnt++) {
1814                 /* Try to acquire the lock */
1815                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1816                 lock_status = REG_RD(bp, hw_lock_control_reg);
1817                 if (lock_status & resource_bit)
1818                         return 0;
1819
1820                 msleep(5);
1821         }
1822         DP(NETIF_MSG_HW, "Timeout\n");
1823         return -EAGAIN;
1824 }
1825
1826 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1827 {
1828         u32 lock_status;
1829         u32 resource_bit = (1 << resource);
1830         int func = BP_FUNC(bp);
1831         u32 hw_lock_control_reg;
1832
1833         /* Validating that the resource is within range */
1834         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1835                 DP(NETIF_MSG_HW,
1836                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1837                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1838                 return -EINVAL;
1839         }
1840
1841         if (func <= 5) {
1842                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1843         } else {
1844                 hw_lock_control_reg =
1845                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1846         }
1847
1848         /* Validating that the resource is currently taken */
1849         lock_status = REG_RD(bp, hw_lock_control_reg);
1850         if (!(lock_status & resource_bit)) {
1851                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1852                    lock_status, resource_bit);
1853                 return -EFAULT;
1854         }
1855
1856         REG_WR(bp, hw_lock_control_reg, resource_bit);
1857         return 0;
1858 }
1859
1860 /* HW Lock for shared dual port PHYs */
1861 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1862 {
1863         mutex_lock(&bp->port.phy_mutex);
1864
1865         if (bp->port.need_hw_lock)
1866                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1867 }
1868
1869 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1870 {
1871         if (bp->port.need_hw_lock)
1872                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1873
1874         mutex_unlock(&bp->port.phy_mutex);
1875 }
1876
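/*
 * GPIO helpers: bnx2x_get_gpio() returns the current value (0/1) of the
 * requested pin, bnx2x_set_gpio()/bnx2x_set_gpio_int() drive the pin or its
 * interrupt mode under the GPIO HW lock.  The pin index is shifted to the
 * other port's range when the NIG port-swap strap and its override are set.
 */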
1877 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1878 {
1879         /* The GPIO should be swapped if swap register is set and active */
1880         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1881                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1882         int gpio_shift = gpio_num +
1883                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1884         u32 gpio_mask = (1 << gpio_shift);
1885         u32 gpio_reg;
1886         int value;
1887
1888         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1889                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1890                 return -EINVAL;
1891         }
1892
1893         /* read GPIO value */
1894         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1895
1896         /* get the requested pin value */
1897         if ((gpio_reg & gpio_mask) == gpio_mask)
1898                 value = 1;
1899         else
1900                 value = 0;
1901
1902         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
1903
1904         return value;
1905 }
1906
1907 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1908 {
1909         /* The GPIO should be swapped if swap register is set and active */
1910         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1911                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1912         int gpio_shift = gpio_num +
1913                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1914         u32 gpio_mask = (1 << gpio_shift);
1915         u32 gpio_reg;
1916
1917         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1918                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1919                 return -EINVAL;
1920         }
1921
1922         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1923         /* read GPIO and mask except the float bits */
1924         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1925
1926         switch (mode) {
1927         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1928                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1929                    gpio_num, gpio_shift);
1930                 /* clear FLOAT and set CLR */
1931                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1932                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1933                 break;
1934
1935         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1936                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1937                    gpio_num, gpio_shift);
1938                 /* clear FLOAT and set SET */
1939                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1940                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1941                 break;
1942
1943         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1944                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1945                    gpio_num, gpio_shift);
1946                 /* set FLOAT */
1947                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1948                 break;
1949
1950         default:
1951                 break;
1952         }
1953
1954         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1955         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1956
1957         return 0;
1958 }
1959
1960 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1961 {
1962         /* The GPIO should be swapped if swap register is set and active */
1963         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1964                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1965         int gpio_shift = gpio_num +
1966                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1967         u32 gpio_mask = (1 << gpio_shift);
1968         u32 gpio_reg;
1969
1970         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1971                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1972                 return -EINVAL;
1973         }
1974
1975         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1976         /* read GPIO int */
1977         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1978
1979         switch (mode) {
1980         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1981                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1982                                    "output low\n", gpio_num, gpio_shift);
1983                 /* clear SET and set CLR */
1984                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1985                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1986                 break;
1987
1988         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1989                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1990                                    "output high\n", gpio_num, gpio_shift);
1991                 /* clear CLR and set SET */
1992                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1993                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1994                 break;
1995
1996         default:
1997                 break;
1998         }
1999
2000         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2001         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2002
2003         return 0;
2004 }
2005
2006 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2007 {
2008         u32 spio_mask = (1 << spio_num);
2009         u32 spio_reg;
2010
2011         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2012             (spio_num > MISC_REGISTERS_SPIO_7)) {
2013                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2014                 return -EINVAL;
2015         }
2016
2017         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2018         /* read SPIO and mask except the float bits */
2019         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2020
2021         switch (mode) {
2022         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2023                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2024                 /* clear FLOAT and set CLR */
2025                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2026                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2027                 break;
2028
2029         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2030                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2031                 /* clear FLOAT and set SET */
2032                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2033                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2034                 break;
2035
2036         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2037                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2038                 /* set FLOAT */
2039                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2040                 break;
2041
2042         default:
2043                 break;
2044         }
2045
2046         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2047         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2048
2049         return 0;
2050 }
2051
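/*
 * Translate the IEEE pause advertisement bits in link_vars.ieee_fc into the
 * ethtool ADVERTISED_Pause/ADVERTISED_Asym_Pause flags of port.advertising.
 */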
2052 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2053 {
2054         switch (bp->link_vars.ieee_fc &
2055                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2056         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2057                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2058                                           ADVERTISED_Pause);
2059                 break;
2060
2061         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2062                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2063                                          ADVERTISED_Pause);
2064                 break;
2065
2066         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2067                 bp->port.advertising |= ADVERTISED_Asym_Pause;
2068                 break;
2069
2070         default:
2071                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2072                                           ADVERTISED_Pause);
2073                 break;
2074         }
2075 }
2076
2077 static void bnx2x_link_report(struct bnx2x *bp)
2078 {
2079         if (bp->link_vars.link_up) {
2080                 if (bp->state == BNX2X_STATE_OPEN)
2081                         netif_carrier_on(bp->dev);
2082                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2083
2084                 printk("%d Mbps ", bp->link_vars.line_speed);
2085
2086                 if (bp->link_vars.duplex == DUPLEX_FULL)
2087                         printk("full duplex");
2088                 else
2089                         printk("half duplex");
2090
2091                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2092                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2093                                 printk(", receive ");
2094                                 if (bp->link_vars.flow_ctrl &
2095                                     BNX2X_FLOW_CTRL_TX)
2096                                         printk("& transmit ");
2097                         } else {
2098                                 printk(", transmit ");
2099                         }
2100                         printk("flow control ON");
2101                 }
2102                 printk("\n");
2103
2104         } else { /* link_down */
2105                 netif_carrier_off(bp->dev);
2106                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2107         }
2108 }
2109
2110 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2111 {
2112         if (!BP_NOMCP(bp)) {
2113                 u8 rc;
2114
2115                 /* Initialize link parameters structure variables */
2116                 /* It is recommended to turn off RX FC for jumbo frames
2117                    for better performance */
2118                 if (IS_E1HMF(bp))
2119                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2120                 else if (bp->dev->mtu > 5000)
2121                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2122                 else
2123                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2124
2125                 bnx2x_acquire_phy_lock(bp);
2126
2127                 if (load_mode == LOAD_DIAG)
2128                         bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2129
2130                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2131
2132                 bnx2x_release_phy_lock(bp);
2133
2134                 bnx2x_calc_fc_adv(bp);
2135
2136                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2137                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2138                         bnx2x_link_report(bp);
2139                 }
2140
2141                 return rc;
2142         }
2143         BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2144         return -EINVAL;
2145 }
2146
2147 static void bnx2x_link_set(struct bnx2x *bp)
2148 {
2149         if (!BP_NOMCP(bp)) {
2150                 bnx2x_acquire_phy_lock(bp);
2151                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2152                 bnx2x_release_phy_lock(bp);
2153
2154                 bnx2x_calc_fc_adv(bp);
2155         } else
2156                 BNX2X_ERR("Bootcode is missing - can not set link\n");
2157 }
2158
2159 static void bnx2x__link_reset(struct bnx2x *bp)
2160 {
2161         if (!BP_NOMCP(bp)) {
2162                 bnx2x_acquire_phy_lock(bp);
2163                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2164                 bnx2x_release_phy_lock(bp);
2165         } else
2166                 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2167 }
2168
2169 static u8 bnx2x_link_test(struct bnx2x *bp)
2170 {
2171         u8 rc;
2172
2173         bnx2x_acquire_phy_lock(bp);
2174         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2175         bnx2x_release_phy_lock(bp);
2176
2177         return rc;
2178 }
2179
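/*
 * Initialize the per-port congestion management (rate shaping and fairness)
 * variables: periodic timeouts are converted to 4 usec SDM ticks and the
 * thresholds and upper bound are derived from the current line speed
 * (r_param is the line rate in bytes per usec, assuming line_speed is in
 * Mbps).
 */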
2180 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2181 {
2182         u32 r_param = bp->link_vars.line_speed / 8;
2183         u32 fair_periodic_timeout_usec;
2184         u32 t_fair;
2185
2186         memset(&(bp->cmng.rs_vars), 0,
2187                sizeof(struct rate_shaping_vars_per_port));
2188         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2189
2190         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2191         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2192
2193         /* this is the threshold below which no timer arming will occur;
2194            the 1.25 coefficient makes the threshold a little bigger
2195            than the real time, to compensate for timer inaccuracy */
2196         bp->cmng.rs_vars.rs_threshold =
2197                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2198
2199         /* resolution of fairness timer */
2200         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2201         /* for 10G it is 1000 usec, for 1G it is 10000 usec */
2202         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2203
2204         /* this is the threshold below which we won't arm the timer anymore */
2205         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2206
2207         /* we multiply by 1e3/8 to get bytes/msec.
2208            We don't want the credit to exceed the equivalent of
2209            t_fair*FAIR_MEM (the algorithm resolution) */
2210         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2211         /* since each tick is 4 usec */
2212         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2213 }
2214
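/*
 * Set up the per-VN rate shaping and fairness parameters from the
 * multi-function configuration in shared memory: the min/max bandwidth is
 * read from the function's MF config (a hidden function gets zero),
 * converted to rate/quota/credit values and stored in XSTORM internal
 * memory.
 */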
2215 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2216 {
2217         struct rate_shaping_vars_per_vn m_rs_vn;
2218         struct fairness_vars_per_vn m_fair_vn;
2219         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2220         u16 vn_min_rate, vn_max_rate;
2221         int i;
2222
2223         /* If function is hidden - set min and max to zeroes */
2224         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2225                 vn_min_rate = 0;
2226                 vn_max_rate = 0;
2227
2228         } else {
2229                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2230                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2231                 /* If fairness is enabled (not all min rates are zero) and
2232                    the current min rate is zero - set it to 1.
2233                    This is a requirement of the algorithm. */
2234                 if (bp->vn_weight_sum && (vn_min_rate == 0))
2235                         vn_min_rate = DEF_MIN_RATE;
2236                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2237                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2238         }
2239
2240         DP(NETIF_MSG_IFUP,
2241            "func %d: vn_min_rate=%d  vn_max_rate=%d  vn_weight_sum=%d\n",
2242            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2243
2244         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2245         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2246
2247         /* global vn counter - maximal Mbps for this vn */
2248         m_rs_vn.vn_counter.rate = vn_max_rate;
2249
2250         /* quota - number of bytes transmitted in this period */
2251         m_rs_vn.vn_counter.quota =
2252                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2253
2254         if (bp->vn_weight_sum) {
2255                 /* credit for each period of the fairness algorithm:
2256                    number of bytes in T_FAIR (the VNs share the port rate).
2257                    vn_weight_sum should not be larger than 10000, thus
2258                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2259                    than zero */
2260                 m_fair_vn.vn_credit_delta =
2261                         max((u32)(vn_min_rate * (T_FAIR_COEF /
2262                                                  (8 * bp->vn_weight_sum))),
2263                             (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2264                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2265                    m_fair_vn.vn_credit_delta);
2266         }
2267
2268         /* Store it to internal memory */
2269         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2270                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2271                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2272                        ((u32 *)(&m_rs_vn))[i]);
2273
2274         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2275                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2276                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2277                        ((u32 *)(&m_fair_vn))[i]);
2278 }
2279
2280
2281 /* This function is called upon link interrupt */
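/*
 * It re-reads the link state, updates the dropless flow control pause
 * setting (E1H), resets the old BMAC statistics when needed, reports the
 * link status, and - in E1H multi-function mode - re-initializes the port
 * min/max (rate shaping and fairness) settings and signals the other
 * functions on the port through the LINK_SYNC general attention bits.
 */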
2282 static void bnx2x_link_attn(struct bnx2x *bp)
2283 {
2284         /* Make sure that we are synced with the current statistics */
2285         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2286
2287         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2288
2289         if (bp->link_vars.link_up) {
2290
2291                 /* dropless flow control */
2292                 if (CHIP_IS_E1H(bp)) {
2293                         int port = BP_PORT(bp);
2294                         u32 pause_enabled = 0;
2295
2296                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2297                                 pause_enabled = 1;
2298
2299                         REG_WR(bp, BAR_USTRORM_INTMEM +
2300                                USTORM_PAUSE_ENABLED_OFFSET(port),
2301                                pause_enabled);
2302                 }
2303
2304                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2305                         struct host_port_stats *pstats;
2306
2307                         pstats = bnx2x_sp(bp, port_stats);
2308                         /* reset old bmac stats */
2309                         memset(&(pstats->mac_stx[0]), 0,
2310                                sizeof(struct mac_stx));
2311                 }
2312                 if ((bp->state == BNX2X_STATE_OPEN) ||
2313                     (bp->state == BNX2X_STATE_DISABLED))
2314                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2315         }
2316
2317         /* indicate link status */
2318         bnx2x_link_report(bp);
2319
2320         if (IS_E1HMF(bp)) {
2321                 int port = BP_PORT(bp);
2322                 int func;
2323                 int vn;
2324
2325                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2326                         if (vn == BP_E1HVN(bp))
2327                                 continue;
2328
2329                         func = ((vn << 1) | port);
2330
2331                         /* Set the attention towards other drivers
2332                            on the same port */
2333                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2334                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2335                 }
2336
2337                 if (bp->link_vars.link_up) {
2338                         int i;
2339
2340                         /* Init rate shaping and fairness contexts */
2341                         bnx2x_init_port_minmax(bp);
2342
2343                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2344                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2345
2346                         /* Store it to internal memory */
2347                         for (i = 0;
2348                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
2349                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2350                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2351                                        ((u32 *)(&bp->cmng))[i]);
2352                 }
2353         }
2354 }
2355
2356 static void bnx2x__link_status_update(struct bnx2x *bp)
2357 {
2358         if (bp->state != BNX2X_STATE_OPEN)
2359                 return;
2360
2361         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2362
2363         if (bp->link_vars.link_up)
2364                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2365         else
2366                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2367
2368         /* indicate link status */
2369         bnx2x_link_report(bp);
2370 }
2371
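/*
 * Called when this driver instance becomes the PMF for the port: enable the
 * NIG attention for our VN in the HC leading/trailing edge registers and
 * kick the statistics state machine with a PMF event.
 */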
2372 static void bnx2x_pmf_update(struct bnx2x *bp)
2373 {
2374         int port = BP_PORT(bp);
2375         u32 val;
2376
2377         bp->port.pmf = 1;
2378         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2379
2380         /* enable nig attention */
2381         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2382         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2383         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2384
2385         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2386 }
2387
2388 /* end of Link */
2389
2390 /* slow path */
2391
2392 /*
2393  * General service functions
2394  */
2395
2396 /* the slow path queue is odd since completions arrive on the fastpath ring */
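/*
 * Post a single slow path element (ramrod) on the SPQ: fill the producer BD
 * with the command, CID and data, advance the producer (wrapping at the
 * last BD) and notify the FW by writing the new producer index to XSTORM
 * internal memory.  Returns -EBUSY if the queue is full.
 */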
2397 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2398                          u32 data_hi, u32 data_lo, int common)
2399 {
2400         int func = BP_FUNC(bp);
2401
2402         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2403            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2404            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2405            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2406            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2407
2408 #ifdef BNX2X_STOP_ON_ERROR
2409         if (unlikely(bp->panic))
2410                 return -EIO;
2411 #endif
2412
2413         spin_lock_bh(&bp->spq_lock);
2414
2415         if (!bp->spq_left) {
2416                 BNX2X_ERR("BUG! SPQ ring full!\n");
2417                 spin_unlock_bh(&bp->spq_lock);
2418                 bnx2x_panic();
2419                 return -EBUSY;
2420         }
2421
2422         /* CID needs the port number to be encoded in it */
2423         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2424                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2425                                      HW_CID(bp, cid)));
2426         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2427         if (common)
2428                 bp->spq_prod_bd->hdr.type |=
2429                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2430
2431         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2432         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2433
2434         bp->spq_left--;
2435
2436         if (bp->spq_prod_bd == bp->spq_last_bd) {
2437                 bp->spq_prod_bd = bp->spq;
2438                 bp->spq_prod_idx = 0;
2439                 DP(NETIF_MSG_TIMER, "end of spq\n");
2440
2441         } else {
2442                 bp->spq_prod_bd++;
2443                 bp->spq_prod_idx++;
2444         }
2445
2446         /* Make sure that BD data is updated before writing the producer */
2447         wmb();
2448
2449         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2450                bp->spq_prod_idx);
2451
2452         mmiowb();
2453
2454         spin_unlock_bh(&bp->spq_lock);
2455         return 0;
2456 }
2457
2458 /* acquire split MCP access lock register */
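/* set bit 31 in GRCBASE_MCP + 0x9c and poll (up to ~5 seconds) until the
 * HW reports the lock as taken */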
2459 static int bnx2x_acquire_alr(struct bnx2x *bp)
2460 {
2461         u32 i, j, val;
2462         int rc = 0;
2463
2464         might_sleep();
2465         i = 100;
2466         for (j = 0; j < i*10; j++) {
2467                 val = (1UL << 31);
2468                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2469                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2470                 if (val & (1L << 31))
2471                         break;
2472
2473                 msleep(5);
2474         }
2475         if (!(val & (1L << 31))) {
2476                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2477                 rc = -EBUSY;
2478         }
2479
2480         return rc;
2481 }
2482
2483 /* release split MCP access lock register */
2484 static void bnx2x_release_alr(struct bnx2x *bp)
2485 {
2486         u32 val = 0;
2487
2488         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2489 }
2490
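/*
 * Sample the default status block and return a bit mask of the indices that
 * changed since the last check: bit 0 - attention bits, bits 1..4 - the
 * CSTORM/USTORM/XSTORM/TSTORM default status block indices.
 */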
2491 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2492 {
2493         struct host_def_status_block *def_sb = bp->def_status_blk;
2494         u16 rc = 0;
2495
2496         barrier(); /* status block is written to by the chip */
2497         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2498                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2499                 rc |= 1;
2500         }
2501         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2502                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2503                 rc |= 2;
2504         }
2505         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2506                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2507                 rc |= 4;
2508         }
2509         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2510                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2511                 rc |= 8;
2512         }
2513         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2514                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2515                 rc |= 16;
2516         }
2517         return rc;
2518 }
2519
2520 /*
2521  * slow path service functions
2522  */
2523
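/*
 * Handle newly asserted attention bits: mask them in the AEU for this
 * function (under the HW lock), record them in attn_state, service the
 * hard-wired sources (NIG/link, GPIOs, general attentions) and finally
 * write the asserted bits to the HC attention-set command register.
 */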
2524 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2525 {
2526         int port = BP_PORT(bp);
2527         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2528                        COMMAND_REG_ATTN_BITS_SET);
2529         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2530                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2531         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2532                                        NIG_REG_MASK_INTERRUPT_PORT0;
2533         u32 aeu_mask;
2534         u32 nig_mask = 0;
2535
2536         if (bp->attn_state & asserted)
2537                 BNX2X_ERR("IGU ERROR\n");
2538
2539         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2540         aeu_mask = REG_RD(bp, aeu_addr);
2541
2542         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2543            aeu_mask, asserted);
2544         aeu_mask &= ~(asserted & 0xff);
2545         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2546
2547         REG_WR(bp, aeu_addr, aeu_mask);
2548         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2549
2550         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2551         bp->attn_state |= asserted;
2552         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2553
2554         if (asserted & ATTN_HARD_WIRED_MASK) {
2555                 if (asserted & ATTN_NIG_FOR_FUNC) {
2556
2557                         bnx2x_acquire_phy_lock(bp);
2558
2559                         /* save nig interrupt mask */
2560                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2561                         REG_WR(bp, nig_int_mask_addr, 0);
2562
2563                         bnx2x_link_attn(bp);
2564
2565                         /* handle unicore attn? */
2566                 }
2567                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2568                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2569
2570                 if (asserted & GPIO_2_FUNC)
2571                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2572
2573                 if (asserted & GPIO_3_FUNC)
2574                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2575
2576                 if (asserted & GPIO_4_FUNC)
2577                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2578
2579                 if (port == 0) {
2580                         if (asserted & ATTN_GENERAL_ATTN_1) {
2581                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2582                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2583                         }
2584                         if (asserted & ATTN_GENERAL_ATTN_2) {
2585                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2586                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2587                         }
2588                         if (asserted & ATTN_GENERAL_ATTN_3) {
2589                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2590                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2591                         }
2592                 } else {
2593                         if (asserted & ATTN_GENERAL_ATTN_4) {
2594                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2595                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2596                         }
2597                         if (asserted & ATTN_GENERAL_ATTN_5) {
2598                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2599                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2600                         }
2601                         if (asserted & ATTN_GENERAL_ATTN_6) {
2602                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2603                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2604                         }
2605                 }
2606
2607         } /* if hardwired */
2608
2609         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2610            asserted, hc_addr);
2611         REG_WR(bp, hc_addr, asserted);
2612
2613         /* now set back the mask */
2614         if (asserted & ATTN_NIG_FOR_FUNC) {
2615                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2616                 bnx2x_release_phy_lock(bp);
2617         }
2618 }
2619
2620 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2621 {
2622         int port = BP_PORT(bp);
2623
2624         /* mark the failure */
2625         bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2626         bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2627         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2628                  bp->link_params.ext_phy_config);
2629
2630         /* log the failure */
2631         printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2632                " the driver to shut down the card to prevent permanent"
2633                " damage.  Please contact Dell Support for assistance\n",
2634                bp->dev->name);
2635 }
2636 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2637 {
2638         int port = BP_PORT(bp);
2639         int reg_offset;
2640         u32 val, swap_val, swap_override;
2641
2642         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2643                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2644
2645         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2646
2647                 val = REG_RD(bp, reg_offset);
2648                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2649                 REG_WR(bp, reg_offset, val);
2650
2651                 BNX2X_ERR("SPIO5 hw attention\n");
2652
2653                 /* Fan failure attention */
2654                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2655                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2656                         /* Low power mode is controlled by GPIO 2 */
2657                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2658                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2659                         /* The PHY reset is controlled by GPIO 1 */
2660                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2661                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2662                         break;
2663
2664                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2665                         /* The PHY reset is controlled by GPIO 1 */
2666                         /* fake the port number to cancel the swap done in
2667                            set_gpio() */
2668                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2669                         swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2670                         port = (swap_val && swap_override) ^ 1;
2671                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2672                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2673                         break;
2674
2675                 default:
2676                         break;
2677                 }
2678                 bnx2x_fan_failure(bp);
2679         }
2680
2681         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2682                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2683                 bnx2x_acquire_phy_lock(bp);
2684                 bnx2x_handle_module_detect_int(&bp->link_params);
2685                 bnx2x_release_phy_lock(bp);
2686         }
2687
2688         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2689
2690                 val = REG_RD(bp, reg_offset);
2691                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2692                 REG_WR(bp, reg_offset, val);
2693
2694                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2695                           (attn & HW_INTERRUT_ASSERT_SET_0));
2696                 bnx2x_panic();
2697         }
2698 }
2699
2700 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2701 {
2702         u32 val;
2703
2704         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2705
2706                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2707                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2708                 /* DORQ discard attention */
2709                 if (val & 0x2)
2710                         BNX2X_ERR("FATAL error from DORQ\n");
2711         }
2712
2713         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2714
2715                 int port = BP_PORT(bp);
2716                 int reg_offset;
2717
2718                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2719                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2720
2721                 val = REG_RD(bp, reg_offset);
2722                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2723                 REG_WR(bp, reg_offset, val);
2724
2725                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2726                           (attn & HW_INTERRUT_ASSERT_SET_1));
2727                 bnx2x_panic();
2728         }
2729 }
2730
2731 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2732 {
2733         u32 val;
2734
2735         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2736
2737                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2738                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2739                 /* CFC error attention */
2740                 if (val & 0x2)
2741                         BNX2X_ERR("FATAL error from CFC\n");
2742         }
2743
2744         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2745
2746                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2747                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2748                 /* RQ_USDMDP_FIFO_OVERFLOW */
2749                 if (val & 0x18000)
2750                         BNX2X_ERR("FATAL error from PXP\n");
2751         }
2752
2753         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2754
2755                 int port = BP_PORT(bp);
2756                 int reg_offset;
2757
2758                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2759                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2760
2761                 val = REG_RD(bp, reg_offset);
2762                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2763                 REG_WR(bp, reg_offset, val);
2764
2765                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2766                           (attn & HW_INTERRUT_ASSERT_SET_2));
2767                 bnx2x_panic();
2768         }
2769 }
2770
2771 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2772 {
2773         u32 val;
2774
2775         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2776
2777                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2778                         int func = BP_FUNC(bp);
2779
2780                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2781                         bnx2x__link_status_update(bp);
2782                         if (SHMEM_RD(bp, func_mb[func].drv_status) &
2783                                                         DRV_STATUS_PMF)
2784                                 bnx2x_pmf_update(bp);
2785
2786                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2787
2788                         BNX2X_ERR("MC assert!\n");
2789                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2790                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2791                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2792                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2793                         bnx2x_panic();
2794
2795                 } else if (attn & BNX2X_MCP_ASSERT) {
2796
2797                         BNX2X_ERR("MCP assert!\n");
2798                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2799                         bnx2x_fw_dump(bp);
2800
2801                 } else
2802                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2803         }
2804
2805         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2806                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2807                 if (attn & BNX2X_GRC_TIMEOUT) {
2808                         val = CHIP_IS_E1H(bp) ?
2809                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2810                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2811                 }
2812                 if (attn & BNX2X_GRC_RSV) {
2813                         val = CHIP_IS_E1H(bp) ?
2814                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2815                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2816                 }
2817                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2818         }
2819 }
2820
2821 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2822 {
2823         struct attn_route attn;
2824         struct attn_route group_mask;
2825         int port = BP_PORT(bp);
2826         int index;
2827         u32 reg_addr;
2828         u32 val;
2829         u32 aeu_mask;
2830
2831         /* need to take HW lock because MCP or other port might also
2832            try to handle this event */
2833         bnx2x_acquire_alr(bp);
2834
2835         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2836         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2837         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2838         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2839         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2840            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2841
2842         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2843                 if (deasserted & (1 << index)) {
2844                         group_mask = bp->attn_group[index];
2845
2846                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2847                            index, group_mask.sig[0], group_mask.sig[1],
2848                            group_mask.sig[2], group_mask.sig[3]);
2849
2850                         bnx2x_attn_int_deasserted3(bp,
2851                                         attn.sig[3] & group_mask.sig[3]);
2852                         bnx2x_attn_int_deasserted1(bp,
2853                                         attn.sig[1] & group_mask.sig[1]);
2854                         bnx2x_attn_int_deasserted2(bp,
2855                                         attn.sig[2] & group_mask.sig[2]);
2856                         bnx2x_attn_int_deasserted0(bp,
2857                                         attn.sig[0] & group_mask.sig[0]);
2858
2859                         if ((attn.sig[0] & group_mask.sig[0] &
2860                                                 HW_PRTY_ASSERT_SET_0) ||
2861                             (attn.sig[1] & group_mask.sig[1] &
2862                                                 HW_PRTY_ASSERT_SET_1) ||
2863                             (attn.sig[2] & group_mask.sig[2] &
2864                                                 HW_PRTY_ASSERT_SET_2))
2865                                 BNX2X_ERR("FATAL HW block parity attention\n");
2866                 }
2867         }
2868
2869         bnx2x_release_alr(bp);
2870
2871         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2872
2873         val = ~deasserted;
2874         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2875            val, reg_addr);
2876         REG_WR(bp, reg_addr, val);
2877
2878         if (~bp->attn_state & deasserted)
2879                 BNX2X_ERR("IGU ERROR\n");
2880
2881         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2882                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2883
2884         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2885         aeu_mask = REG_RD(bp, reg_addr);
2886
2887         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2888            aeu_mask, deasserted);
2889         aeu_mask |= (deasserted & 0xff);
2890         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2891
2892         REG_WR(bp, reg_addr, aeu_mask);
2893         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2894
2895         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2896         bp->attn_state &= ~deasserted;
2897         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2898 }
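/*
 * Roughly, the deassertion flow above is: the four AEU "after invert"
 * registers are sampled under the ALR lock, and every dynamic attention
 * group whose bit is in 'deasserted' is ANDed with its group mask and
 * dispatched to the per-register handlers.  Then ~deasserted is written
 * to the HC COMMAND_REG_ATTN_BITS_CLR, the deasserted bits are OR-ed
 * back into MISC_REG_AEU_MASK_ATTN_FUNC_x under the per-port HW lock,
 * and the same bits are removed from bp->attn_state so that the next
 * assertion can be tracked again.
 */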
2899
2900 static void bnx2x_attn_int(struct bnx2x *bp)
2901 {
2902         /* read local copy of bits */
2903         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2904                                                                 attn_bits);
2905         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2906                                                                 attn_bits_ack);
2907         u32 attn_state = bp->attn_state;
2908
2909         /* look for changed bits */
2910         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2911         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
2912
2913         DP(NETIF_MSG_HW,
2914            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2915            attn_bits, attn_ack, asserted, deasserted);
2916
2917         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2918                 BNX2X_ERR("BAD attention state\n");
2919
2920         /* handle bits that were raised */
2921         if (asserted)
2922                 bnx2x_attn_int_asserted(bp, asserted);
2923
2924         if (deasserted)
2925                 bnx2x_attn_int_deasserted(bp, deasserted);
2926 }
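/*
 * Worked example for the asserted/deasserted derivation above (the
 * values are illustrative only).  With attn_state = 0x5, attn_bits = 0x6
 * and attn_ack = 0x5:
 *
 *   asserted   =  0x6 & ~0x5 & ~0x5 = 0x2   (bit 1 newly raised)
 *   deasserted = ~0x6 &  0x5 &  0x5 = 0x1   (bit 0 dropped and acked)
 *
 * The "BAD attention state" check flags any bit on which the hardware
 * value already matches the ack (~(attn_bits ^ attn_ack)) but disagrees
 * with the driver's tracked attn_state (attn_bits ^ attn_state).
 */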
2927
2928 static void bnx2x_sp_task(struct work_struct *work)
2929 {
2930         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2931         u16 status;
2932
2933
2934         /* Return here if interrupt is disabled */
2935         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2936                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2937                 return;
2938         }
2939
2940         status = bnx2x_update_dsb_idx(bp);
2941 /*      if (status == 0)                                     */
2942 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2943
2944         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2945
2946         /* HW attentions */
2947         if (status & 0x1)
2948                 bnx2x_attn_int(bp);
2949
2950         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2951                      IGU_INT_NOP, 1);
2952         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2953                      IGU_INT_NOP, 1);
2954         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2955                      IGU_INT_NOP, 1);
2956         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2957                      IGU_INT_NOP, 1);
2958         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2959                      IGU_INT_ENABLE, 1);
2960
2961 }
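/*
 * The slow-path work above runs with the slow-path interrupt disabled
 * (bnx2x_msix_sp_int() below issues IGU_INT_DISABLE before queueing it).
 * The first four acks use IGU_INT_NOP and only update the default status
 * block indices; the final TSTORM ack uses IGU_INT_ENABLE, which is what
 * re-enables the slow-path interrupt line.
 */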
2962
2963 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2964 {
2965         struct net_device *dev = dev_instance;
2966         struct bnx2x *bp = netdev_priv(dev);
2967
2968         /* Return here if interrupt is disabled */
2969         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2970                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2971                 return IRQ_HANDLED;
2972         }
2973
2974         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2975
2976 #ifdef BNX2X_STOP_ON_ERROR
2977         if (unlikely(bp->panic))
2978                 return IRQ_HANDLED;
2979 #endif
2980
2981         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2982
2983         return IRQ_HANDLED;
2984 }
2985
2986 /* end of slow path */
2987
2988 /* Statistics */
2989
2990 /****************************************************************************
2991 * Macros
2992 ****************************************************************************/
2993
2994 /* sum[hi:lo] += add[hi:lo] */
2995 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2996         do { \
2997                 s_lo += a_lo; \
2998                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2999         } while (0)
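/*
 * The 64-bit statistics are kept as separate hi/lo u32 words, so the
 * carry out of the low word has to be propagated by hand: after the
 * unsigned 32-bit add, (s_lo < a_lo) is true exactly when the low word
 * wrapped.  Illustration (values chosen only for the example):
 *
 *   s = 0x00000001:fffffff0, a = 0x00000000:00000020
 *   s_lo = 0xfffffff0 + 0x20 = 0x00000010   (wrapped, 0x10 < 0x20)
 *   s_hi = 0x1 + 0x0 + 1     = 0x00000002   -> 0x00000002:00000010
 */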
3000
3001 /* difference = minuend - subtrahend */
3002 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3003         do { \
3004                 if (m_lo < s_lo) { \
3005                         /* underflow */ \
3006                         d_hi = m_hi - s_hi; \
3007                         if (d_hi > 0) { \
3008                                 /* we can 'borrow' 1 */ \
3009                                 d_hi--; \
3010                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3011                         } else { \
3012                                 /* m_hi <= s_hi */ \
3013                                 d_hi = 0; \
3014                                 d_lo = 0; \
3015                         } \
3016                 } else { \
3017                         /* m_lo >= s_lo */ \
3018                         if (m_hi < s_hi) { \
3019                                 d_hi = 0; \
3020                                 d_lo = 0; \
3021                         } else { \
3022                                 /* m_hi >= s_hi */ \
3023                                 d_hi = m_hi - s_hi; \
3024                                 d_lo = m_lo - s_lo; \
3025                         } \
3026                 } \
3027         } while (0)
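/*
 * DIFF_64() is the matching split subtraction: when m_lo < s_lo it
 * borrows one from the high word, and the branches that zero d_hi/d_lo
 * clamp the result to 0 when the subtrahend is the larger value (e.g.
 * after a counter reset).  Example (illustrative values):
 *
 *   m = 0x00000002:00000010, s = 0x00000001:00000020
 *   m_lo < s_lo, d_hi = 2 - 1 = 1 > 0, so borrow:
 *   d_hi = 0, d_lo = 0x10 + (0xffffffff - 0x20) + 1 = 0xfffffff0
 *   -> d = 0x00000000:fffffff0
 */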
3028
3029 #define UPDATE_STAT64(s, t) \
3030         do { \
3031                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3032                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3033                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3034                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3035                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3036                        pstats->mac_stx[1].t##_lo, diff.lo); \
3037         } while (0)
3038
3039 #define UPDATE_STAT64_NIG(s, t) \
3040         do { \
3041                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3042                         diff.lo, new->s##_lo, old->s##_lo); \
3043                 ADD_64(estats->t##_hi, diff.hi, \
3044                        estats->t##_lo, diff.lo); \
3045         } while (0)
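/*
 * In UPDATE_STAT64(), mac_stx[0] holds the previous raw snapshot of the
 * MAC counter and mac_stx[1] the accumulated total: DIFF_64() computes
 * how far the hardware counter advanced since the last DMA read, the
 * fresh value then replaces mac_stx[0], and the delta is folded into
 * mac_stx[1].  UPDATE_STAT64_NIG() does the same against old_nig_stats,
 * accumulating directly into bp->eth_stats.
 */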
3046
3047 /* sum[hi:lo] += add */
3048 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3049         do { \
3050                 s_lo += a; \
3051                 s_hi += (s_lo < a) ? 1 : 0; \
3052         } while (0)
3053
3054 #define UPDATE_EXTEND_STAT(s) \
3055         do { \
3056                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3057                               pstats->mac_stx[1].s##_lo, \
3058                               new->s); \
3059         } while (0)
3060
3061 #define UPDATE_EXTEND_TSTAT(s, t) \
3062         do { \
3063                 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3064                 old_tclient->s = tclient->s; \
3065                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3066         } while (0)
3067
3068 #define UPDATE_EXTEND_USTAT(s, t) \
3069         do { \
3070                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3071                 old_uclient->s = uclient->s; \
3072                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3073         } while (0)
3074
3075 #define UPDATE_EXTEND_XSTAT(s, t) \
3076         do { \
3077                 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3078                 old_xclient->s = xclient->s; \
3079                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3080         } while (0)
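/*
 * The UPDATE_EXTEND_{T,U,X}STAT() helpers extend 32-bit firmware
 * counters into the 64-bit hi/lo accumulators.  The delta is computed
 * in unsigned 32-bit arithmetic, so it stays correct across a single
 * counter wrap as long as fewer than 2^32 events occur between two
 * updates, e.g.:
 *
 *   old = 0xfffffffe, new = 0x00000003
 *   diff = 0x00000003 - 0xfffffffe = 0x00000005   (mod 2^32)
 */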
3081
3082 /* minuend -= subtrahend */
3083 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3084         do { \
3085                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3086         } while (0)
3087
3088 /* minuend[hi:lo] -= subtrahend */
3089 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3090         do { \
3091                 SUB_64(m_hi, 0, m_lo, s); \
3092         } while (0)
3093
3094 #define SUB_EXTEND_USTAT(s, t) \
3095         do { \
3096                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3097                 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3098         } while (0)
3099
3100 /*
3101  * General service functions
3102  */
3103
3104 static inline long bnx2x_hilo(u32 *hiref)
3105 {
3106         u32 lo = *(hiref + 1);
3107 #if (BITS_PER_LONG == 64)
3108         u32 hi = *hiref;
3109
3110         return HILO_U64(hi, lo);
3111 #else
3112         return lo;
3113 #endif
3114 }
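/*
 * bnx2x_hilo() relies on the _lo word being laid out directly after the
 * _hi word in the statistics structures, so *(hiref + 1) is the low
 * half.  On 64-bit kernels the pair is combined with HILO_U64(); on
 * 32-bit kernels only the low 32 bits fit in the returned long, so e.g.
 * bnx2x_hilo(&estats->total_bytes_received_hi) truncates there.
 */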
3115
3116 /*
3117  * Init service functions
3118  */
3119
3120 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3121 {
3122         if (!bp->stats_pending) {
3123                 struct eth_query_ramrod_data ramrod_data = {0};
3124                 int i, rc;
3125
3126                 ramrod_data.drv_counter = bp->stats_counter++;
3127                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3128                 for_each_queue(bp, i)
3129                         ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3130
3131                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3132                                    ((u32 *)&ramrod_data)[1],
3133                                    ((u32 *)&ramrod_data)[0], 0);
3134                 if (rc == 0) {
3135                         /* stats ramrod has its own slot on the spq */
3136                         bp->spq_left++;
3137                         bp->stats_pending = 1;
3138                 }
3139         }
3140 }
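/*
 * The statistics query above is a slow-path ramrod: drv_counter stamps
 * the request (the storms echo it back, and bnx2x_storm_stats_update()
 * refuses stale buffers by comparing the echoed value), collect_port
 * requests port statistics when this function is the PMF, and
 * ctr_id_vector selects the client IDs to collect.  spq_left is given
 * back because the statistics ramrod uses its own reserved SPQ slot.
 */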
3141
3142 static void bnx2x_stats_init(struct bnx2x *bp)
3143 {
3144         int port = BP_PORT(bp);
3145         int i;
3146
3147         bp->stats_pending = 0;
3148         bp->executer_idx = 0;
3149         bp->stats_counter = 0;
3150
3151         /* port stats */
3152         if (!BP_NOMCP(bp))
3153                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3154         else
3155                 bp->port.port_stx = 0;
3156         DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3157
3158         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3159         bp->port.old_nig_stats.brb_discard =
3160                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3161         bp->port.old_nig_stats.brb_truncate =
3162                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3163         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3164                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3165         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3166                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3167
3168         /* function stats */
3169         for_each_queue(bp, i) {
3170                 struct bnx2x_fastpath *fp = &bp->fp[i];
3171
3172                 memset(&fp->old_tclient, 0,
3173                        sizeof(struct tstorm_per_client_stats));
3174                 memset(&fp->old_uclient, 0,
3175                        sizeof(struct ustorm_per_client_stats));
3176                 memset(&fp->old_xclient, 0,
3177                        sizeof(struct xstorm_per_client_stats));
3178                 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3179         }
3180
3181         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3182         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3183
3184         bp->stats_state = STATS_STATE_DISABLED;
3185         if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3186                 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3187 }
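/*
 * bnx2x_stats_init() mainly establishes baselines: the current NIG BRB
 * discard/truncate and egress packet counters are snapshotted into
 * old_nig_stats and the per-queue old_*client/eth_q_stats areas are
 * zeroed, so the first statistics update computes deltas against a
 * known starting point.
 */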
3188
3189 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3190 {
3191         struct dmae_command *dmae = &bp->stats_dmae;
3192         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3193
3194         *stats_comp = DMAE_COMP_VAL;
3195         if (CHIP_REV_IS_SLOW(bp))
3196                 return;
3197
3198         /* loader */
3199         if (bp->executer_idx) {
3200                 int loader_idx = PMF_DMAE_C(bp);
3201
3202                 memset(dmae, 0, sizeof(struct dmae_command));
3203
3204                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3205                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3206                                 DMAE_CMD_DST_RESET |
3207 #ifdef __BIG_ENDIAN
3208                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3209 #else
3210                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3211 #endif
3212                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3213                                                DMAE_CMD_PORT_0) |
3214                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3215                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3216                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3217                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3218                                      sizeof(struct dmae_command) *
3219                                      (loader_idx + 1)) >> 2;
3220                 dmae->dst_addr_hi = 0;
3221                 dmae->len = sizeof(struct dmae_command) >> 2;
3222                 if (CHIP_IS_E1(bp))
3223                         dmae->len--;
3224                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3225                 dmae->comp_addr_hi = 0;
3226                 dmae->comp_val = 1;
3227
3228                 *stats_comp = 0;
3229                 bnx2x_post_dmae(bp, dmae, loader_idx);
3230
3231         } else if (bp->func_stx) {
3232                 *stats_comp = 0;
3233                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3234         }
3235 }
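/*
 * Rough sketch of the posting logic above: when commands have been
 * queued in the slowpath dmae[] array (executer_idx != 0), a "loader"
 * command is posted on channel PMF_DMAE_C; it copies the first queued
 * command into the DMAE command memory of channel loader_idx + 1 and,
 * as its completion, writes 1 to dmae_reg_go_c[loader_idx + 1] to start
 * it.  With nothing queued but a valid func_stx, the prebuilt
 * bp->stats_dmae command is posted directly.  The last command of the
 * sequence is expected to write DMAE_COMP_VAL into stats_comp, which
 * bnx2x_stats_comp() polls; on slow (emulation) chip revisions the DMAE
 * is skipped and stats_comp is pre-set so that the poll succeeds at once.
 */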
3236
3237 static int bnx2x_stats_comp(struct bnx2x *bp)
3238 {
3239         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3240         int cnt = 10;
3241
3242         might_sleep();
3243         while (*stats_comp != DMAE_COMP_VAL) {
3244                 if (!cnt) {
3245                         BNX2X_ERR("timed out waiting for stats to finish\n");
3246                         break;
3247                 }
3248                 cnt--;
3249                 msleep(1);
3250         }
3251         return 1;
3252 }
3253
3254 /*
3255  * Statistics service functions
3256  */
3257
3258 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3259 {
3260         struct dmae_command *dmae;
3261         u32 opcode;
3262         int loader_idx = PMF_DMAE_C(bp);
3263         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3264
3265         /* sanity */
3266         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3267                 BNX2X_ERR("BUG!\n");
3268                 return;
3269         }
3270
3271         bp->executer_idx = 0;
3272
3273         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3274                   DMAE_CMD_C_ENABLE |
3275                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3276 #ifdef __BIG_ENDIAN
3277                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3278 #else
3279                   DMAE_CMD_ENDIANITY_DW_SWAP |
3280 #endif
3281                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3282                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3283
3284         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3285         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3286         dmae->src_addr_lo = bp->port.port_stx >> 2;
3287         dmae->src_addr_hi = 0;
3288         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3289         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3290         dmae->len = DMAE_LEN32_RD_MAX;
3291         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3292         dmae->comp_addr_hi = 0;
3293         dmae->comp_val = 1;
3294
3295         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3296         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3297         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3298         dmae->src_addr_hi = 0;
3299         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3300                                    DMAE_LEN32_RD_MAX * 4);
3301         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3302                                    DMAE_LEN32_RD_MAX * 4);
3303         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3304         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3305         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3306         dmae->comp_val = DMAE_COMP_VAL;
3307
3308         *stats_comp = 0;
3309         bnx2x_hw_stats_post(bp);
3310         bnx2x_stats_comp(bp);
3311 }
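/*
 * The port statistics block in the MCP scratchpad is wider than what a
 * single DMAE read appears to allow (DMAE_LEN32_RD_MAX dwords), so the
 * update above is split into two queued commands: the first covers the
 * first DMAE_LEN32_RD_MAX dwords, the second covers the remainder of
 * struct host_port_stats and is the one that completes to stats_comp.
 */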
3312
3313 static void bnx2x_port_stats_init(struct bnx2x *bp)
3314 {
3315         struct dmae_command *dmae;
3316         int port = BP_PORT(bp);
3317         int vn = BP_E1HVN(bp);
3318         u32 opcode;
3319         int loader_idx = PMF_DMAE_C(bp);
3320         u32 mac_addr;
3321         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3322
3323         /* sanity */
3324         if (!bp->link_vars.link_up || !bp->port.pmf) {
3325                 BNX2X_ERR("BUG!\n");
3326                 return;
3327         }
3328
3329         bp->executer_idx = 0;
3330
3331         /* MCP */
3332         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3333                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3334                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3335 #ifdef __BIG_ENDIAN
3336                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3337 #else
3338                   DMAE_CMD_ENDIANITY_DW_SWAP |
3339 #endif
3340                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3341                   (vn << DMAE_CMD_E1HVN_SHIFT));
3342
3343         if (bp->port.port_stx) {
3344
3345                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3346                 dmae->opcode = opcode;
3347                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3348                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3349                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3350                 dmae->dst_addr_hi = 0;
3351                 dmae->len = sizeof(struct host_port_stats) >> 2;
3352                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3353                 dmae->comp_addr_hi = 0;
3354                 dmae->comp_val = 1;
3355         }
3356
3357         if (bp->func_stx) {
3358
3359                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3360                 dmae->opcode = opcode;
3361                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3362                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3363                 dmae->dst_addr_lo = bp->func_stx >> 2;
3364                 dmae->dst_addr_hi = 0;
3365                 dmae->len = sizeof(struct host_func_stats) >> 2;
3366                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3367                 dmae->comp_addr_hi = 0;
3368                 dmae->comp_val = 1;
3369         }
3370
3371         /* MAC */
3372         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3373                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3374                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3375 #ifdef __BIG_ENDIAN
3376                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3377 #else
3378                   DMAE_CMD_ENDIANITY_DW_SWAP |
3379 #endif
3380                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3381                   (vn << DMAE_CMD_E1HVN_SHIFT));
3382
3383         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3384
3385                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3386                                    NIG_REG_INGRESS_BMAC0_MEM);
3387
3388                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3389                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3390                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3391                 dmae->opcode = opcode;
3392                 dmae->src_addr_lo = (mac_addr +
3393                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3394                 dmae->src_addr_hi = 0;
3395                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3396                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3397                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3398                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3399                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3400                 dmae->comp_addr_hi = 0;
3401                 dmae->comp_val = 1;
3402
3403                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3404                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3405                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3406                 dmae->opcode = opcode;
3407                 dmae->src_addr_lo = (mac_addr +
3408                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3409                 dmae->src_addr_hi = 0;
3410                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3411                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3412                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3413                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3414                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3415                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3416                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3417                 dmae->comp_addr_hi = 0;
3418                 dmae->comp_val = 1;
3419
3420         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3421
3422                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3423
3424                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3425                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3426                 dmae->opcode = opcode;
3427                 dmae->src_addr_lo = (mac_addr +
3428                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3429                 dmae->src_addr_hi = 0;
3430                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3431                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3432                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3433                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3434                 dmae->comp_addr_hi = 0;
3435                 dmae->comp_val = 1;
3436
3437                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3438                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3439                 dmae->opcode = opcode;
3440                 dmae->src_addr_lo = (mac_addr +
3441                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3442                 dmae->src_addr_hi = 0;
3443                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3444                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3445                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3446                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3447                 dmae->len = 1;
3448                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3449                 dmae->comp_addr_hi = 0;
3450                 dmae->comp_val = 1;
3451
3452                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3453                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3454                 dmae->opcode = opcode;
3455                 dmae->src_addr_lo = (mac_addr +
3456                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3457                 dmae->src_addr_hi = 0;
3458                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3459                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3460                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3461                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3462                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3463                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3464                 dmae->comp_addr_hi = 0;
3465                 dmae->comp_val = 1;
3466         }
3467
3468         /* NIG */
3469         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3470         dmae->opcode = opcode;
3471         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3472                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3473         dmae->src_addr_hi = 0;
3474         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3475         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3476         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3477         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3478         dmae->comp_addr_hi = 0;
3479         dmae->comp_val = 1;
3480
3481         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3482         dmae->opcode = opcode;
3483         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3484                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3485         dmae->src_addr_hi = 0;
3486         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3487                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3488         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3489                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3490         dmae->len = (2*sizeof(u32)) >> 2;
3491         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3492         dmae->comp_addr_hi = 0;
3493         dmae->comp_val = 1;
3494
3495         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3496         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3497                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3498                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3499 #ifdef __BIG_ENDIAN
3500                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3501 #else
3502                         DMAE_CMD_ENDIANITY_DW_SWAP |
3503 #endif
3504                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3505                         (vn << DMAE_CMD_E1HVN_SHIFT));
3506         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3507                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3508         dmae->src_addr_hi = 0;
3509         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3510                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3511         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3512                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3513         dmae->len = (2*sizeof(u32)) >> 2;
3514         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3515         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3516         dmae->comp_val = DMAE_COMP_VAL;
3517
3518         *stats_comp = 0;
3519 }
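/*
 * bnx2x_port_stats_init() only builds the DMAE program; nothing runs
 * until bnx2x_hw_stats_post() kicks it.  The queued commands write the
 * host port/function statistics out to the MCP scratchpad (port_stx /
 * func_stx), then read the MAC counters (BigMAC ranges or the EMAC
 * RX/TX STAT_AC registers, depending on link_vars.mac_type) and the NIG
 * discard/egress counters into the slowpath buffer.  Only the final NIG
 * command completes to stats_comp with DMAE_COMP_VAL; the others report
 * through dmae_reg_go_c[loader_idx].
 */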
3520
3521 static void bnx2x_func_stats_init(struct bnx2x *bp)
3522 {
3523         struct dmae_command *dmae = &bp->stats_dmae;
3524         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3525
3526         /* sanity */
3527         if (!bp->func_stx) {
3528                 BNX2X_ERR("BUG!\n");
3529                 return;
3530         }
3531
3532         bp->executer_idx = 0;
3533         memset(dmae, 0, sizeof(struct dmae_command));
3534
3535         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3536                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3537                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3538 #ifdef __BIG_ENDIAN
3539                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3540 #else
3541                         DMAE_CMD_ENDIANITY_DW_SWAP |
3542 #endif
3543                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3544                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3545         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3546         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3547         dmae->dst_addr_lo = bp->func_stx >> 2;
3548         dmae->dst_addr_hi = 0;
3549         dmae->len = sizeof(struct host_func_stats) >> 2;
3550         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3551         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3552         dmae->comp_val = DMAE_COMP_VAL;
3553
3554         *stats_comp = 0;
3555 }
3556
3557 static void bnx2x_stats_start(struct bnx2x *bp)
3558 {
3559         if (bp->port.pmf)
3560                 bnx2x_port_stats_init(bp);
3561
3562         else if (bp->func_stx)
3563                 bnx2x_func_stats_init(bp);
3564
3565         bnx2x_hw_stats_post(bp);
3566         bnx2x_storm_stats_post(bp);
3567 }
3568
3569 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3570 {
3571         bnx2x_stats_comp(bp);
3572         bnx2x_stats_pmf_update(bp);
3573         bnx2x_stats_start(bp);
3574 }
3575
3576 static void bnx2x_stats_restart(struct bnx2x *bp)
3577 {
3578         bnx2x_stats_comp(bp);
3579         bnx2x_stats_start(bp);
3580 }
3581
3582 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3583 {
3584         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3585         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3586         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3587         struct {
3588                 u32 lo;
3589                 u32 hi;
3590         } diff;
3591
3592         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3593         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3594         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3595         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3596         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3597         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3598         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3599         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3600         UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3601         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3602         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3603         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3604         UPDATE_STAT64(tx_stat_gt127,
3605                                 tx_stat_etherstatspkts65octetsto127octets);
3606         UPDATE_STAT64(tx_stat_gt255,
3607                                 tx_stat_etherstatspkts128octetsto255octets);
3608         UPDATE_STAT64(tx_stat_gt511,
3609                                 tx_stat_etherstatspkts256octetsto511octets);
3610         UPDATE_STAT64(tx_stat_gt1023,
3611                                 tx_stat_etherstatspkts512octetsto1023octets);
3612         UPDATE_STAT64(tx_stat_gt1518,
3613                                 tx_stat_etherstatspkts1024octetsto1522octets);
3614         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3615         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3616         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3617         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3618         UPDATE_STAT64(tx_stat_gterr,
3619                                 tx_stat_dot3statsinternalmactransmiterrors);
3620         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3621
3622         estats->pause_frames_received_hi =
3623                                 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3624         estats->pause_frames_received_lo =
3625                                 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3626
3627         estats->pause_frames_sent_hi =
3628                                 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3629         estats->pause_frames_sent_lo =
3630                                 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3631 }
3632
3633 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3634 {
3635         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3636         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3637         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3638
3639         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3640         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3641         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3642         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3643         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3644         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3645         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3646         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3647         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3648         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3649         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3650         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3651         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3652         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3653         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3654         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3655         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3656         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3657         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3658         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3659         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3660         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3661         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3662         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3663         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3664         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3665         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3666         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3667         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3668         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3669         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3670
3671         estats->pause_frames_received_hi =
3672                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3673         estats->pause_frames_received_lo =
3674                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3675         ADD_64(estats->pause_frames_received_hi,
3676                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3677                estats->pause_frames_received_lo,
3678                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3679
3680         estats->pause_frames_sent_hi =
3681                         pstats->mac_stx[1].tx_stat_outxonsent_hi;
3682         estats->pause_frames_sent_lo =
3683                         pstats->mac_stx[1].tx_stat_outxonsent_lo;
3684         ADD_64(estats->pause_frames_sent_hi,
3685                pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3686                estats->pause_frames_sent_lo,
3687                pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3688 }
3689
3690 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3691 {
3692         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3693         struct nig_stats *old = &(bp->port.old_nig_stats);
3694         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3695         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3696         struct {
3697                 u32 lo;
3698                 u32 hi;
3699         } diff;
3700         u32 nig_timer_max;
3701
3702         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3703                 bnx2x_bmac_stats_update(bp);
3704
3705         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3706                 bnx2x_emac_stats_update(bp);
3707
3708         else { /* unreached */
3709                 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3710                 return -1;
3711         }
3712
3713         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3714                       new->brb_discard - old->brb_discard);
3715         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3716                       new->brb_truncate - old->brb_truncate);
3717
3718         UPDATE_STAT64_NIG(egress_mac_pkt0,
3719                                         etherstatspkts1024octetsto1522octets);
3720         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3721
3722         memcpy(old, new, sizeof(struct nig_stats));
3723
3724         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3725                sizeof(struct mac_stx));
3726         estats->brb_drop_hi = pstats->brb_drop_hi;
3727         estats->brb_drop_lo = pstats->brb_drop_lo;
3728
3729         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3730
3731         nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3732         if (nig_timer_max != estats->nig_timer_max) {
3733                 estats->nig_timer_max = nig_timer_max;
3734                 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3735         }
3736
3737         return 0;
3738 }
3739
3740 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3741 {
3742         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3743         struct tstorm_per_port_stats *tport =
3744                                         &stats->tstorm_common.port_statistics;
3745         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3746         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3747         int i;
3748
3749         memset(&(fstats->total_bytes_received_hi), 0,
3750                sizeof(struct host_func_stats) - 2*sizeof(u32));
3751         estats->error_bytes_received_hi = 0;
3752         estats->error_bytes_received_lo = 0;
3753         estats->etherstatsoverrsizepkts_hi = 0;
3754         estats->etherstatsoverrsizepkts_lo = 0;
3755         estats->no_buff_discard_hi = 0;
3756         estats->no_buff_discard_lo = 0;
3757
3758         for_each_queue(bp, i) {
3759                 struct bnx2x_fastpath *fp = &bp->fp[i];
3760                 int cl_id = fp->cl_id;
3761                 struct tstorm_per_client_stats *tclient =
3762                                 &stats->tstorm_common.client_statistics[cl_id];
3763                 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3764                 struct ustorm_per_client_stats *uclient =
3765                                 &stats->ustorm_common.client_statistics[cl_id];
3766                 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3767                 struct xstorm_per_client_stats *xclient =
3768                                 &stats->xstorm_common.client_statistics[cl_id];
3769                 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3770                 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3771                 u32 diff;
3772
3773                 /* are storm stats valid? */
3774                 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3775                                                         bp->stats_counter) {
3776                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3777                            "  xstorm counter (%d) != stats_counter (%d)\n",
3778                            i, xclient->stats_counter, bp->stats_counter);
3779                         return -1;
3780                 }
3781                 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3782                                                         bp->stats_counter) {
3783                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3784                            "  tstorm counter (%d) != stats_counter (%d)\n",
3785                            i, tclient->stats_counter, bp->stats_counter);
3786                         return -2;
3787                 }
3788                 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3789                                                         bp->stats_counter) {
3790                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3791                            "  ustorm counter (%d) != stats_counter (%d)\n",
3792                            i, uclient->stats_counter, bp->stats_counter);
3793                         return -4;
3794                 }
3795
3796                 qstats->total_bytes_received_hi =
3797                 qstats->valid_bytes_received_hi =
3798                                 le32_to_cpu(tclient->total_rcv_bytes.hi);
3799                 qstats->total_bytes_received_lo =
3800                 qstats->valid_bytes_received_lo =
3801                                 le32_to_cpu(tclient->total_rcv_bytes.lo);
3802
3803                 qstats->error_bytes_received_hi =
3804                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
3805                 qstats->error_bytes_received_lo =
3806                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
3807
3808                 ADD_64(qstats->total_bytes_received_hi,
3809                        qstats->error_bytes_received_hi,
3810                        qstats->total_bytes_received_lo,
3811                        qstats->error_bytes_received_lo);
3812
3813                 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3814                                         total_unicast_packets_received);
3815                 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3816                                         total_multicast_packets_received);
3817                 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3818                                         total_broadcast_packets_received);
3819                 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3820                                         etherstatsoverrsizepkts);
3821                 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3822
3823                 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3824                                         total_unicast_packets_received);
3825                 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3826                                         total_multicast_packets_received);
3827                 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3828                                         total_broadcast_packets_received);
3829                 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3830                 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3831                 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3832
3833                 qstats->total_bytes_transmitted_hi =
3834                                 le32_to_cpu(xclient->total_sent_bytes.hi);
3835                 qstats->total_bytes_transmitted_lo =
3836                                 le32_to_cpu(xclient->total_sent_bytes.lo);
3837
3838                 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3839                                         total_unicast_packets_transmitted);
3840                 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3841                                         total_multicast_packets_transmitted);
3842                 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3843                                         total_broadcast_packets_transmitted);
3844
3845                 old_tclient->checksum_discard = tclient->checksum_discard;
3846                 old_tclient->ttl0_discard = tclient->ttl0_discard;
3847
3848                 ADD_64(fstats->total_bytes_received_hi,
3849                        qstats->total_bytes_received_hi,
3850                        fstats->total_bytes_received_lo,
3851                        qstats->total_bytes_received_lo);
3852                 ADD_64(fstats->total_bytes_transmitted_hi,
3853                        qstats->total_bytes_transmitted_hi,
3854                        fstats->total_bytes_transmitted_lo,
3855                        qstats->total_bytes_transmitted_lo);
3856                 ADD_64(fstats->total_unicast_packets_received_hi,
3857                        qstats->total_unicast_packets_received_hi,
3858                        fstats->total_unicast_packets_received_lo,
3859                        qstats->total_unicast_packets_received_lo);
3860                 ADD_64(fstats->total_multicast_packets_received_hi,
3861                        qstats->total_multicast_packets_received_hi,
3862                        fstats->total_multicast_packets_received_lo,
3863                        qstats->total_multicast_packets_received_lo);
3864                 ADD_64(fstats->total_broadcast_packets_received_hi,
3865                        qstats->total_broadcast_packets_received_hi,
3866                        fstats->total_broadcast_packets_received_lo,
3867                        qstats->total_broadcast_packets_received_lo);
3868                 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3869                        qstats->total_unicast_packets_transmitted_hi,
3870                        fstats->total_unicast_packets_transmitted_lo,
3871                        qstats->total_unicast_packets_transmitted_lo);
3872                 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3873                        qstats->total_multicast_packets_transmitted_hi,
3874                        fstats->total_multicast_packets_transmitted_lo,
3875                        qstats->total_multicast_packets_transmitted_lo);
3876                 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3877                        qstats->total_broadcast_packets_transmitted_hi,
3878                        fstats->total_broadcast_packets_transmitted_lo,
3879                        qstats->total_broadcast_packets_transmitted_lo);
3880                 ADD_64(fstats->valid_bytes_received_hi,
3881                        qstats->valid_bytes_received_hi,
3882                        fstats->valid_bytes_received_lo,
3883                        qstats->valid_bytes_received_lo);
3884
3885                 ADD_64(estats->error_bytes_received_hi,
3886                        qstats->error_bytes_received_hi,
3887                        estats->error_bytes_received_lo,
3888                        qstats->error_bytes_received_lo);
3889                 ADD_64(estats->etherstatsoverrsizepkts_hi,
3890                        qstats->etherstatsoverrsizepkts_hi,
3891                        estats->etherstatsoverrsizepkts_lo,
3892                        qstats->etherstatsoverrsizepkts_lo);
3893                 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3894                        estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3895         }
3896
3897         ADD_64(fstats->total_bytes_received_hi,
3898                estats->rx_stat_ifhcinbadoctets_hi,
3899                fstats->total_bytes_received_lo,
3900                estats->rx_stat_ifhcinbadoctets_lo);
3901
3902         memcpy(estats, &(fstats->total_bytes_received_hi),
3903                sizeof(struct host_func_stats) - 2*sizeof(u32));
3904
3905         ADD_64(estats->etherstatsoverrsizepkts_hi,
3906                estats->rx_stat_dot3statsframestoolong_hi,
3907                estats->etherstatsoverrsizepkts_lo,
3908                estats->rx_stat_dot3statsframestoolong_lo);
3909         ADD_64(estats->error_bytes_received_hi,
3910                estats->rx_stat_ifhcinbadoctets_hi,
3911                estats->error_bytes_received_lo,
3912                estats->rx_stat_ifhcinbadoctets_lo);
3913
3914         if (bp->port.pmf) {
3915                 estats->mac_filter_discard =
3916                                 le32_to_cpu(tport->mac_filter_discard);
3917                 estats->xxoverflow_discard =
3918                                 le32_to_cpu(tport->xxoverflow_discard);
3919                 estats->brb_truncate_discard =
3920                                 le32_to_cpu(tport->brb_truncate_discard);
3921                 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3922         }
3923
3924         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3925
3926         bp->stats_pending = 0;
3927
3928         return 0;
3929 }
3930
3931 static void bnx2x_net_stats_update(struct bnx2x *bp)
3932 {
3933         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3934         struct net_device_stats *nstats = &bp->dev->stats;
3935         int i;
3936
3937         nstats->rx_packets =
3938                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3939                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3940                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3941
3942         nstats->tx_packets =
3943                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3944                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3945                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3946
3947         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
3948
3949         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3950
3951         nstats->rx_dropped = estats->mac_discard;
3952         for_each_queue(bp, i)
3953                 nstats->rx_dropped +=
3954                         le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3955
3956         nstats->tx_dropped = 0;
3957
3958         nstats->multicast =
3959                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
3960
3961         nstats->collisions =
3962                 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
3963
3964         nstats->rx_length_errors =
3965                 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3966                 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3967         nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3968                                  bnx2x_hilo(&estats->brb_truncate_hi);
3969         nstats->rx_crc_errors =
3970                 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3971         nstats->rx_frame_errors =
3972                 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3973         nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
3974         nstats->rx_missed_errors = estats->xxoverflow_discard;
3975
3976         nstats->rx_errors = nstats->rx_length_errors +
3977                             nstats->rx_over_errors +
3978                             nstats->rx_crc_errors +
3979                             nstats->rx_frame_errors +
3980                             nstats->rx_fifo_errors +
3981                             nstats->rx_missed_errors;
3982
3983         nstats->tx_aborted_errors =
3984                 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3985                 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3986         nstats->tx_carrier_errors =
3987                 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
3988         nstats->tx_fifo_errors = 0;
3989         nstats->tx_heartbeat_errors = 0;
3990         nstats->tx_window_errors = 0;
3991
3992         nstats->tx_errors = nstats->tx_aborted_errors +
3993                             nstats->tx_carrier_errors +
3994             bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3995 }
3996
3997 static void bnx2x_drv_stats_update(struct bnx2x *bp)
3998 {
3999         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4000         int i;
4001
4002         estats->driver_xoff = 0;
4003         estats->rx_err_discard_pkt = 0;
4004         estats->rx_skb_alloc_failed = 0;
4005         estats->hw_csum_err = 0;
4006         for_each_queue(bp, i) {
4007                 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4008
4009                 estats->driver_xoff += qstats->driver_xoff;
4010                 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4011                 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4012                 estats->hw_csum_err += qstats->hw_csum_err;
4013         }
4014 }
4015
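/* Periodic statistics refresh, driven by the UPDATE event from bnx2x_timer().
 * Bails out unless the previous DMAE round signalled completion through
 * stats_comp, then folds the hardware (PMF only) and storm counters into
 * eth_stats/net_device_stats, optionally dumps a per-queue snapshot under
 * NETIF_MSG_TIMER, and posts the next round of requests.
 */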
4016 static void bnx2x_stats_update(struct bnx2x *bp)
4017 {
4018         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4019
4020         if (*stats_comp != DMAE_COMP_VAL)
4021                 return;
4022
4023         if (bp->port.pmf)
4024                 bnx2x_hw_stats_update(bp);
4025
4026         if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4027                 BNX2X_ERR("storm stats were not updated for 3 consecutive intervals\n");
4028                 bnx2x_panic();
4029                 return;
4030         }
4031
4032         bnx2x_net_stats_update(bp);
4033         bnx2x_drv_stats_update(bp);
4034
4035         if (bp->msglevel & NETIF_MSG_TIMER) {
4036                 struct tstorm_per_client_stats *old_tclient =
4037                                                         &bp->fp->old_tclient;
4038                 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4039                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4040                 struct net_device_stats *nstats = &bp->dev->stats;
4041                 int i;
4042
4043                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4044                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
4045                                   "  tx pkt (%lx)\n",
4046                        bnx2x_tx_avail(bp->fp),
4047                        le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
4048                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
4049                                   "  rx pkt (%lx)\n",
4050                        (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
4051                              bp->fp->rx_comp_cons),
4052                        le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
4053                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
4054                                   "brb truncate %u\n",
4055                        (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4056                        qstats->driver_xoff,
4057                        estats->brb_drop_lo, estats->brb_truncate_lo);
4058                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
4059                         "packets_too_big_discard %lu  no_buff_discard %lu  "
4060                         "mac_discard %u  mac_filter_discard %u  "
4061                         "xxoverflow_discard %u  brb_truncate_discard %u  "
4062                         "ttl0_discard %u\n",
4063                        le32_to_cpu(old_tclient->checksum_discard),
4064                        bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4065                        bnx2x_hilo(&qstats->no_buff_discard_hi),
4066                        estats->mac_discard, estats->mac_filter_discard,
4067                        estats->xxoverflow_discard, estats->brb_truncate_discard,
4068                        le32_to_cpu(old_tclient->ttl0_discard));
4069
4070                 for_each_queue(bp, i) {
4071                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4072                                bnx2x_fp(bp, i, tx_pkt),
4073                                bnx2x_fp(bp, i, rx_pkt),
4074                                bnx2x_fp(bp, i, rx_calls));
4075                 }
4076         }
4077
4078         bnx2x_hw_stats_post(bp);
4079         bnx2x_storm_stats_post(bp);
4080 }
4081
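/* Build (but do not post) the DMAE commands that copy the final host-resident
 * port and per-function statistics back to their port_stx/func_stx locations.
 * bnx2x_stats_stop() posts them; the last command completes to stats_comp so
 * it can be waited on, while an intermediate port command chains to the next
 * one through the DMAE "go" register.
 */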
4082 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4083 {
4084         struct dmae_command *dmae;
4085         u32 opcode;
4086         int loader_idx = PMF_DMAE_C(bp);
4087         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4088
4089         bp->executer_idx = 0;
4090
4091         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4092                   DMAE_CMD_C_ENABLE |
4093                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4094 #ifdef __BIG_ENDIAN
4095                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
4096 #else
4097                   DMAE_CMD_ENDIANITY_DW_SWAP |
4098 #endif
4099                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4100                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4101
4102         if (bp->port.port_stx) {
4103
4104                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4105                 if (bp->func_stx)
4106                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4107                 else
4108                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4109                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4110                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4111                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4112                 dmae->dst_addr_hi = 0;
4113                 dmae->len = sizeof(struct host_port_stats) >> 2;
4114                 if (bp->func_stx) {
4115                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4116                         dmae->comp_addr_hi = 0;
4117                         dmae->comp_val = 1;
4118                 } else {
4119                         dmae->comp_addr_lo =
4120                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4121                         dmae->comp_addr_hi =
4122                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4123                         dmae->comp_val = DMAE_COMP_VAL;
4124
4125                         *stats_comp = 0;
4126                 }
4127         }
4128
4129         if (bp->func_stx) {
4130
4131                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4132                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4133                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4134                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4135                 dmae->dst_addr_lo = bp->func_stx >> 2;
4136                 dmae->dst_addr_hi = 0;
4137                 dmae->len = sizeof(struct host_func_stats) >> 2;
4138                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4139                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4140                 dmae->comp_val = DMAE_COMP_VAL;
4141
4142                 *stats_comp = 0;
4143         }
4144 }
4145
4146 static void bnx2x_stats_stop(struct bnx2x *bp)
4147 {
4148         int update = 0;
4149
4150         bnx2x_stats_comp(bp);
4151
4152         if (bp->port.pmf)
4153                 update = (bnx2x_hw_stats_update(bp) == 0);
4154
4155         update |= (bnx2x_storm_stats_update(bp) == 0);
4156
4157         if (update) {
4158                 bnx2x_net_stats_update(bp);
4159
4160                 if (bp->port.pmf)
4161                         bnx2x_port_stats_stop(bp);
4162
4163                 bnx2x_hw_stats_post(bp);
4164                 bnx2x_stats_comp(bp);
4165         }
4166 }
4167
4168 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4169 {
4170 }
4171
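/* Statistics state machine: indexed by [current state][event], each entry
 * names the handler to run and the next state.  For example, calling
 * bnx2x_stats_handle(bp, STATS_EVENT_STOP) while ENABLED runs
 * bnx2x_stats_stop() and moves to STATS_STATE_DISABLED, whereas the same
 * event in DISABLED is a no-op.
 */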
4172 static const struct {
4173         void (*action)(struct bnx2x *bp);
4174         enum bnx2x_stats_state next_state;
4175 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4176 /* state        event   */
4177 {
4178 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4179 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
4180 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4181 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4182 },
4183 {
4184 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
4185 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
4186 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
4187 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
4188 }
4189 };
4190
4191 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4192 {
4193         enum bnx2x_stats_state state = bp->stats_state;
4194
4195         bnx2x_stats_stm[state][event].action(bp);
4196         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4197
4198         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4199                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4200                    state, event, bp->stats_state);
4201 }
4202
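/* Periodic driver timer.  Besides the optional debug polling of queue 0, it
 * maintains the driver<->MCP heartbeat: drv_pulse advances modulo
 * DRV_PULSE_SEQ_MASK and mcp_pulse is expected to match it or lag by exactly
 * one, e.g. drv_pulse 0x12 is healthy while mcp_pulse reads 0x12 or 0x11.
 */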
4203 static void bnx2x_timer(unsigned long data)
4204 {
4205         struct bnx2x *bp = (struct bnx2x *) data;
4206
4207         if (!netif_running(bp->dev))
4208                 return;
4209
4210         if (atomic_read(&bp->intr_sem) != 0)
4211                 goto timer_restart;
4212
4213         if (poll) {
4214                 struct bnx2x_fastpath *fp = &bp->fp[0];
4215                 int rc;
4216
4217                 bnx2x_tx_int(fp);
4218                 rc = bnx2x_rx_int(fp, 1000);
4219         }
4220
4221         if (!BP_NOMCP(bp)) {
4222                 int func = BP_FUNC(bp);
4223                 u32 drv_pulse;
4224                 u32 mcp_pulse;
4225
4226                 ++bp->fw_drv_pulse_wr_seq;
4227                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4228                 /* TBD - add SYSTEM_TIME */
4229                 drv_pulse = bp->fw_drv_pulse_wr_seq;
4230                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4231
4232                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4233                              MCP_PULSE_SEQ_MASK);
4234                 /* The delta between driver pulse and mcp response
4235                  * should be 1 (before mcp response) or 0 (after mcp response)
4236                  */
4237                 if ((drv_pulse != mcp_pulse) &&
4238                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4239                         /* someone lost a heartbeat... */
4240                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4241                                   drv_pulse, mcp_pulse);
4242                 }
4243         }
4244
4245         if ((bp->state == BNX2X_STATE_OPEN) ||
4246             (bp->state == BNX2X_STATE_DISABLED))
4247                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4248
4249 timer_restart:
4250         mod_timer(&bp->timer, jiffies + bp->current_interval);
4251 }
4252
4253 /* end of Statistics */
4254
4255 /* nic init */
4256
4257 /*
4258  * nic init service functions
4259  */
4260
4261 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4262 {
4263         int port = BP_PORT(bp);
4264
4265         bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
4266                         USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4267                         sizeof(struct ustorm_status_block)/4);
4268         bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
4269                         CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4270                         sizeof(struct cstorm_status_block)/4);
4271 }
4272
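/* Program a per-queue (non-default) status block: publish the DMA addresses
 * of its USTORM and CSTORM sections to the storm internal memories, mark all
 * host-coalescing indices as disabled until bnx2x_update_coalesce() sets
 * them, and ACK the block so the IGU starts generating interrupts for it.
 */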
4273 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4274                           dma_addr_t mapping, int sb_id)
4275 {
4276         int port = BP_PORT(bp);
4277         int func = BP_FUNC(bp);
4278         int index;
4279         u64 section;
4280
4281         /* USTORM */
4282         section = ((u64)mapping) + offsetof(struct host_status_block,
4283                                             u_status_block);
4284         sb->u_status_block.status_block_id = sb_id;
4285
4286         REG_WR(bp, BAR_USTRORM_INTMEM +
4287                USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4288         REG_WR(bp, BAR_USTRORM_INTMEM +
4289                ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4290                U64_HI(section));
4291         REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4292                 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4293
4294         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4295                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4296                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4297
4298         /* CSTORM */
4299         section = ((u64)mapping) + offsetof(struct host_status_block,
4300                                             c_status_block);
4301         sb->c_status_block.status_block_id = sb_id;
4302
4303         REG_WR(bp, BAR_CSTRORM_INTMEM +
4304                CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4305         REG_WR(bp, BAR_CSTRORM_INTMEM +
4306                ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4307                U64_HI(section));
4308         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4309                 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4310
4311         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4312                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4313                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4314
4315         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4316 }
4317
4318 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4319 {
4320         int func = BP_FUNC(bp);
4321
4322         bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR +
4323                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4324                         sizeof(struct tstorm_def_status_block)/4);
4325         bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
4326                         USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4327                         sizeof(struct ustorm_def_status_block)/4);
4328         bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
4329                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4330                         sizeof(struct cstorm_def_status_block)/4);
4331         bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR +
4332                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4333                         sizeof(struct xstorm_def_status_block)/4);
4334 }
4335
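/* Program the default status block: latch the AEU attention group masks,
 * point the HC attention message registers at the ATTN section, publish the
 * USTORM/CSTORM/TSTORM/XSTORM default sections with their indices disabled,
 * clear the pending stats/MAC flags and ACK the block.
 */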
4336 static void bnx2x_init_def_sb(struct bnx2x *bp,
4337                               struct host_def_status_block *def_sb,
4338                               dma_addr_t mapping, int sb_id)
4339 {
4340         int port = BP_PORT(bp);
4341         int func = BP_FUNC(bp);
4342         int index, val, reg_offset;
4343         u64 section;
4344
4345         /* ATTN */
4346         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4347                                             atten_status_block);
4348         def_sb->atten_status_block.status_block_id = sb_id;
4349
4350         bp->attn_state = 0;
4351
4352         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4353                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4354
4355         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4356                 bp->attn_group[index].sig[0] = REG_RD(bp,
4357                                                      reg_offset + 0x10*index);
4358                 bp->attn_group[index].sig[1] = REG_RD(bp,
4359                                                reg_offset + 0x4 + 0x10*index);
4360                 bp->attn_group[index].sig[2] = REG_RD(bp,
4361                                                reg_offset + 0x8 + 0x10*index);
4362                 bp->attn_group[index].sig[3] = REG_RD(bp,
4363                                                reg_offset + 0xc + 0x10*index);
4364         }
4365
4366         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4367                              HC_REG_ATTN_MSG0_ADDR_L);
4368
4369         REG_WR(bp, reg_offset, U64_LO(section));
4370         REG_WR(bp, reg_offset + 4, U64_HI(section));
4371
4372         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4373
4374         val = REG_RD(bp, reg_offset);
4375         val |= sb_id;
4376         REG_WR(bp, reg_offset, val);
4377
4378         /* USTORM */
4379         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4380                                             u_def_status_block);
4381         def_sb->u_def_status_block.status_block_id = sb_id;
4382
4383         REG_WR(bp, BAR_USTRORM_INTMEM +
4384                USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4385         REG_WR(bp, BAR_USTRORM_INTMEM +
4386                ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4387                U64_HI(section));
4388         REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4389                 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4390
4391         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4392                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4393                          USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4394
4395         /* CSTORM */
4396         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4397                                             c_def_status_block);
4398         def_sb->c_def_status_block.status_block_id = sb_id;
4399
4400         REG_WR(bp, BAR_CSTRORM_INTMEM +
4401                CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4402         REG_WR(bp, BAR_CSTRORM_INTMEM +
4403                ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4404                U64_HI(section));
4405         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4406                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4407
4408         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4409                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4410                          CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4411
4412         /* TSTORM */
4413         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4414                                             t_def_status_block);
4415         def_sb->t_def_status_block.status_block_id = sb_id;
4416
4417         REG_WR(bp, BAR_TSTRORM_INTMEM +
4418                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4419         REG_WR(bp, BAR_TSTRORM_INTMEM +
4420                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4421                U64_HI(section));
4422         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4423                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4424
4425         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4426                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4427                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4428
4429         /* XSTORM */
4430         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4431                                             x_def_status_block);
4432         def_sb->x_def_status_block.status_block_id = sb_id;
4433
4434         REG_WR(bp, BAR_XSTRORM_INTMEM +
4435                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4436         REG_WR(bp, BAR_XSTRORM_INTMEM +
4437                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4438                U64_HI(section));
4439         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4440                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4441
4442         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4443                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4444                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4445
4446         bp->stats_pending = 0;
4447         bp->set_mac_pending = 0;
4448
4449         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4450 }
4451
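/* Apply the interrupt coalescing settings to every queue's status block.
 * rx_ticks/tx_ticks are kept in usec and divided by 12 before being written
 * to the 8-bit HC timeout fields; a resulting value of zero also sets the
 * per-index disable flag, turning coalescing off for that index.
 */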
4452 static void bnx2x_update_coalesce(struct bnx2x *bp)
4453 {
4454         int port = BP_PORT(bp);
4455         int i;
4456
4457         for_each_queue(bp, i) {
4458                 int sb_id = bp->fp[i].sb_id;
4459
4460                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4461                 REG_WR8(bp, BAR_USTRORM_INTMEM +
4462                         USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4463                                                     U_SB_ETH_RX_CQ_INDEX),
4464                         bp->rx_ticks/12);
4465                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4466                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4467                                                      U_SB_ETH_RX_CQ_INDEX),
4468                          (bp->rx_ticks/12) ? 0 : 1);
4469
4470                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4471                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4472                         CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4473                                                     C_SB_ETH_TX_CQ_INDEX),
4474                         bp->tx_ticks/12);
4475                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4476                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4477                                                      C_SB_ETH_TX_CQ_INDEX),
4478                          (bp->tx_ticks/12) ? 0 : 1);
4479         }
4480 }
4481
4482 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4483                                        struct bnx2x_fastpath *fp, int last)
4484 {
4485         int i;
4486
4487         for (i = 0; i < last; i++) {
4488                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4489                 struct sk_buff *skb = rx_buf->skb;
4490
4491                 if (skb == NULL) {
4492                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4493                         continue;
4494                 }
4495
4496                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4497                         pci_unmap_single(bp->pdev,
4498                                          pci_unmap_addr(rx_buf, mapping),
4499                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4500
4501                 dev_kfree_skb(skb);
4502                 rx_buf->skb = NULL;
4503         }
4504 }
4505
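/* Set up all RX rings: size the receive buffers for the current MTU, pre-fill
 * the per-queue TPA (LRO) skb pool, chain the multi-page SGE/BD/CQE rings via
 * their "next page" elements, then allocate the initial buffers and publish
 * the producers to the chip.
 */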
4506 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4507 {
4508         int func = BP_FUNC(bp);
4509         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4510                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4511         u16 ring_prod, cqe_ring_prod;
4512         int i, j;
4513
4514         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4515         DP(NETIF_MSG_IFUP,
4516            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4517
4518         if (bp->flags & TPA_ENABLE_FLAG) {
4519
4520                 for_each_rx_queue(bp, j) {
4521                         struct bnx2x_fastpath *fp = &bp->fp[j];
4522
4523                         for (i = 0; i < max_agg_queues; i++) {
4524                                 fp->tpa_pool[i].skb =
4525                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4526                                 if (!fp->tpa_pool[i].skb) {
4527                                         BNX2X_ERR("Failed to allocate TPA "
4528                                                   "skb pool for queue[%d] - "
4529                                                   "disabling TPA on this "
4530                                                   "queue!\n", j);
4531                                         bnx2x_free_tpa_pool(bp, fp, i);
4532                                         fp->disable_tpa = 1;
4533                                         break;
4534                                 }
4535                                 pci_unmap_addr_set((struct sw_rx_bd *)
4536                                                         &bp->fp->tpa_pool[i],
4537                                                    mapping, 0);
4538                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4539                         }
4540                 }
4541         }
4542
4543         for_each_rx_queue(bp, j) {
4544                 struct bnx2x_fastpath *fp = &bp->fp[j];
4545
4546                 fp->rx_bd_cons = 0;
4547                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4548                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4549
4550                 /* "next page" elements initialization */
4551                 /* SGE ring */
4552                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4553                         struct eth_rx_sge *sge;
4554
4555                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4556                         sge->addr_hi =
4557                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4558                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4559                         sge->addr_lo =
4560                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4561                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4562                 }
4563
4564                 bnx2x_init_sge_ring_bit_mask(fp);
4565
4566                 /* RX BD ring */
4567                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4568                         struct eth_rx_bd *rx_bd;
4569
4570                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4571                         rx_bd->addr_hi =
4572                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4573                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4574                         rx_bd->addr_lo =
4575                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4576                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4577                 }
4578
4579                 /* CQ ring */
4580                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4581                         struct eth_rx_cqe_next_page *nextpg;
4582
4583                         nextpg = (struct eth_rx_cqe_next_page *)
4584                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4585                         nextpg->addr_hi =
4586                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4587                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4588                         nextpg->addr_lo =
4589                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4590                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4591                 }
4592
4593                 /* Allocate SGEs and initialize the ring elements */
4594                 for (i = 0, ring_prod = 0;
4595                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4596
4597                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4598                                 BNX2X_ERR("was only able to allocate "
4599                                           "%d rx sges\n", i);
4600                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4601                                 /* Cleanup already allocated elements */
4602                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4603                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4604                                 fp->disable_tpa = 1;
4605                                 ring_prod = 0;
4606                                 break;
4607                         }
4608                         ring_prod = NEXT_SGE_IDX(ring_prod);
4609                 }
4610                 fp->rx_sge_prod = ring_prod;
4611
4612                 /* Allocate BDs and initialize BD ring */
4613                 fp->rx_comp_cons = 0;
4614                 cqe_ring_prod = ring_prod = 0;
4615                 for (i = 0; i < bp->rx_ring_size; i++) {
4616                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4617                                 BNX2X_ERR("was only able to allocate "
4618                                           "%d rx skbs on queue[%d]\n", i, j);
4619                                 fp->eth_q_stats.rx_skb_alloc_failed++;
4620                                 break;
4621                         }
4622                         ring_prod = NEXT_RX_IDX(ring_prod);
4623                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4624                         WARN_ON(ring_prod <= i);
4625                 }
4626
4627                 fp->rx_bd_prod = ring_prod;
4628                 /* must not have more available CQEs than BDs */
4629                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4630                                        cqe_ring_prod);
4631                 fp->rx_pkt = fp->rx_calls = 0;
4632
4633                 /* Warning!
4634                  * this will generate an interrupt (to the TSTORM)
4635                  * must only be done after chip is initialized
4636                  */
4637                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4638                                      fp->rx_sge_prod);
4639                 if (j != 0)
4640                         continue;
4641
4642                 REG_WR(bp, BAR_USTRORM_INTMEM +
4643                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4644                        U64_LO(fp->rx_comp_mapping));
4645                 REG_WR(bp, BAR_USTRORM_INTMEM +
4646                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4647                        U64_HI(fp->rx_comp_mapping));
4648         }
4649 }
4650
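/* Reset every TX ring: chain the ring pages through their last ("next page")
 * BD and zero the packet/BD producer and consumer indices.
 */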
4651 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4652 {
4653         int i, j;
4654
4655         for_each_tx_queue(bp, j) {
4656                 struct bnx2x_fastpath *fp = &bp->fp[j];
4657
4658                 for (i = 1; i <= NUM_TX_RINGS; i++) {
4659                         struct eth_tx_bd *tx_bd =
4660                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4661
4662                         tx_bd->addr_hi =
4663                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4664                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4665                         tx_bd->addr_lo =
4666                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4667                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4668                 }
4669
4670                 fp->tx_pkt_prod = 0;
4671                 fp->tx_pkt_cons = 0;
4672                 fp->tx_bd_prod = 0;
4673                 fp->tx_bd_cons = 0;
4674                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4675                 fp->tx_pkt = 0;
4676         }
4677 }
4678
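/* Initialize the slow-path (ramrod) queue: reset the producer bookkeeping and
 * tell the XSTORM where the SPQ page lives and where its producer starts.
 */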
4679 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4680 {
4681         int func = BP_FUNC(bp);
4682
4683         spin_lock_init(&bp->spq_lock);
4684
4685         bp->spq_left = MAX_SPQ_PENDING;
4686         bp->spq_prod_idx = 0;
4687         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4688         bp->spq_prod_bd = bp->spq;
4689         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4690
4691         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4692                U64_LO(bp->spq_mapping));
4693         REG_WR(bp,
4694                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4695                U64_HI(bp->spq_mapping));
4696
4697         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4698                bp->spq_prod_idx);
4699 }
4700
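/* Fill the per-connection ETH context for every queue: the USTORM part
 * describes the RX BD/SGE rings and buffer sizes (SGE fields only when TPA is
 * enabled for the queue), the XSTORM part describes the TX BD ring and the
 * doorbell data area, and the CDU reserved words identify the connection.
 */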
4701 static void bnx2x_init_context(struct bnx2x *bp)
4702 {
4703         int i;
4704
4705         for_each_queue(bp, i) {
4706                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4707                 struct bnx2x_fastpath *fp = &bp->fp[i];
4708                 u8 cl_id = fp->cl_id;
4709                 u8 sb_id = fp->sb_id;
4710
4711                 context->ustorm_st_context.common.sb_index_numbers =
4712                                                 BNX2X_RX_SB_INDEX_NUM;
4713                 context->ustorm_st_context.common.clientId = cl_id;
4714                 context->ustorm_st_context.common.status_block_id = sb_id;
4715                 context->ustorm_st_context.common.flags =
4716                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4717                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4718                 context->ustorm_st_context.common.statistics_counter_id =
4719                                                 cl_id;
4720                 context->ustorm_st_context.common.mc_alignment_log_size =
4721                                                 BNX2X_RX_ALIGN_SHIFT;
4722                 context->ustorm_st_context.common.bd_buff_size =
4723                                                 bp->rx_buf_size;
4724                 context->ustorm_st_context.common.bd_page_base_hi =
4725                                                 U64_HI(fp->rx_desc_mapping);
4726                 context->ustorm_st_context.common.bd_page_base_lo =
4727                                                 U64_LO(fp->rx_desc_mapping);
4728                 if (!fp->disable_tpa) {
4729                         context->ustorm_st_context.common.flags |=
4730                                 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4731                                  USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4732                         context->ustorm_st_context.common.sge_buff_size =
4733                                 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4734                                          (u32)0xffff);
4735                         context->ustorm_st_context.common.sge_page_base_hi =
4736                                                 U64_HI(fp->rx_sge_mapping);
4737                         context->ustorm_st_context.common.sge_page_base_lo =
4738                                                 U64_LO(fp->rx_sge_mapping);
4739                 }
4740
4741                 context->ustorm_ag_context.cdu_usage =
4742                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4743                                                CDU_REGION_NUMBER_UCM_AG,
4744                                                ETH_CONNECTION_TYPE);
4745
4746                 context->xstorm_st_context.tx_bd_page_base_hi =
4747                                                 U64_HI(fp->tx_desc_mapping);
4748                 context->xstorm_st_context.tx_bd_page_base_lo =
4749                                                 U64_LO(fp->tx_desc_mapping);
4750                 context->xstorm_st_context.db_data_addr_hi =
4751                                                 U64_HI(fp->tx_prods_mapping);
4752                 context->xstorm_st_context.db_data_addr_lo =
4753                                                 U64_LO(fp->tx_prods_mapping);
4754                 context->xstorm_st_context.statistics_data = (cl_id |
4755                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4756                 context->cstorm_st_context.sb_index_number =
4757                                                 C_SB_ETH_TX_CQ_INDEX;
4758                 context->cstorm_st_context.status_block_id = sb_id;
4759
4760                 context->xstorm_ag_context.cdu_reserved =
4761                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4762                                                CDU_REGION_NUMBER_XCM_AG,
4763                                                ETH_CONNECTION_TYPE);
4764         }
4765 }
4766
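/* Populate the RSS indirection table by spreading the RX queues' client ids
 * round-robin over its TSTORM_INDIRECTION_TABLE_SIZE entries; nothing to do
 * when RSS is disabled.
 */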
4767 static void bnx2x_init_ind_table(struct bnx2x *bp)
4768 {
4769         int func = BP_FUNC(bp);
4770         int i;
4771
4772         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4773                 return;
4774
4775         DP(NETIF_MSG_IFUP,
4776            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
4777         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4778                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4779                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4780                         bp->fp->cl_id + (i % bp->num_rx_queues));
4781 }
4782
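/* Write the per-client TSTORM configuration (MTU, VLAN removal, statistics,
 * SGE/TPA limits) for every queue's client id.  max_sges_for_packet is the
 * number of SGE pages needed for one MTU-sized packet, rounded up to whole
 * SGE entries (each entry spans PAGES_PER_SGE pages).
 */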
4783 static void bnx2x_set_client_config(struct bnx2x *bp)
4784 {
4785         struct tstorm_eth_client_config tstorm_client = {0};
4786         int port = BP_PORT(bp);
4787         int i;
4788
4789         tstorm_client.mtu = bp->dev->mtu;
4790         tstorm_client.config_flags =
4791                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4792                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
4793 #ifdef BCM_VLAN
4794         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4795                 tstorm_client.config_flags |=
4796                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
4797                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4798         }
4799 #endif
4800
4801         if (bp->flags & TPA_ENABLE_FLAG) {
4802                 tstorm_client.max_sges_for_packet =
4803                         SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4804                 tstorm_client.max_sges_for_packet =
4805                         ((tstorm_client.max_sges_for_packet +
4806                           PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4807                         PAGES_PER_SGE_SHIFT;
4808
4809                 tstorm_client.config_flags |=
4810                                 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4811         }
4812
4813         for_each_queue(bp, i) {
4814                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4815
4816                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4817                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4818                        ((u32 *)&tstorm_client)[0]);
4819                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4820                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4821                        ((u32 *)&tstorm_client)[1]);
4822         }
4823
4824         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4825            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4826 }
4827
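/* Translate the driver rx_mode into the TSTORM MAC filter configuration.
 * "mask" is the bit of this function's leading client id, so e.g. promiscuous
 * mode sets the accept-all unicast/multicast/broadcast bits for this function
 * only; the client config is (re)written unless RX is completely disabled.
 */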
4828 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4829 {
4830         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4831         int mode = bp->rx_mode;
4832         int mask = (1 << BP_L_ID(bp));
4833         int func = BP_FUNC(bp);
4834         int i;
4835
4836         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
4837
4838         switch (mode) {
4839         case BNX2X_RX_MODE_NONE: /* no Rx */
4840                 tstorm_mac_filter.ucast_drop_all = mask;
4841                 tstorm_mac_filter.mcast_drop_all = mask;
4842                 tstorm_mac_filter.bcast_drop_all = mask;
4843                 break;
4844
4845         case BNX2X_RX_MODE_NORMAL:
4846                 tstorm_mac_filter.bcast_accept_all = mask;
4847                 break;
4848
4849         case BNX2X_RX_MODE_ALLMULTI:
4850                 tstorm_mac_filter.mcast_accept_all = mask;
4851                 tstorm_mac_filter.bcast_accept_all = mask;
4852                 break;
4853
4854         case BNX2X_RX_MODE_PROMISC:
4855                 tstorm_mac_filter.ucast_accept_all = mask;
4856                 tstorm_mac_filter.mcast_accept_all = mask;
4857                 tstorm_mac_filter.bcast_accept_all = mask;
4858                 break;
4859
4860         default:
4861                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4862                 break;
4863         }
4864
4865         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4866                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4867                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4868                        ((u32 *)&tstorm_mac_filter)[i]);
4869
4870 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4871                    ((u32 *)&tstorm_mac_filter)[i]); */
4872         }
4873
4874         if (mode != BNX2X_RX_MODE_NONE)
4875                 bnx2x_set_client_config(bp);
4876 }
4877
4878 static void bnx2x_init_internal_common(struct bnx2x *bp)
4879 {
4880         int i;
4881
4882         if (bp->flags & TPA_ENABLE_FLAG) {
4883                 struct tstorm_eth_tpa_exist tpa = {0};
4884
4885                 tpa.tpa_exist = 1;
4886
4887                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4888                        ((u32 *)&tpa)[0]);
4889                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4890                        ((u32 *)&tpa)[1]);
4891         }
4892
4893         /* Zero this manually as its initialization is
4894            currently missing in the initTool */
4895         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4896                 REG_WR(bp, BAR_USTRORM_INTMEM +
4897                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4898 }
4899
4900 static void bnx2x_init_internal_port(struct bnx2x *bp)
4901 {
4902         int port = BP_PORT(bp);
4903
4904         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4905         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4906         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4907         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4908 }
4909
4910 /* Calculates the sum of vn_min_rates.
4911    It's needed for further normalizing of the min_rates.
4912    Returns:
4913      sum of vn_min_rates.
4914        or
4915      0 - if all the min_rates are 0.
4916      In the latter case the fairness algorithm should be deactivated.
4917      If not all min_rates are zero then those that are zeroes will be set to 1.
4918  */
4919 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4920 {
4921         int all_zero = 1;
4922         int port = BP_PORT(bp);
4923         int vn;
4924
4925         bp->vn_weight_sum = 0;
4926         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4927                 int func = 2*vn + port;
4928                 u32 vn_cfg =
4929                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4930                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4931                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4932
4933                 /* Skip hidden vns */
4934                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4935                         continue;
4936
4937                 /* If min rate is zero - set it to 1 */
4938                 if (!vn_min_rate)
4939                         vn_min_rate = DEF_MIN_RATE;
4940                 else
4941                         all_zero = 0;
4942
4943                 bp->vn_weight_sum += vn_min_rate;
4944         }
4945
4946         /* ... only if all the min rates are zero - disable fairness */
4947         if (all_zero)
4948                 bp->vn_weight_sum = 0;
4949 }
4950
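/* Per-function internal RAM init: RSS and E1HOV configuration, initial RX
 * mode, zeroing of the per-client storm statistics, statistics query
 * addresses, E1H multi-function mode bits, per-client CQE page base and
 * aggregation size, dropless flow control thresholds (E1H), and the
 * rate-shaping/fairness (cmng) context, stored to internal RAM by the PMF.
 */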
4951 static void bnx2x_init_internal_func(struct bnx2x *bp)
4952 {
4953         struct tstorm_eth_function_common_config tstorm_config = {0};
4954         struct stats_indication_flags stats_flags = {0};
4955         int port = BP_PORT(bp);
4956         int func = BP_FUNC(bp);
4957         int i, j;
4958         u32 offset;
4959         u16 max_agg_size;
4960
4961         if (is_multi(bp)) {
4962                 tstorm_config.config_flags = MULTI_FLAGS(bp);
4963                 tstorm_config.rss_result_mask = MULTI_MASK;
4964         }
4965         if (IS_E1HMF(bp))
4966                 tstorm_config.config_flags |=
4967                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
4968
4969         tstorm_config.leading_client_id = BP_L_ID(bp);
4970
4971         REG_WR(bp, BAR_TSTRORM_INTMEM +
4972                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4973                (*(u32 *)&tstorm_config));
4974
4975         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4976         bnx2x_set_storm_rx_mode(bp);
4977
4978         for_each_queue(bp, i) {
4979                 u8 cl_id = bp->fp[i].cl_id;
4980
4981                 /* reset xstorm per client statistics */
4982                 offset = BAR_XSTRORM_INTMEM +
4983                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4984                 for (j = 0;
4985                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4986                         REG_WR(bp, offset + j*4, 0);
4987
4988                 /* reset tstorm per client statistics */
4989                 offset = BAR_TSTRORM_INTMEM +
4990                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4991                 for (j = 0;
4992                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4993                         REG_WR(bp, offset + j*4, 0);
4994
4995                 /* reset ustorm per client statistics */
4996                 offset = BAR_USTRORM_INTMEM +
4997                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4998                 for (j = 0;
4999                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5000                         REG_WR(bp, offset + j*4, 0);
5001         }
5002
5003         /* Init statistics related context */
5004         stats_flags.collect_eth = 1;
5005
5006         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5007                ((u32 *)&stats_flags)[0]);
5008         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5009                ((u32 *)&stats_flags)[1]);
5010
5011         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5012                ((u32 *)&stats_flags)[0]);
5013         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5014                ((u32 *)&stats_flags)[1]);
5015
5016         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5017                ((u32 *)&stats_flags)[0]);
5018         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5019                ((u32 *)&stats_flags)[1]);
5020
5021         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5022                ((u32 *)&stats_flags)[0]);
5023         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5024                ((u32 *)&stats_flags)[1]);
5025
5026         REG_WR(bp, BAR_XSTRORM_INTMEM +
5027                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5028                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5029         REG_WR(bp, BAR_XSTRORM_INTMEM +
5030                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5031                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5032
5033         REG_WR(bp, BAR_TSTRORM_INTMEM +
5034                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5035                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5036         REG_WR(bp, BAR_TSTRORM_INTMEM +
5037                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5038                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5039
5040         REG_WR(bp, BAR_USTRORM_INTMEM +
5041                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5042                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5043         REG_WR(bp, BAR_USTRORM_INTMEM +
5044                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5045                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5046
5047         if (CHIP_IS_E1H(bp)) {
5048                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5049                         IS_E1HMF(bp));
5050                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5051                         IS_E1HMF(bp));
5052                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5053                         IS_E1HMF(bp));
5054                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5055                         IS_E1HMF(bp));
5056
5057                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5058                          bp->e1hov);
5059         }
5060
5061         /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5062         max_agg_size =
5063                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5064                           SGE_PAGE_SIZE * PAGES_PER_SGE),
5065                     (u32)0xffff);
5066         for_each_rx_queue(bp, i) {
5067                 struct bnx2x_fastpath *fp = &bp->fp[i];
5068
5069                 REG_WR(bp, BAR_USTRORM_INTMEM +
5070                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5071                        U64_LO(fp->rx_comp_mapping));
5072                 REG_WR(bp, BAR_USTRORM_INTMEM +
5073                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5074                        U64_HI(fp->rx_comp_mapping));
5075
5076                 REG_WR16(bp, BAR_USTRORM_INTMEM +
5077                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5078                          max_agg_size);
5079         }
5080
5081         /* dropless flow control */
5082         if (CHIP_IS_E1H(bp)) {
5083                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5084
5085                 rx_pause.bd_thr_low = 250;
5086                 rx_pause.cqe_thr_low = 250;
5087                 rx_pause.cos = 1;
5088                 rx_pause.sge_thr_low = 0;
5089                 rx_pause.bd_thr_high = 350;
5090                 rx_pause.cqe_thr_high = 350;
5091                 rx_pause.sge_thr_high = 0;
5092
5093                 for_each_rx_queue(bp, i) {
5094                         struct bnx2x_fastpath *fp = &bp->fp[i];
5095
5096                         if (!fp->disable_tpa) {
5097                                 rx_pause.sge_thr_low = 150;
5098                                 rx_pause.sge_thr_high = 250;
5099                         }
5100
5101
5102                         offset = BAR_USTRORM_INTMEM +
5103                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5104                                                                    fp->cl_id);
5105                         for (j = 0;
5106                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5107                              j++)
5108                                 REG_WR(bp, offset + j*4,
5109                                        ((u32 *)&rx_pause)[j]);
5110                 }
5111         }
5112
5113         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5114
5115         /* Init rate shaping and fairness contexts */
5116         if (IS_E1HMF(bp)) {
5117                 int vn;
5118
5119                 /* During init there is no active link;
5120                    until link is up, set the link rate to 10Gbps */
5121                 bp->link_vars.line_speed = SPEED_10000;
5122                 bnx2x_init_port_minmax(bp);
5123
5124                 bnx2x_calc_vn_weight_sum(bp);
5125
5126                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5127                         bnx2x_init_vn_minmax(bp, 2*vn + port);
5128
5129                 /* Enable rate shaping and fairness */
5130                 bp->cmng.flags.cmng_enables =
5131                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5132                 if (bp->vn_weight_sum)
5133                         bp->cmng.flags.cmng_enables |=
5134                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5135                 else
5136                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5137                            "  fairness will be disabled\n");
5138         } else {
5139                 /* rate shaping and fairness are disabled */
5140                 DP(NETIF_MSG_IFUP,
5141                    "single function mode  minmax will be disabled\n");
5142         }
5143
5144
5145         /* Store it to internal memory */
5146         if (bp->port.pmf)
5147                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5148                         REG_WR(bp, BAR_XSTRORM_INTMEM +
5149                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5150                                ((u32 *)(&bp->cmng))[i]);
5151 }
5152
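/* Internal RAM init, scoped by the load_code returned from the MCP: a COMMON
 * load falls through to the PORT and FUNCTION stages, a PORT load to the
 * FUNCTION stage, and a FUNCTION load runs only the per-function stage.
 */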
5153 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5154 {
5155         switch (load_code) {
5156         case FW_MSG_CODE_DRV_LOAD_COMMON:
5157                 bnx2x_init_internal_common(bp);
5158                 /* no break */
5159
5160         case FW_MSG_CODE_DRV_LOAD_PORT:
5161                 bnx2x_init_internal_port(bp);
5162                 /* no break */
5163
5164         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5165                 bnx2x_init_internal_func(bp);
5166                 break;
5167
5168         default:
5169                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5170                 break;
5171         }
5172 }
5173
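/* Top-level NIC init after firmware load: set up the per-queue and default
 * status blocks, coalescing, RX/TX/slow-path rings, contexts, internal RAM
 * and the RSS indirection table, then clear intr_sem and enable interrupts.
 * The final bnx2x_attn_int_deasserted0() call handles a SPIO5 attention
 * (used on some boards for fan failure) that may already be asserted.
 */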
5174 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5175 {
5176         int i;
5177
5178         for_each_queue(bp, i) {
5179                 struct bnx2x_fastpath *fp = &bp->fp[i];
5180
5181                 fp->bp = bp;
5182                 fp->state = BNX2X_FP_STATE_CLOSED;
5183                 fp->index = i;
5184                 fp->cl_id = BP_L_ID(bp) + i;
5185                 fp->sb_id = fp->cl_id;
5186                 DP(NETIF_MSG_IFUP,
5187                    "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
5188                    i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5189                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5190                               fp->sb_id);
5191                 bnx2x_update_fpsb_idx(fp);
5192         }
5193
5194         /* ensure status block indices were read */
5195         rmb();
5196
5197
5198         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5199                           DEF_SB_ID);
5200         bnx2x_update_dsb_idx(bp);
5201         bnx2x_update_coalesce(bp);
5202         bnx2x_init_rx_rings(bp);
5203         bnx2x_init_tx_ring(bp);
5204         bnx2x_init_sp_ring(bp);
5205         bnx2x_init_context(bp);
5206         bnx2x_init_internal(bp, load_code);
5207         bnx2x_init_ind_table(bp);
5208         bnx2x_stats_init(bp);
5209
5210         /* At this point, we are ready for interrupts */
5211         atomic_set(&bp->intr_sem, 0);
5212
5213         /* flush all before enabling interrupts */
5214         mb();
5215         mmiowb();
5216
5217         bnx2x_int_enable(bp);
5218
5219         /* Check for SPIO5 */
5220         bnx2x_attn_int_deasserted0(bp,
5221                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5222                                    AEU_INPUTS_ATTN_BITS_SPIO5);
5223 }
5224
5225 /* end of nic init */
5226
5227 /*
5228  * gzip service functions
5229  */
5230
5231 static int bnx2x_gunzip_init(struct bnx2x *bp)
5232 {
5233         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5234                                               &bp->gunzip_mapping);
5235         if (bp->gunzip_buf  == NULL)
5236                 goto gunzip_nomem1;
5237
5238         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5239         if (bp->strm  == NULL)
5240                 goto gunzip_nomem2;
5241
5242         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5243                                       GFP_KERNEL);
5244         if (bp->strm->workspace == NULL)
5245                 goto gunzip_nomem3;
5246
5247         return 0;
5248
5249 gunzip_nomem3:
5250         kfree(bp->strm);
5251         bp->strm = NULL;
5252
5253 gunzip_nomem2:
5254         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5255                             bp->gunzip_mapping);
5256         bp->gunzip_buf = NULL;
5257
5258 gunzip_nomem1:
5259         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5260                " decompression\n", bp->dev->name);
5261         return -ENOMEM;
5262 }
5263
5264 static void bnx2x_gunzip_end(struct bnx2x *bp)
5265 {
5266         kfree(bp->strm->workspace);
5267
5268         kfree(bp->strm);
5269         bp->strm = NULL;
5270
5271         if (bp->gunzip_buf) {
5272                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5273                                     bp->gunzip_mapping);
5274                 bp->gunzip_buf = NULL;
5275         }
5276 }
5277
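/* Decompress a gzip'ed firmware blob into gunzip_buf: verify the 1f 8b magic
 * and the deflate method byte, skip the fixed 10-byte header plus the
 * optional NUL-terminated file name when the FNAME flag is set, then run a
 * raw inflate (negative window bits) and report the output length in 32-bit
 * words via gunzip_outlen.
 */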
5278 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5279 {
5280         int n, rc;
5281
5282         /* check gzip header */
5283         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5284                 BNX2X_ERR("Bad gzip header\n");
5285                 return -EINVAL;
5286         }
5287
5288         n = 10;
5289
5290 #define FNAME                           0x8
5291
5292         if (zbuf[3] & FNAME)
5293                 while ((zbuf[n++] != 0) && (n < len));
5294
5295         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5296         bp->strm->avail_in = len - n;
5297         bp->strm->next_out = bp->gunzip_buf;
5298         bp->strm->avail_out = FW_BUF_SIZE;
5299
5300         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5301         if (rc != Z_OK)
5302                 return rc;
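        /*
         * A negative windowBits value tells zlib to inflate a raw deflate
         * stream with no zlib header or trailing checksum; the gzip wrapper
         * was already stripped by hand above.
         */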
5303
5304         rc = zlib_inflate(bp->strm, Z_FINISH);
5305         if ((rc != Z_OK) && (rc != Z_STREAM_END))
5306                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5307                        bp->dev->name, bp->strm->msg);
5308
5309         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5310         if (bp->gunzip_outlen & 0x3)
5311                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5312                                     " gunzip_outlen (%d) not aligned\n",
5313                        bp->dev->name, bp->gunzip_outlen);
5314         bp->gunzip_outlen >>= 2;
5315
5316         zlib_inflateEnd(bp->strm);
5317
5318         if (rc == Z_STREAM_END)
5319                 return 0;
5320
5321         return rc;
5322 }
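/*
 * Illustrative call sequence for the gunzip helpers above (a sketch only,
 * not a copy of the real callers; 'zblob' and 'zlen' stand in for a
 * compressed firmware section and its length):
 *
 *	if (bnx2x_gunzip_init(bp))
 *		return -ENOMEM;
 *	rc = bnx2x_gunzip(bp, zblob, zlen);
 *	if (rc == 0)
 *		use bp->gunzip_buf / bp->gunzip_outlen (length in 32-bit words)
 *	bnx2x_gunzip_end(bp);
 */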
5323
5324 /* nic load/unload */
5325
5326 /*
5327  * General service functions
5328  */
5329
5330 /* send a NIG loopback debug packet */
5331 static void bnx2x_lb_pckt(struct bnx2x *bp)
5332 {
5333         u32 wb_write[3];
5334
5335         /* Ethernet source and destination addresses */
5336         wb_write[0] = 0x55555555;
5337         wb_write[1] = 0x55555555;
5338         wb_write[2] = 0x20;             /* SOP */
5339         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5340
5341         /* NON-IP protocol */
5342         wb_write[0] = 0x09000000;
5343         wb_write[1] = 0x55555555;
5344         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
5345         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5346 }
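/*
 * Each bnx2x_lb_pckt() call injects one 16-byte (0x10) dummy frame through
 * the NIG debug/loopback interface: wb_write[0]/wb_write[1] carry packet
 * data while wb_write[2] carries control bits (0x20 = SOP, 0x10 = EOP, as
 * the inline comments note).  The 0x10-byte size is what the memory test
 * below expects to see accumulated in NIG_REG_STAT2_BRB_OCTET.
 */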
5347
5348 /* Some of the internal memories
5349  * are not directly readable from the driver;
5350  * to test them we send debug packets.
5351  */
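/*
 * The test below runs in two phases: a single loopback packet must first
 * show up in the NIG octet counter (0x10 bytes) and the PRS packet counter
 * (return -1/-2 on timeout); after a BRB/PRS reset, ten more packets must
 * accumulate to 0xb0 bytes and the NIG EOP FIFO must drain cleanly
 * (return -3/-4 on failure).  0 means all checks passed.
 */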
5352 static int bnx2x_int_mem_test(struct bnx2x *bp)
5353 {
5354         int factor;
5355         int count, i;
5356         u32 val = 0;
5357
5358         if (CHIP_REV_IS_FPGA(bp))
5359                 factor = 120;
5360         else if (CHIP_REV_IS_EMUL(bp))
5361                 factor = 200;
5362         else
5363                 factor = 1;
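        /*
         * 'factor' stretches the polling budgets used below: every wait
         * loop allows up to 1000 * factor iterations of msleep(10), i.e.
         * roughly ten seconds on real silicon and proportionally longer on
         * the much slower FPGA (x120) and emulation (x200) platforms.
         */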
5364
5365         DP(NETIF_MSG_HW, "start part1\n");
5366
5367         /* Disable inputs of parser neighbor blocks */
5368         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5369         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5370         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5371         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5372
5373         /*  Write 0 to parser credits for CFC search request */
5374         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5375
5376         /* send Ethernet packet */
5377         bnx2x_lb_pckt(bp);
5378
5379         /* TODO: do I need to reset the NIG statistics? */
5380         /* Wait until NIG register shows 1 packet of size 0x10 */
5381         count = 1000 * factor;
5382         while (count) {
5383
5384                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5385                 val = *bnx2x_sp(bp, wb_data[0]);
5386                 if (val == 0x10)
5387                         break;
5388
5389                 msleep(10);
5390                 count--;
5391         }
5392         if (val != 0x10) {
5393                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5394                 return -1;
5395         }
5396
5397         /* Wait until PRS register shows 1 packet */
5398         count = 1000 * factor;
5399         while (count) {
5400                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5401                 if (val == 1)
5402                         break;
5403
5404                 msleep(10);
5405                 count--;
5406         }
5407         if (val != 0x1) {
5408                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5409                 return -2;
5410         }
5411
5412         /* Reset and init BRB, PRS */
5413         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5414         msleep(50);
5415         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5416         msleep(50);
5417         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5418         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5419
5420         DP(NETIF_MSG_HW, "part2\n");
5421
5422         /* Disable inputs of parser neighbor blocks */
5423         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5424         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5425         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5426         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5427
5428         /* Write 0 to parser credits for CFC search request */
5429         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5430
5431         /* send 10 Ethernet packets */
5432         for (i = 0; i < 10; i++)
5433                 bnx2x_lb_pckt(bp);
5434
5435         /* Wait until NIG register shows 10 + 1
5436            packets of size 11*0x10 = 0xb0 */
5437         count = 1000 * factor;
5438         while (count) {
5439
5440                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5441                 val = *bnx2x_sp(bp, wb_data[0]);
5442                 if (val == 0xb0)
5443                         break;
5444
5445                 msleep(10);
5446                 count--;
5447         }
5448         if (val != 0xb0) {
5449                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5450                 return -3;
5451         }
5452
5453         /* Wait until PRS register shows 2 packets */
5454         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5455         if (val != 2)
5456                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5457
5458         /* Write 1 to parser credits for CFC search request */
5459         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5460
5461         /* Wait until PRS register shows 3 packets */
5462         msleep(10 * factor);
5463         /* the PRS packet counter should now show the third packet */
5464         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5465         if (val != 3)
5466                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5467
5468         /* clear NIG EOP FIFO */
5469         for (i = 0; i < 11; i++)
5470                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5471         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5472         if (val != 1) {
5473                 BNX2X_ERR("clear of NIG failed\n");
5474                 return -4;
5475         }
5476
5477         /* Reset and init BRB, PRS, NIG */
5478         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5479         msleep(50);
5480         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5481         msleep(50);
5482         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5483         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5484 #ifndef BCM_ISCSI
5485         /* set NIC mode */
5486         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5487 #endif
5488
5489         /* Enable inputs of parser neighbor blocks */
5490         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5491         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5492         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5493         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5494
5495         DP(NETIF_MSG_HW, "done\n");
5496
5497         return 0; /* OK */
5498 }
5499
5500 static void enable_blocks_attention(struct bnx2x *bp)
5501 {
5502         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5503         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5504         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5505         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5506         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5507         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5508         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5509         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5510         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5511 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5512 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5513         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5514         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5515         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5516 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5517 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5518         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5519         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5520         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5521         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5522 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5523 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5524         if (CHIP_REV_IS_FPGA(bp))
5525                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5526         else
5527                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5528         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5529         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5530         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5531 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5532 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5533         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5534         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5535 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5536         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);         /* bit 3,4 masked */
5537 }
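/*
 * Writing 0 to a block's *_INT_MASK register above unmasks all of that
 * block's attention sources; the SEM masks are left commented out (not
 * unmasked) and PBF keeps bits 3 and 4 masked (0x18), as noted inline.
 */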
5538
5539
5540 static void bnx2x_reset_common(struct bnx2x *bp)
5541 {
5542         /* reset_common */
5543         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5544                0xd3ffff7f);
5545         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5546 }
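/*
 * In the MISC reset registers the _CLEAR side appears to assert reset for
 * the selected blocks while the matching _SET side deasserts it;
 * bnx2x_init_common() below immediately writes 0xffffffff / 0xfffc to the
 * _SET registers to bring everything back out of reset.
 */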
5547
5548
5549 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5550 {
5551         u32 val;
5552         u8 port;
5553         u8 is_required = 0;
5554
5555         val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5556               SHARED_HW_CFG_FAN_FAILURE_MASK;
5557
5558         if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5559                 is_required = 1;
5560
5561         /*
5562          * The fan failure mechanism is usually related to the PHY type since
5563          * the power consumption of the board is affected by the PHY. Currently,
5564          * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
5565          */
5566         else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
5567                 for (port = PORT_0; port < PORT_MAX; port++) {
5568                         u32 phy_type =
5569                                 SHMEM_RD(bp, dev_info.port_hw_config[port].
5570                                          external_phy_config) &
5571                                 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
5572                         is_required |=
5573                                 ((phy_type ==
5574                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
5575                                  (phy_type ==
5576                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
5577                                  (phy_type ==
5578                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
5579                 }
5580
5581         DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
5582
5583         if (is_required == 0)
5584                 return;
5585
5586         /* Fan failure is indicated by SPIO 5 */
5587         bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5588                        MISC_REGISTERS_SPIO_INPUT_HI_Z);
5589
5590         /* set to active low mode */
5591         val = REG_RD(bp, MISC_REG_SPIO_INT);
5592         val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5593                                 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5594         REG_WR(bp, MISC_REG_SPIO_INT, val);
5595
5596         /* enable interrupt to signal the IGU */
5597         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5598         val |= (1 << MISC_REGISTERS_SPIO_5);
5599         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5600 }
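/*
 * SPIO5 is the same attention input that is checked explicitly right after
 * bnx2x_int_enable() during NIC init (the bnx2x_attn_int_deasserted0() call
 * masked with AEU_INPUTS_ATTN_BITS_SPIO5), presumably so that a fan failure
 * raised while the driver was down is still handled at load time.
 */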
5601
5602 static int bnx2x_init_common(struct bnx2x *bp)
5603 {
5604         u32 val, i;
5605
5606         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
5607
5608         bnx2x_reset_common(bp);
5609         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5610         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5611
5612         bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
5613         if (CHIP_IS_E1H(bp))
5614                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5615
5616         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5617         msleep(30);
5618         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5619
5620         bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
5621         if (CHIP_IS_E1(bp)) {
5622                 /* enable HW interrupt from PXP on USDM overflow
5623                    bit 16 on INT_MASK_0 */
5624                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5625         }
5626
5627         bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
5628         bnx2x_init_pxp(bp);
5629
5630 #ifdef __BIG_ENDIAN
5631         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5632         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5633         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5634         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5635         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5636         /* make sure this value is 0 */
5637         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5638
5639 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5640         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5641         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5642         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5643         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5644 #endif
5645
5646         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5647 #ifdef BCM_ISCSI
5648         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5649         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5650         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5651 #endif
5652
5653         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5654                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5655
5656         /* let the HW do its magic ... */
5657         msleep(100);
5658         /* finish PXP init */
5659         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5660         if (val != 1) {
5661                 BNX2X_ERR("PXP2 CFG failed\n");
5662                 return -EBUSY;
5663         }
5664         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5665         if (val != 1) {
5666                 BNX2X_ERR("PXP2 RD_INIT failed\n");
5667                 return -EBUSY;
5668         }
5669
5670         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5671         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5672
5673         bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
5674
5675         /* clean the DMAE memory */
5676         bp->dmae_ready = 1;
5677         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5678
5679         bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
5680         bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
5681         bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
5682         bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
5683
5684         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5685         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5686         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5687         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5688
5689         bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
5690         /* soft reset pulse */
5691         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5692         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5693
5694 #ifdef BCM_ISCSI
5695         bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
5696 #endif
5697
5698         bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
5699         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5700         if (!CHIP_REV_IS_SLOW(bp)) {
5701                 /* enable hw interrupt from doorbell Q */
5702                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5703         }
5704
5705         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5706         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5707         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5708         /* set NIC mode */
5709         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5710         if (CHIP_IS_E1H(bp))
5711                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5712
5713         bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5714         bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5715         bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5716         bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
5717
5718         bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5719         bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5720         bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5721         bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5722
5723         bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5724         bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5725         bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5726         bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
5727
5728         /* sync semi rtc */
5729         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5730                0x80000000);
5731         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5732                0x80000000);
5733
5734         bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5735         bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5736         bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
5737
5738         REG_WR(bp, SRC_REG_SOFT_RST, 1);
5739         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5740                 REG_WR(bp, i, 0xc0cac01a);
5741                 /* TODO: replace with something meaningful */
5742         }
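        /*
         * SRC_REG_KEYRSS0_0..SRC_REG_KEYRSS1_9 presumably hold the RSS hash
         * key used by the searcher block; 0xc0cac01a is just a recognizable
         * placeholder pattern, as the TODO above says.
         */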
5743         bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
5744         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5745
5746         if (sizeof(union cdu_context) != 1024)
5747                 /* we currently assume that a context is 1024 bytes */
5748                 printk(KERN_ALERT PFX "please adjust the size of"
5749                        " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5750
5751         bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
5752         val = (4 << 24) + (0 << 12) + 1024;
5753         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5754         if (CHIP_IS_E1(bp)) {
5755                 /* !!! fix pxp client credit until excel update */
5756                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5757                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5758         }
5759
5760         bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
5761         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5762         /* enable context validation interrupt from CFC */
5763         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5764
5765         /* set the thresholds to prevent CFC/CDU race */
5766         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5767
5768         bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
5769         bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
5770
5771         /* PXPCS COMMON comes here */
5772         bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
5773