bnx2x: Using the new FW
[linux-2.6.git] drivers/net/bnx2x_main.c
1 /* bnx2x_main.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2009 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath and fastpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h>  /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
51 #include <linux/io.h>
52
53
54 #include "bnx2x.h"
55 #include "bnx2x_init.h"
56 #include "bnx2x_init_ops.h"
57 #include "bnx2x_dump.h"
58
59 #define DRV_MODULE_VERSION      "1.48.114-1"
60 #define DRV_MODULE_RELDATE      "2009/07/29"
61 #define BNX2X_BC_VER            0x040200
62
63 #include <linux/firmware.h>
64 #include "bnx2x_fw_file_hdr.h"
65 /* FW files */
66 #define FW_FILE_PREFIX_E1               "bnx2x-e1-"
67 #define FW_FILE_PREFIX_E1H              "bnx2x-e1h-"
68
69 /* Time in jiffies before concluding the transmitter is hung */
70 #define TX_TIMEOUT              (5*HZ)
71
72 static char version[] __devinitdata =
73         "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
74         DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
75
76 MODULE_AUTHOR("Eliezer Tamir");
77 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
78 MODULE_LICENSE("GPL");
79 MODULE_VERSION(DRV_MODULE_VERSION);
80
81 static int multi_mode = 1;
82 module_param(multi_mode, int, 0);
83 MODULE_PARM_DESC(multi_mode, " Multi queue mode "
84                              "(0 Disable; 1 Enable (default))");
85
86 static int num_rx_queues;
87 module_param(num_rx_queues, int, 0);
88 MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
89                                 " (default is half number of CPUs)");
90
91 static int num_tx_queues;
92 module_param(num_tx_queues, int, 0);
93 MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
94                                 " (default is half number of CPUs)");
95
96 static int disable_tpa;
97 module_param(disable_tpa, int, 0);
98 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
99
100 static int int_mode;
101 module_param(int_mode, int, 0);
102 MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
103
104 static int poll;
105 module_param(poll, int, 0);
106 MODULE_PARM_DESC(poll, " Use polling (for debug)");
107
108 static int mrrs = -1;
109 module_param(mrrs, int, 0);
110 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
111
112 static int debug;
113 module_param(debug, int, 0);
114 MODULE_PARM_DESC(debug, " Default debug msglevel");
115
116 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
117
118 static struct workqueue_struct *bnx2x_wq;
119
120 enum bnx2x_board_type {
121         BCM57710 = 0,
122         BCM57711 = 1,
123         BCM57711E = 2,
124 };
125
126 /* indexed by board_type, above */
127 static struct {
128         char *name;
129 } board_info[] __devinitdata = {
130         { "Broadcom NetXtreme II BCM57710 XGb" },
131         { "Broadcom NetXtreme II BCM57711 XGb" },
132         { "Broadcom NetXtreme II BCM57711E XGb" }
133 };
134
135
136 static const struct pci_device_id bnx2x_pci_tbl[] = {
137         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
138                 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
139         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
140                 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
141         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
142                 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
143         { 0 }
144 };
145
146 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
147
148 /****************************************************************************
149 * General service functions
150 ****************************************************************************/
151
152 /* used only at init
153  * locking is done by mcp
154  */
155 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
156 {
157         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
158         pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
159         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
160                                PCICFG_VENDOR_ID_OFFSET);
161 }
162
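/* Counterpart to the indirect write above (a descriptive note, not upstream
 * text): point the PCICFG_GRC_ADDRESS window at the target register, read the
 * value back through PCICFG_GRC_DATA, then return the window to
 * PCICFG_VENDOR_ID_OFFSET.
 */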
163 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
164 {
165         u32 val;
166
167         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
168         pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
169         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
170                                PCICFG_VENDOR_ID_OFFSET);
171
172         return val;
173 }
174
175 static const u32 dmae_reg_go_c[] = {
176         DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
177         DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
178         DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
179         DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
180 };
181
182 /* copy command into DMAE command memory and set DMAE command go */
183 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
184                             int idx)
185 {
186         u32 cmd_offset;
187         int i;
188
189         cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
190         for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
191                 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
192
193                 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
194                    idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
195         }
196         REG_WR(bp, dmae_reg_go_c[idx], 1);
197 }
198
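/* Descriptive note: DMA len32 dwords from host memory at dma_addr into GRC
 * space at dst_addr using the per-function init DMAE channel; while DMAE is
 * not yet ready the transfer falls back to indirect register writes.
 */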
199 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
200                       u32 len32)
201 {
202         struct dmae_command *dmae = &bp->init_dmae;
203         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
204         int cnt = 200;
205
206         if (!bp->dmae_ready) {
207                 u32 *data = bnx2x_sp(bp, wb_data[0]);
208
209                 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
210                    "  using indirect\n", dst_addr, len32);
211                 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
212                 return;
213         }
214
215         mutex_lock(&bp->dmae_mutex);
216
217         memset(dmae, 0, sizeof(struct dmae_command));
218
219         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
220                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
221                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
222 #ifdef __BIG_ENDIAN
223                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
224 #else
225                         DMAE_CMD_ENDIANITY_DW_SWAP |
226 #endif
227                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
228                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
229         dmae->src_addr_lo = U64_LO(dma_addr);
230         dmae->src_addr_hi = U64_HI(dma_addr);
231         dmae->dst_addr_lo = dst_addr >> 2;
232         dmae->dst_addr_hi = 0;
233         dmae->len = len32;
234         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
235         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
236         dmae->comp_val = DMAE_COMP_VAL;
237
238         DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
239            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
240                     "dst_addr [%x:%08x (%08x)]\n"
241            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
242            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
243            dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
244            dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
245         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
246            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
247            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
248
249         *wb_comp = 0;
250
251         bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
252
253         udelay(5);
254
255         while (*wb_comp != DMAE_COMP_VAL) {
256                 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
257
258                 if (!cnt) {
259                         BNX2X_ERR("DMAE timeout!\n");
260                         break;
261                 }
262                 cnt--;
263                 /* adjust delay for emulation/FPGA */
264                 if (CHIP_REV_IS_SLOW(bp))
265                         msleep(100);
266                 else
267                         udelay(5);
268         }
269
270         mutex_unlock(&bp->dmae_mutex);
271 }
272
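/* Descriptive note: mirror of bnx2x_write_dmae() - DMA len32 dwords from GRC
 * space at src_addr into the slowpath wb_data buffer, with the same
 * indirect-access fallback while DMAE is not ready.
 */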
273 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
274 {
275         struct dmae_command *dmae = &bp->init_dmae;
276         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
277         int cnt = 200;
278
279         if (!bp->dmae_ready) {
280                 u32 *data = bnx2x_sp(bp, wb_data[0]);
281                 int i;
282
283                 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
284                    "  using indirect\n", src_addr, len32);
285                 for (i = 0; i < len32; i++)
286                         data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
287                 return;
288         }
289
290         mutex_lock(&bp->dmae_mutex);
291
292         memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
293         memset(dmae, 0, sizeof(struct dmae_command));
294
295         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
296                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
297                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
298 #ifdef __BIG_ENDIAN
299                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
300 #else
301                         DMAE_CMD_ENDIANITY_DW_SWAP |
302 #endif
303                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
304                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
305         dmae->src_addr_lo = src_addr >> 2;
306         dmae->src_addr_hi = 0;
307         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
308         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
309         dmae->len = len32;
310         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
311         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
312         dmae->comp_val = DMAE_COMP_VAL;
313
314         DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
315            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
316                     "dst_addr [%x:%08x (%08x)]\n"
317            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
318            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
319            dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
320            dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
321
322         *wb_comp = 0;
323
324         bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
325
326         udelay(5);
327
328         while (*wb_comp != DMAE_COMP_VAL) {
329
330                 if (!cnt) {
331                         BNX2X_ERR("DMAE timeout!\n");
332                         break;
333                 }
334                 cnt--;
335                 /* adjust delay for emulation/FPGA */
336                 if (CHIP_REV_IS_SLOW(bp))
337                         msleep(100);
338                 else
339                         udelay(5);
340         }
341         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
342            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
343            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
344
345         mutex_unlock(&bp->dmae_mutex);
346 }
347
348 /* used only for slowpath so not inlined */
349 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
350 {
351         u32 wb_write[2];
352
353         wb_write[0] = val_hi;
354         wb_write[1] = val_lo;
355         REG_WR_DMAE(bp, reg, wb_write, 2);
356 }
357
358 #ifdef USE_WB_RD
359 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
360 {
361         u32 wb_data[2];
362
363         REG_RD_DMAE(bp, reg, wb_data, 2);
364
365         return HILO_U64(wb_data[0], wb_data[1]);
366 }
367 #endif
368
369 static int bnx2x_mc_assert(struct bnx2x *bp)
370 {
371         char last_idx;
372         int i, rc = 0;
373         u32 row0, row1, row2, row3;
374
375         /* XSTORM */
376         last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
377                            XSTORM_ASSERT_LIST_INDEX_OFFSET);
378         if (last_idx)
379                 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
380
381         /* print the asserts */
382         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
383
384                 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
385                               XSTORM_ASSERT_LIST_OFFSET(i));
386                 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
387                               XSTORM_ASSERT_LIST_OFFSET(i) + 4);
388                 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
389                               XSTORM_ASSERT_LIST_OFFSET(i) + 8);
390                 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
391                               XSTORM_ASSERT_LIST_OFFSET(i) + 12);
392
393                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
394                         BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
395                                   " 0x%08x 0x%08x 0x%08x\n",
396                                   i, row3, row2, row1, row0);
397                         rc++;
398                 } else {
399                         break;
400                 }
401         }
402
403         /* TSTORM */
404         last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
405                            TSTORM_ASSERT_LIST_INDEX_OFFSET);
406         if (last_idx)
407                 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
408
409         /* print the asserts */
410         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
411
412                 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
413                               TSTORM_ASSERT_LIST_OFFSET(i));
414                 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
415                               TSTORM_ASSERT_LIST_OFFSET(i) + 4);
416                 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
417                               TSTORM_ASSERT_LIST_OFFSET(i) + 8);
418                 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
419                               TSTORM_ASSERT_LIST_OFFSET(i) + 12);
420
421                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
422                         BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
423                                   " 0x%08x 0x%08x 0x%08x\n",
424                                   i, row3, row2, row1, row0);
425                         rc++;
426                 } else {
427                         break;
428                 }
429         }
430
431         /* CSTORM */
432         last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
433                            CSTORM_ASSERT_LIST_INDEX_OFFSET);
434         if (last_idx)
435                 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
436
437         /* print the asserts */
438         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
439
440                 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
441                               CSTORM_ASSERT_LIST_OFFSET(i));
442                 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
443                               CSTORM_ASSERT_LIST_OFFSET(i) + 4);
444                 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
445                               CSTORM_ASSERT_LIST_OFFSET(i) + 8);
446                 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
447                               CSTORM_ASSERT_LIST_OFFSET(i) + 12);
448
449                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
450                         BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
451                                   " 0x%08x 0x%08x 0x%08x\n",
452                                   i, row3, row2, row1, row0);
453                         rc++;
454                 } else {
455                         break;
456                 }
457         }
458
459         /* USTORM */
460         last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
461                            USTORM_ASSERT_LIST_INDEX_OFFSET);
462         if (last_idx)
463                 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
464
465         /* print the asserts */
466         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
467
468                 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
469                               USTORM_ASSERT_LIST_OFFSET(i));
470                 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
471                               USTORM_ASSERT_LIST_OFFSET(i) + 4);
472                 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
473                               USTORM_ASSERT_LIST_OFFSET(i) + 8);
474                 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
475                               USTORM_ASSERT_LIST_OFFSET(i) + 12);
476
477                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
478                         BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
479                                   " 0x%08x 0x%08x 0x%08x\n",
480                                   i, row3, row2, row1, row0);
481                         rc++;
482                 } else {
483                         break;
484                 }
485         }
486
487         return rc;
488 }
489
490 static void bnx2x_fw_dump(struct bnx2x *bp)
491 {
492         u32 mark, offset;
493         __be32 data[9];
494         int word;
495
496         mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
497         mark = ((mark + 0x3) & ~0x3);
498         printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);
499
500         printk(KERN_ERR PFX);
501         for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
502                 for (word = 0; word < 8; word++)
503                         data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
504                                                   offset + 4*word));
505                 data[8] = 0x0;
506                 printk(KERN_CONT "%s", (char *)data);
507         }
508         for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
509                 for (word = 0; word < 8; word++)
510                         data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
511                                                   offset + 4*word));
512                 data[8] = 0x0;
513                 printk(KERN_CONT "%s", (char *)data);
514         }
515         printk(KERN_ERR PFX "end of fw dump\n");
516 }
517
518 static void bnx2x_panic_dump(struct bnx2x *bp)
519 {
520         int i;
521         u16 j, start, end;
522
523         bp->stats_state = STATS_STATE_DISABLED;
524         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
525
526         BNX2X_ERR("begin crash dump -----------------\n");
527
528         /* Indices */
529         /* Common */
530         BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
531                   "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
532                   "  spq_prod_idx(%u)\n",
533                   bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
534                   bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
535
536         /* Rx */
537         for_each_rx_queue(bp, i) {
538                 struct bnx2x_fastpath *fp = &bp->fp[i];
539
540                 BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
541                           "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
542                           "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
543                           i, fp->rx_bd_prod, fp->rx_bd_cons,
544                           le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
545                           fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
546                 BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
547                           "  fp_u_idx(%x) *sb_u_idx(%x)\n",
548                           fp->rx_sge_prod, fp->last_max_sge,
549                           le16_to_cpu(fp->fp_u_idx),
550                           fp->status_blk->u_status_block.status_block_index);
551         }
552
553         /* Tx */
554         for_each_tx_queue(bp, i) {
555                 struct bnx2x_fastpath *fp = &bp->fp[i];
556
557                 BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
558                           "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
559                           i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
560                           fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
561                 BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
562                           "  tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
563                           fp->status_blk->c_status_block.status_block_index,
564                           fp->tx_db.data.prod);
565         }
566
567         /* Rings */
568         /* Rx */
569         for_each_rx_queue(bp, i) {
570                 struct bnx2x_fastpath *fp = &bp->fp[i];
571
572                 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
573                 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
574                 for (j = start; j != end; j = RX_BD(j + 1)) {
575                         u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
576                         struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
577
578                         BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
579                                   i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
580                 }
581
582                 start = RX_SGE(fp->rx_sge_prod);
583                 end = RX_SGE(fp->last_max_sge);
584                 for (j = start; j != end; j = RX_SGE(j + 1)) {
585                         u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
586                         struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
587
588                         BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
589                                   i, j, rx_sge[1], rx_sge[0], sw_page->page);
590                 }
591
592                 start = RCQ_BD(fp->rx_comp_cons - 10);
593                 end = RCQ_BD(fp->rx_comp_cons + 503);
594                 for (j = start; j != end; j = RCQ_BD(j + 1)) {
595                         u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
596
597                         BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
598                                   i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
599                 }
600         }
601
602         /* Tx */
603         for_each_tx_queue(bp, i) {
604                 struct bnx2x_fastpath *fp = &bp->fp[i];
605
606                 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
607                 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
608                 for (j = start; j != end; j = TX_BD(j + 1)) {
609                         struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
610
611                         BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
612                                   i, j, sw_bd->skb, sw_bd->first_bd);
613                 }
614
615                 start = TX_BD(fp->tx_bd_cons - 10);
616                 end = TX_BD(fp->tx_bd_cons + 254);
617                 for (j = start; j != end; j = TX_BD(j + 1)) {
618                         u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
619
620                         BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
621                                   i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
622                 }
623         }
624
625         bnx2x_fw_dump(bp);
626         bnx2x_mc_assert(bp);
627         BNX2X_ERR("end crash dump -----------------\n");
628 }
629
630 static void bnx2x_int_enable(struct bnx2x *bp)
631 {
632         int port = BP_PORT(bp);
633         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
634         u32 val = REG_RD(bp, addr);
635         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
636         int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
637
638         if (msix) {
639                 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
640                          HC_CONFIG_0_REG_INT_LINE_EN_0);
641                 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
642                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
643         } else if (msi) {
644                 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
645                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
646                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
647                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
648         } else {
649                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
650                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
651                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
652                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
653
654                 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
655                    val, port, addr);
656
657                 REG_WR(bp, addr, val);
658
659                 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
660         }
661
662         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
663            val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
664
665         REG_WR(bp, addr, val);
666         /*
667          * Ensure that HC_CONFIG is written before leading/trailing edge config
668          */
669         mmiowb();
670         barrier();
671
672         if (CHIP_IS_E1H(bp)) {
673                 /* init leading/trailing edge */
674                 if (IS_E1HMF(bp)) {
675                         val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
676                         if (bp->port.pmf)
677                                 /* enable nig and gpio3 attention */
678                                 val |= 0x1100;
679                 } else
680                         val = 0xffff;
681
682                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
683                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
684         }
685
686         /* Make sure that interrupts are indeed enabled from here on */
687         mmiowb();
688 }
689
690 static void bnx2x_int_disable(struct bnx2x *bp)
691 {
692         int port = BP_PORT(bp);
693         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
694         u32 val = REG_RD(bp, addr);
695
696         val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
697                  HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
698                  HC_CONFIG_0_REG_INT_LINE_EN_0 |
699                  HC_CONFIG_0_REG_ATTN_BIT_EN_0);
700
701         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
702            val, port, addr);
703
704         /* flush all outstanding writes */
705         mmiowb();
706
707         REG_WR(bp, addr, val);
708         if (REG_RD(bp, addr) != val)
709                 BNX2X_ERR("BUG! proper val not read from IGU!\n");
710
711 }
712
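/* Descriptive note: mask interrupts (optionally also in HW) and wait until
 * every ISR and the slowpath task have finished running.
 */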
713 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
714 {
715         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
716         int i, offset;
717
718         /* disable interrupt handling */
719         atomic_inc(&bp->intr_sem);
720         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
721
722         if (disable_hw)
723                 /* prevent the HW from sending interrupts */
724                 bnx2x_int_disable(bp);
725
726         /* make sure all ISRs are done */
727         if (msix) {
728                 synchronize_irq(bp->msix_table[0].vector);
729                 offset = 1;
730                 for_each_queue(bp, i)
731                         synchronize_irq(bp->msix_table[i + offset].vector);
732         } else
733                 synchronize_irq(bp->pdev->irq);
734
735         /* make sure sp_task is not running */
736         cancel_delayed_work(&bp->sp_task);
737         flush_workqueue(bnx2x_wq);
738 }
739
740 /* fast path */
741
742 /*
743  * General service functions
744  */
745
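/* Descriptive note: acknowledge a status block to the IGU - report the
 * consumed index for the given storm; op selects the interrupt mode and
 * update controls whether the index is actually updated.
 */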
746 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
747                                 u8 storm, u16 index, u8 op, u8 update)
748 {
749         u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
750                        COMMAND_REG_INT_ACK);
751         struct igu_ack_register igu_ack;
752
753         igu_ack.status_block_index = index;
754         igu_ack.sb_id_and_flags =
755                         ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
756                          (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
757                          (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
758                          (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
759
760         DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
761            (*(u32 *)&igu_ack), hc_addr);
762         REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
763
764         /* Make sure that ACK is written */
765         mmiowb();
766         barrier();
767 }
768
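/* Descriptive note: refresh the cached fastpath status-block indices; the
 * return value flags which of them changed (bit 0 - CSTORM index,
 * bit 1 - USTORM index).
 */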
769 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
770 {
771         struct host_status_block *fpsb = fp->status_blk;
772         u16 rc = 0;
773
774         barrier(); /* status block is written to by the chip */
775         if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
776                 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
777                 rc |= 1;
778         }
779         if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
780                 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
781                 rc |= 2;
782         }
783         return rc;
784 }
785
786 static u16 bnx2x_ack_int(struct bnx2x *bp)
787 {
788         u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
789                        COMMAND_REG_SIMD_MASK);
790         u32 result = REG_RD(bp, hc_addr);
791
792         DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
793            result, hc_addr);
794
795         return result;
796 }
797
798
799 /*
800  * fast path service functions
801  */
802
803 static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
804 {
805         /* Tell compiler that consumer and producer can change */
806         barrier();
807         return (fp->tx_pkt_prod != fp->tx_pkt_cons);
808 }
809
810 /* free skb in the packet ring at pos idx
811  * return idx of last bd freed
812  */
813 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
814                              u16 idx)
815 {
816         struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
817         struct eth_tx_start_bd *tx_start_bd;
818         struct eth_tx_bd *tx_data_bd;
819         struct sk_buff *skb = tx_buf->skb;
820         u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
821         int nbd;
822
823         DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
824            idx, tx_buf, skb);
825
826         /* unmap first bd */
827         DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
828         tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
829         pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
830                          BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
831
832         nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
833 #ifdef BNX2X_STOP_ON_ERROR
834         if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
835                 BNX2X_ERR("BAD nbd!\n");
836                 bnx2x_panic();
837         }
838 #endif
839         new_cons = nbd + tx_buf->first_bd;
840
841         /* Get the next bd */
842         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
843
844         /* Skip a parse bd... */
845         --nbd;
846         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
847
848         /* ...and the TSO split header bd since they have no mapping */
849         if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
850                 --nbd;
851                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
852         }
853
854         /* now free frags */
855         while (nbd > 0) {
856
857                 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
858                 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
859                 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
860                                BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
861                 if (--nbd)
862                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
863         }
864
865         /* release skb */
866         WARN_ON(!skb);
867         dev_kfree_skb_any(skb);
868         tx_buf->first_bd = 0;
869         tx_buf->skb = NULL;
870
871         return new_cons;
872 }
873
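/* Descriptive note: number of Tx BDs still available on this fastpath ring;
 * the NUM_TX_RINGS "next page" entries are counted as used so they serve as
 * a safety margin.
 */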
874 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
875 {
876         s16 used;
877         u16 prod;
878         u16 cons;
879
880         barrier(); /* Tell compiler that prod and cons can change */
881         prod = fp->tx_bd_prod;
882         cons = fp->tx_bd_cons;
883
884         /* NUM_TX_RINGS = number of "next-page" entries;
885            it will be used as a threshold */
886         used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
887
888 #ifdef BNX2X_STOP_ON_ERROR
889         WARN_ON(used < 0);
890         WARN_ON(used > fp->bp->tx_ring_size);
891         WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
892 #endif
893
894         return (s16)(fp->bp->tx_ring_size) - used;
895 }
896
897 static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
898 {
899         struct bnx2x *bp = fp->bp;
900         struct netdev_queue *txq;
901         u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
902         int done = 0;
903
904 #ifdef BNX2X_STOP_ON_ERROR
905         if (unlikely(bp->panic))
906                 return;
907 #endif
908
909         txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
910         hw_cons = le16_to_cpu(*fp->tx_cons_sb);
911         sw_cons = fp->tx_pkt_cons;
912
913         while (sw_cons != hw_cons) {
914                 u16 pkt_cons;
915
916                 pkt_cons = TX_BD(sw_cons);
917
918                 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
919
920                 DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
921                    hw_cons, sw_cons, pkt_cons);
922
923 /*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
924                         rmb();
925                         prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
926                 }
927 */
928                 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
929                 sw_cons++;
930                 done++;
931         }
932
933         fp->tx_pkt_cons = sw_cons;
934         fp->tx_bd_cons = bd_cons;
935
936         /* TBD need a thresh? */
937         if (unlikely(netif_tx_queue_stopped(txq))) {
938
939                 /* Need to make the tx_bd_cons update visible to start_xmit()
940                  * before checking for netif_tx_queue_stopped().  Without the
941                  * memory barrier, there is a small possibility that
942                  * start_xmit() will miss it and cause the queue to be stopped
943                  * forever.
944                  */
945                 smp_mb();
946
947                 if ((netif_tx_queue_stopped(txq)) &&
948                     (bp->state == BNX2X_STATE_OPEN) &&
949                     (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
950                         netif_tx_wake_queue(txq);
951         }
952 }
953
954
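/* Descriptive note: handle a ramrod completion reported on a fastpath
 * completion queue and advance the corresponding fastpath/driver state
 * machine.
 */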
955 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
956                            union eth_rx_cqe *rr_cqe)
957 {
958         struct bnx2x *bp = fp->bp;
959         int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
960         int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
961
962         DP(BNX2X_MSG_SP,
963            "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
964            fp->index, cid, command, bp->state,
965            rr_cqe->ramrod_cqe.ramrod_type);
966
967         bp->spq_left++;
968
969         if (fp->index) {
970                 switch (command | fp->state) {
971                 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
972                                                 BNX2X_FP_STATE_OPENING):
973                         DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
974                            cid);
975                         fp->state = BNX2X_FP_STATE_OPEN;
976                         break;
977
978                 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
979                         DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
980                            cid);
981                         fp->state = BNX2X_FP_STATE_HALTED;
982                         break;
983
984                 default:
985                         BNX2X_ERR("unexpected MC reply (%d)  "
986                                   "fp->state is %x\n", command, fp->state);
987                         break;
988                 }
989                 mb(); /* force bnx2x_wait_ramrod() to see the change */
990                 return;
991         }
992
993         switch (command | bp->state) {
994         case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
995                 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
996                 bp->state = BNX2X_STATE_OPEN;
997                 break;
998
999         case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
1000                 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
1001                 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
1002                 fp->state = BNX2X_FP_STATE_HALTED;
1003                 break;
1004
1005         case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
1006                 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
1007                 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
1008                 break;
1009
1010
1011         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
1012         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
1013                 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
1014                 bp->set_mac_pending = 0;
1015                 break;
1016
1017         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
1018         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED):
1019                 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
1020                 break;
1021
1022         default:
1023                 BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
1024                           command, bp->state);
1025                 break;
1026         }
1027         mb(); /* force bnx2x_wait_ramrod() to see the change */
1028 }
1029
1030 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1031                                      struct bnx2x_fastpath *fp, u16 index)
1032 {
1033         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1034         struct page *page = sw_buf->page;
1035         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1036
1037         /* Skip "next page" elements */
1038         if (!page)
1039                 return;
1040
1041         pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
1042                        SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1043         __free_pages(page, PAGES_PER_SGE_SHIFT);
1044
1045         sw_buf->page = NULL;
1046         sge->addr_hi = 0;
1047         sge->addr_lo = 0;
1048 }
1049
1050 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1051                                            struct bnx2x_fastpath *fp, int last)
1052 {
1053         int i;
1054
1055         for (i = 0; i < last; i++)
1056                 bnx2x_free_rx_sge(bp, fp, i);
1057 }
1058
1059 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1060                                      struct bnx2x_fastpath *fp, u16 index)
1061 {
1062         struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1063         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1064         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1065         dma_addr_t mapping;
1066
1067         if (unlikely(page == NULL))
1068                 return -ENOMEM;
1069
1070         mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
1071                                PCI_DMA_FROMDEVICE);
1072         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1073                 __free_pages(page, PAGES_PER_SGE_SHIFT);
1074                 return -ENOMEM;
1075         }
1076
1077         sw_buf->page = page;
1078         pci_unmap_addr_set(sw_buf, mapping, mapping);
1079
1080         sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1081         sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1082
1083         return 0;
1084 }
1085
1086 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1087                                      struct bnx2x_fastpath *fp, u16 index)
1088 {
1089         struct sk_buff *skb;
1090         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1091         struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1092         dma_addr_t mapping;
1093
1094         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1095         if (unlikely(skb == NULL))
1096                 return -ENOMEM;
1097
1098         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
1099                                  PCI_DMA_FROMDEVICE);
1100         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1101                 dev_kfree_skb(skb);
1102                 return -ENOMEM;
1103         }
1104
1105         rx_buf->skb = skb;
1106         pci_unmap_addr_set(rx_buf, mapping, mapping);
1107
1108         rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1109         rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1110
1111         return 0;
1112 }
1113
1114 /* note that we are not allocating a new skb,
1115  * we are just moving one from cons to prod;
1116  * we are not creating a new mapping,
1117  * so there is no need to check for dma_mapping_error().
1118  */
1119 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1120                                struct sk_buff *skb, u16 cons, u16 prod)
1121 {
1122         struct bnx2x *bp = fp->bp;
1123         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1124         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1125         struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1126         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1127
1128         pci_dma_sync_single_for_device(bp->pdev,
1129                                        pci_unmap_addr(cons_rx_buf, mapping),
1130                                        RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1131
1132         prod_rx_buf->skb = cons_rx_buf->skb;
1133         pci_unmap_addr_set(prod_rx_buf, mapping,
1134                            pci_unmap_addr(cons_rx_buf, mapping));
1135         *prod_bd = *cons_bd;
1136 }
1137
1138 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1139                                              u16 idx)
1140 {
1141         u16 last_max = fp->last_max_sge;
1142
1143         if (SUB_S16(idx, last_max) > 0)
1144                 fp->last_max_sge = idx;
1145 }
1146
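/* Descriptive note: clear the mask bits of the two last entries of every SGE
 * ring page; these correspond to the "next page" elements and never carry
 * real buffers.
 */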
1147 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1148 {
1149         int i, j;
1150
1151         for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1152                 int idx = RX_SGE_CNT * i - 1;
1153
1154                 for (j = 0; j < 2; j++) {
1155                         SGE_MASK_CLEAR_BIT(fp, idx);
1156                         idx--;
1157                 }
1158         }
1159 }
1160
1161 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1162                                   struct eth_fast_path_rx_cqe *fp_cqe)
1163 {
1164         struct bnx2x *bp = fp->bp;
1165         u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1166                                      le16_to_cpu(fp_cqe->len_on_bd)) >>
1167                       SGE_PAGE_SHIFT;
1168         u16 last_max, last_elem, first_elem;
1169         u16 delta = 0;
1170         u16 i;
1171
1172         if (!sge_len)
1173                 return;
1174
1175         /* First mark all used pages */
1176         for (i = 0; i < sge_len; i++)
1177                 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1178
1179         DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1180            sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1181
1182         /* Here we assume that the last SGE index is the biggest */
1183         prefetch((void *)(fp->sge_mask));
1184         bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1185
1186         last_max = RX_SGE(fp->last_max_sge);
1187         last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1188         first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1189
1190         /* If ring is not full */
1191         if (last_elem + 1 != first_elem)
1192                 last_elem++;
1193
1194         /* Now update the prod */
1195         for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1196                 if (likely(fp->sge_mask[i]))
1197                         break;
1198
1199                 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1200                 delta += RX_SGE_MASK_ELEM_SZ;
1201         }
1202
1203         if (delta > 0) {
1204                 fp->rx_sge_prod += delta;
1205                 /* clear page-end entries */
1206                 bnx2x_clear_sge_mask_next_elems(fp);
1207         }
1208
1209         DP(NETIF_MSG_RX_STATUS,
1210            "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
1211            fp->last_max_sge, fp->rx_sge_prod);
1212 }
1213
1214 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1215 {
1216         /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1217         memset(fp->sge_mask, 0xff,
1218                (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1219
1220         /* Clear the two last indices in the page to 1:
1221            these are the indices that correspond to the "next" element,
1222            hence will never be indicated and should be removed from
1223            the calculations. */
1224         bnx2x_clear_sge_mask_next_elems(fp);
1225 }
1226
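/* Descriptive note: begin a TPA aggregation on the given queue - the
 * pre-allocated skb from the tpa_pool takes over the producer ring slot,
 * while the skb that holds the first segment is parked in the pool until the
 * aggregation is stopped.
 */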
1227 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1228                             struct sk_buff *skb, u16 cons, u16 prod)
1229 {
1230         struct bnx2x *bp = fp->bp;
1231         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1232         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1233         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1234         dma_addr_t mapping;
1235
1236         /* move empty skb from pool to prod and map it */
1237         prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1238         mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1239                                  bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1240         pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1241
1242         /* move partial skb from cons to pool (don't unmap yet) */
1243         fp->tpa_pool[queue] = *cons_rx_buf;
1244
1245         /* mark bin state as start - print error if current state != stop */
1246         if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1247                 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1248
1249         fp->tpa_state[queue] = BNX2X_TPA_START;
1250
1251         /* point prod_bd to new skb */
1252         prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1253         prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1254
1255 #ifdef BNX2X_STOP_ON_ERROR
1256         fp->tpa_queue_used |= (1 << queue);
1257 #ifdef __powerpc64__
1258         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1259 #else
1260         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1261 #endif
1262            fp->tpa_queue_used);
1263 #endif
1264 }
1265
1266 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1267                                struct sk_buff *skb,
1268                                struct eth_fast_path_rx_cqe *fp_cqe,
1269                                u16 cqe_idx)
1270 {
1271         struct sw_rx_page *rx_pg, old_rx_pg;
1272         u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1273         u32 i, frag_len, frag_size, pages;
1274         int err;
1275         int j;
1276
1277         frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1278         pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1279
1280         /* This is needed in order to enable forwarding support */
1281         if (frag_size)
1282                 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1283                                                max(frag_size, (u32)len_on_bd));
1284
1285 #ifdef BNX2X_STOP_ON_ERROR
1286         if (pages >
1287             min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
1288                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1289                           pages, cqe_idx);
1290                 BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
1291                           fp_cqe->pkt_len, len_on_bd);
1292                 bnx2x_panic();
1293                 return -EINVAL;
1294         }
1295 #endif
1296
1297         /* Run through the SGL and compose the fragmented skb */
1298         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1299                 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1300
1301                 /* FW gives the indices of the SGE as if the ring is an array
1302                    (meaning that "next" element will consume 2 indices) */
1303                 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1304                 rx_pg = &fp->rx_page_ring[sge_idx];
1305                 old_rx_pg = *rx_pg;
1306
1307                 /* If we fail to allocate a substitute page, we simply stop
1308                    where we are and drop the whole packet */
1309                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1310                 if (unlikely(err)) {
1311                         fp->eth_q_stats.rx_skb_alloc_failed++;
1312                         return err;
1313                 }
1314
1315                 /* Unmap the page as we're going to pass it to the stack */
1316                 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1317                               SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1318
1319                 /* Add one frag and update the appropriate fields in the skb */
1320                 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1321
1322                 skb->data_len += frag_len;
1323                 skb->truesize += frag_len;
1324                 skb->len += frag_len;
1325
1326                 frag_size -= frag_len;
1327         }
1328
1329         return 0;
1330 }
1331
1332 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1333                            u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1334                            u16 cqe_idx)
1335 {
1336         struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1337         struct sk_buff *skb = rx_buf->skb;
1338         /* alloc new skb */
1339         struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1340
1341         /* Unmap skb in the pool anyway, as we are going to change
1342            pool entry status to BNX2X_TPA_STOP even if new skb allocation
1343            fails. */
1344         pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1345                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1346
1347         if (likely(new_skb)) {
1348                 /* fix ip xsum and give it to the stack */
1349                 /* (no need to map the new skb) */
1350 #ifdef BCM_VLAN
1351                 int is_vlan_cqe =
1352                         (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1353                          PARSING_FLAGS_VLAN);
1354                 int is_not_hwaccel_vlan_cqe =
1355                         (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1356 #endif
1357
1358                 prefetch(skb);
1359                 prefetch(((char *)(skb)) + 128);
1360
1361 #ifdef BNX2X_STOP_ON_ERROR
1362                 if (pad + len > bp->rx_buf_size) {
1363                         BNX2X_ERR("skb_put is about to fail...  "
1364                                   "pad %d  len %d  rx_buf_size %d\n",
1365                                   pad, len, bp->rx_buf_size);
1366                         bnx2x_panic();
1367                         return;
1368                 }
1369 #endif
1370
1371                 skb_reserve(skb, pad);
1372                 skb_put(skb, len);
1373
1374                 skb->protocol = eth_type_trans(skb, bp->dev);
1375                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1376
1377                 {
1378                         struct iphdr *iph;
1379
1380                         iph = (struct iphdr *)skb->data;
1381 #ifdef BCM_VLAN
1382                         /* If there is no Rx VLAN offloading -
1383                            take the VLAN tag into account */
1384                         if (unlikely(is_not_hwaccel_vlan_cqe))
1385                                 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1386 #endif
1387                         iph->check = 0;
1388                         iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1389                 }
1390
1391                 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1392                                          &cqe->fast_path_cqe, cqe_idx)) {
1393 #ifdef BCM_VLAN
1394                         if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1395                             (!is_not_hwaccel_vlan_cqe))
1396                                 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1397                                                 le16_to_cpu(cqe->fast_path_cqe.
1398                                                             vlan_tag));
1399                         else
1400 #endif
1401                                 netif_receive_skb(skb);
1402                 } else {
1403                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1404                            " - dropping packet!\n");
1405                         dev_kfree_skb(skb);
1406                 }
1407
1408
1409                 /* put new skb in bin */
1410                 fp->tpa_pool[queue].skb = new_skb;
1411
1412         } else {
1413                 /* else drop the packet and keep the buffer in the bin */
1414                 DP(NETIF_MSG_RX_STATUS,
1415                    "Failed to allocate new skb - dropping packet!\n");
1416                 fp->eth_q_stats.rx_skb_alloc_failed++;
1417         }
1418
1419         fp->tpa_state[queue] = BNX2X_TPA_STOP;
1420 }
1421
1422 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1423                                         struct bnx2x_fastpath *fp,
1424                                         u16 bd_prod, u16 rx_comp_prod,
1425                                         u16 rx_sge_prod)
1426 {
1427         struct ustorm_eth_rx_producers rx_prods = {0};
1428         int i;
1429
1430         /* Update producers */
1431         rx_prods.bd_prod = bd_prod;
1432         rx_prods.cqe_prod = rx_comp_prod;
1433         rx_prods.sge_prod = rx_sge_prod;
1434
1435         /*
1436          * Make sure that the BD and SGE data is updated before updating the
1437          * producers since FW might read the BD/SGE right after the producer
1438          * is updated.
1439          * This is only applicable for weak-ordered memory model archs such
1440          * as IA-64. The following barrier is also mandatory since the FW
1441          * assumes all BDs have buffers.
1442          */
1443         wmb();
1444
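        /* Copy the producers structure to USTORM internal memory one 32-bit
         * word at a time - the FW reads the new producers from there. */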
1445         for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1446                 REG_WR(bp, BAR_USTRORM_INTMEM +
1447                        USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1448                        ((u32 *)&rx_prods)[i]);
1449
1450         mmiowb(); /* keep prod updates ordered */
1451
1452         DP(NETIF_MSG_RX_STATUS,
1453            "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
1454            fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1455 }
1456
1457 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1458 {
1459         struct bnx2x *bp = fp->bp;
1460         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1461         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1462         int rx_pkt = 0;
1463
1464 #ifdef BNX2X_STOP_ON_ERROR
1465         if (unlikely(bp->panic))
1466                 return 0;
1467 #endif
1468
1469         /* The CQ "next element" is the same size as a regular element,
1470            that's why it's OK here */
1471         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1472         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1473                 hw_comp_cons++;
1474
1475         bd_cons = fp->rx_bd_cons;
1476         bd_prod = fp->rx_bd_prod;
1477         bd_prod_fw = bd_prod;
1478         sw_comp_cons = fp->rx_comp_cons;
1479         sw_comp_prod = fp->rx_comp_prod;
1480
1481         /* Memory barrier necessary as speculative reads of the rx
1482          * buffer can be ahead of the index in the status block
1483          */
1484         rmb();
1485
1486         DP(NETIF_MSG_RX_STATUS,
1487            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1488            fp->index, hw_comp_cons, sw_comp_cons);
1489
1490         while (sw_comp_cons != hw_comp_cons) {
1491                 struct sw_rx_bd *rx_buf = NULL;
1492                 struct sk_buff *skb;
1493                 union eth_rx_cqe *cqe;
1494                 u8 cqe_fp_flags;
1495                 u16 len, pad;
1496
1497                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1498                 bd_prod = RX_BD(bd_prod);
1499                 bd_cons = RX_BD(bd_cons);
1500
1501                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1502                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1503
1504                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1505                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1506                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1507                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1508                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1509                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1510
1511                 /* is this a slowpath msg? */
1512                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1513                         bnx2x_sp_event(fp, cqe);
1514                         goto next_cqe;
1515
1516                 /* this is an rx packet */
1517                 } else {
1518                         rx_buf = &fp->rx_buf_ring[bd_cons];
1519                         skb = rx_buf->skb;
1520                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1521                         pad = cqe->fast_path_cqe.placement_offset;
1522
1523                         /* If CQE is marked both TPA_START and TPA_END
1524                            it is a non-TPA CQE */
1525                         if ((!fp->disable_tpa) &&
1526                             (TPA_TYPE(cqe_fp_flags) !=
1527                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1528                                 u16 queue = cqe->fast_path_cqe.queue_index;
1529
1530                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1531                                         DP(NETIF_MSG_RX_STATUS,
1532                                            "calling tpa_start on queue %d\n",
1533                                            queue);
1534
1535                                         bnx2x_tpa_start(fp, queue, skb,
1536                                                         bd_cons, bd_prod);
1537                                         goto next_rx;
1538                                 }
1539
1540                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1541                                         DP(NETIF_MSG_RX_STATUS,
1542                                            "calling tpa_stop on queue %d\n",
1543                                            queue);
1544
1545                                         if (!BNX2X_RX_SUM_FIX(cqe))
1546                                                 BNX2X_ERR("STOP on non-TCP "
1547                                                           "data\n");
1548
1549                                         /* This is the size of the linear data
1550                                            on this skb */
1551                                         len = le16_to_cpu(cqe->fast_path_cqe.
1552                                                                 len_on_bd);
1553                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1554                                                     len, cqe, comp_ring_cons);
1555 #ifdef BNX2X_STOP_ON_ERROR
1556                                         if (bp->panic)
1557                                                 return 0;
1558 #endif
1559
1560                                         bnx2x_update_sge_prod(fp,
1561                                                         &cqe->fast_path_cqe);
1562                                         goto next_cqe;
1563                                 }
1564                         }
1565
1566                         pci_dma_sync_single_for_device(bp->pdev,
1567                                         pci_unmap_addr(rx_buf, mapping),
1568                                                        pad + RX_COPY_THRESH,
1569                                                        PCI_DMA_FROMDEVICE);
1570                         prefetch(skb);
1571                         prefetch(((char *)(skb)) + 128);
1572
1573                         /* is this an error packet? */
1574                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1575                                 DP(NETIF_MSG_RX_ERR,
1576                                    "ERROR  flags %x  rx packet %u\n",
1577                                    cqe_fp_flags, sw_comp_cons);
1578                                 fp->eth_q_stats.rx_err_discard_pkt++;
1579                                 goto reuse_rx;
1580                         }
1581
1582                         /* Since we don't have a jumbo ring,
1583                          * copy small packets if mtu > 1500
1584                          */
1585                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1586                             (len <= RX_COPY_THRESH)) {
1587                                 struct sk_buff *new_skb;
1588
1589                                 new_skb = netdev_alloc_skb(bp->dev,
1590                                                            len + pad);
1591                                 if (new_skb == NULL) {
1592                                         DP(NETIF_MSG_RX_ERR,
1593                                            "ERROR  packet dropped "
1594                                            "because of alloc failure\n");
1595                                         fp->eth_q_stats.rx_skb_alloc_failed++;
1596                                         goto reuse_rx;
1597                                 }
1598
1599                                 /* aligned copy */
1600                                 skb_copy_from_linear_data_offset(skb, pad,
1601                                                     new_skb->data + pad, len);
1602                                 skb_reserve(new_skb, pad);
1603                                 skb_put(new_skb, len);
1604
1605                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1606
1607                                 skb = new_skb;
1608
1609                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1610                                 pci_unmap_single(bp->pdev,
1611                                         pci_unmap_addr(rx_buf, mapping),
1612                                                  bp->rx_buf_size,
1613                                                  PCI_DMA_FROMDEVICE);
1614                                 skb_reserve(skb, pad);
1615                                 skb_put(skb, len);
1616
1617                         } else {
1618                                 DP(NETIF_MSG_RX_ERR,
1619                                    "ERROR  packet dropped because "
1620                                    "of alloc failure\n");
1621                                 fp->eth_q_stats.rx_skb_alloc_failed++;
1622 reuse_rx:
1623                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1624                                 goto next_rx;
1625                         }
1626
1627                         skb->protocol = eth_type_trans(skb, bp->dev);
1628
1629                         skb->ip_summed = CHECKSUM_NONE;
1630                         if (bp->rx_csum) {
1631                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1632                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1633                                 else
1634                                         fp->eth_q_stats.hw_csum_err++;
1635                         }
1636                 }
1637
1638                 skb_record_rx_queue(skb, fp->index);
1639 #ifdef BCM_VLAN
1640                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1641                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1642                      PARSING_FLAGS_VLAN))
1643                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1644                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1645                 else
1646 #endif
1647                         netif_receive_skb(skb);
1648
1649
1650 next_rx:
1651                 rx_buf->skb = NULL;
1652
1653                 bd_cons = NEXT_RX_IDX(bd_cons);
1654                 bd_prod = NEXT_RX_IDX(bd_prod);
1655                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1656                 rx_pkt++;
1657 next_cqe:
1658                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1659                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1660
1661                 if (rx_pkt == budget)
1662                         break;
1663         } /* while */
1664
1665         fp->rx_bd_cons = bd_cons;
1666         fp->rx_bd_prod = bd_prod_fw;
1667         fp->rx_comp_cons = sw_comp_cons;
1668         fp->rx_comp_prod = sw_comp_prod;
1669
1670         /* Update producers */
1671         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1672                              fp->rx_sge_prod);
1673
1674         fp->rx_pkt += rx_pkt;
1675         fp->rx_calls++;
1676
1677         return rx_pkt;
1678 }
1679
1680 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1681 {
1682         struct bnx2x_fastpath *fp = fp_cookie;
1683         struct bnx2x *bp = fp->bp;
1684
1685         /* Return here if interrupt is disabled */
1686         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1687                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1688                 return IRQ_HANDLED;
1689         }
1690
1691         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1692            fp->index, fp->sb_id);
1693         bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1694
1695 #ifdef BNX2X_STOP_ON_ERROR
1696         if (unlikely(bp->panic))
1697                 return IRQ_HANDLED;
1698 #endif
1699         /* Handle Rx or Tx according to MSI-X vector */
1700         if (fp->is_rx_queue) {
1701                 prefetch(fp->rx_cons_sb);
1702                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1703
1704                 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1705
1706         } else {
1707                 prefetch(fp->tx_cons_sb);
1708                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1709
1710                 bnx2x_update_fpsb_idx(fp);
1711                 rmb();
1712                 bnx2x_tx_int(fp);
1713
1714                 /* Re-enable interrupts */
1715                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1716                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
1717                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1718                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
1719         }
1720
1721         return IRQ_HANDLED;
1722 }
1723
1724 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1725 {
1726         struct bnx2x *bp = netdev_priv(dev_instance);
1727         u16 status = bnx2x_ack_int(bp);
1728         u16 mask;
1729         int i;
1730
1731         /* Return here if interrupt is shared and it's not for us */
1732         if (unlikely(status == 0)) {
1733                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1734                 return IRQ_NONE;
1735         }
1736         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1737
1738         /* Return here if interrupt is disabled */
1739         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1740                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1741                 return IRQ_HANDLED;
1742         }
1743
1744 #ifdef BNX2X_STOP_ON_ERROR
1745         if (unlikely(bp->panic))
1746                 return IRQ_HANDLED;
1747 #endif
1748
1749         for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1750                 struct bnx2x_fastpath *fp = &bp->fp[i];
1751
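                /* Fastpath status block 'sb_id' maps to bit (sb_id + 1) of the
                 * interrupt status word; bit 0 is reserved for the default
                 * (slow path) status block, handled further below. */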
1752                 mask = 0x2 << fp->sb_id;
1753                 if (status & mask) {
1754                         /* Handle Rx or Tx according to SB id */
1755                         if (fp->is_rx_queue) {
1756                                 prefetch(fp->rx_cons_sb);
1757                                 prefetch(&fp->status_blk->u_status_block.
1758                                                         status_block_index);
1759
1760                                 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1761
1762                         } else {
1763                                 prefetch(fp->tx_cons_sb);
1764                                 prefetch(&fp->status_blk->c_status_block.
1765                                                         status_block_index);
1766
1767                                 bnx2x_update_fpsb_idx(fp);
1768                                 rmb();
1769                                 bnx2x_tx_int(fp);
1770
1771                                 /* Re-enable interrupts */
1772                                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1773                                              le16_to_cpu(fp->fp_u_idx),
1774                                              IGU_INT_NOP, 1);
1775                                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1776                                              le16_to_cpu(fp->fp_c_idx),
1777                                              IGU_INT_ENABLE, 1);
1778                         }
1779                         status &= ~mask;
1780                 }
1781         }
1782
1783
1784         if (unlikely(status & 0x1)) {
1785                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1786
1787                 status &= ~0x1;
1788                 if (!status)
1789                         return IRQ_HANDLED;
1790         }
1791
1792         if (status)
1793                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1794                    status);
1795
1796         return IRQ_HANDLED;
1797 }
1798
1799 /* end of fast path */
1800
1801 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1802
1803 /* Link */
1804
1805 /*
1806  * General service functions
1807  */
1808
1809 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1810 {
1811         u32 lock_status;
1812         u32 resource_bit = (1 << resource);
1813         int func = BP_FUNC(bp);
1814         u32 hw_lock_control_reg;
1815         int cnt;
1816
1817         /* Validating that the resource is within range */
1818         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1819                 DP(NETIF_MSG_HW,
1820                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1821                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1822                 return -EINVAL;
1823         }
1824
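        /* The per-function lock registers are spaced 8 bytes apart: functions
         * 0-5 start at MISC_REG_DRIVER_CONTROL_1, functions 6-7 continue from
         * MISC_REG_DRIVER_CONTROL_7. */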
1825         if (func <= 5) {
1826                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1827         } else {
1828                 hw_lock_control_reg =
1829                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1830         }
1831
1832         /* Validating that the resource is not already taken */
1833         lock_status = REG_RD(bp, hw_lock_control_reg);
1834         if (lock_status & resource_bit) {
1835                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1836                    lock_status, resource_bit);
1837                 return -EEXIST;
1838         }
1839
1840         /* Try for 5 seconds, polling every 5 ms */
1841         for (cnt = 0; cnt < 1000; cnt++) {
1842                 /* Try to acquire the lock */
1843                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1844                 lock_status = REG_RD(bp, hw_lock_control_reg);
1845                 if (lock_status & resource_bit)
1846                         return 0;
1847
1848                 msleep(5);
1849         }
1850         DP(NETIF_MSG_HW, "Timeout\n");
1851         return -EAGAIN;
1852 }
1853
1854 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1855 {
1856         u32 lock_status;
1857         u32 resource_bit = (1 << resource);
1858         int func = BP_FUNC(bp);
1859         u32 hw_lock_control_reg;
1860
1861         /* Validating that the resource is within range */
1862         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1863                 DP(NETIF_MSG_HW,
1864                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1865                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1866                 return -EINVAL;
1867         }
1868
1869         if (func <= 5) {
1870                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1871         } else {
1872                 hw_lock_control_reg =
1873                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1874         }
1875
1876         /* Validating that the resource is currently taken */
1877         lock_status = REG_RD(bp, hw_lock_control_reg);
1878         if (!(lock_status & resource_bit)) {
1879                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1880                    lock_status, resource_bit);
1881                 return -EFAULT;
1882         }
1883
1884         REG_WR(bp, hw_lock_control_reg, resource_bit);
1885         return 0;
1886 }
1887
1888 /* HW Lock for shared dual port PHYs */
1889 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1890 {
1891         mutex_lock(&bp->port.phy_mutex);
1892
1893         if (bp->port.need_hw_lock)
1894                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1895 }
1896
1897 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1898 {
1899         if (bp->port.need_hw_lock)
1900                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1901
1902         mutex_unlock(&bp->port.phy_mutex);
1903 }
1904
1905 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1906 {
1907         /* The GPIO should be swapped if swap register is set and active */
1908         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1909                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1910         int gpio_shift = gpio_num +
1911                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1912         u32 gpio_mask = (1 << gpio_shift);
1913         u32 gpio_reg;
1914         int value;
1915
1916         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1917                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1918                 return -EINVAL;
1919         }
1920
1921         /* read GPIO value */
1922         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1923
1924         /* get the requested pin value */
1925         if ((gpio_reg & gpio_mask) == gpio_mask)
1926                 value = 1;
1927         else
1928                 value = 0;
1929
1930         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
1931
1932         return value;
1933 }
1934
1935 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1936 {
1937         /* The GPIO should be swapped if swap register is set and active */
1938         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1939                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1940         int gpio_shift = gpio_num +
1941                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1942         u32 gpio_mask = (1 << gpio_shift);
1943         u32 gpio_reg;
1944
1945         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1946                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1947                 return -EINVAL;
1948         }
1949
1950         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1951         /* read GPIO and mask except the float bits */
1952         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1953
1954         switch (mode) {
1955         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1956                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1957                    gpio_num, gpio_shift);
1958                 /* clear FLOAT and set CLR */
1959                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1960                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1961                 break;
1962
1963         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1964                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1965                    gpio_num, gpio_shift);
1966                 /* clear FLOAT and set SET */
1967                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1968                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1969                 break;
1970
1971         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1972                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1973                    gpio_num, gpio_shift);
1974                 /* set FLOAT */
1975                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1976                 break;
1977
1978         default:
1979                 break;
1980         }
1981
1982         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1983         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1984
1985         return 0;
1986 }
1987
1988 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1989 {
1990         /* The GPIO should be swapped if swap register is set and active */
1991         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1992                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1993         int gpio_shift = gpio_num +
1994                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1995         u32 gpio_mask = (1 << gpio_shift);
1996         u32 gpio_reg;
1997
1998         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1999                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2000                 return -EINVAL;
2001         }
2002
2003         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2004         /* read GPIO int */
2005         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2006
2007         switch (mode) {
2008         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2009                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2010                                    "output low\n", gpio_num, gpio_shift);
2011                 /* clear SET and set CLR */
2012                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2013                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2014                 break;
2015
2016         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2017                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2018                                    "output high\n", gpio_num, gpio_shift);
2019                 /* clear CLR and set SET */
2020                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2021                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2022                 break;
2023
2024         default:
2025                 break;
2026         }
2027
2028         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2029         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2030
2031         return 0;
2032 }
2033
2034 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2035 {
2036         u32 spio_mask = (1 << spio_num);
2037         u32 spio_reg;
2038
2039         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2040             (spio_num > MISC_REGISTERS_SPIO_7)) {
2041                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2042                 return -EINVAL;
2043         }
2044
2045         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2046         /* read SPIO and mask except the float bits */
2047         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2048
2049         switch (mode) {
2050         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2051                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2052                 /* clear FLOAT and set CLR */
2053                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2054                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2055                 break;
2056
2057         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2058                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2059                 /* clear FLOAT and set SET */
2060                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2061                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2062                 break;
2063
2064         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2065                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2066                 /* set FLOAT */
2067                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2068                 break;
2069
2070         default:
2071                 break;
2072         }
2073
2074         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2075         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2076
2077         return 0;
2078 }
2079
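/* Translate the IEEE auto-negotiation pause advertisement bits into the
 * ethtool-style ADVERTISED_Pause/ADVERTISED_Asym_Pause flags kept in
 * bp->port.advertising.
 */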
2080 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2081 {
2082         switch (bp->link_vars.ieee_fc &
2083                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2084         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2085                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2086                                           ADVERTISED_Pause);
2087                 break;
2088
2089         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2090                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2091                                          ADVERTISED_Pause);
2092                 break;
2093
2094         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2095                 bp->port.advertising |= ADVERTISED_Asym_Pause;
2096                 break;
2097
2098         default:
2099                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2100                                           ADVERTISED_Pause);
2101                 break;
2102         }
2103 }
2104
2105 static void bnx2x_link_report(struct bnx2x *bp)
2106 {
2107         if (bp->link_vars.link_up) {
2108                 if (bp->state == BNX2X_STATE_OPEN)
2109                         netif_carrier_on(bp->dev);
2110                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2111
2112                 printk("%d Mbps ", bp->link_vars.line_speed);
2113
2114                 if (bp->link_vars.duplex == DUPLEX_FULL)
2115                         printk("full duplex");
2116                 else
2117                         printk("half duplex");
2118
2119                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2120                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2121                                 printk(", receive ");
2122                                 if (bp->link_vars.flow_ctrl &
2123                                     BNX2X_FLOW_CTRL_TX)
2124                                         printk("& transmit ");
2125                         } else {
2126                                 printk(", transmit ");
2127                         }
2128                         printk("flow control ON");
2129                 }
2130                 printk("\n");
2131
2132         } else { /* link_down */
2133                 netif_carrier_off(bp->dev);
2134                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2135         }
2136 }
2137
2138 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2139 {
2140         if (!BP_NOMCP(bp)) {
2141                 u8 rc;
2142
2143                 /* Initialize link parameters structure variables */
2144                 /* It is recommended to turn off RX FC for jumbo frames
2145                    for better performance */
2146                 if (IS_E1HMF(bp))
2147                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2148                 else if (bp->dev->mtu > 5000)
2149                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2150                 else
2151                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2152
2153                 bnx2x_acquire_phy_lock(bp);
2154
2155                 if (load_mode == LOAD_DIAG)
2156                         bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2157
2158                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2159
2160                 bnx2x_release_phy_lock(bp);
2161
2162                 bnx2x_calc_fc_adv(bp);
2163
2164                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2165                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2166                         bnx2x_link_report(bp);
2167                 }
2168
2169                 return rc;
2170         }
2171         BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2172         return -EINVAL;
2173 }
2174
2175 static void bnx2x_link_set(struct bnx2x *bp)
2176 {
2177         if (!BP_NOMCP(bp)) {
2178                 bnx2x_acquire_phy_lock(bp);
2179                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2180                 bnx2x_release_phy_lock(bp);
2181
2182                 bnx2x_calc_fc_adv(bp);
2183         } else
2184                 BNX2X_ERR("Bootcode is missing - can not set link\n");
2185 }
2186
2187 static void bnx2x__link_reset(struct bnx2x *bp)
2188 {
2189         if (!BP_NOMCP(bp)) {
2190                 bnx2x_acquire_phy_lock(bp);
2191                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2192                 bnx2x_release_phy_lock(bp);
2193         } else
2194                 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2195 }
2196
2197 static u8 bnx2x_link_test(struct bnx2x *bp)
2198 {
2199         u8 rc;
2200
2201         bnx2x_acquire_phy_lock(bp);
2202         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2203         bnx2x_release_phy_lock(bp);
2204
2205         return rc;
2206 }
2207
2208 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2209 {
2210         u32 r_param = bp->link_vars.line_speed / 8;
2211         u32 fair_periodic_timeout_usec;
2212         u32 t_fair;
2213
2214         memset(&(bp->cmng.rs_vars), 0,
2215                sizeof(struct rate_shaping_vars_per_port));
2216         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2217
2218         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2219         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2220
2221         /* this is the threshold below which no timer arming will occur.
2222            The 1.25 coefficient makes the threshold a little bigger
2223            than the real time, to compensate for timer inaccuracy */
2224         bp->cmng.rs_vars.rs_threshold =
2225                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2226
2227         /* resolution of fairness timer */
2228         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2229         /* for 10G it is 1000 usec; for 1G it is 10000 usec */
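        /* i.e. t_fair [usec] = T_FAIR_COEF / line_speed, assuming T_FAIR_COEF
         * is expressed in usec * Mbps and line_speed in Mbps. */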
2230         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2231
2232         /* this is the threshold below which we won't arm the timer anymore */
2233         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2234
2235         /* we multiply by 1e3/8 to get bytes/msec.
2236            We don't want the credits to exceed
2237            t_fair*FAIR_MEM (the algorithm resolution) */
2238         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2239         /* since each tick is 4 usec */
2240         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2241 }
2242
2243 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2244 {
2245         struct rate_shaping_vars_per_vn m_rs_vn;
2246         struct fairness_vars_per_vn m_fair_vn;
2247         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2248         u16 vn_min_rate, vn_max_rate;
2249         int i;
2250
2251         /* If function is hidden - set min and max to zero */
2252         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2253                 vn_min_rate = 0;
2254                 vn_max_rate = 0;
2255
2256         } else {
2257                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2258                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2259                 /* If fairness is enabled (not all min rates are zero) and
2260                    the current min rate is zero - set it to 1.
2261                    This is a requirement of the algorithm. */
2262                 if (bp->vn_weight_sum && (vn_min_rate == 0))
2263                         vn_min_rate = DEF_MIN_RATE;
2264                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2265                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2266         }
2267
2268         DP(NETIF_MSG_IFUP,
2269            "func %d: vn_min_rate=%d  vn_max_rate=%d  vn_weight_sum=%d\n",
2270            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2271
2272         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2273         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2274
2275         /* global vn counter - maximal Mbps for this vn */
2276         m_rs_vn.vn_counter.rate = vn_max_rate;
2277
2278         /* quota - number of bytes transmitted in this period */
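        /* the rate is in Mbps, i.e. bits per usec, so
         * rate * RS_PERIODIC_TIMEOUT_USEC / 8 gives the quota in bytes */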
2279         m_rs_vn.vn_counter.quota =
2280                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2281
2282         if (bp->vn_weight_sum) {
2283                 /* credit for each period of the fairness algorithm:
2284                    number of bytes in T_FAIR (the vn's share of the port rate).
2285                    vn_weight_sum should not be larger than 10000, thus
2286                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2287                    than zero */
2288                 m_fair_vn.vn_credit_delta =
2289                         max((u32)(vn_min_rate * (T_FAIR_COEF /
2290                                                  (8 * bp->vn_weight_sum))),
2291                             (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2292                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2293                    m_fair_vn.vn_credit_delta);
2294         }
2295
2296         /* Store it to internal memory */
2297         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2298                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2299                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2300                        ((u32 *)(&m_rs_vn))[i]);
2301
2302         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2303                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2304                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2305                        ((u32 *)(&m_fair_vn))[i]);
2306 }
2307
2308
2309 /* This function is called upon link interrupt */
2310 static void bnx2x_link_attn(struct bnx2x *bp)
2311 {
2312         /* Make sure that we are synced with the current statistics */
2313         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2314
2315         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2316
2317         if (bp->link_vars.link_up) {
2318
2319                 /* dropless flow control */
2320                 if (CHIP_IS_E1H(bp)) {
2321                         int port = BP_PORT(bp);
2322                         u32 pause_enabled = 0;
2323
2324                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2325                                 pause_enabled = 1;
2326
2327                         REG_WR(bp, BAR_USTRORM_INTMEM +
2328                                USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2329                                pause_enabled);
2330                 }
2331
2332                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2333                         struct host_port_stats *pstats;
2334
2335                         pstats = bnx2x_sp(bp, port_stats);
2336                         /* reset old bmac stats */
2337                         memset(&(pstats->mac_stx[0]), 0,
2338                                sizeof(struct mac_stx));
2339                 }
2340                 if ((bp->state == BNX2X_STATE_OPEN) ||
2341                     (bp->state == BNX2X_STATE_DISABLED))
2342                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2343         }
2344
2345         /* indicate link status */
2346         bnx2x_link_report(bp);
2347
2348         if (IS_E1HMF(bp)) {
2349                 int port = BP_PORT(bp);
2350                 int func;
2351                 int vn;
2352
2353                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2354                         if (vn == BP_E1HVN(bp))
2355                                 continue;
2356
2357                         func = ((vn << 1) | port);
2358
2359                         /* Set the attention towards other drivers
2360                            on the same port */
2361                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2362                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2363                 }
2364
2365                 if (bp->link_vars.link_up) {
2366                         int i;
2367
2368                         /* Init rate shaping and fairness contexts */
2369                         bnx2x_init_port_minmax(bp);
2370
2371                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2372                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2373
2374                         /* Store it to internal memory */
2375                         for (i = 0;
2376                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
2377                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2378                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2379                                        ((u32 *)(&bp->cmng))[i]);
2380                 }
2381         }
2382 }
2383
2384 static void bnx2x__link_status_update(struct bnx2x *bp)
2385 {
2386         if (bp->state != BNX2X_STATE_OPEN)
2387                 return;
2388
2389         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2390
2391         if (bp->link_vars.link_up)
2392                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2393         else
2394                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2395
2396         /* indicate link status */
2397         bnx2x_link_report(bp);
2398 }
2399
2400 static void bnx2x_pmf_update(struct bnx2x *bp)
2401 {
2402         int port = BP_PORT(bp);
2403         u32 val;
2404
2405         bp->port.pmf = 1;
2406         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2407
2408         /* enable nig attention */
2409         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2410         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2411         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2412
2413         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2414 }
2415
2416 /* end of Link */
2417
2418 /* slow path */
2419
2420 /*
2421  * General service functions
2422  */
2423
2424 /* the slow path queue is odd since completions arrive on the fastpath ring */
2425 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2426                          u32 data_hi, u32 data_lo, int common)
2427 {
2428         int func = BP_FUNC(bp);
2429
2430         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2431            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2432            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2433            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2434            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2435
2436 #ifdef BNX2X_STOP_ON_ERROR
2437         if (unlikely(bp->panic))
2438                 return -EIO;
2439 #endif
2440
2441         spin_lock_bh(&bp->spq_lock);
2442
2443         if (!bp->spq_left) {
2444                 BNX2X_ERR("BUG! SPQ ring full!\n");
2445                 spin_unlock_bh(&bp->spq_lock);
2446                 bnx2x_panic();
2447                 return -EBUSY;
2448         }
2449
2450         /* The CID needs the port number encoded in it */
2451         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2452                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2453                                      HW_CID(bp, cid)));
2454         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2455         if (common)
2456                 bp->spq_prod_bd->hdr.type |=
2457                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2458
2459         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2460         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2461
2462         bp->spq_left--;
2463
2464         if (bp->spq_prod_bd == bp->spq_last_bd) {
2465                 bp->spq_prod_bd = bp->spq;
2466                 bp->spq_prod_idx = 0;
2467                 DP(NETIF_MSG_TIMER, "end of spq\n");
2468
2469         } else {
2470                 bp->spq_prod_bd++;
2471                 bp->spq_prod_idx++;
2472         }
2473
2474         /* Make sure that BD data is updated before writing the producer */
2475         wmb();
2476
2477         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2478                bp->spq_prod_idx);
2479
2480         mmiowb();
2481
2482         spin_unlock_bh(&bp->spq_lock);
2483         return 0;
2484 }
2485
2486 /* acquire split MCP access lock register */
2487 static int bnx2x_acquire_alr(struct bnx2x *bp)
2488 {
2489         u32 i, j, val;
2490         int rc = 0;
2491
2492         might_sleep();
2493         i = 100;
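        /* Request the lock by setting bit 31 and poll (up to ~5 seconds in
         * 5 ms steps) until the bit reads back as set, i.e. the lock is held. */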
2494         for (j = 0; j < i*10; j++) {
2495                 val = (1UL << 31);
2496                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2497                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2498                 if (val & (1L << 31))
2499                         break;
2500
2501                 msleep(5);
2502         }
2503         if (!(val & (1L << 31))) {
2504                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2505                 rc = -EBUSY;
2506         }
2507
2508         return rc;
2509 }
2510
2511 /* release split MCP access lock register */
2512 static void bnx2x_release_alr(struct bnx2x *bp)
2513 {
2514         u32 val = 0;
2515
2516         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2517 }
2518
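/* Returns a bitmask of which default status block indices have changed:
 * 1 - attention bits, 2 - CSTORM, 4 - USTORM, 8 - XSTORM, 16 - TSTORM.
 */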
2519 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2520 {
2521         struct host_def_status_block *def_sb = bp->def_status_blk;
2522         u16 rc = 0;
2523
2524         barrier(); /* status block is written to by the chip */
2525         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2526                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2527                 rc |= 1;
2528         }
2529         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2530                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2531                 rc |= 2;
2532         }
2533         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2534                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2535                 rc |= 4;
2536         }
2537         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2538                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2539                 rc |= 8;
2540         }
2541         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2542                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2543                 rc |= 16;
2544         }
2545         return rc;
2546 }
2547
2548 /*
2549  * slow path service functions
2550  */
2551
2552 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2553 {
2554         int port = BP_PORT(bp);
2555         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2556                        COMMAND_REG_ATTN_BITS_SET);
2557         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2558                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2559         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2560                                        NIG_REG_MASK_INTERRUPT_PORT0;
2561         u32 aeu_mask;
2562         u32 nig_mask = 0;
2563
2564         if (bp->attn_state & asserted)
2565                 BNX2X_ERR("IGU ERROR\n");
2566
2567         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2568         aeu_mask = REG_RD(bp, aeu_addr);
2569
2570         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2571            aeu_mask, asserted);
2572         aeu_mask &= ~(asserted & 0xff);
2573         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2574
2575         REG_WR(bp, aeu_addr, aeu_mask);
2576         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2577
2578         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2579         bp->attn_state |= asserted;
2580         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2581
2582         if (asserted & ATTN_HARD_WIRED_MASK) {
2583                 if (asserted & ATTN_NIG_FOR_FUNC) {
2584
2585                         bnx2x_acquire_phy_lock(bp);
2586
2587                         /* save nig interrupt mask */
2588                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2589                         REG_WR(bp, nig_int_mask_addr, 0);
2590
2591                         bnx2x_link_attn(bp);
2592
2593                         /* handle unicore attn? */
2594                 }
2595                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2596                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2597
2598                 if (asserted & GPIO_2_FUNC)
2599                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2600
2601                 if (asserted & GPIO_3_FUNC)
2602                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2603
2604                 if (asserted & GPIO_4_FUNC)
2605                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2606
2607                 if (port == 0) {
2608                         if (asserted & ATTN_GENERAL_ATTN_1) {
2609                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2610                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2611                         }
2612                         if (asserted & ATTN_GENERAL_ATTN_2) {
2613                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2614                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2615                         }
2616                         if (asserted & ATTN_GENERAL_ATTN_3) {
2617                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2618                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2619                         }
2620                 } else {
2621                         if (asserted & ATTN_GENERAL_ATTN_4) {
2622                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2623                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2624                         }
2625                         if (asserted & ATTN_GENERAL_ATTN_5) {
2626                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2627                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2628                         }
2629                         if (asserted & ATTN_GENERAL_ATTN_6) {
2630                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2631                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2632                         }
2633                 }
2634
2635         } /* if hardwired */
2636
2637         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2638            asserted, hc_addr);
2639         REG_WR(bp, hc_addr, asserted);
2640
2641         /* now set back the mask */
2642         if (asserted & ATTN_NIG_FOR_FUNC) {
2643                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2644                 bnx2x_release_phy_lock(bp);
2645         }
2646 }
2647
2648 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2649 {
2650         int port = BP_PORT(bp);
2651
2652         /* mark the failure */
2653         bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2654         bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2655         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2656                  bp->link_params.ext_phy_config);
2657
2658         /* log the failure */
2659         printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2660                " the driver to shutdown the card to prevent permanent"
2661                " damage.  Please contact Dell Support for assistance\n",
2662                bp->dev->name);
2663 }
2664 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2665 {
2666         int port = BP_PORT(bp);
2667         int reg_offset;
2668         u32 val, swap_val, swap_override;
2669
2670         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2671                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2672
2673         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2674
2675                 val = REG_RD(bp, reg_offset);
2676                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2677                 REG_WR(bp, reg_offset, val);
2678
2679                 BNX2X_ERR("SPIO5 hw attention\n");
2680
2681                 /* Fan failure attention */
2682                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2683                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2684                         /* Low power mode is controlled by GPIO 2 */
2685                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2686                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2687                         /* The PHY reset is controlled by GPIO 1 */
2688                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2689                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2690                         break;
2691
2692                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2693                         /* The PHY reset is controlled by GPIO 1 */
2694                         /* fake the port number to cancel the swap done in
2695                            set_gpio() */
2696                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2697                         swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2698                         port = (swap_val && swap_override) ^ 1;
2699                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2700                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2701                         break;
2702
2703                 default:
2704                         break;
2705                 }
2706                 bnx2x_fan_failure(bp);
2707         }
2708
2709         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2710                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2711                 bnx2x_acquire_phy_lock(bp);
2712                 bnx2x_handle_module_detect_int(&bp->link_params);
2713                 bnx2x_release_phy_lock(bp);
2714         }
2715
2716         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2717
2718                 val = REG_RD(bp, reg_offset);
2719                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2720                 REG_WR(bp, reg_offset, val);
2721
2722                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2723                           (attn & HW_INTERRUT_ASSERT_SET_0));
2724                 bnx2x_panic();
2725         }
2726 }
2727
2728 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2729 {
2730         u32 val;
2731
2732         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2733
2734                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2735                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2736                 /* DORQ discard attention */
2737                 if (val & 0x2)
2738                         BNX2X_ERR("FATAL error from DORQ\n");
2739         }
2740
2741         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2742
2743                 int port = BP_PORT(bp);
2744                 int reg_offset;
2745
2746                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2747                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2748
2749                 val = REG_RD(bp, reg_offset);
2750                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2751                 REG_WR(bp, reg_offset, val);
2752
2753                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2754                           (attn & HW_INTERRUT_ASSERT_SET_1));
2755                 bnx2x_panic();
2756         }
2757 }
2758
2759 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2760 {
2761         u32 val;
2762
2763         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2764
2765                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2766                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2767                 /* CFC error attention */
2768                 if (val & 0x2)
2769                         BNX2X_ERR("FATAL error from CFC\n");
2770         }
2771
2772         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2773
2774                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2775                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2776                 /* RQ_USDMDP_FIFO_OVERFLOW */
2777                 if (val & 0x18000)
2778                         BNX2X_ERR("FATAL error from PXP\n");
2779         }
2780
2781         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2782
2783                 int port = BP_PORT(bp);
2784                 int reg_offset;
2785
2786                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2787                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2788
2789                 val = REG_RD(bp, reg_offset);
2790                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2791                 REG_WR(bp, reg_offset, val);
2792
2793                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2794                           (attn & HW_INTERRUT_ASSERT_SET_2));
2795                 bnx2x_panic();
2796         }
2797 }
2798
2799 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2800 {
2801         u32 val;
2802
2803         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2804
2805                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2806                         int func = BP_FUNC(bp);
2807
2808                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2809                         bnx2x__link_status_update(bp);
2810                         if (SHMEM_RD(bp, func_mb[func].drv_status) &
2811                                                         DRV_STATUS_PMF)
2812                                 bnx2x_pmf_update(bp);
2813
2814                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2815
2816                         BNX2X_ERR("MC assert!\n");
2817                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2818                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2819                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2820                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2821                         bnx2x_panic();
2822
2823                 } else if (attn & BNX2X_MCP_ASSERT) {
2824
2825                         BNX2X_ERR("MCP assert!\n");
2826                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2827                         bnx2x_fw_dump(bp);
2828
2829                 } else
2830                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2831         }
2832
2833         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2834                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2835                 if (attn & BNX2X_GRC_TIMEOUT) {
2836                         val = CHIP_IS_E1H(bp) ?
2837                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2838                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2839                 }
2840                 if (attn & BNX2X_GRC_RSV) {
2841                         val = CHIP_IS_E1H(bp) ?
2842                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2843                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2844                 }
2845                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2846         }
2847 }
2848
2849 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2850 {
2851         struct attn_route attn;
2852         struct attn_route group_mask;
2853         int port = BP_PORT(bp);
2854         int index;
2855         u32 reg_addr;
2856         u32 val;
2857         u32 aeu_mask;
2858
2859         /* need to take HW lock because MCP or other port might also
2860            try to handle this event */
2861         bnx2x_acquire_alr(bp);
2862
2863         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2864         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2865         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2866         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2867         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2868            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2869
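             /* each deasserted bit selects one dynamic attention group; the
              * group mask picks which AEU signals are handled for that group */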
2870         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2871                 if (deasserted & (1 << index)) {
2872                         group_mask = bp->attn_group[index];
2873
2874                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2875                            index, group_mask.sig[0], group_mask.sig[1],
2876                            group_mask.sig[2], group_mask.sig[3]);
2877
2878                         bnx2x_attn_int_deasserted3(bp,
2879                                         attn.sig[3] & group_mask.sig[3]);
2880                         bnx2x_attn_int_deasserted1(bp,
2881                                         attn.sig[1] & group_mask.sig[1]);
2882                         bnx2x_attn_int_deasserted2(bp,
2883                                         attn.sig[2] & group_mask.sig[2]);
2884                         bnx2x_attn_int_deasserted0(bp,
2885                                         attn.sig[0] & group_mask.sig[0]);
2886
2887                         if ((attn.sig[0] & group_mask.sig[0] &
2888                                                 HW_PRTY_ASSERT_SET_0) ||
2889                             (attn.sig[1] & group_mask.sig[1] &
2890                                                 HW_PRTY_ASSERT_SET_1) ||
2891                             (attn.sig[2] & group_mask.sig[2] &
2892                                                 HW_PRTY_ASSERT_SET_2))
2893                                 BNX2X_ERR("FATAL HW block parity attention\n");
2894                 }
2895         }
2896
2897         bnx2x_release_alr(bp);
2898
2899         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2900
2901         val = ~deasserted;
2902         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2903            val, reg_addr);
2904         REG_WR(bp, reg_addr, val);
2905
2906         if (~bp->attn_state & deasserted)
2907                 BNX2X_ERR("IGU ERROR\n");
2908
2909         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2910                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2911
2912         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2913         aeu_mask = REG_RD(bp, reg_addr);
2914
2915         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2916            aeu_mask, deasserted);
2917         aeu_mask |= (deasserted & 0xff);
2918         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2919
2920         REG_WR(bp, reg_addr, aeu_mask);
2921         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2922
2923         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2924         bp->attn_state &= ~deasserted;
2925         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2926 }
2927
2928 static void bnx2x_attn_int(struct bnx2x *bp)
2929 {
2930         /* read local copy of bits */
2931         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2932                                                                 attn_bits);
2933         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2934                                                                 attn_bits_ack);
2935         u32 attn_state = bp->attn_state;
2936
2937         /* look for changed bits */
2938         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2939         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
2940
2941         DP(NETIF_MSG_HW,
2942            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2943            attn_bits, attn_ack, asserted, deasserted);
2944
2945         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2946                 BNX2X_ERR("BAD attention state\n");
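             /* equal attn_bits and attn_ack combined with a mismatching
              * attn_state means an asserted/deasserted transition was missed */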
2947
2948         /* handle bits that were raised */
2949         if (asserted)
2950                 bnx2x_attn_int_asserted(bp, asserted);
2951
2952         if (deasserted)
2953                 bnx2x_attn_int_deasserted(bp, deasserted);
2954 }
2955
2956 static void bnx2x_sp_task(struct work_struct *work)
2957 {
2958         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2959         u16 status;
2960
2961
2962         /* Return here if interrupt is disabled */
2963         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2964                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2965                 return;
2966         }
2967
2968         status = bnx2x_update_dsb_idx(bp);
2969 /*      if (status == 0)                                     */
2970 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2971
2972         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2973
2974         /* HW attentions */
2975         if (status & 0x1)
2976                 bnx2x_attn_int(bp);
2977
2978         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2979                      IGU_INT_NOP, 1);
2980         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2981                      IGU_INT_NOP, 1);
2982         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2983                      IGU_INT_NOP, 1);
2984         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2985                      IGU_INT_NOP, 1);
2986         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2987                      IGU_INT_ENABLE, 1);
2988
2989 }
2990
2991 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2992 {
2993         struct net_device *dev = dev_instance;
2994         struct bnx2x *bp = netdev_priv(dev);
2995
2996         /* Return here if interrupt is disabled */
2997         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2998                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2999                 return IRQ_HANDLED;
3000         }
3001
3002         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
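             /* keep the slowpath status block masked until the queued work has
              * run; bnx2x_sp_task() re-enables it with its final ack */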
3003
3004 #ifdef BNX2X_STOP_ON_ERROR
3005         if (unlikely(bp->panic))
3006                 return IRQ_HANDLED;
3007 #endif
3008
3009         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3010
3011         return IRQ_HANDLED;
3012 }
3013
3014 /* end of slow path */
3015
3016 /* Statistics */
3017
3018 /****************************************************************************
3019 * Macros
3020 ****************************************************************************/
3021
3022 /* sum[hi:lo] += add[hi:lo] */
3023 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3024         do { \
3025                 s_lo += a_lo; \
3026                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3027         } while (0)
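     /* a carry out of the low word is detected via unsigned wrap-around:
      * after s_lo += a_lo, (s_lo < a_lo) holds iff the 32-bit add overflowed */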
3028
3029 /* difference = minuend - subtrahend */
3030 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3031         do { \
3032                 if (m_lo < s_lo) { \
3033                         /* underflow */ \
3034                         d_hi = m_hi - s_hi; \
3035                         if (d_hi > 0) { \
3036                                 /* we can 'loan' 1 */ \
3037                                 d_hi--; \
3038                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3039                         } else { \
3040                                 /* m_hi <= s_hi */ \
3041                                 d_hi = 0; \
3042                                 d_lo = 0; \
3043                         } \
3044                 } else { \
3045                         /* m_lo >= s_lo */ \
3046                         if (m_hi < s_hi) { \
3047                                 d_hi = 0; \
3048                                 d_lo = 0; \
3049                         } else { \
3050                                 /* m_hi >= s_hi */ \
3051                                 d_hi = m_hi - s_hi; \
3052                                 d_lo = m_lo - s_lo; \
3053                         } \
3054                 } \
3055         } while (0)
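     /* when the subtrahend is larger than the minuend the difference is
      * clamped to zero; the HW/FW counters fed to this macro are expected
      * to only grow between reads */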
3056
3057 #define UPDATE_STAT64(s, t) \
3058         do { \
3059                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3060                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3061                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3062                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3063                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3064                        pstats->mac_stx[1].t##_lo, diff.lo); \
3065         } while (0)
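     /* mac_stx[0] keeps the last raw snapshot of the MAC counter while
      * mac_stx[1] accumulates the deltas, so the totals are not lost if
      * the MAC counter is reset */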
3066
3067 #define UPDATE_STAT64_NIG(s, t) \
3068         do { \
3069                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3070                         diff.lo, new->s##_lo, old->s##_lo); \
3071                 ADD_64(estats->t##_hi, diff.hi, \
3072                        estats->t##_lo, diff.lo); \
3073         } while (0)
3074
3075 /* sum[hi:lo] += add */
3076 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3077         do { \
3078                 s_lo += a; \
3079                 s_hi += (s_lo < a) ? 1 : 0; \
3080         } while (0)
3081
3082 #define UPDATE_EXTEND_STAT(s) \
3083         do { \
3084                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3085                               pstats->mac_stx[1].s##_lo, \
3086                               new->s); \
3087         } while (0)
3088
3089 #define UPDATE_EXTEND_TSTAT(s, t) \
3090         do { \
3091                 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3092                 old_tclient->s = tclient->s; \
3093                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3094         } while (0)
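     /* the 32-bit modular subtraction yields the correct delta even if the
      * firmware counter wrapped once since the previous read */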
3095
3096 #define UPDATE_EXTEND_USTAT(s, t) \
3097         do { \
3098                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3099                 old_uclient->s = uclient->s; \
3100                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3101         } while (0)
3102
3103 #define UPDATE_EXTEND_XSTAT(s, t) \
3104         do { \
3105                 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3106                 old_xclient->s = xclient->s; \
3107                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3108         } while (0)
3109
3110 /* minuend -= subtrahend */
3111 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3112         do { \
3113                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3114         } while (0)
3115
3116 /* minuend[hi:lo] -= subtrahend */
3117 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3118         do { \
3119                 SUB_64(m_hi, 0, m_lo, s); \
3120         } while (0)
3121
3122 #define SUB_EXTEND_USTAT(s, t) \
3123         do { \
3124                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3125                 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3126         } while (0)
3127
3128 /*
3129  * General service functions
3130  */
3131
3132 static inline long bnx2x_hilo(u32 *hiref)
3133 {
3134         u32 lo = *(hiref + 1);
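             /* 64-bit stats are stored as {hi, lo} pairs of u32; hiref points
              * at the hi word and the lo word follows it */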
3135 #if (BITS_PER_LONG == 64)
3136         u32 hi = *hiref;
3137
3138         return HILO_U64(hi, lo);
3139 #else
3140         return lo;
3141 #endif
3142 }
3143
3144 /*
3145  * Init service functions
3146  */
3147
3148 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3149 {
3150         if (!bp->stats_pending) {
3151                 struct eth_query_ramrod_data ramrod_data = {0};
3152                 int i, rc;
3153
3154                 ramrod_data.drv_counter = bp->stats_counter++;
3155                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
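                     /* build a bitmask of the client IDs whose per-client
                      * statistics the FW should collect */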
3156                 for_each_queue(bp, i)
3157                         ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3158
3159                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3160                                    ((u32 *)&ramrod_data)[1],
3161                                    ((u32 *)&ramrod_data)[0], 0);
3162                 if (rc == 0) {
3163                         /* stats ramrod has its own slot on the spq */
3164                         bp->spq_left++;
3165                         bp->stats_pending = 1;
3166                 }
3167         }
3168 }
3169
3170 static void bnx2x_stats_init(struct bnx2x *bp)
3171 {
3172         int port = BP_PORT(bp);
3173         int i;
3174
3175         bp->stats_pending = 0;
3176         bp->executer_idx = 0;
3177         bp->stats_counter = 0;
3178
3179         /* port stats */
3180         if (!BP_NOMCP(bp))
3181                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3182         else
3183                 bp->port.port_stx = 0;
3184         DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3185
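             /* capture the current NIG counters as the baseline; later updates
              * only account for the delta accumulated while the driver is up */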
3186         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3187         bp->port.old_nig_stats.brb_discard =
3188                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3189         bp->port.old_nig_stats.brb_truncate =
3190                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3191         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3192                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3193         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3194                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3195
3196         /* function stats */
3197         for_each_queue(bp, i) {
3198                 struct bnx2x_fastpath *fp = &bp->fp[i];
3199
3200                 memset(&fp->old_tclient, 0,
3201                        sizeof(struct tstorm_per_client_stats));
3202                 memset(&fp->old_uclient, 0,
3203                        sizeof(struct ustorm_per_client_stats));
3204                 memset(&fp->old_xclient, 0,
3205                        sizeof(struct xstorm_per_client_stats));
3206                 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3207         }
3208
3209         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3210         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3211
3212         bp->stats_state = STATS_STATE_DISABLED;
3213         if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3214                 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3215 }
3216
3217 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3218 {
3219         struct dmae_command *dmae = &bp->stats_dmae;
3220         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3221
3222         *stats_comp = DMAE_COMP_VAL;
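             /* no statistics DMAE on slow (emulation/FPGA) chip revisions */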
3223         if (CHIP_REV_IS_SLOW(bp))
3224                 return;
3225
3226         /* loader */
3227         if (bp->executer_idx) {
3228                 int loader_idx = PMF_DMAE_C(bp);
3229
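                     /* the loader command copies the first queued command from
                      * host memory into the following DMAE command memory slot
                      * and, on completion, writes that slot's GO register to
                      * start it */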
3230                 memset(dmae, 0, sizeof(struct dmae_command));
3231
3232                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3233                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3234                                 DMAE_CMD_DST_RESET |
3235 #ifdef __BIG_ENDIAN
3236                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3237 #else
3238                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3239 #endif
3240                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3241                                                DMAE_CMD_PORT_0) |
3242                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3243                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3244                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3245                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3246                                      sizeof(struct dmae_command) *
3247                                      (loader_idx + 1)) >> 2;
3248                 dmae->dst_addr_hi = 0;
3249                 dmae->len = sizeof(struct dmae_command) >> 2;
3250                 if (CHIP_IS_E1(bp))
3251                         dmae->len--;
3252                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3253                 dmae->comp_addr_hi = 0;
3254                 dmae->comp_val = 1;
3255
3256                 *stats_comp = 0;
3257                 bnx2x_post_dmae(bp, dmae, loader_idx);
3258
3259         } else if (bp->func_stx) {
3260                 *stats_comp = 0;
3261                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3262         }
3263 }
3264
3265 static int bnx2x_stats_comp(struct bnx2x *bp)
3266 {
3267         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3268         int cnt = 10;
3269
3270         might_sleep();
3271         while (*stats_comp != DMAE_COMP_VAL) {
3272                 if (!cnt) {
3273                         BNX2X_ERR("timeout waiting for stats to finish\n");
3274                         break;
3275                 }
3276                 cnt--;
3277                 msleep(1);
3278         }
3279         return 1;
3280 }
3281
3282 /*
3283  * Statistics service functions
3284  */
3285
3286 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3287 {
3288         struct dmae_command *dmae;
3289         u32 opcode;
3290         int loader_idx = PMF_DMAE_C(bp);
3291         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3292
3293         /* sanity */
3294         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3295                 BNX2X_ERR("BUG!\n");
3296                 return;
3297         }
3298
3299         bp->executer_idx = 0;
3300
3301         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3302                   DMAE_CMD_C_ENABLE |
3303                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3304 #ifdef __BIG_ENDIAN
3305                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3306 #else
3307                   DMAE_CMD_ENDIANITY_DW_SWAP |
3308 #endif
3309                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3310                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3311
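             /* the port stats block exceeds a single DMAE read
              * (DMAE_LEN32_RD_MAX dwords), so it is fetched in two commands */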
3312         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3313         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3314         dmae->src_addr_lo = bp->port.port_stx >> 2;
3315         dmae->src_addr_hi = 0;
3316         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3317         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3318         dmae->len = DMAE_LEN32_RD_MAX;
3319         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3320         dmae->comp_addr_hi = 0;
3321         dmae->comp_val = 1;
3322
3323         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3324         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3325         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3326         dmae->src_addr_hi = 0;
3327         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3328                                    DMAE_LEN32_RD_MAX * 4);
3329         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3330                                    DMAE_LEN32_RD_MAX * 4);
3331         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3332         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3333         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3334         dmae->comp_val = DMAE_COMP_VAL;
3335
3336         *stats_comp = 0;
3337         bnx2x_hw_stats_post(bp);
3338         bnx2x_stats_comp(bp);
3339 }
3340
3341 static void bnx2x_port_stats_init(struct bnx2x *bp)
3342 {
3343         struct dmae_command *dmae;
3344         int port = BP_PORT(bp);
3345         int vn = BP_E1HVN(bp);
3346         u32 opcode;
3347         int loader_idx = PMF_DMAE_C(bp);
3348         u32 mac_addr;
3349         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3350
3351         /* sanity */
3352         if (!bp->link_vars.link_up || !bp->port.pmf) {
3353                 BNX2X_ERR("BUG!\n");
3354                 return;
3355         }
3356
3357         bp->executer_idx = 0;
3358
3359         /* MCP */
3360         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3361                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3362                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3363 #ifdef __BIG_ENDIAN
3364                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3365 #else
3366                   DMAE_CMD_ENDIANITY_DW_SWAP |
3367 #endif
3368                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3369                   (vn << DMAE_CMD_E1HVN_SHIFT));
3370
3371         if (bp->port.port_stx) {
3372
3373                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3374                 dmae->opcode = opcode;
3375                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3376                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3377                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3378                 dmae->dst_addr_hi = 0;
3379                 dmae->len = sizeof(struct host_port_stats) >> 2;
3380                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3381                 dmae->comp_addr_hi = 0;
3382                 dmae->comp_val = 1;
3383         }
3384
3385         if (bp->func_stx) {
3386
3387                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3388                 dmae->opcode = opcode;
3389                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3390                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3391                 dmae->dst_addr_lo = bp->func_stx >> 2;
3392                 dmae->dst_addr_hi = 0;
3393                 dmae->len = sizeof(struct host_func_stats) >> 2;
3394                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3395                 dmae->comp_addr_hi = 0;
3396                 dmae->comp_val = 1;
3397         }
3398
3399         /* MAC */
3400         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3401                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3402                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3403 #ifdef __BIG_ENDIAN
3404                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3405 #else
3406                   DMAE_CMD_ENDIANITY_DW_SWAP |
3407 #endif
3408                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3409                   (vn << DMAE_CMD_E1HVN_SHIFT));
3410
3411         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3412
3413                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3414                                    NIG_REG_INGRESS_BMAC0_MEM);
3415
3416                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3417                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3418                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3419                 dmae->opcode = opcode;
3420                 dmae->src_addr_lo = (mac_addr +
3421                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3422                 dmae->src_addr_hi = 0;
3423                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3424                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3425                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3426                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3427                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3428                 dmae->comp_addr_hi = 0;
3429                 dmae->comp_val = 1;
3430
3431                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3432                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3433                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3434                 dmae->opcode = opcode;
3435                 dmae->src_addr_lo = (mac_addr +
3436                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3437                 dmae->src_addr_hi = 0;
3438                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3439                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3440                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3441                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3442                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3443                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3444                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3445                 dmae->comp_addr_hi = 0;
3446                 dmae->comp_val = 1;
3447
3448         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3449
3450                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3451
3452                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3453                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3454                 dmae->opcode = opcode;
3455                 dmae->src_addr_lo = (mac_addr +
3456                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3457                 dmae->src_addr_hi = 0;
3458                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3459                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3460                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3461                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3462                 dmae->comp_addr_hi = 0;
3463                 dmae->comp_val = 1;
3464
3465                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3466                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3467                 dmae->opcode = opcode;
3468                 dmae->src_addr_lo = (mac_addr +
3469                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3470                 dmae->src_addr_hi = 0;
3471                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3472                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3473                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3474                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3475                 dmae->len = 1;
3476                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3477                 dmae->comp_addr_hi = 0;
3478                 dmae->comp_val = 1;
3479
3480                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3481                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3482                 dmae->opcode = opcode;
3483                 dmae->src_addr_lo = (mac_addr +
3484                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3485                 dmae->src_addr_hi = 0;
3486                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3487                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3488                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3489                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3490                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3491                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3492                 dmae->comp_addr_hi = 0;
3493                 dmae->comp_val = 1;
3494         }
3495
3496         /* NIG */
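             /* the bulk of the NIG stats is read in one command; the two
              * egress_mac_pkt counters at the end are fetched separately below */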
3497         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3498         dmae->opcode = opcode;
3499         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3500                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3501         dmae->src_addr_hi = 0;
3502         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3503         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3504         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3505         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3506         dmae->comp_addr_hi = 0;
3507         dmae->comp_val = 1;
3508
3509         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3510         dmae->opcode = opcode;
3511         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3512                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3513         dmae->src_addr_hi = 0;
3514         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3515                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3516         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3517                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3518         dmae->len = (2*sizeof(u32)) >> 2;
3519         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3520         dmae->comp_addr_hi = 0;
3521         dmae->comp_val = 1;
3522
3523         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3524         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3525                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3526                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3527 #ifdef __BIG_ENDIAN
3528                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3529 #else
3530                         DMAE_CMD_ENDIANITY_DW_SWAP |
3531 #endif
3532                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3533                         (vn << DMAE_CMD_E1HVN_SHIFT));
3534         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3535                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3536         dmae->src_addr_hi = 0;
3537         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3538                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3539         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3540                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3541         dmae->len = (2*sizeof(u32)) >> 2;
3542         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3543         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3544         dmae->comp_val = DMAE_COMP_VAL;
3545
3546         *stats_comp = 0;
3547 }
3548
3549 static void bnx2x_func_stats_init(struct bnx2x *bp)
3550 {
3551         struct dmae_command *dmae = &bp->stats_dmae;
3552         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3553
3554         /* sanity */
3555         if (!bp->func_stx) {
3556                 BNX2X_ERR("BUG!\n");
3557                 return;
3558         }
3559
3560         bp->executer_idx = 0;
3561         memset(dmae, 0, sizeof(struct dmae_command));
3562
3563         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3564                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3565                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3566 #ifdef __BIG_ENDIAN
3567                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3568 #else
3569                         DMAE_CMD_ENDIANITY_DW_SWAP |
3570 #endif
3571                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3572                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3573         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3574         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3575         dmae->dst_addr_lo = bp->func_stx >> 2;
3576         dmae->dst_addr_hi = 0;
3577         dmae->len = sizeof(struct host_func_stats) >> 2;
3578         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3579         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3580         dmae->comp_val = DMAE_COMP_VAL;
3581
3582         *stats_comp = 0;
3583 }
3584
3585 static void bnx2x_stats_start(struct bnx2x *bp)
3586 {
3587         if (bp->port.pmf)
3588                 bnx2x_port_stats_init(bp);
3589
3590         else if (bp->func_stx)
3591                 bnx2x_func_stats_init(bp);
3592
3593         bnx2x_hw_stats_post(bp);
3594         bnx2x_storm_stats_post(bp);
3595 }
3596
3597 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3598 {
3599         bnx2x_stats_comp(bp);
3600         bnx2x_stats_pmf_update(bp);
3601         bnx2x_stats_start(bp);
3602 }
3603
3604 static void bnx2x_stats_restart(struct bnx2x *bp)
3605 {
3606         bnx2x_stats_comp(bp);
3607         bnx2x_stats_start(bp);
3608 }
3609
3610 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3611 {
3612         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3613         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3614         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3615         struct {
3616                 u32 lo;
3617                 u32 hi;
3618         } diff;
3619
3620         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3621         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3622         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3623         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3624         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3625         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3626         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3627         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3628         UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3629         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3630         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3631         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3632         UPDATE_STAT64(tx_stat_gt127,
3633                                 tx_stat_etherstatspkts65octetsto127octets);
3634         UPDATE_STAT64(tx_stat_gt255,
3635                                 tx_stat_etherstatspkts128octetsto255octets);
3636         UPDATE_STAT64(tx_stat_gt511,
3637                                 tx_stat_etherstatspkts256octetsto511octets);
3638         UPDATE_STAT64(tx_stat_gt1023,
3639                                 tx_stat_etherstatspkts512octetsto1023octets);
3640         UPDATE_STAT64(tx_stat_gt1518,
3641                                 tx_stat_etherstatspkts1024octetsto1522octets);
3642         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3643         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3644         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3645         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3646         UPDATE_STAT64(tx_stat_gterr,
3647                                 tx_stat_dot3statsinternalmactransmiterrors);
3648         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3649
3650         estats->pause_frames_received_hi =
3651                                 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3652         estats->pause_frames_received_lo =
3653                                 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3654
3655         estats->pause_frames_sent_hi =
3656                                 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3657         estats->pause_frames_sent_lo =
3658                                 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3659 }
3660
3661 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3662 {
3663         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3664         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3665         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3666
3667         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3668         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3669         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3670         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3671         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3672         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3673         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3674         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3675         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3676         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3677         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3678         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3679         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3680         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3681         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3682         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3683         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3684         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3685         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3686         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3687         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3688         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3689         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3690         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3691         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3692         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3693         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3694         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3695         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3696         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3697         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3698
3699         estats->pause_frames_received_hi =
3700                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3701         estats->pause_frames_received_lo =
3702                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3703         ADD_64(estats->pause_frames_received_hi,
3704                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3705                estats->pause_frames_received_lo,
3706                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3707
3708         estats->pause_frames_sent_hi =
3709                         pstats->mac_stx[1].tx_stat_outxonsent_hi;
3710         estats->pause_frames_sent_lo =
3711                         pstats->mac_stx[1].tx_stat_outxonsent_lo;
3712         ADD_64(estats->pause_frames_sent_hi,
3713                pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3714                estats->pause_frames_sent_lo,
3715                pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3716 }
3717
3718 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3719 {
3720         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3721         struct nig_stats *old = &(bp->port.old_nig_stats);
3722         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3723         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3724         struct {
3725                 u32 lo;
3726                 u32 hi;
3727         } diff;
3728         u32 nig_timer_max;
3729
3730         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3731                 bnx2x_bmac_stats_update(bp);
3732
3733         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3734                 bnx2x_emac_stats_update(bp);
3735
3736         else { /* unreached */
3737                 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3738                 return -1;
3739         }
3740
3741         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3742                       new->brb_discard - old->brb_discard);
3743         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3744                       new->brb_truncate - old->brb_truncate);
3745
3746         UPDATE_STAT64_NIG(egress_mac_pkt0,
3747                                         etherstatspkts1024octetsto1522octets);
3748         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3749
3750         memcpy(old, new, sizeof(struct nig_stats));
3751
3752         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3753                sizeof(struct mac_stx));
3754         estats->brb_drop_hi = pstats->brb_drop_hi;
3755         estats->brb_drop_lo = pstats->brb_drop_lo;
3756
3757         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3758
3759         nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3760         if (nig_timer_max != estats->nig_timer_max) {
3761                 estats->nig_timer_max = nig_timer_max;
3762                 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3763         }
3764
3765         return 0;
3766 }
3767
3768 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3769 {
3770         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3771         struct tstorm_per_port_stats *tport =
3772                                         &stats->tstorm_common.port_statistics;
3773         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3774         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3775         int i;
3776
3777         memset(&(fstats->total_bytes_received_hi), 0,
3778                sizeof(struct host_func_stats) - 2*sizeof(u32));
3779         estats->error_bytes_received_hi = 0;
3780         estats->error_bytes_received_lo = 0;
3781         estats->etherstatsoverrsizepkts_hi = 0;
3782         estats->etherstatsoverrsizepkts_lo = 0;
3783         estats->no_buff_discard_hi = 0;
3784         estats->no_buff_discard_lo = 0;
3785
3786         for_each_rx_queue(bp, i) {
3787                 struct bnx2x_fastpath *fp = &bp->fp[i];
3788                 int cl_id = fp->cl_id;
3789                 struct tstorm_per_client_stats *tclient =
3790                                 &stats->tstorm_common.client_statistics[cl_id];
3791                 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3792                 struct ustorm_per_client_stats *uclient =
3793                                 &stats->ustorm_common.client_statistics[cl_id];
3794                 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3795                 struct xstorm_per_client_stats *xclient =
3796                                 &stats->xstorm_common.client_statistics[cl_id];
3797                 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3798                 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3799                 u32 diff;
3800
3801                 /* are storm stats valid? */
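                     /* each storm stamps its client block with the drv_counter
                      * it was queried with; a stale value means this round's
                      * update has not landed yet, so bail out and retry on the
                      * next stats event */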
3802                 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3803                                                         bp->stats_counter) {
3804                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3805                            "  xstorm counter (%d) != stats_counter (%d)\n",
3806                            i, xclient->stats_counter, bp->stats_counter);
3807                         return -1;
3808                 }
3809                 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3810                                                         bp->stats_counter) {
3811                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3812                            "  tstorm counter (%d) != stats_counter (%d)\n",
3813                            i, tclient->stats_counter, bp->stats_counter);
3814                         return -2;
3815                 }
3816                 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3817                                                         bp->stats_counter) {
3818                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3819                            "  ustorm counter (%d) != stats_counter (%d)\n",
3820                            i, uclient->stats_counter, bp->stats_counter);
3821                         return -4;
3822                 }
3823
3824                 qstats->total_bytes_received_hi =
3825                         le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3826                 qstats->total_bytes_received_lo =
3827                         le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3828
3829                 ADD_64(qstats->total_bytes_received_hi,
3830                        le32_to_cpu(tclient->rcv_multicast_bytes.hi),
3831                        qstats->total_bytes_received_lo,
3832                        le32_to_cpu(tclient->rcv_multicast_bytes.lo));
3833
3834                 ADD_64(qstats->total_bytes_received_hi,
3835                        le32_to_cpu(tclient->rcv_unicast_bytes.hi),
3836                        qstats->total_bytes_received_lo,
3837                        le32_to_cpu(tclient->rcv_unicast_bytes.lo));
3838
3839                 qstats->valid_bytes_received_hi =
3840                                         qstats->total_bytes_received_hi;
3841                 qstats->valid_bytes_received_lo =
3842                                         qstats->total_bytes_received_lo;
3843
3844                 qstats->error_bytes_received_hi =
3845                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
3846                 qstats->error_bytes_received_lo =
3847                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
3848
3849                 ADD_64(qstats->total_bytes_received_hi,
3850                        qstats->error_bytes_received_hi,
3851                        qstats->total_bytes_received_lo,
3852                        qstats->error_bytes_received_lo);
3853
3854                 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3855                                         total_unicast_packets_received);
3856                 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3857                                         total_multicast_packets_received);
3858                 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3859                                         total_broadcast_packets_received);
3860                 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3861                                         etherstatsoverrsizepkts);
3862                 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3863
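                     /* packets dropped by ustorm for lack of buffers are
                      * removed from the rx packet totals and accounted under
                      * no_buff_discard instead */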
3864                 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3865                                         total_unicast_packets_received);
3866                 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3867                                         total_multicast_packets_received);
3868                 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3869                                         total_broadcast_packets_received);
3870                 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3871                 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3872                 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3873
3874                 qstats->total_bytes_transmitted_hi =
3875                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3876                 qstats->total_bytes_transmitted_lo =
3877                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3878
3879                 ADD_64(qstats->total_bytes_transmitted_hi,
3880                        le32_to_cpu(xclient->multicast_bytes_sent.hi),
3881                        qstats->total_bytes_transmitted_lo,
3882                        le32_to_cpu(xclient->multicast_bytes_sent.lo));
3883
3884                 ADD_64(qstats->total_bytes_transmitted_hi,
3885                        le32_to_cpu(xclient->broadcast_bytes_sent.hi),
3886                        qstats->total_bytes_transmitted_lo,
3887                        le32_to_cpu(xclient->broadcast_bytes_sent.lo));
3888
3889                 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3890                                         total_unicast_packets_transmitted);
3891                 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3892                                         total_multicast_packets_transmitted);
3893                 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3894                                         total_broadcast_packets_transmitted);
3895
3896                 old_tclient->checksum_discard = tclient->checksum_discard;
3897                 old_tclient->ttl0_discard = tclient->ttl0_discard;
3898
3899                 ADD_64(fstats->total_bytes_received_hi,
3900                        qstats->total_bytes_received_hi,
3901                        fstats->total_bytes_received_lo,
3902                        qstats->total_bytes_received_lo);
3903                 ADD_64(fstats->total_bytes_transmitted_hi,
3904                        qstats->total_bytes_transmitted_hi,
3905                        fstats->total_bytes_transmitted_lo,
3906                        qstats->total_bytes_transmitted_lo);
3907                 ADD_64(fstats->total_unicast_packets_received_hi,
3908                        qstats->total_unicast_packets_received_hi,
3909                        fstats->total_unicast_packets_received_lo,
3910                        qstats->total_unicast_packets_received_lo);
3911                 ADD_64(fstats->total_multicast_packets_received_hi,
3912                        qstats->total_multicast_packets_received_hi,
3913                        fstats->total_multicast_packets_received_lo,
3914                        qstats->total_multicast_packets_received_lo);
3915                 ADD_64(fstats->total_broadcast_packets_received_hi,
3916                        qstats->total_broadcast_packets_received_hi,
3917                        fstats->total_broadcast_packets_received_lo,
3918                        qstats->total_broadcast_packets_received_lo);
3919                 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3920                        qstats->total_unicast_packets_transmitted_hi,
3921                        fstats->total_unicast_packets_transmitted_lo,
3922                        qstats->total_unicast_packets_transmitted_lo);
3923                 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3924                        qstats->total_multicast_packets_transmitted_hi,
3925                        fstats->total_multicast_packets_transmitted_lo,
3926                        qstats->total_multicast_packets_transmitted_lo);
3927                 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3928                        qstats->total_broadcast_packets_transmitted_hi,
3929                        fstats->total_broadcast_packets_transmitted_lo,
3930                        qstats->total_broadcast_packets_transmitted_lo);
3931                 ADD_64(fstats->valid_bytes_received_hi,
3932                        qstats->valid_bytes_received_hi,
3933                        fstats->valid_bytes_received_lo,
3934                        qstats->valid_bytes_received_lo);
3935
3936                 ADD_64(estats->error_bytes_received_hi,
3937                        qstats->error_bytes_received_hi,
3938                        estats->error_bytes_received_lo,
3939                        qstats->error_bytes_received_lo);
3940                 ADD_64(estats->etherstatsoverrsizepkts_hi,
3941                        qstats->etherstatsoverrsizepkts_hi,
3942                        estats->etherstatsoverrsizepkts_lo,
3943                        qstats->etherstatsoverrsizepkts_lo);
3944                 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3945                        estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3946         }
3947
3948         ADD_64(fstats->total_bytes_received_hi,
3949                estats->rx_stat_ifhcinbadoctets_hi,
3950                fstats->total_bytes_received_lo,
3951                estats->rx_stat_ifhcinbadoctets_lo);
3952
3953         memcpy(estats, &(fstats->total_bytes_received_hi),
3954                sizeof(struct host_func_stats) - 2*sizeof(u32));
3955
3956         ADD_64(estats->etherstatsoverrsizepkts_hi,
3957                estats->rx_stat_dot3statsframestoolong_hi,
3958                estats->etherstatsoverrsizepkts_lo,
3959                estats->rx_stat_dot3statsframestoolong_lo);
3960         ADD_64(estats->error_bytes_received_hi,
3961                estats->rx_stat_ifhcinbadoctets_hi,
3962                estats->error_bytes_received_lo,
3963                estats->rx_stat_ifhcinbadoctets_lo);
3964
3965         if (bp->port.pmf) {
3966                 estats->mac_filter_discard =
3967                                 le32_to_cpu(tport->mac_filter_discard);
3968                 estats->xxoverflow_discard =
3969                                 le32_to_cpu(tport->xxoverflow_discard);
3970                 estats->brb_truncate_discard =
3971                                 le32_to_cpu(tport->brb_truncate_discard);
3972                 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3973         }
3974
3975         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3976
3977         bp->stats_pending = 0;
3978
3979         return 0;
3980 }
3981
3982 static void bnx2x_net_stats_update(struct bnx2x *bp)
3983 {
3984         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3985         struct net_device_stats *nstats = &bp->dev->stats;
3986         int i;
3987
3988         nstats->rx_packets =
3989                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3990                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3991                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3992
3993         nstats->tx_packets =
3994                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3995                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3996                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3997
3998         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
3999
4000         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4001
4002         nstats->rx_dropped = estats->mac_discard;
4003         for_each_rx_queue(bp, i)
4004                 nstats->rx_dropped +=
4005                         le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4006
4007         nstats->tx_dropped = 0;
4008
4009         nstats->multicast =
4010                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4011
4012         nstats->collisions =
4013                 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4014
4015         nstats->rx_length_errors =
4016                 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4017                 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4018         nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4019                                  bnx2x_hilo(&estats->brb_truncate_hi);
4020         nstats->rx_crc_errors =
4021                 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4022         nstats->rx_frame_errors =
4023                 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4024         nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4025         nstats->rx_missed_errors = estats->xxoverflow_discard;
4026
4027         nstats->rx_errors = nstats->rx_length_errors +
4028                             nstats->rx_over_errors +
4029                             nstats->rx_crc_errors +
4030                             nstats->rx_frame_errors +
4031                             nstats->rx_fifo_errors +
4032                             nstats->rx_missed_errors;
4033
4034         nstats->tx_aborted_errors =
4035                 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4036                 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4037         nstats->tx_carrier_errors =
4038                 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4039         nstats->tx_fifo_errors = 0;
4040         nstats->tx_heartbeat_errors = 0;
4041         nstats->tx_window_errors = 0;
4042
4043         nstats->tx_errors = nstats->tx_aborted_errors +
4044                             nstats->tx_carrier_errors +
4045             bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4046 }
4047
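/* Re-derive the driver-level (software) counters by summing the per-RX-queue
 * eth_q_stats; they are reset and re-accumulated on every statistics update.
 */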
4048 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4049 {
4050         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4051         int i;
4052
4053         estats->driver_xoff = 0;
4054         estats->rx_err_discard_pkt = 0;
4055         estats->rx_skb_alloc_failed = 0;
4056         estats->hw_csum_err = 0;
4057         for_each_rx_queue(bp, i) {
4058                 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4059
4060                 estats->driver_xoff += qstats->driver_xoff;
4061                 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4062                 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4063                 estats->hw_csum_err += qstats->hw_csum_err;
4064         }
4065 }
4066
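/* Top-level statistics refresh: runs only once the previous DMAE transfer has
 * completed (*stats_comp == DMAE_COMP_VAL).  The PMF additionally folds in the
 * hardware statistics; the storm (per-queue) statistics are then merged, and
 * the driver panics if they stay stale across several consecutive updates.
 * Finally, new hardware and storm statistics requests are posted.
 */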
4067 static void bnx2x_stats_update(struct bnx2x *bp)
4068 {
4069         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4070
4071         if (*stats_comp != DMAE_COMP_VAL)
4072                 return;
4073
4074         if (bp->port.pmf)
4075                 bnx2x_hw_stats_update(bp);
4076
4077         if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4078                 BNX2X_ERR("storm stats were not updated for 3 times\n");
4079                 bnx2x_panic();
4080                 return;
4081         }
4082
4083         bnx2x_net_stats_update(bp);
4084         bnx2x_drv_stats_update(bp);
4085
4086         if (bp->msglevel & NETIF_MSG_TIMER) {
4087                 struct bnx2x_fastpath *fp0_rx = bp->fp;
4088                 struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
4089                 struct tstorm_per_client_stats *old_tclient =
4090                                                         &bp->fp->old_tclient;
4091                 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4092                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4093                 struct net_device_stats *nstats = &bp->dev->stats;
4094                 int i;
4095
4096                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4097                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
4098                                   "  tx pkt (%lx)\n",
4099                        bnx2x_tx_avail(fp0_tx),
4100                        le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4101                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
4102                                   "  rx pkt (%lx)\n",
4103                        (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4104                              fp0_rx->rx_comp_cons),
4105                        le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4106                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
4107                                   "brb truncate %u\n",
4108                        (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4109                        qstats->driver_xoff,
4110                        estats->brb_drop_lo, estats->brb_truncate_lo);
4111                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
4112                         "packets_too_big_discard %lu  no_buff_discard %lu  "
4113                         "mac_discard %u  mac_filter_discard %u  "
4114                         "xxovrflow_discard %u  brb_truncate_discard %u  "
4115                         "ttl0_discard %u\n",
4116                        le32_to_cpu(old_tclient->checksum_discard),
4117                        bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4118                        bnx2x_hilo(&qstats->no_buff_discard_hi),
4119                        estats->mac_discard, estats->mac_filter_discard,
4120                        estats->xxoverflow_discard, estats->brb_truncate_discard,
4121                        le32_to_cpu(old_tclient->ttl0_discard));
4122
4123                 for_each_queue(bp, i) {
4124                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4125                                bnx2x_fp(bp, i, tx_pkt),
4126                                bnx2x_fp(bp, i, rx_pkt),
4127                                bnx2x_fp(bp, i, rx_calls));
4128                 }
4129         }
4130
4131         bnx2x_hw_stats_post(bp);
4132         bnx2x_storm_stats_post(bp);
4133 }
4134
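/* Prepare (but do not post) the DMAE commands that write the host copies of
 * the port and function statistics back to their port_stx/func_stx locations;
 * bnx2x_stats_stop() posts them when statistics collection is shut down.
 */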
4135 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4136 {
4137         struct dmae_command *dmae;
4138         u32 opcode;
4139         int loader_idx = PMF_DMAE_C(bp);
4140         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4141
4142         bp->executer_idx = 0;
4143
4144         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4145                   DMAE_CMD_C_ENABLE |
4146                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4147 #ifdef __BIG_ENDIAN
4148                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
4149 #else
4150                   DMAE_CMD_ENDIANITY_DW_SWAP |
4151 #endif
4152                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4153                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4154
4155         if (bp->port.port_stx) {
4156
4157                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4158                 if (bp->func_stx)
4159                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4160                 else
4161                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4162                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4163                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4164                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4165                 dmae->dst_addr_hi = 0;
4166                 dmae->len = sizeof(struct host_port_stats) >> 2;
4167                 if (bp->func_stx) {
4168                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4169                         dmae->comp_addr_hi = 0;
4170                         dmae->comp_val = 1;
4171                 } else {
4172                         dmae->comp_addr_lo =
4173                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4174                         dmae->comp_addr_hi =
4175                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4176                         dmae->comp_val = DMAE_COMP_VAL;
4177
4178                         *stats_comp = 0;
4179                 }
4180         }
4181
4182         if (bp->func_stx) {
4183
4184                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4185                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4186                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4187                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4188                 dmae->dst_addr_lo = bp->func_stx >> 2;
4189                 dmae->dst_addr_hi = 0;
4190                 dmae->len = sizeof(struct host_func_stats) >> 2;
4191                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4192                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4193                 dmae->comp_val = DMAE_COMP_VAL;
4194
4195                 *stats_comp = 0;
4196         }
4197 }
4198
4199 static void bnx2x_stats_stop(struct bnx2x *bp)
4200 {
4201         int update = 0;
4202
4203         bnx2x_stats_comp(bp);
4204
4205         if (bp->port.pmf)
4206                 update = (bnx2x_hw_stats_update(bp) == 0);
4207
4208         update |= (bnx2x_storm_stats_update(bp) == 0);
4209
4210         if (update) {
4211                 bnx2x_net_stats_update(bp);
4212
4213                 if (bp->port.pmf)
4214                         bnx2x_port_stats_stop(bp);
4215
4216                 bnx2x_hw_stats_post(bp);
4217                 bnx2x_stats_comp(bp);
4218         }
4219 }
4220
4221 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4222 {
4223 }
4224
4225 static const struct {
4226         void (*action)(struct bnx2x *bp);
4227         enum bnx2x_stats_state next_state;
4228 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4229 /* state        event   */
4230 {
4231 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4232 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
4233 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4234 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4235 },
4236 {
4237 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
4238 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
4239 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
4240 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
4241 }
4242 };
4243
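/* Dispatch an event through the table above: run the action registered for
 * the current state and move to that entry's next_state.  For example, a
 * STATS_EVENT_LINK_UP received in STATS_STATE_ENABLED runs
 * bnx2x_stats_restart() and leaves the state ENABLED.
 */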
4244 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4245 {
4246         enum bnx2x_stats_state state = bp->stats_state;
4247
4248         bnx2x_stats_stm[state][event].action(bp);
4249         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4250
4251         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4252                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4253                    state, event, bp->stats_state);
4254 }
4255
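/* Periodic driver timer: in poll mode it services TX/RX of fastpath 0, then
 * writes the driver pulse into shared memory and compares it with the MCP
 * pulse to detect a lost firmware heartbeat, and kicks a statistics update
 * while the device state allows it.  It re-arms itself every
 * current_interval jiffies.
 */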
4256 static void bnx2x_timer(unsigned long data)
4257 {
4258         struct bnx2x *bp = (struct bnx2x *) data;
4259
4260         if (!netif_running(bp->dev))
4261                 return;
4262
4263         if (atomic_read(&bp->intr_sem) != 0)
4264                 goto timer_restart;
4265
4266         if (poll) {
4267                 struct bnx2x_fastpath *fp = &bp->fp[0];
4268                 int rc;
4269
4270                 bnx2x_tx_int(fp);
4271                 rc = bnx2x_rx_int(fp, 1000);
4272         }
4273
4274         if (!BP_NOMCP(bp)) {
4275                 int func = BP_FUNC(bp);
4276                 u32 drv_pulse;
4277                 u32 mcp_pulse;
4278
4279                 ++bp->fw_drv_pulse_wr_seq;
4280                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4281                 /* TBD - add SYSTEM_TIME */
4282                 drv_pulse = bp->fw_drv_pulse_wr_seq;
4283                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4284
4285                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4286                              MCP_PULSE_SEQ_MASK);
4287                 /* The delta between driver pulse and mcp response
4288                  * should be 1 (before mcp response) or 0 (after mcp response)
4289                  */
4290                 if ((drv_pulse != mcp_pulse) &&
4291                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4292                         /* someone lost a heartbeat... */
4293                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4294                                   drv_pulse, mcp_pulse);
4295                 }
4296         }
4297
4298         if ((bp->state == BNX2X_STATE_OPEN) ||
4299             (bp->state == BNX2X_STATE_DISABLED))
4300                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4301
4302 timer_restart:
4303         mod_timer(&bp->timer, jiffies + bp->current_interval);
4304 }
4305
4306 /* end of Statistics */
4307
4308 /* nic init */
4309
4310 /*
4311  * nic init service functions
4312  */
4313
4314 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4315 {
4316         int port = BP_PORT(bp);
4317
4318         /* "CSTORM" */
4319         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4320                         CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4321                         CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4322         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4323                         CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4324                         CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
4325 }
4326
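/* Set up a per-fastpath (non-default) status block: program the host
 * addresses and owning function of its USTORM and CSTORM sections into
 * CSTORM internal memory, disable host coalescing on all indices, and ACK
 * the SB to enable its IGU interrupt.
 */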
4327 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4328                           dma_addr_t mapping, int sb_id)
4329 {
4330         int port = BP_PORT(bp);
4331         int func = BP_FUNC(bp);
4332         int index;
4333         u64 section;
4334
4335         /* USTORM */
4336         section = ((u64)mapping) + offsetof(struct host_status_block,
4337                                             u_status_block);
4338         sb->u_status_block.status_block_id = sb_id;
4339
4340         REG_WR(bp, BAR_CSTRORM_INTMEM +
4341                CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4342         REG_WR(bp, BAR_CSTRORM_INTMEM +
4343                ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
4344                U64_HI(section));
4345         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4346                 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
4347
4348         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4349                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4350                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
4351
4352         /* CSTORM */
4353         section = ((u64)mapping) + offsetof(struct host_status_block,
4354                                             c_status_block);
4355         sb->c_status_block.status_block_id = sb_id;
4356
4357         REG_WR(bp, BAR_CSTRORM_INTMEM +
4358                CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
4359         REG_WR(bp, BAR_CSTRORM_INTMEM +
4360                ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
4361                U64_HI(section));
4362         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4363                 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
4364
4365         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4366                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4367                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
4368
4369         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4370 }
4371
4372 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4373 {
4374         int func = BP_FUNC(bp);
4375
4376         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
4377                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4378                         sizeof(struct tstorm_def_status_block)/4);
4379         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4380                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4381                         sizeof(struct cstorm_def_status_block_u)/4);
4382         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4383                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4384                         sizeof(struct cstorm_def_status_block_c)/4);
4385         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
4386                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4387                         sizeof(struct xstorm_def_status_block)/4);
4388 }
4389
4390 static void bnx2x_init_def_sb(struct bnx2x *bp,
4391                               struct host_def_status_block *def_sb,
4392                               dma_addr_t mapping, int sb_id)
4393 {
4394         int port = BP_PORT(bp);
4395         int func = BP_FUNC(bp);
4396         int index, val, reg_offset;
4397         u64 section;
4398
4399         /* ATTN */
4400         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4401                                             atten_status_block);
4402         def_sb->atten_status_block.status_block_id = sb_id;
4403
4404         bp->attn_state = 0;
4405
4406         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4407                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4408
4409         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4410                 bp->attn_group[index].sig[0] = REG_RD(bp,
4411                                                      reg_offset + 0x10*index);
4412                 bp->attn_group[index].sig[1] = REG_RD(bp,
4413                                                reg_offset + 0x4 + 0x10*index);
4414                 bp->attn_group[index].sig[2] = REG_RD(bp,
4415                                                reg_offset + 0x8 + 0x10*index);
4416                 bp->attn_group[index].sig[3] = REG_RD(bp,
4417                                                reg_offset + 0xc + 0x10*index);
4418         }
4419
4420         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4421                              HC_REG_ATTN_MSG0_ADDR_L);
4422
4423         REG_WR(bp, reg_offset, U64_LO(section));
4424         REG_WR(bp, reg_offset + 4, U64_HI(section));
4425
4426         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4427
4428         val = REG_RD(bp, reg_offset);
4429         val |= sb_id;
4430         REG_WR(bp, reg_offset, val);
4431
4432         /* USTORM */
4433         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4434                                             u_def_status_block);
4435         def_sb->u_def_status_block.status_block_id = sb_id;
4436
4437         REG_WR(bp, BAR_CSTRORM_INTMEM +
4438                CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4439         REG_WR(bp, BAR_CSTRORM_INTMEM +
4440                ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
4441                U64_HI(section));
4442         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4443                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
4444
4445         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4446                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4447                          CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
4448
4449         /* CSTORM */
4450         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4451                                             c_def_status_block);
4452         def_sb->c_def_status_block.status_block_id = sb_id;
4453
4454         REG_WR(bp, BAR_CSTRORM_INTMEM +
4455                CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
4456         REG_WR(bp, BAR_CSTRORM_INTMEM +
4457                ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
4458                U64_HI(section));
4459         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4460                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
4461
4462         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4463                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4464                          CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
4465
4466         /* TSTORM */
4467         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4468                                             t_def_status_block);
4469         def_sb->t_def_status_block.status_block_id = sb_id;
4470
4471         REG_WR(bp, BAR_TSTRORM_INTMEM +
4472                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4473         REG_WR(bp, BAR_TSTRORM_INTMEM +
4474                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4475                U64_HI(section));
4476         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4477                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4478
4479         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4480                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4481                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4482
4483         /* XSTORM */
4484         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4485                                             x_def_status_block);
4486         def_sb->x_def_status_block.status_block_id = sb_id;
4487
4488         REG_WR(bp, BAR_XSTRORM_INTMEM +
4489                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4490         REG_WR(bp, BAR_XSTRORM_INTMEM +
4491                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4492                U64_HI(section));
4493         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4494                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4495
4496         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4497                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4498                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4499
4500         bp->stats_pending = 0;
4501         bp->set_mac_pending = 0;
4502
4503         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4504 }
4505
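/* Program the host coalescing timeouts for every queue: the RX and TX
 * completion-queue indices take their timeout from rx_ticks/tx_ticks scaled
 * by 12, and host coalescing is disabled for an index whose timeout works
 * out to zero.
 */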
4506 static void bnx2x_update_coalesce(struct bnx2x *bp)
4507 {
4508         int port = BP_PORT(bp);
4509         int i;
4510
4511         for_each_queue(bp, i) {
4512                 int sb_id = bp->fp[i].sb_id;
4513
4514                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4515                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4516                         CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4517                                                       U_SB_ETH_RX_CQ_INDEX),
4518                         bp->rx_ticks/12);
4519                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4520                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4521                                                        U_SB_ETH_RX_CQ_INDEX),
4522                          (bp->rx_ticks/12) ? 0 : 1);
4523
4524                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4525                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4526                         CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4527                                                       C_SB_ETH_TX_CQ_INDEX),
4528                         bp->tx_ticks/12);
4529                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4530                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4531                                                        C_SB_ETH_TX_CQ_INDEX),
4532                          (bp->tx_ticks/12) ? 0 : 1);
4533         }
4534 }
4535
4536 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4537                                        struct bnx2x_fastpath *fp, int last)
4538 {
4539         int i;
4540
4541         for (i = 0; i < last; i++) {
4542                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4543                 struct sk_buff *skb = rx_buf->skb;
4544
4545                 if (skb == NULL) {
4546                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4547                         continue;
4548                 }
4549
4550                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4551                         pci_unmap_single(bp->pdev,
4552                                          pci_unmap_addr(rx_buf, mapping),
4553                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4554
4555                 dev_kfree_skb(skb);
4556                 rx_buf->skb = NULL;
4557         }
4558 }
4559
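/* Build the RX side of every queue: pre-allocate the TPA (LRO) skb pool when
 * TPA is enabled, chain the "next page" entries of the SGE, BD and CQE rings,
 * allocate the initial SGEs and rx skbs, and publish the producers to the
 * chip (which is why this must only run after chip init).
 */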
4560 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4561 {
4562         int func = BP_FUNC(bp);
4563         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4564                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4565         u16 ring_prod, cqe_ring_prod;
4566         int i, j;
4567
4568         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4569         DP(NETIF_MSG_IFUP,
4570            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4571
4572         if (bp->flags & TPA_ENABLE_FLAG) {
4573
4574                 for_each_rx_queue(bp, j) {
4575                         struct bnx2x_fastpath *fp = &bp->fp[j];
4576
4577                         for (i = 0; i < max_agg_queues; i++) {
4578                                 fp->tpa_pool[i].skb =
4579                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4580                                 if (!fp->tpa_pool[i].skb) {
4581                                         BNX2X_ERR("Failed to allocate TPA "
4582                                                   "skb pool for queue[%d] - "
4583                                                   "disabling TPA on this "
4584                                                   "queue!\n", j);
4585                                         bnx2x_free_tpa_pool(bp, fp, i);
4586                                         fp->disable_tpa = 1;
4587                                         break;
4588                                 }
4589                                 pci_unmap_addr_set((struct sw_rx_bd *)
4590                                                         &bp->fp->tpa_pool[i],
4591                                                    mapping, 0);
4592                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4593                         }
4594                 }
4595         }
4596
4597         for_each_rx_queue(bp, j) {
4598                 struct bnx2x_fastpath *fp = &bp->fp[j];
4599
4600                 fp->rx_bd_cons = 0;
4601                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4602                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4603
4604                 /* Mark queue as Rx */
4605                 fp->is_rx_queue = 1;
4606
4607                 /* "next page" elements initialization */
4608                 /* SGE ring */
4609                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4610                         struct eth_rx_sge *sge;
4611
4612                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4613                         sge->addr_hi =
4614                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4615                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4616                         sge->addr_lo =
4617                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4618                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4619                 }
4620
4621                 bnx2x_init_sge_ring_bit_mask(fp);
4622
4623                 /* RX BD ring */
4624                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4625                         struct eth_rx_bd *rx_bd;
4626
4627                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4628                         rx_bd->addr_hi =
4629                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4630                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4631                         rx_bd->addr_lo =
4632                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4633                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4634                 }
4635
4636                 /* CQ ring */
4637                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4638                         struct eth_rx_cqe_next_page *nextpg;
4639
4640                         nextpg = (struct eth_rx_cqe_next_page *)
4641                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4642                         nextpg->addr_hi =
4643                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4644                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4645                         nextpg->addr_lo =
4646                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4647                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4648                 }
4649
4650                 /* Allocate SGEs and initialize the ring elements */
4651                 for (i = 0, ring_prod = 0;
4652                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4653
4654                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4655                                 BNX2X_ERR("was only able to allocate "
4656                                           "%d rx sges\n", i);
4657                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4658                                 /* Cleanup already allocated elements */
4659                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4660                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4661                                 fp->disable_tpa = 1;
4662                                 ring_prod = 0;
4663                                 break;
4664                         }
4665                         ring_prod = NEXT_SGE_IDX(ring_prod);
4666                 }
4667                 fp->rx_sge_prod = ring_prod;
4668
4669                 /* Allocate BDs and initialize BD ring */
4670                 fp->rx_comp_cons = 0;
4671                 cqe_ring_prod = ring_prod = 0;
4672                 for (i = 0; i < bp->rx_ring_size; i++) {
4673                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4674                                 BNX2X_ERR("was only able to allocate "
4675                                           "%d rx skbs on queue[%d]\n", i, j);
4676                                 fp->eth_q_stats.rx_skb_alloc_failed++;
4677                                 break;
4678                         }
4679                         ring_prod = NEXT_RX_IDX(ring_prod);
4680                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4681                         WARN_ON(ring_prod <= i);
4682                 }
4683
4684                 fp->rx_bd_prod = ring_prod;
4685                 /* must not have more available CQEs than BDs */
4686                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4687                                        cqe_ring_prod);
4688                 fp->rx_pkt = fp->rx_calls = 0;
4689
4690                 /* Warning!
4691                  * this will generate an interrupt (to the TSTORM);
4692                  * it must only be done after the chip is initialized
4693                  */
4694                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4695                                      fp->rx_sge_prod);
4696                 if (j != 0)
4697                         continue;
4698
4699                 REG_WR(bp, BAR_USTRORM_INTMEM +
4700                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4701                        U64_LO(fp->rx_comp_mapping));
4702                 REG_WR(bp, BAR_USTRORM_INTMEM +
4703                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4704                        U64_HI(fp->rx_comp_mapping));
4705         }
4706 }
4707
4708 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4709 {
4710         int i, j;
4711
4712         for_each_tx_queue(bp, j) {
4713                 struct bnx2x_fastpath *fp = &bp->fp[j];
4714
4715                 for (i = 1; i <= NUM_TX_RINGS; i++) {
4716                         struct eth_tx_next_bd *tx_next_bd =
4717                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
4718
4719                         tx_next_bd->addr_hi =
4720                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4721                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4722                         tx_next_bd->addr_lo =
4723                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4724                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4725                 }
4726
4727                 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
4728                 fp->tx_db.data.zero_fill1 = 0;
4729                 fp->tx_db.data.prod = 0;
4730
4731                 fp->tx_pkt_prod = 0;
4732                 fp->tx_pkt_cons = 0;
4733                 fp->tx_bd_prod = 0;
4734                 fp->tx_bd_cons = 0;
4735                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4736                 fp->tx_pkt = 0;
4737         }
4738 }
4739
4740 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4741 {
4742         int func = BP_FUNC(bp);
4743
4744         spin_lock_init(&bp->spq_lock);
4745
4746         bp->spq_left = MAX_SPQ_PENDING;
4747         bp->spq_prod_idx = 0;
4748         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4749         bp->spq_prod_bd = bp->spq;
4750         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4751
4752         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4753                U64_LO(bp->spq_mapping));
4754         REG_WR(bp,
4755                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4756                U64_HI(bp->spq_mapping));
4757
4758         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4759                bp->spq_prod_idx);
4760 }
4761
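/* Fill the per-connection ETH context: the RX-queue (USTORM) part carries the
 * client id, status block, buffer sizes and BD/SGE ring bases (plus TPA
 * parameters when enabled), while the TX-queue part programs the CSTORM
 * status-block index and the XSTORM TX BD ring base and statistics id.
 */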
4762 static void bnx2x_init_context(struct bnx2x *bp)
4763 {
4764         int i;
4765
4766         for_each_rx_queue(bp, i) {
4767                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4768                 struct bnx2x_fastpath *fp = &bp->fp[i];
4769                 u8 cl_id = fp->cl_id;
4770
4771                 context->ustorm_st_context.common.sb_index_numbers =
4772                                                 BNX2X_RX_SB_INDEX_NUM;
4773                 context->ustorm_st_context.common.clientId = cl_id;
4774                 context->ustorm_st_context.common.status_block_id = fp->sb_id;
4775                 context->ustorm_st_context.common.flags =
4776                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4777                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4778                 context->ustorm_st_context.common.statistics_counter_id =
4779                                                 cl_id;
4780                 context->ustorm_st_context.common.mc_alignment_log_size =
4781                                                 BNX2X_RX_ALIGN_SHIFT;
4782                 context->ustorm_st_context.common.bd_buff_size =
4783                                                 bp->rx_buf_size;
4784                 context->ustorm_st_context.common.bd_page_base_hi =
4785                                                 U64_HI(fp->rx_desc_mapping);
4786                 context->ustorm_st_context.common.bd_page_base_lo =
4787                                                 U64_LO(fp->rx_desc_mapping);
4788                 if (!fp->disable_tpa) {
4789                         context->ustorm_st_context.common.flags |=
4790                                 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
4791                         context->ustorm_st_context.common.sge_buff_size =
4792                                 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4793                                          (u32)0xffff);
4794                         context->ustorm_st_context.common.sge_page_base_hi =
4795                                                 U64_HI(fp->rx_sge_mapping);
4796                         context->ustorm_st_context.common.sge_page_base_lo =
4797                                                 U64_LO(fp->rx_sge_mapping);
4798
4799                         context->ustorm_st_context.common.max_sges_for_packet =
4800                                 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
4801                         context->ustorm_st_context.common.max_sges_for_packet =
4802                                 ((context->ustorm_st_context.common.
4803                                   max_sges_for_packet + PAGES_PER_SGE - 1) &
4804                                  (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
4805                 }
4806
4807                 context->ustorm_ag_context.cdu_usage =
4808                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4809                                                CDU_REGION_NUMBER_UCM_AG,
4810                                                ETH_CONNECTION_TYPE);
4811
4812                 context->xstorm_ag_context.cdu_reserved =
4813                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4814                                                CDU_REGION_NUMBER_XCM_AG,
4815                                                ETH_CONNECTION_TYPE);
4816         }
4817
4818         for_each_tx_queue(bp, i) {
4819                 struct bnx2x_fastpath *fp = &bp->fp[i];
4820                 struct eth_context *context =
4821                         bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);
4822
4823                 context->cstorm_st_context.sb_index_number =
4824                                                 C_SB_ETH_TX_CQ_INDEX;
4825                 context->cstorm_st_context.status_block_id = fp->sb_id;
4826
4827                 context->xstorm_st_context.tx_bd_page_base_hi =
4828                                                 U64_HI(fp->tx_desc_mapping);
4829                 context->xstorm_st_context.tx_bd_page_base_lo =
4830                                                 U64_LO(fp->tx_desc_mapping);
4831                 context->xstorm_st_context.statistics_data = (fp->cl_id |
4832                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4833         }
4834 }
4835
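/* Program the RSS indirection table: when multi-queue is enabled, the table
 * entries spread incoming traffic round-robin over the RX queues' client ids.
 */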
4836 static void bnx2x_init_ind_table(struct bnx2x *bp)
4837 {
4838         int func = BP_FUNC(bp);
4839         int i;
4840
4841         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4842                 return;
4843
4844         DP(NETIF_MSG_IFUP,
4845            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
4846         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4847                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4848                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4849                         bp->fp->cl_id + (i % bp->num_rx_queues));
4850 }
4851
4852 static void bnx2x_set_client_config(struct bnx2x *bp)
4853 {
4854         struct tstorm_eth_client_config tstorm_client = {0};
4855         int port = BP_PORT(bp);
4856         int i;
4857
4858         tstorm_client.mtu = bp->dev->mtu;
4859         tstorm_client.config_flags =
4860                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4861                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
4862 #ifdef BCM_VLAN
4863         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4864                 tstorm_client.config_flags |=
4865                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
4866                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4867         }
4868 #endif
4869
4870         for_each_queue(bp, i) {
4871                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4872
4873                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4874                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4875                        ((u32 *)&tstorm_client)[0]);
4876                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4877                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4878                        ((u32 *)&tstorm_client)[1]);
4879         }
4880
4881         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4882            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4883 }
4884
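/* Translate the driver rx_mode into the TSTORM MAC filtering masks (drop or
 * accept unicast/multicast/broadcast for this function) and the NIG LLH mask
 * that selects which packet classes reach the host, then push the client
 * configuration whenever RX is not completely disabled.
 */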
4885 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4886 {
4887         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4888         int mode = bp->rx_mode;
4889         int mask = (1 << BP_L_ID(bp));
4890         int func = BP_FUNC(bp);
4891         int port = BP_PORT(bp);
4892         int i;
4893         /* All but management unicast packets should pass to the host as well */
4894         u32 llh_mask =
4895                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
4896                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
4897                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
4898                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
4899
4900         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
4901
4902         switch (mode) {
4903         case BNX2X_RX_MODE_NONE: /* no Rx */
4904                 tstorm_mac_filter.ucast_drop_all = mask;
4905                 tstorm_mac_filter.mcast_drop_all = mask;
4906                 tstorm_mac_filter.bcast_drop_all = mask;
4907                 break;
4908
4909         case BNX2X_RX_MODE_NORMAL:
4910                 tstorm_mac_filter.bcast_accept_all = mask;
4911                 break;
4912
4913         case BNX2X_RX_MODE_ALLMULTI:
4914                 tstorm_mac_filter.mcast_accept_all = mask;
4915                 tstorm_mac_filter.bcast_accept_all = mask;
4916                 break;
4917
4918         case BNX2X_RX_MODE_PROMISC:
4919                 tstorm_mac_filter.ucast_accept_all = mask;
4920                 tstorm_mac_filter.mcast_accept_all = mask;
4921                 tstorm_mac_filter.bcast_accept_all = mask;
4922                 /* pass management unicast packets as well */
4923                 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
4924                 break;
4925
4926         default:
4927                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4928                 break;
4929         }
4930
4931         REG_WR(bp,
4932                (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
4933                llh_mask);
4934
4935         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4936                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4937                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4938                        ((u32 *)&tstorm_mac_filter)[i]);
4939
4940 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4941                    ((u32 *)&tstorm_mac_filter)[i]); */
4942         }
4943
4944         if (mode != BNX2X_RX_MODE_NONE)
4945                 bnx2x_set_client_config(bp);
4946 }
4947
4948 static void bnx2x_init_internal_common(struct bnx2x *bp)
4949 {
4950         int i;
4951
4952         /* Zero this manually as its initialization is
4953            currently missing in the initTool */
4954         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4955                 REG_WR(bp, BAR_USTRORM_INTMEM +
4956                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4957 }
4958
4959 static void bnx2x_init_internal_port(struct bnx2x *bp)
4960 {
4961         int port = BP_PORT(bp);
4962
4963         REG_WR(bp,
4964                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
4965         REG_WR(bp,
4966                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
4967         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4968         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4969 }
4970
4971 /* Calculates the sum of vn_min_rates.
4972    It's needed for further normalizing of the min_rates.
4973    Returns:
4974      sum of vn_min_rates.
4975        or
4976      0 - if all the min_rates are 0.
4977      In the latter case the fairness algorithm should be deactivated.
4978      If not all min_rates are zero then those that are zero will be set to 1.
4979  */
4980 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4981 {
4982         int all_zero = 1;
4983         int port = BP_PORT(bp);
4984         int vn;
4985
4986         bp->vn_weight_sum = 0;
4987         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4988                 int func = 2*vn + port;
4989                 u32 vn_cfg =
4990                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4991                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4992                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4993
4994                 /* Skip hidden vns */
4995                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4996                         continue;
4997
4998                 /* If min rate is zero - set it to 1 */
4999                 if (!vn_min_rate)
5000                         vn_min_rate = DEF_MIN_RATE;
5001                 else
5002                         all_zero = 0;
5003
5004                 bp->vn_weight_sum += vn_min_rate;
5005         }
5006
5007         /* ... only if all min rates are zero - disable fairness */
5008         if (all_zero)
5009                 bp->vn_weight_sum = 0;
5010 }
5011
5012 static void bnx2x_init_internal_func(struct bnx2x *bp)
5013 {
5014         struct tstorm_eth_function_common_config tstorm_config = {0};
5015         struct stats_indication_flags stats_flags = {0};
5016         int port = BP_PORT(bp);
5017         int func = BP_FUNC(bp);
5018         int i, j;
5019         u32 offset;
5020         u16 max_agg_size;
5021
5022         if (is_multi(bp)) {
5023                 tstorm_config.config_flags = MULTI_FLAGS(bp);
5024                 tstorm_config.rss_result_mask = MULTI_MASK;
5025         }
5026
5027         /* Enable TPA if needed */
5028         if (bp->flags & TPA_ENABLE_FLAG)
5029                 tstorm_config.config_flags |=
5030                         TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5031
5032         if (IS_E1HMF(bp))
5033                 tstorm_config.config_flags |=
5034                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5035
5036         tstorm_config.leading_client_id = BP_L_ID(bp);
5037
5038         REG_WR(bp, BAR_TSTRORM_INTMEM +
5039                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5040                (*(u32 *)&tstorm_config));
5041
5042         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5043         bnx2x_set_storm_rx_mode(bp);
5044
5045         for_each_queue(bp, i) {
5046                 u8 cl_id = bp->fp[i].cl_id;
5047
5048                 /* reset xstorm per client statistics */
5049                 offset = BAR_XSTRORM_INTMEM +
5050                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5051                 for (j = 0;
5052                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5053                         REG_WR(bp, offset + j*4, 0);
5054
5055                 /* reset tstorm per client statistics */
5056                 offset = BAR_TSTRORM_INTMEM +
5057                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5058                 for (j = 0;
5059                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5060                         REG_WR(bp, offset + j*4, 0);
5061
5062                 /* reset ustorm per client statistics */
5063                 offset = BAR_USTRORM_INTMEM +
5064                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5065                 for (j = 0;
5066                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5067                         REG_WR(bp, offset + j*4, 0);
5068         }
5069
5070         /* Init statistics related context */
5071         stats_flags.collect_eth = 1;
5072
5073         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5074                ((u32 *)&stats_flags)[0]);
5075         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5076                ((u32 *)&stats_flags)[1]);
5077
5078         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5079                ((u32 *)&stats_flags)[0]);
5080         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5081                ((u32 *)&stats_flags)[1]);
5082
5083         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5084                ((u32 *)&stats_flags)[0]);
5085         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5086                ((u32 *)&stats_flags)[1]);
5087
5088         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5089                ((u32 *)&stats_flags)[0]);
5090         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5091                ((u32 *)&stats_flags)[1]);
5092
5093         REG_WR(bp, BAR_XSTRORM_INTMEM +
5094                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5095                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5096         REG_WR(bp, BAR_XSTRORM_INTMEM +
5097                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5098                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5099
5100         REG_WR(bp, BAR_TSTRORM_INTMEM +
5101                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5102                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5103         REG_WR(bp, BAR_TSTRORM_INTMEM +
5104                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5105                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5106
5107         REG_WR(bp, BAR_USTRORM_INTMEM +
5108                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5109                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5110         REG_WR(bp, BAR_USTRORM_INTMEM +
5111                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5112                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5113
5114         if (CHIP_IS_E1H(bp)) {
5115                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5116                         IS_E1HMF(bp));
5117                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5118                         IS_E1HMF(bp));
5119                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5120                         IS_E1HMF(bp));
5121                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5122                         IS_E1HMF(bp));
5123
5124                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5125                          bp->e1hov);
5126         }
5127
5128         /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5129         max_agg_size =
5130                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5131                           SGE_PAGE_SIZE * PAGES_PER_SGE),
5132                     (u32)0xffff);
5133         for_each_rx_queue(bp, i) {
5134                 struct bnx2x_fastpath *fp = &bp->fp[i];
5135
5136                 REG_WR(bp, BAR_USTRORM_INTMEM +
5137                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5138                        U64_LO(fp->rx_comp_mapping));
5139                 REG_WR(bp, BAR_USTRORM_INTMEM +
5140                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5141                        U64_HI(fp->rx_comp_mapping));
5142
5143                 /* Next page */
5144                 REG_WR(bp, BAR_USTRORM_INTMEM +
5145                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5146                        U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5147                 REG_WR(bp, BAR_USTRORM_INTMEM +
5148                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5149                        U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5150
5151                 REG_WR16(bp, BAR_USTRORM_INTMEM +
5152                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5153                          max_agg_size);
5154         }
5155
5156         /* dropless flow control */
5157         if (CHIP_IS_E1H(bp)) {
5158                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5159
5160                 rx_pause.bd_thr_low = 250;
5161                 rx_pause.cqe_thr_low = 250;
5162                 rx_pause.cos = 1;
5163                 rx_pause.sge_thr_low = 0;
5164                 rx_pause.bd_thr_high = 350;
5165                 rx_pause.cqe_thr_high = 350;
5166                 rx_pause.sge_thr_high = 0;
5167
5168                 for_each_rx_queue(bp, i) {
5169                         struct bnx2x_fastpath *fp = &bp->fp[i];
5170
5171                         if (!fp->disable_tpa) {
5172                                 rx_pause.sge_thr_low = 150;
5173                                 rx_pause.sge_thr_high = 250;
5174                         }
5175
5176
5177                         offset = BAR_USTRORM_INTMEM +
5178                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5179                                                                    fp->cl_id);
5180                         for (j = 0;
5181                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5182                              j++)
5183                                 REG_WR(bp, offset + j*4,
5184                                        ((u32 *)&rx_pause)[j]);
5185                 }
5186         }
5187
5188         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5189
5190         /* Init rate shaping and fairness contexts */
5191         if (IS_E1HMF(bp)) {
5192                 int vn;
5193
5194                 /* During init there is no active link;
5195                    until link is up, set the link rate to 10Gbps */
5196                 bp->link_vars.line_speed = SPEED_10000;
5197                 bnx2x_init_port_minmax(bp);
5198
5199                 bnx2x_calc_vn_weight_sum(bp);
5200
5201                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5202                         bnx2x_init_vn_minmax(bp, 2*vn + port);
5203
5204                 /* Enable rate shaping and fairness */
5205                 bp->cmng.flags.cmng_enables =
5206                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5207                 if (bp->vn_weight_sum)
5208                         bp->cmng.flags.cmng_enables |=
5209                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5210                 else
5211                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5212                            "  fairness will be disabled\n");
5213         } else {
5214                 /* rate shaping and fairness are disabled */
5215                 DP(NETIF_MSG_IFUP,
5216                    "single function mode  minmax will be disabled\n");
5217         }
5218
5219
5220         /* Store it to internal memory */
5221         if (bp->port.pmf)
5222                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5223                         REG_WR(bp, BAR_XSTRORM_INTMEM +
5224                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5225                                ((u32 *)(&bp->cmng))[i]);
5226 }
5227
5228 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5229 {
5230         switch (load_code) {
5231         case FW_MSG_CODE_DRV_LOAD_COMMON:
5232                 bnx2x_init_internal_common(bp);
5233                 /* no break */
5234
5235         case FW_MSG_CODE_DRV_LOAD_PORT:
5236                 bnx2x_init_internal_port(bp);
5237                 /* no break */
5238
5239         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5240                 bnx2x_init_internal_func(bp);
5241                 break;
5242
5243         default:
5244                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5245                 break;
5246         }
5247 }
5248
5249 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5250 {
5251         int i;
5252
5253         for_each_queue(bp, i) {
5254                 struct bnx2x_fastpath *fp = &bp->fp[i];
5255
5256                 fp->bp = bp;
5257                 fp->state = BNX2X_FP_STATE_CLOSED;
5258                 fp->index = i;
5259                 fp->cl_id = BP_L_ID(bp) + i;
5260                 fp->sb_id = fp->cl_id;
5261                 /* Suitable Rx and Tx SBs are served by the same client */
5262                 if (i >= bp->num_rx_queues)
5263                         fp->cl_id -= bp->num_rx_queues;
5264                 DP(NETIF_MSG_IFUP,
5265                    "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
5266                    i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5267                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5268                               fp->sb_id);
5269                 bnx2x_update_fpsb_idx(fp);
5270         }
5271
5272         /* ensure status block indices were read */
5273         rmb();
5274
5275
5276         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5277                           DEF_SB_ID);
5278         bnx2x_update_dsb_idx(bp);
5279         bnx2x_update_coalesce(bp);
5280         bnx2x_init_rx_rings(bp);
5281         bnx2x_init_tx_ring(bp);
5282         bnx2x_init_sp_ring(bp);
5283         bnx2x_init_context(bp);
5284         bnx2x_init_internal(bp, load_code);
5285         bnx2x_init_ind_table(bp);
5286         bnx2x_stats_init(bp);
5287
5288         /* At this point, we are ready for interrupts */
5289         atomic_set(&bp->intr_sem, 0);
5290
5291         /* flush all before enabling interrupts */
5292         mb();
5293         mmiowb();
5294
5295         bnx2x_int_enable(bp);
5296
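        /* A fan-failure (SPIO5) attention may have been latched while
         * interrupts were masked; handle it now that they are enabled.
         */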
5297         /* Check for SPIO5 */
5298         bnx2x_attn_int_deasserted0(bp,
5299                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5300                                    AEU_INPUTS_ATTN_BITS_SPIO5);
5301 }
5302
5303 /* end of nic init */
5304
5305 /*
5306  * gzip service functions
5307  */
5308
5309 static int bnx2x_gunzip_init(struct bnx2x *bp)
5310 {
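        /* Allocate a DMA-coherent buffer for the decompressed firmware data,
         * plus a zlib stream and its inflate workspace.
         */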
5311         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5312                                               &bp->gunzip_mapping);
5313         if (bp->gunzip_buf == NULL)
5314                 goto gunzip_nomem1;
5315
5316         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5317         if (bp->strm == NULL)
5318                 goto gunzip_nomem2;
5319
5320         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5321                                       GFP_KERNEL);
5322         if (bp->strm->workspace == NULL)
5323                 goto gunzip_nomem3;
5324
5325         return 0;
5326
5327 gunzip_nomem3:
5328         kfree(bp->strm);
5329         bp->strm = NULL;
5330
5331 gunzip_nomem2:
5332         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5333                             bp->gunzip_mapping);
5334         bp->gunzip_buf = NULL;
5335
5336 gunzip_nomem1:
5337         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5338                " decompression\n", bp->dev->name);
5339         return -ENOMEM;
5340 }
5341
5342 static void bnx2x_gunzip_end(struct bnx2x *bp)
5343 {
5344         kfree(bp->strm->workspace);
5345
5346         kfree(bp->strm);
5347         bp->strm = NULL;
5348
5349         if (bp->gunzip_buf) {
5350                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5351                                     bp->gunzip_mapping);
5352                 bp->gunzip_buf = NULL;
5353         }
5354 }
5355
5356 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5357 {
5358         int n, rc;
5359
5360         /* check gzip header */
5361         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5362                 BNX2X_ERR("Bad gzip header\n");
5363                 return -EINVAL;
5364         }
5365
5366         n = 10;
5367
5368 #define FNAME                           0x8
5369
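        /* The fixed gzip header is 10 bytes; if the FNAME flag is set it is
         * followed by a NUL-terminated original file name, skipped below.
         */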
5370         if (zbuf[3] & FNAME)
5371                 while ((zbuf[n++] != 0) && (n < len));
5372
5373         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5374         bp->strm->avail_in = len - n;
5375         bp->strm->next_out = bp->gunzip_buf;
5376         bp->strm->avail_out = FW_BUF_SIZE;
5377
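        /* Negative windowBits: the stream is raw deflate data, since the gzip
         * header was already consumed above.
         */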
5378         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5379         if (rc != Z_OK)
5380                 return rc;
5381
5382         rc = zlib_inflate(bp->strm, Z_FINISH);
5383         if ((rc != Z_OK) && (rc != Z_STREAM_END))
5384                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5385                        bp->dev->name, bp->strm->msg);
5386
5387         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5388         if (bp->gunzip_outlen & 0x3)
5389                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5390                                     " gunzip_outlen (%d) not aligned\n",
5391                        bp->dev->name, bp->gunzip_outlen);
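        /* from here on the decompressed length is kept in 32-bit words */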
5392         bp->gunzip_outlen >>= 2;
5393
5394         zlib_inflateEnd(bp->strm);
5395
5396         if (rc == Z_STREAM_END)
5397                 return 0;
5398
5399         return rc;
5400 }
5401
5402 /* nic load/unload */
5403
5404 /*
5405  * General service functions
5406  */
5407
5408 /* send a NIG loopback debug packet */
5409 static void bnx2x_lb_pckt(struct bnx2x *bp)
5410 {
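        /* Each 3-dword DMAE write pushes 8 bytes of packet data plus a control
         * dword (SOP/EOP flags) into the NIG debug-packet interface, so the
         * two writes below inject one minimal 0x10-byte loopback frame.
         */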
5411         u32 wb_write[3];
5412
5413         /* Ethernet source and destination addresses */
5414         wb_write[0] = 0x55555555;
5415         wb_write[1] = 0x55555555;
5416         wb_write[2] = 0x20;             /* SOP */
5417         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5418
5419         /* NON-IP protocol */
5420         wb_write[0] = 0x09000000;
5421         wb_write[1] = 0x55555555;
5422         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
5423         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5424 }
5425
5426 /* Some of the internal memories are not directly readable from the
5427  * driver; to test them we send debug packets through the loopback path
5428  * and check the counters along the way.
5429  */
5430 static int bnx2x_int_mem_test(struct bnx2x *bp)
5431 {
5432         int factor;
5433         int count, i;
5434         u32 val = 0;
5435
5436         if (CHIP_REV_IS_FPGA(bp))
5437                 factor = 120;
5438         else if (CHIP_REV_IS_EMUL(bp))
5439                 factor = 200;
5440         else
5441                 factor = 1;
5442
5443         DP(NETIF_MSG_HW, "start part1\n");
5444
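        /* Part 1: inject a single loopback packet and verify it is counted by
         * both the NIG (0x10 octets) and the parser (1 packet).
         */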
5445         /* Disable inputs of parser neighbor blocks */
5446         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5447         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5448         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5449         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5450
5451         /*  Write 0 to parser credits for CFC search request */
5452         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5453
5454         /* send Ethernet packet */
5455         bnx2x_lb_pckt(bp);
5456
5457         /* TODO: should the NIG statistics be reset here? */
5458         /* Wait until NIG register shows 1 packet of size 0x10 */
5459         count = 1000 * factor;
5460         while (count) {
5461
5462                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5463                 val = *bnx2x_sp(bp, wb_data[0]);
5464                 if (val == 0x10)
5465                         break;
5466
5467                 msleep(10);
5468                 count--;
5469         }
5470         if (val != 0x10) {
5471                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5472                 return -1;
5473         }
5474
5475         /* Wait until PRS register shows 1 packet */
5476         count = 1000 * factor;
5477         while (count) {
5478                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5479                 if (val == 1)
5480                         break;
5481
5482                 msleep(10);
5483                 count--;
5484         }
5485         if (val != 0x1) {
5486                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5487                 return -2;
5488         }
5489
5490         /* Reset and init BRB, PRS */
5491         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5492         msleep(50);
5493         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5494         msleep(50);
5495         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5496         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5497
5498         DP(NETIF_MSG_HW, "part2\n");
5499
5500         /* Disable inputs of parser neighbor blocks */
5501         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5502         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5503         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5504         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5505
5506         /* Write 0 to parser credits for CFC search request */
5507         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5508
5509         /* send 10 Ethernet packets */
5510         for (i = 0; i < 10; i++)
5511                 bnx2x_lb_pckt(bp);
5512
5513         /* Wait until the NIG register shows 10 + 1 packets,
5514            i.e. a total of 11*0x10 = 0xb0 octets */
5515         count = 1000 * factor;
5516         while (count) {
5517
5518                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5519                 val = *bnx2x_sp(bp, wb_data[0]);
5520                 if (val == 0xb0)
5521                         break;
5522
5523                 msleep(10);
5524                 count--;
5525         }
5526         if (val != 0xb0) {
5527                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5528                 return -3;
5529         }
5530
5531         /* Wait until PRS register shows 2 packets */
5532         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5533         if (val != 2)
5534                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5535
5536         /* Write 1 to parser credits for CFC search request */
5537         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5538
5539         /* Wait until PRS register shows 3 packets */
5540         msleep(10 * factor);
5541         /* then check that the PRS packet counter reached 3 */
5542         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5543         if (val != 3)
5544                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5545
5546         /* clear NIG EOP FIFO */
5547         for (i = 0; i < 11; i++)
5548                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5549         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5550         if (val != 1) {
5551                 BNX2X_ERR("clear of NIG failed\n");
5552                 return -4;
5553         }
5554
5555         /* Reset and init BRB, PRS, NIG */
5556         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5557         msleep(50);
5558         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5559         msleep(50);
5560         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5561         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5562 #ifndef BCM_ISCSI
5563         /* set NIC mode */
5564         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5565 #endif
5566
5567         /* Enable inputs of parser neighbor blocks */
5568         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5569         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5570         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5571         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5572
5573         DP(NETIF_MSG_HW, "done\n");
5574
5575         return 0; /* OK */
5576 }
5577
5578 static void enable_blocks_attention(struct bnx2x *bp)
5579 {
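        /* Writing 0 to a block's INT_MASK register unmasks its attention
         * interrupts; the commented-out SEM/MISC masks are left untouched,
         * and PBF keeps bits 3 and 4 masked (0x18).
         */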
5580         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5581         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5582         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5583         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5584         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5585         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5586         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5587         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5588         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5589 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5590 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5591         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5592         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5593         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5594 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5595 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5596         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5597         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5598         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5599         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5600 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5601 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5602         if (CHIP_REV_IS_FPGA(bp))
5603                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5604         else
5605                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5606         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5607         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5608         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5609 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5610 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5611         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5612         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5613 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5614         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3,4 masked */
5615 }
5616
5617
5618 static void bnx2x_reset_common(struct bnx2x *bp)
5619 {
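        /* Clearing bits in the reset registers asserts reset for the
         * corresponding blocks; bnx2x_init_common() releases them again
         * through the matching _SET registers.
         */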
5620         /* reset_common */
5621         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5622                0xd3ffff7f);
5623         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5624 }
5625
5626
5627 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5628 {
5629         u32 val;
5630         u8 port;
5631         u8 is_required = 0;
5632
5633         val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5634               SHARED_HW_CFG_FAN_FAILURE_MASK;
5635
5636         if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5637                 is_required = 1;
5638
5639         /*
5640          * The fan failure mechanism is usually related to the PHY type since
5641          * the power consumption of the board is affected by the PHY. Currently,
5642          * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
5643          */
5644         else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
5645                 for (port = PORT_0; port < PORT_MAX; port++) {
5646                         u32 phy_type =
5647                                 SHMEM_RD(bp, dev_info.port_hw_config[port].
5648                                          external_phy_config) &
5649                                 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
5650                         is_required |=
5651                                 ((phy_type ==
5652                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
5653                                  (phy_type ==
5654                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
5655                                  (phy_type ==
5656                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
5657                 }
5658
5659         DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
5660
5661         if (is_required == 0)
5662                 return;
5663
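        /* SPIO5 is configured as an input and routed, via the SPIO event
         * enable, to the IGU as an attention; it is then handled by
         * bnx2x_attn_int_deasserted0() (see bnx2x_nic_init()).
         */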
5664         /* Fan failure is indicated by SPIO 5 */
5665         bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5666                        MISC_REGISTERS_SPIO_INPUT_HI_Z);
5667
5668         /* set to active low mode */
5669         val = REG_RD(bp, MISC_REG_SPIO_INT);
5670         val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5671                                 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5672         REG_WR(bp, MISC_REG_SPIO_INT, val);
5673
5674         /* enable interrupt to signal the IGU */
5675         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5676         val |= (1 << MISC_REGISTERS_SPIO_5);
5677         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5678 }
5679
5680 static int bnx2x_init_common(struct bnx2x *bp)
5681 {
5682         u32 val, i;
5683
5684         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
5685
5686         bnx2x_reset_common(bp);
5687         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5688         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5689
5690         bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
5691         if (CHIP_IS_E1H(bp))
5692                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5693
5694         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5695         msleep(30);
5696         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5697
5698         bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
5699         if (CHIP_IS_E1(bp)) {
5700                 /* enable HW interrupt from PXP on USDM overflow
5701                    bit 16 on INT_MASK_0 */
5702                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5703         }
5704
5705         bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
5706         bnx2x_init_pxp(bp);
5707
5708 #ifdef __BIG_ENDIAN
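        /* On big-endian hosts, configure the PXP2 clients' endianness/swap
         * modes so DMA data matches host byte order; the HC endianness mode
         * is explicitly kept at 0 (see below).
         */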
5709         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5710         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5711         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5712         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5713         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5714         /* make sure this value is 0 */
5715         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5716
5717 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5718         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5719         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5720         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5721         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5722 #endif
5723
5724         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5725 #ifdef BCM_ISCSI
5726         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5727         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5728         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5729 #endif
5730
5731         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5732                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5733
5734         /* let the HW do its magic ... */
5735         msleep(100);
5736         /* finish PXP init */
5737         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5738         if (val != 1) {
5739                 BNX2X_ERR("PXP2 CFG failed\n");
5740                 return -EBUSY;