bnx2x: Use firmware 5.2.13
drivers/net/bnx2x_main.c
1 /* bnx2x_main.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2010 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath and fastpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h>  /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
51 #include <linux/io.h>
52 #include <linux/stringify.h>
53
54
55 #include "bnx2x.h"
56 #include "bnx2x_init.h"
57 #include "bnx2x_init_ops.h"
58 #include "bnx2x_dump.h"
59
60 #define DRV_MODULE_VERSION      "1.52.1-5"
61 #define DRV_MODULE_RELDATE      "2009/11/09"
62 #define BNX2X_BC_VER            0x040200
63
64 #include <linux/firmware.h>
65 #include "bnx2x_fw_file_hdr.h"
66 /* FW files */
67 #define FW_FILE_VERSION                                 \
68         __stringify(BCM_5710_FW_MAJOR_VERSION) "."      \
69         __stringify(BCM_5710_FW_MINOR_VERSION) "."      \
70         __stringify(BCM_5710_FW_REVISION_VERSION) "."   \
71         __stringify(BCM_5710_FW_ENGINEERING_VERSION)
72 #define FW_FILE_NAME_E1         "bnx2x-e1-" FW_FILE_VERSION ".fw"
73 #define FW_FILE_NAME_E1H        "bnx2x-e1h-" FW_FILE_VERSION ".fw"
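/* With the 5.2.13 firmware this commit moves to, FW_FILE_VERSION expands to
 * "5.2.13.<eng>"; assuming an engineering version of 0, the files requested
 * via request_firmware() are "bnx2x-e1-5.2.13.0.fw" and
 * "bnx2x-e1h-5.2.13.0.fw".
 */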
74
75 /* Time in jiffies before concluding the transmitter is hung */
76 #define TX_TIMEOUT              (5*HZ)
77
78 static char version[] __devinitdata =
79         "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
80         DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
81
82 MODULE_AUTHOR("Eliezer Tamir");
83 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
84 MODULE_LICENSE("GPL");
85 MODULE_VERSION(DRV_MODULE_VERSION);
86 MODULE_FIRMWARE(FW_FILE_NAME_E1);
87 MODULE_FIRMWARE(FW_FILE_NAME_E1H);
88
89 static int multi_mode = 1;
90 module_param(multi_mode, int, 0);
91 MODULE_PARM_DESC(multi_mode, " Multi queue mode "
92                              "(0 Disable; 1 Enable (default))");
93
94 static int num_queues;
95 module_param(num_queues, int, 0);
96 MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
97                                 " (default is the number of CPUs)");
98
99 static int disable_tpa;
100 module_param(disable_tpa, int, 0);
101 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
102
103 static int int_mode;
104 module_param(int_mode, int, 0);
105 MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
106
107 static int dropless_fc;
108 module_param(dropless_fc, int, 0);
109 MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
110
111 static int poll;
112 module_param(poll, int, 0);
113 MODULE_PARM_DESC(poll, " Use polling (for debug)");
114
115 static int mrrs = -1;
116 module_param(mrrs, int, 0);
117 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
118
119 static int debug;
120 module_param(debug, int, 0);
121 MODULE_PARM_DESC(debug, " Default debug msglevel");
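/* Illustrative use of the parameters above (not part of the source): a setup
 * with multi-queue on, four queues, TPA disabled and MSI forced could be
 * loaded as
 *
 *	modprobe bnx2x multi_mode=1 num_queues=4 disable_tpa=1 int_mode=2
 *
 * All parameters default to 0 except multi_mode (1) and mrrs (-1).
 */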
122
123 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
124
125 static struct workqueue_struct *bnx2x_wq;
126
127 enum bnx2x_board_type {
128         BCM57710 = 0,
129         BCM57711 = 1,
130         BCM57711E = 2,
131 };
132
133 /* indexed by board_type, above */
134 static struct {
135         char *name;
136 } board_info[] __devinitdata = {
137         { "Broadcom NetXtreme II BCM57710 XGb" },
138         { "Broadcom NetXtreme II BCM57711 XGb" },
139         { "Broadcom NetXtreme II BCM57711E XGb" }
140 };
141
142
143 static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
144         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
145         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
146         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
147         { 0 }
148 };
149
150 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
151
152 /****************************************************************************
153 * General service functions
154 ****************************************************************************/
155
156 /* used only at init
157  * locking is done by mcp
158  */
159 void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
160 {
161         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
162         pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
163         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
164                                PCICFG_VENDOR_ID_OFFSET);
165 }
166
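/* indirect register read through the same PCI config space GRC window;
 * like bnx2x_reg_wr_ind() above, locking is left to the MCP
 */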
167 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
168 {
169         u32 val;
170
171         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
172         pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
173         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
174                                PCICFG_VENDOR_ID_OFFSET);
175
176         return val;
177 }
178
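/* one "GO" register per DMAE command cell, indexed by the command number
 * handed to bnx2x_post_dmae()
 */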
179 static const u32 dmae_reg_go_c[] = {
180         DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
181         DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
182         DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
183         DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
184 };
185
186 /* copy command into DMAE command memory and set DMAE command go */
187 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
188                             int idx)
189 {
190         u32 cmd_offset;
191         int i;
192
193         cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
194         for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
195                 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
196
197                 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
198                    idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
199         }
200         REG_WR(bp, dmae_reg_go_c[idx], 1);
201 }
202
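/* DMA len32 dwords from host memory at dma_addr to the GRC address dst_addr.
 * Falls back to indirect register writes while DMAE is not ready, otherwise
 * posts the command and busy-waits (under dmae_mutex) for the completion
 * word.  Typical use (illustrative only): copying a DMA-mapped host buffer
 * into device memory,
 *
 *	bnx2x_write_dmae(bp, mapping, grc_dst_addr, len_in_dwords);
 */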
203 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
204                       u32 len32)
205 {
206         struct dmae_command dmae;
207         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
208         int cnt = 200;
209
210         if (!bp->dmae_ready) {
211                 u32 *data = bnx2x_sp(bp, wb_data[0]);
212
213                 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
214                    "  using indirect\n", dst_addr, len32);
215                 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
216                 return;
217         }
218
219         memset(&dmae, 0, sizeof(struct dmae_command));
220
221         dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
222                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
223                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
224 #ifdef __BIG_ENDIAN
225                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
226 #else
227                        DMAE_CMD_ENDIANITY_DW_SWAP |
228 #endif
229                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
230                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
231         dmae.src_addr_lo = U64_LO(dma_addr);
232         dmae.src_addr_hi = U64_HI(dma_addr);
233         dmae.dst_addr_lo = dst_addr >> 2;
234         dmae.dst_addr_hi = 0;
235         dmae.len = len32;
236         dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
237         dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
238         dmae.comp_val = DMAE_COMP_VAL;
239
240         DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
241            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
242                     "dst_addr [%x:%08x (%08x)]\n"
243            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
244            dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
245            dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
246            dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
247         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
248            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
249            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
250
251         mutex_lock(&bp->dmae_mutex);
252
253         *wb_comp = 0;
254
255         bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
256
257         udelay(5);
258
259         while (*wb_comp != DMAE_COMP_VAL) {
260                 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
261
262                 if (!cnt) {
263                         BNX2X_ERR("DMAE timeout!\n");
264                         break;
265                 }
266                 cnt--;
267                 /* adjust delay for emulation/FPGA */
268                 if (CHIP_REV_IS_SLOW(bp))
269                         msleep(100);
270                 else
271                         udelay(5);
272         }
273
274         mutex_unlock(&bp->dmae_mutex);
275 }
276
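/* DMA len32 dwords from GRC address src_addr into the slowpath wb_data
 * buffer.  Mirrors bnx2x_write_dmae(): indirect reads are used while DMAE
 * is not ready, otherwise completion is polled under dmae_mutex.
 */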
277 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
278 {
279         struct dmae_command dmae;
280         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
281         int cnt = 200;
282
283         if (!bp->dmae_ready) {
284                 u32 *data = bnx2x_sp(bp, wb_data[0]);
285                 int i;
286
287                 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
288                    "  using indirect\n", src_addr, len32);
289                 for (i = 0; i < len32; i++)
290                         data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
291                 return;
292         }
293
294         memset(&dmae, 0, sizeof(struct dmae_command));
295
296         dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
297                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
298                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
299 #ifdef __BIG_ENDIAN
300                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
301 #else
302                        DMAE_CMD_ENDIANITY_DW_SWAP |
303 #endif
304                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
305                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
306         dmae.src_addr_lo = src_addr >> 2;
307         dmae.src_addr_hi = 0;
308         dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
309         dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
310         dmae.len = len32;
311         dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
312         dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
313         dmae.comp_val = DMAE_COMP_VAL;
314
315         DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
316            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
317                     "dst_addr [%x:%08x (%08x)]\n"
318            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
319            dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
320            dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
321            dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
322
323         mutex_lock(&bp->dmae_mutex);
324
325         memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
326         *wb_comp = 0;
327
328         bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
329
330         udelay(5);
331
332         while (*wb_comp != DMAE_COMP_VAL) {
333
334                 if (!cnt) {
335                         BNX2X_ERR("DMAE timeout!\n");
336                         break;
337                 }
338                 cnt--;
339                 /* adjust delay for emulation/FPGA */
340                 if (CHIP_REV_IS_SLOW(bp))
341                         msleep(100);
342                 else
343                         udelay(5);
344         }
345         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
346            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
347            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
348
349         mutex_unlock(&bp->dmae_mutex);
350 }
351
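/* DMAE-write a buffer of arbitrary length by splitting it into chunks of
 * at most DMAE_LEN32_WR_MAX dwords
 */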
352 void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
353                                u32 addr, u32 len)
354 {
355         int offset = 0;
356
357         while (len > DMAE_LEN32_WR_MAX) {
358                 bnx2x_write_dmae(bp, phys_addr + offset,
359                                  addr + offset, DMAE_LEN32_WR_MAX);
360                 offset += DMAE_LEN32_WR_MAX * 4;
361                 len -= DMAE_LEN32_WR_MAX;
362         }
363
364         bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
365 }
366
367 /* used only for slowpath so not inlined */
368 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
369 {
370         u32 wb_write[2];
371
372         wb_write[0] = val_hi;
373         wb_write[1] = val_lo;
374         REG_WR_DMAE(bp, reg, wb_write, 2);
375 }
376
377 #ifdef USE_WB_RD
378 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
379 {
380         u32 wb_data[2];
381
382         REG_RD_DMAE(bp, reg, wb_data, 2);
383
384         return HILO_U64(wb_data[0], wb_data[1]);
385 }
386 #endif
387
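/* scan the XSTORM/TSTORM/CSTORM/USTORM assert lists and print every valid
 * entry; returns the number of asserts found
 */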
388 static int bnx2x_mc_assert(struct bnx2x *bp)
389 {
390         char last_idx;
391         int i, rc = 0;
392         u32 row0, row1, row2, row3;
393
394         /* XSTORM */
395         last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
396                            XSTORM_ASSERT_LIST_INDEX_OFFSET);
397         if (last_idx)
398                 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
399
400         /* print the asserts */
401         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
402
403                 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
404                               XSTORM_ASSERT_LIST_OFFSET(i));
405                 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
406                               XSTORM_ASSERT_LIST_OFFSET(i) + 4);
407                 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
408                               XSTORM_ASSERT_LIST_OFFSET(i) + 8);
409                 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
410                               XSTORM_ASSERT_LIST_OFFSET(i) + 12);
411
412                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
413                         BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
414                                   " 0x%08x 0x%08x 0x%08x\n",
415                                   i, row3, row2, row1, row0);
416                         rc++;
417                 } else {
418                         break;
419                 }
420         }
421
422         /* TSTORM */
423         last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
424                            TSTORM_ASSERT_LIST_INDEX_OFFSET);
425         if (last_idx)
426                 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
427
428         /* print the asserts */
429         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
430
431                 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
432                               TSTORM_ASSERT_LIST_OFFSET(i));
433                 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
434                               TSTORM_ASSERT_LIST_OFFSET(i) + 4);
435                 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
436                               TSTORM_ASSERT_LIST_OFFSET(i) + 8);
437                 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
438                               TSTORM_ASSERT_LIST_OFFSET(i) + 12);
439
440                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
441                         BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
442                                   " 0x%08x 0x%08x 0x%08x\n",
443                                   i, row3, row2, row1, row0);
444                         rc++;
445                 } else {
446                         break;
447                 }
448         }
449
450         /* CSTORM */
451         last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
452                            CSTORM_ASSERT_LIST_INDEX_OFFSET);
453         if (last_idx)
454                 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
455
456         /* print the asserts */
457         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
458
459                 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
460                               CSTORM_ASSERT_LIST_OFFSET(i));
461                 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
462                               CSTORM_ASSERT_LIST_OFFSET(i) + 4);
463                 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
464                               CSTORM_ASSERT_LIST_OFFSET(i) + 8);
465                 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
466                               CSTORM_ASSERT_LIST_OFFSET(i) + 12);
467
468                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
469                         BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
470                                   " 0x%08x 0x%08x 0x%08x\n",
471                                   i, row3, row2, row1, row0);
472                         rc++;
473                 } else {
474                         break;
475                 }
476         }
477
478         /* USTORM */
479         last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
480                            USTORM_ASSERT_LIST_INDEX_OFFSET);
481         if (last_idx)
482                 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
483
484         /* print the asserts */
485         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
486
487                 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
488                               USTORM_ASSERT_LIST_OFFSET(i));
489                 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
490                               USTORM_ASSERT_LIST_OFFSET(i) + 4);
491                 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
492                               USTORM_ASSERT_LIST_OFFSET(i) + 8);
493                 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
494                               USTORM_ASSERT_LIST_OFFSET(i) + 12);
495
496                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
497                         BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
498                                   " 0x%08x 0x%08x 0x%08x\n",
499                                   i, row3, row2, row1, row0);
500                         rc++;
501                 } else {
502                         break;
503                 }
504         }
505
506         return rc;
507 }
508
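/* print the MCP trace buffer from the scratchpad, starting right after the
 * last written mark and wrapping around the buffer
 */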
509 static void bnx2x_fw_dump(struct bnx2x *bp)
510 {
511         u32 mark, offset;
512         __be32 data[9];
513         int word;
514
515         mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
516         mark = ((mark + 0x3) & ~0x3);
517         printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);
518
519         printk(KERN_ERR PFX);
520         for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
521                 for (word = 0; word < 8; word++)
522                         data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
523                                                   offset + 4*word));
524                 data[8] = 0x0;
525                 printk(KERN_CONT "%s", (char *)data);
526         }
527         for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
528                 for (word = 0; word < 8; word++)
529                         data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
530                                                   offset + 4*word));
531                 data[8] = 0x0;
532                 printk(KERN_CONT "%s", (char *)data);
533         }
534         printk(KERN_ERR PFX "end of fw dump\n");
535 }
536
537 static void bnx2x_panic_dump(struct bnx2x *bp)
538 {
539         int i;
540         u16 j, start, end;
541
542         bp->stats_state = STATS_STATE_DISABLED;
543         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
544
545         BNX2X_ERR("begin crash dump -----------------\n");
546
547         /* Indices */
548         /* Common */
549         BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
550                   "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
551                   "  spq_prod_idx(%u)\n",
552                   bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
553                   bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
554
555         /* Rx */
556         for_each_queue(bp, i) {
557                 struct bnx2x_fastpath *fp = &bp->fp[i];
558
559                 BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
560                           "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
561                           "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
562                           i, fp->rx_bd_prod, fp->rx_bd_cons,
563                           le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
564                           fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
565                 BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
566                           "  fp_u_idx(%x) *sb_u_idx(%x)\n",
567                           fp->rx_sge_prod, fp->last_max_sge,
568                           le16_to_cpu(fp->fp_u_idx),
569                           fp->status_blk->u_status_block.status_block_index);
570         }
571
572         /* Tx */
573         for_each_queue(bp, i) {
574                 struct bnx2x_fastpath *fp = &bp->fp[i];
575
576                 BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
577                           "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
578                           i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
579                           fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
580                 BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
581                           "  tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
582                           fp->status_blk->c_status_block.status_block_index,
583                           fp->tx_db.data.prod);
584         }
585
586         /* Rings */
587         /* Rx */
588         for_each_queue(bp, i) {
589                 struct bnx2x_fastpath *fp = &bp->fp[i];
590
591                 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
592                 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
593                 for (j = start; j != end; j = RX_BD(j + 1)) {
594                         u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
595                         struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
596
597                         BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
598                                   i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
599                 }
600
601                 start = RX_SGE(fp->rx_sge_prod);
602                 end = RX_SGE(fp->last_max_sge);
603                 for (j = start; j != end; j = RX_SGE(j + 1)) {
604                         u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
605                         struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
606
607                         BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
608                                   i, j, rx_sge[1], rx_sge[0], sw_page->page);
609                 }
610
611                 start = RCQ_BD(fp->rx_comp_cons - 10);
612                 end = RCQ_BD(fp->rx_comp_cons + 503);
613                 for (j = start; j != end; j = RCQ_BD(j + 1)) {
614                         u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
615
616                         BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
617                                   i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
618                 }
619         }
620
621         /* Tx */
622         for_each_queue(bp, i) {
623                 struct bnx2x_fastpath *fp = &bp->fp[i];
624
625                 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
626                 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
627                 for (j = start; j != end; j = TX_BD(j + 1)) {
628                         struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
629
630                         BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
631                                   i, j, sw_bd->skb, sw_bd->first_bd);
632                 }
633
634                 start = TX_BD(fp->tx_bd_cons - 10);
635                 end = TX_BD(fp->tx_bd_cons + 254);
636                 for (j = start; j != end; j = TX_BD(j + 1)) {
637                         u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
638
639                         BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
640                                   i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
641                 }
642         }
643
644         bnx2x_fw_dump(bp);
645         bnx2x_mc_assert(bp);
646         BNX2X_ERR("end crash dump -----------------\n");
647 }
648
649 static void bnx2x_int_enable(struct bnx2x *bp)
650 {
651         int port = BP_PORT(bp);
652         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
653         u32 val = REG_RD(bp, addr);
654         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
655         int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
656
657         if (msix) {
658                 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
659                          HC_CONFIG_0_REG_INT_LINE_EN_0);
660                 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
661                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
662         } else if (msi) {
663                 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
664                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
665                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
666                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
667         } else {
668                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
669                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
670                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
671                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
672
673                 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
674                    val, port, addr);
675
676                 REG_WR(bp, addr, val);
677
678                 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
679         }
680
681         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
682            val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
683
684         REG_WR(bp, addr, val);
685         /*
686          * Ensure that HC_CONFIG is written before leading/trailing edge config
687          */
688         mmiowb();
689         barrier();
690
691         if (CHIP_IS_E1H(bp)) {
692                 /* init leading/trailing edge */
693                 if (IS_E1HMF(bp)) {
694                         val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
695                         if (bp->port.pmf)
696                                 /* enable nig and gpio3 attention */
697                                 val |= 0x1100;
698                 } else
699                         val = 0xffff;
700
701                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
702                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
703         }
704
705         /* Make sure that interrupts are indeed enabled from here on */
706         mmiowb();
707 }
708
709 static void bnx2x_int_disable(struct bnx2x *bp)
710 {
711         int port = BP_PORT(bp);
712         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
713         u32 val = REG_RD(bp, addr);
714
715         val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
716                  HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
717                  HC_CONFIG_0_REG_INT_LINE_EN_0 |
718                  HC_CONFIG_0_REG_ATTN_BIT_EN_0);
719
720         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
721            val, port, addr);
722
723         /* flush all outstanding writes */
724         mmiowb();
725
726         REG_WR(bp, addr, val);
727         if (REG_RD(bp, addr) != val)
728                 BNX2X_ERR("BUG! proper val not read from IGU!\n");
729 }
730
731 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
732 {
733         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
734         int i, offset;
735
736         /* disable interrupt handling */
737         atomic_inc(&bp->intr_sem);
738         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
739
740         if (disable_hw)
741                 /* prevent the HW from sending interrupts */
742                 bnx2x_int_disable(bp);
743
744         /* make sure all ISRs are done */
745         if (msix) {
746                 synchronize_irq(bp->msix_table[0].vector);
747                 offset = 1;
748 #ifdef BCM_CNIC
749                 offset++;
750 #endif
751                 for_each_queue(bp, i)
752                         synchronize_irq(bp->msix_table[i + offset].vector);
753         } else
754                 synchronize_irq(bp->pdev->irq);
755
756         /* make sure sp_task is not running */
757         cancel_delayed_work(&bp->sp_task);
758         flush_workqueue(bnx2x_wq);
759 }
760
761 /* fast path */
762
763 /*
764  * General service functions
765  */
766
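/* acknowledge a status block index to the IGU for the given sb_id/storm and
 * optionally re-enable the interrupt (controlled by op and update)
 */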
767 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
768                                 u8 storm, u16 index, u8 op, u8 update)
769 {
770         u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
771                        COMMAND_REG_INT_ACK);
772         struct igu_ack_register igu_ack;
773
774         igu_ack.status_block_index = index;
775         igu_ack.sb_id_and_flags =
776                         ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
777                          (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
778                          (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
779                          (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
780
781         DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
782            (*(u32 *)&igu_ack), hc_addr);
783         REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
784
785         /* Make sure that ACK is written */
786         mmiowb();
787         barrier();
788 }
789
790 static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
791 {
792         struct host_status_block *fpsb = fp->status_blk;
793
794         barrier(); /* status block is written to by the chip */
795         fp->fp_c_idx = fpsb->c_status_block.status_block_index;
796         fp->fp_u_idx = fpsb->u_status_block.status_block_index;
797 }
798
799 static u16 bnx2x_ack_int(struct bnx2x *bp)
800 {
801         u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
802                        COMMAND_REG_SIMD_MASK);
803         u32 result = REG_RD(bp, hc_addr);
804
805         DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
806            result, hc_addr);
807
808         return result;
809 }
810
811
812 /*
813  * fast path service functions
814  */
815
816 static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
817 {
818         /* Tell compiler that consumer and producer can change */
819         barrier();
820         return (fp->tx_pkt_prod != fp->tx_pkt_cons);
821 }
822
823 /* free skb in the packet ring at pos idx
824  * return idx of last bd freed
825  */
826 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
827                              u16 idx)
828 {
829         struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
830         struct eth_tx_start_bd *tx_start_bd;
831         struct eth_tx_bd *tx_data_bd;
832         struct sk_buff *skb = tx_buf->skb;
833         u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
834         int nbd;
835
836         /* prefetch skb end pointer to speed up dev_kfree_skb() */
837         prefetch(&skb->end);
838
839         DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
840            idx, tx_buf, skb);
841
842         /* unmap first bd */
843         DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
844         tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
845         pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
846                          BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
847
848         nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
849 #ifdef BNX2X_STOP_ON_ERROR
850         if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
851                 BNX2X_ERR("BAD nbd!\n");
852                 bnx2x_panic();
853         }
854 #endif
855         new_cons = nbd + tx_buf->first_bd;
856
857         /* Get the next bd */
858         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
859
860         /* Skip a parse bd... */
861         --nbd;
862         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
863
864         /* ...and the TSO split header bd since they have no mapping */
865         if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
866                 --nbd;
867                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
868         }
869
870         /* now free frags */
871         while (nbd > 0) {
872
873                 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
874                 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
875                 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
876                                BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
877                 if (--nbd)
878                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
879         }
880
881         /* release skb */
882         WARN_ON(!skb);
883         dev_kfree_skb(skb);
884         tx_buf->first_bd = 0;
885         tx_buf->skb = NULL;
886
887         return new_cons;
888 }
889
890 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
891 {
892         s16 used;
893         u16 prod;
894         u16 cons;
895
896         barrier(); /* Tell compiler that prod and cons can change */
897         prod = fp->tx_bd_prod;
898         cons = fp->tx_bd_cons;
899
900         /* NUM_TX_RINGS = number of "next-page" entries
901            It will be used as a threshold */
902         used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
903
904 #ifdef BNX2X_STOP_ON_ERROR
905         WARN_ON(used < 0);
906         WARN_ON(used > fp->bp->tx_ring_size);
907         WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
908 #endif
909
910         return (s16)(fp->bp->tx_ring_size) - used;
911 }
912
913 static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
914 {
915         u16 hw_cons;
916
917         /* Tell compiler that status block fields can change */
918         barrier();
919         hw_cons = le16_to_cpu(*fp->tx_cons_sb);
920         return hw_cons != fp->tx_pkt_cons;
921 }
922
923 static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
924 {
925         struct bnx2x *bp = fp->bp;
926         struct netdev_queue *txq;
927         u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
928
929 #ifdef BNX2X_STOP_ON_ERROR
930         if (unlikely(bp->panic))
931                 return -1;
932 #endif
933
934         txq = netdev_get_tx_queue(bp->dev, fp->index);
935         hw_cons = le16_to_cpu(*fp->tx_cons_sb);
936         sw_cons = fp->tx_pkt_cons;
937
938         while (sw_cons != hw_cons) {
939                 u16 pkt_cons;
940
941                 pkt_cons = TX_BD(sw_cons);
942
943                 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
944
945                 DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
946                    hw_cons, sw_cons, pkt_cons);
947
948 /*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
949                         rmb();
950                         prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
951                 }
952 */
953                 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
954                 sw_cons++;
955         }
956
957         fp->tx_pkt_cons = sw_cons;
958         fp->tx_bd_cons = bd_cons;
959
960         /* TBD need a thresh? */
961         if (unlikely(netif_tx_queue_stopped(txq))) {
962
963                 /* Need to make the tx_bd_cons update visible to start_xmit()
964                  * before checking for netif_tx_queue_stopped().  Without the
965                  * memory barrier, there is a small possibility that
966                  * start_xmit() will miss it and cause the queue to be stopped
967                  * forever.
968                  */
969                 smp_mb();
970
971                 if ((netif_tx_queue_stopped(txq)) &&
972                     (bp->state == BNX2X_STATE_OPEN) &&
973                     (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
974                         netif_tx_wake_queue(txq);
975         }
976         return 0;
977 }
978
979 #ifdef BCM_CNIC
980 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
981 #endif
982
983 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
984                            union eth_rx_cqe *rr_cqe)
985 {
986         struct bnx2x *bp = fp->bp;
987         int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
988         int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
989
990         DP(BNX2X_MSG_SP,
991            "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
992            fp->index, cid, command, bp->state,
993            rr_cqe->ramrod_cqe.ramrod_type);
994
995         bp->spq_left++;
996
997         if (fp->index) {
998                 switch (command | fp->state) {
999                 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
1000                                                 BNX2X_FP_STATE_OPENING):
1001                         DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
1002                            cid);
1003                         fp->state = BNX2X_FP_STATE_OPEN;
1004                         break;
1005
1006                 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
1007                         DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
1008                            cid);
1009                         fp->state = BNX2X_FP_STATE_HALTED;
1010                         break;
1011
1012                 default:
1013                         BNX2X_ERR("unexpected MC reply (%d)  "
1014                                   "fp->state is %x\n", command, fp->state);
1015                         break;
1016                 }
1017                 mb(); /* force bnx2x_wait_ramrod() to see the change */
1018                 return;
1019         }
1020
1021         switch (command | bp->state) {
1022         case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
1023                 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
1024                 bp->state = BNX2X_STATE_OPEN;
1025                 break;
1026
1027         case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
1028                 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
1029                 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
1030                 fp->state = BNX2X_FP_STATE_HALTED;
1031                 break;
1032
1033         case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
1034                 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
1035                 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
1036                 break;
1037
1038 #ifdef BCM_CNIC
1039         case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
1040                 DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
1041                 bnx2x_cnic_cfc_comp(bp, cid);
1042                 break;
1043 #endif
1044
1045         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
1046         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
1047                 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
1048                 bp->set_mac_pending--;
1049                 smp_wmb();
1050                 break;
1051
1052         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
1053                 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
1054                 bp->set_mac_pending--;
1055                 smp_wmb();
1056                 break;
1057
1058         default:
1059                 BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
1060                           command, bp->state);
1061                 break;
1062         }
1063         mb(); /* force bnx2x_wait_ramrod() to see the change */
1064 }
1065
1066 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1067                                      struct bnx2x_fastpath *fp, u16 index)
1068 {
1069         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1070         struct page *page = sw_buf->page;
1071         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1072
1073         /* Skip "next page" elements */
1074         if (!page)
1075                 return;
1076
1077         pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
1078                        SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1079         __free_pages(page, PAGES_PER_SGE_SHIFT);
1080
1081         sw_buf->page = NULL;
1082         sge->addr_hi = 0;
1083         sge->addr_lo = 0;
1084 }
1085
1086 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1087                                            struct bnx2x_fastpath *fp, int last)
1088 {
1089         int i;
1090
1091         for (i = 0; i < last; i++)
1092                 bnx2x_free_rx_sge(bp, fp, i);
1093 }
1094
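/* allocate and map a compound page of PAGES_PER_SGE pages and hook it into
 * the SGE ring at 'index'; returns -ENOMEM if allocation or mapping fails
 */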
1095 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1096                                      struct bnx2x_fastpath *fp, u16 index)
1097 {
1098         struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1099         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1100         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1101         dma_addr_t mapping;
1102
1103         if (unlikely(page == NULL))
1104                 return -ENOMEM;
1105
1106         mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
1107                                PCI_DMA_FROMDEVICE);
1108         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1109                 __free_pages(page, PAGES_PER_SGE_SHIFT);
1110                 return -ENOMEM;
1111         }
1112
1113         sw_buf->page = page;
1114         pci_unmap_addr_set(sw_buf, mapping, mapping);
1115
1116         sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1117         sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1118
1119         return 0;
1120 }
1121
1122 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1123                                      struct bnx2x_fastpath *fp, u16 index)
1124 {
1125         struct sk_buff *skb;
1126         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1127         struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1128         dma_addr_t mapping;
1129
1130         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1131         if (unlikely(skb == NULL))
1132                 return -ENOMEM;
1133
1134         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
1135                                  PCI_DMA_FROMDEVICE);
1136         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1137                 dev_kfree_skb(skb);
1138                 return -ENOMEM;
1139         }
1140
1141         rx_buf->skb = skb;
1142         pci_unmap_addr_set(rx_buf, mapping, mapping);
1143
1144         rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1145         rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1146
1147         return 0;
1148 }
1149
1150 /* note that we are not allocating a new skb,
1151  * we are just moving one from cons to prod
1152  * we are not creating a new mapping,
1153  * so there is no need to check for dma_mapping_error().
1154  */
1155 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1156                                struct sk_buff *skb, u16 cons, u16 prod)
1157 {
1158         struct bnx2x *bp = fp->bp;
1159         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1160         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1161         struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1162         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1163
1164         pci_dma_sync_single_for_device(bp->pdev,
1165                                        pci_unmap_addr(cons_rx_buf, mapping),
1166                                        RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1167
1168         prod_rx_buf->skb = cons_rx_buf->skb;
1169         pci_unmap_addr_set(prod_rx_buf, mapping,
1170                            pci_unmap_addr(cons_rx_buf, mapping));
1171         *prod_bd = *cons_bd;
1172 }
1173
1174 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1175                                              u16 idx)
1176 {
1177         u16 last_max = fp->last_max_sge;
1178
1179         if (SUB_S16(idx, last_max) > 0)
1180                 fp->last_max_sge = idx;
1181 }
1182
1183 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1184 {
1185         int i, j;
1186
1187         for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1188                 int idx = RX_SGE_CNT * i - 1;
1189
1190                 for (j = 0; j < 2; j++) {
1191                         SGE_MASK_CLEAR_BIT(fp, idx);
1192                         idx--;
1193                 }
1194         }
1195 }
1196
1197 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1198                                   struct eth_fast_path_rx_cqe *fp_cqe)
1199 {
1200         struct bnx2x *bp = fp->bp;
1201         u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1202                                      le16_to_cpu(fp_cqe->len_on_bd)) >>
1203                       SGE_PAGE_SHIFT;
1204         u16 last_max, last_elem, first_elem;
1205         u16 delta = 0;
1206         u16 i;
1207
1208         if (!sge_len)
1209                 return;
1210
1211         /* First mark all used pages */
1212         for (i = 0; i < sge_len; i++)
1213                 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1214
1215         DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1216            sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1217
1218         /* Here we assume that the last SGE index is the biggest */
1219         prefetch((void *)(fp->sge_mask));
1220         bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1221
1222         last_max = RX_SGE(fp->last_max_sge);
1223         last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1224         first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1225
1226         /* If ring is not full */
1227         if (last_elem + 1 != first_elem)
1228                 last_elem++;
1229
1230         /* Now update the prod */
1231         for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1232                 if (likely(fp->sge_mask[i]))
1233                         break;
1234
1235                 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1236                 delta += RX_SGE_MASK_ELEM_SZ;
1237         }
1238
1239         if (delta > 0) {
1240                 fp->rx_sge_prod += delta;
1241                 /* clear page-end entries */
1242                 bnx2x_clear_sge_mask_next_elems(fp);
1243         }
1244
1245         DP(NETIF_MSG_RX_STATUS,
1246            "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
1247            fp->last_max_sge, fp->rx_sge_prod);
1248 }
1249
1250 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1251 {
1252         /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1253         memset(fp->sge_mask, 0xff,
1254                (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1255
1256         /* Clear the last two indices in each page:
1257            these are the indices that correspond to the "next" element,
1258            hence will never be indicated and should be removed from
1259            the calculations. */
1260         bnx2x_clear_sge_mask_next_elems(fp);
1261 }
1262
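/* start a TPA (LRO) aggregation: the skb being aggregated is parked in the
 * per-queue tpa_pool bin and the bin's spare skb is mapped onto the ring at
 * 'prod' so the BD chain stays fully populated
 */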
1263 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1264                             struct sk_buff *skb, u16 cons, u16 prod)
1265 {
1266         struct bnx2x *bp = fp->bp;
1267         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1268         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1269         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1270         dma_addr_t mapping;
1271
1272         /* move empty skb from pool to prod and map it */
1273         prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1274         mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1275                                  bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1276         pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1277
1278         /* move partial skb from cons to pool (don't unmap yet) */
1279         fp->tpa_pool[queue] = *cons_rx_buf;
1280
1281         /* mark bin state as start - print error if current state != stop */
1282         if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1283                 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1284
1285         fp->tpa_state[queue] = BNX2X_TPA_START;
1286
1287         /* point prod_bd to new skb */
1288         prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1289         prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1290
1291 #ifdef BNX2X_STOP_ON_ERROR
1292         fp->tpa_queue_used |= (1 << queue);
1293 #ifdef __powerpc64__
1294         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1295 #else
1296         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1297 #endif
1298            fp->tpa_queue_used);
1299 #endif
1300 }
1301
1302 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1303                                struct sk_buff *skb,
1304                                struct eth_fast_path_rx_cqe *fp_cqe,
1305                                u16 cqe_idx)
1306 {
1307         struct sw_rx_page *rx_pg, old_rx_pg;
1308         u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1309         u32 i, frag_len, frag_size, pages;
1310         int err;
1311         int j;
1312
1313         frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1314         pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1315
1316         /* This is needed in order to enable forwarding support */
1317         if (frag_size)
1318                 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1319                                                max(frag_size, (u32)len_on_bd));
1320
1321 #ifdef BNX2X_STOP_ON_ERROR
1322         if (pages >
1323             min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
1324                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1325                           pages, cqe_idx);
1326                 BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
1327                           fp_cqe->pkt_len, len_on_bd);
1328                 bnx2x_panic();
1329                 return -EINVAL;
1330         }
1331 #endif
1332
1333         /* Run through the SGL and compose the fragmented skb */
1334         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1335                 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1336
1337                 /* FW gives the indices of the SGE as if the ring is an array
1338                    (meaning that "next" element will consume 2 indices) */
1339                 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1340                 rx_pg = &fp->rx_page_ring[sge_idx];
1341                 old_rx_pg = *rx_pg;
1342
1343                 /* If we fail to allocate a substitute page, we simply stop
1344                    where we are and drop the whole packet */
1345                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1346                 if (unlikely(err)) {
1347                         fp->eth_q_stats.rx_skb_alloc_failed++;
1348                         return err;
1349                 }
1350
1351                 /* Unmap the page as we are going to pass it to the stack */
1352                 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1353                               SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1354
1355                 /* Add one frag and update the appropriate fields in the skb */
1356                 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1357
1358                 skb->data_len += frag_len;
1359                 skb->truesize += frag_len;
1360                 skb->len += frag_len;
1361
1362                 frag_size -= frag_len;
1363         }
1364
1365         return 0;
1366 }
1367
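/* complete a TPA aggregation: fix up the IP checksum, attach the SGE pages
 * as frags and pass the skb to the stack (dropping it if a replacement skb
 * or the frag pages cannot be allocated), then mark the bin as stopped
 */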
1368 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1369                            u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1370                            u16 cqe_idx)
1371 {
1372         struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1373         struct sk_buff *skb = rx_buf->skb;
1374         /* alloc new skb */
1375         struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1376
1377         /* Unmap skb in the pool anyway, as we are going to change
1378            pool entry status to BNX2X_TPA_STOP even if new skb allocation
1379            fails. */
1380         pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1381                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1382
1383         if (likely(new_skb)) {
1384                 /* fix ip xsum and give it to the stack */
1385                 /* (no need to map the new skb) */
1386 #ifdef BCM_VLAN
1387                 int is_vlan_cqe =
1388                         (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1389                          PARSING_FLAGS_VLAN);
1390                 int is_not_hwaccel_vlan_cqe =
1391                         (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1392 #endif
1393
1394                 prefetch(skb);
1395                 prefetch(((char *)(skb)) + 128);
1396
1397 #ifdef BNX2X_STOP_ON_ERROR
1398                 if (pad + len > bp->rx_buf_size) {
1399                         BNX2X_ERR("skb_put is about to fail...  "
1400                                   "pad %d  len %d  rx_buf_size %d\n",
1401                                   pad, len, bp->rx_buf_size);
1402                         bnx2x_panic();
1403                         return;
1404                 }
1405 #endif
1406
1407                 skb_reserve(skb, pad);
1408                 skb_put(skb, len);
1409
1410                 skb->protocol = eth_type_trans(skb, bp->dev);
1411                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1412
1413                 {
1414                         struct iphdr *iph;
1415
1416                         iph = (struct iphdr *)skb->data;
1417 #ifdef BCM_VLAN
1418                         /* If there is no Rx VLAN offloading -
1419                            take the VLAN tag into account */
1420                         if (unlikely(is_not_hwaccel_vlan_cqe))
1421                                 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1422 #endif
1423                         iph->check = 0;
1424                         iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1425                 }
1426
1427                 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1428                                          &cqe->fast_path_cqe, cqe_idx)) {
1429 #ifdef BCM_VLAN
1430                         if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1431                             (!is_not_hwaccel_vlan_cqe))
1432                                 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1433                                                 le16_to_cpu(cqe->fast_path_cqe.
1434                                                             vlan_tag));
1435                         else
1436 #endif
1437                                 netif_receive_skb(skb);
1438                 } else {
1439                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1440                            " - dropping packet!\n");
1441                         dev_kfree_skb(skb);
1442                 }
1443
1444
1445                 /* put new skb in bin */
1446                 fp->tpa_pool[queue].skb = new_skb;
1447
1448         } else {
1449                 /* else drop the packet and keep the buffer in the bin */
1450                 DP(NETIF_MSG_RX_STATUS,
1451                    "Failed to allocate new skb - dropping packet!\n");
1452                 fp->eth_q_stats.rx_skb_alloc_failed++;
1453         }
1454
1455         fp->tpa_state[queue] = BNX2X_TPA_STOP;
1456 }
1457
1458 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1459                                         struct bnx2x_fastpath *fp,
1460                                         u16 bd_prod, u16 rx_comp_prod,
1461                                         u16 rx_sge_prod)
1462 {
1463         struct ustorm_eth_rx_producers rx_prods = {0};
1464         int i;
1465
1466         /* Update producers */
1467         rx_prods.bd_prod = bd_prod;
1468         rx_prods.cqe_prod = rx_comp_prod;
1469         rx_prods.sge_prod = rx_sge_prod;
1470
1471         /*
1472          * Make sure that the BD and SGE data is updated before updating the
1473          * producers since FW might read the BD/SGE right after the producer
1474          * is updated.
1475          * This is only applicable for weak-ordered memory model archs such
1476          * as IA-64. The following barrier is also mandatory since the FW
1477          * assumes BDs must have buffers.
1478          */
1479         wmb();
1480
1481         for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1482                 REG_WR(bp, BAR_USTRORM_INTMEM +
1483                        USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1484                        ((u32 *)&rx_prods)[i]);
1485
1486         mmiowb(); /* keep prod updates ordered */
1487
1488         DP(NETIF_MSG_RX_STATUS,
1489            "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
1490            fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1491 }
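
/*
 * In other words, the REG_WR() loop above copies the rx_prods structure
 * word by word into the USTORM internal memory at
 * BAR_USTRORM_INTMEM + USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id);
 * the wmb() makes the BD/SGE contents visible before the FW can observe
 * the new producers, and mmiowb() keeps the producer writes ordered.
 */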
1492
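/*
 * bnx2x_rx_int() below walks the receive completion queue until it catches
 * up with the HW completion consumer or the NAPI budget is exhausted:
 * slowpath CQEs are handed to bnx2x_sp_event(), TPA start/end CQEs go
 * through bnx2x_tpa_start()/bnx2x_tpa_stop(), small frames are copied into
 * a fresh skb when the MTU exceeds ETH_MAX_PACKET_SIZE, and the new BD,
 * CQE and SGE producers are finally pushed to the FW via
 * bnx2x_update_rx_prod().
 */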
1493 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1494 {
1495         struct bnx2x *bp = fp->bp;
1496         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1497         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1498         int rx_pkt = 0;
1499
1500 #ifdef BNX2X_STOP_ON_ERROR
1501         if (unlikely(bp->panic))
1502                 return 0;
1503 #endif
1504
1505         /* The CQ "next element" is the same size as a regular element,
1506            that's why it's OK here */
1507         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1508         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1509                 hw_comp_cons++;
1510
1511         bd_cons = fp->rx_bd_cons;
1512         bd_prod = fp->rx_bd_prod;
1513         bd_prod_fw = bd_prod;
1514         sw_comp_cons = fp->rx_comp_cons;
1515         sw_comp_prod = fp->rx_comp_prod;
1516
1517         /* Memory barrier necessary as speculative reads of the rx
1518          * buffer can be ahead of the index in the status block
1519          */
1520         rmb();
1521
1522         DP(NETIF_MSG_RX_STATUS,
1523            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1524            fp->index, hw_comp_cons, sw_comp_cons);
1525
1526         while (sw_comp_cons != hw_comp_cons) {
1527                 struct sw_rx_bd *rx_buf = NULL;
1528                 struct sk_buff *skb;
1529                 union eth_rx_cqe *cqe;
1530                 u8 cqe_fp_flags;
1531                 u16 len, pad;
1532
1533                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1534                 bd_prod = RX_BD(bd_prod);
1535                 bd_cons = RX_BD(bd_cons);
1536
1537                 /* Prefetch the page containing the BD descriptor
1538                    at the producer's index; it will be needed when a new
1539                    skb is allocated */
1540                 prefetch((void *)(PAGE_ALIGN((unsigned long)
1541                                              (&fp->rx_desc_ring[bd_prod])) -
1542                                   PAGE_SIZE + 1));
1543
1544                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1545                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1546
1547                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1548                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1549                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1550                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1551                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1552                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1553
1554                 /* is this a slowpath msg? */
1555                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1556                         bnx2x_sp_event(fp, cqe);
1557                         goto next_cqe;
1558
1559                 /* this is an rx packet */
1560                 } else {
1561                         rx_buf = &fp->rx_buf_ring[bd_cons];
1562                         skb = rx_buf->skb;
1563                         prefetch(skb);
1564                         prefetch((u8 *)skb + 256);
1565                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1566                         pad = cqe->fast_path_cqe.placement_offset;
1567
1568                         /* If the CQE is marked both TPA_START and TPA_END,
1569                            it is a non-TPA CQE */
1570                         if ((!fp->disable_tpa) &&
1571                             (TPA_TYPE(cqe_fp_flags) !=
1572                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1573                                 u16 queue = cqe->fast_path_cqe.queue_index;
1574
1575                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1576                                         DP(NETIF_MSG_RX_STATUS,
1577                                            "calling tpa_start on queue %d\n",
1578                                            queue);
1579
1580                                         bnx2x_tpa_start(fp, queue, skb,
1581                                                         bd_cons, bd_prod);
1582                                         goto next_rx;
1583                                 }
1584
1585                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1586                                         DP(NETIF_MSG_RX_STATUS,
1587                                            "calling tpa_stop on queue %d\n",
1588                                            queue);
1589
1590                                         if (!BNX2X_RX_SUM_FIX(cqe))
1591                                                 BNX2X_ERR("STOP on non-TCP "
1592                                                           "data\n");
1593
1594                                         /* This is the size of the linear data
1595                                            on this skb */
1596                                         len = le16_to_cpu(cqe->fast_path_cqe.
1597                                                                 len_on_bd);
1598                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1599                                                     len, cqe, comp_ring_cons);
1600 #ifdef BNX2X_STOP_ON_ERROR
1601                                         if (bp->panic)
1602                                                 return 0;
1603 #endif
1604
1605                                         bnx2x_update_sge_prod(fp,
1606                                                         &cqe->fast_path_cqe);
1607                                         goto next_cqe;
1608                                 }
1609                         }
1610
1611                         pci_dma_sync_single_for_device(bp->pdev,
1612                                         pci_unmap_addr(rx_buf, mapping),
1613                                                        pad + RX_COPY_THRESH,
1614                                                        PCI_DMA_FROMDEVICE);
1615                         prefetch(skb);
1616                         prefetch(((char *)(skb)) + 128);
1617
1618                         /* is this an error packet? */
1619                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1620                                 DP(NETIF_MSG_RX_ERR,
1621                                    "ERROR  flags %x  rx packet %u\n",
1622                                    cqe_fp_flags, sw_comp_cons);
1623                                 fp->eth_q_stats.rx_err_discard_pkt++;
1624                                 goto reuse_rx;
1625                         }
1626
1627                         /* Since we don't have a jumbo ring,
1628                          * copy small packets if the MTU > 1500
1629                          */
1630                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1631                             (len <= RX_COPY_THRESH)) {
1632                                 struct sk_buff *new_skb;
1633
1634                                 new_skb = netdev_alloc_skb(bp->dev,
1635                                                            len + pad);
1636                                 if (new_skb == NULL) {
1637                                         DP(NETIF_MSG_RX_ERR,
1638                                            "ERROR  packet dropped "
1639                                            "because of alloc failure\n");
1640                                         fp->eth_q_stats.rx_skb_alloc_failed++;
1641                                         goto reuse_rx;
1642                                 }
1643
1644                                 /* aligned copy */
1645                                 skb_copy_from_linear_data_offset(skb, pad,
1646                                                     new_skb->data + pad, len);
1647                                 skb_reserve(new_skb, pad);
1648                                 skb_put(new_skb, len);
1649
1650                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1651
1652                                 skb = new_skb;
1653
1654                         } else
1655                         if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
1656                                 pci_unmap_single(bp->pdev,
1657                                         pci_unmap_addr(rx_buf, mapping),
1658                                                  bp->rx_buf_size,
1659                                                  PCI_DMA_FROMDEVICE);
1660                                 skb_reserve(skb, pad);
1661                                 skb_put(skb, len);
1662
1663                         } else {
1664                                 DP(NETIF_MSG_RX_ERR,
1665                                    "ERROR  packet dropped because "
1666                                    "of alloc failure\n");
1667                                 fp->eth_q_stats.rx_skb_alloc_failed++;
1668 reuse_rx:
1669                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1670                                 goto next_rx;
1671                         }
1672
1673                         skb->protocol = eth_type_trans(skb, bp->dev);
1674
1675                         skb->ip_summed = CHECKSUM_NONE;
1676                         if (bp->rx_csum) {
1677                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1678                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1679                                 else
1680                                         fp->eth_q_stats.hw_csum_err++;
1681                         }
1682                 }
1683
1684                 skb_record_rx_queue(skb, fp->index);
1685
1686 #ifdef BCM_VLAN
1687                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1688                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1689                      PARSING_FLAGS_VLAN))
1690                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1691                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1692                 else
1693 #endif
1694                         netif_receive_skb(skb);
1695
1696
1697 next_rx:
1698                 rx_buf->skb = NULL;
1699
1700                 bd_cons = NEXT_RX_IDX(bd_cons);
1701                 bd_prod = NEXT_RX_IDX(bd_prod);
1702                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1703                 rx_pkt++;
1704 next_cqe:
1705                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1706                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1707
1708                 if (rx_pkt == budget)
1709                         break;
1710         } /* while */
1711
1712         fp->rx_bd_cons = bd_cons;
1713         fp->rx_bd_prod = bd_prod_fw;
1714         fp->rx_comp_cons = sw_comp_cons;
1715         fp->rx_comp_prod = sw_comp_prod;
1716
1717         /* Update producers */
1718         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1719                              fp->rx_sge_prod);
1720
1721         fp->rx_pkt += rx_pkt;
1722         fp->rx_calls++;
1723
1724         return rx_pkt;
1725 }
1726
1727 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1728 {
1729         struct bnx2x_fastpath *fp = fp_cookie;
1730         struct bnx2x *bp = fp->bp;
1731
1732         /* Return here if interrupt is disabled */
1733         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1734                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1735                 return IRQ_HANDLED;
1736         }
1737
1738         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1739            fp->index, fp->sb_id);
1740         bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1741
1742 #ifdef BNX2X_STOP_ON_ERROR
1743         if (unlikely(bp->panic))
1744                 return IRQ_HANDLED;
1745 #endif
1746
1747         /* Handle Rx and Tx according to MSI-X vector */
1748         prefetch(fp->rx_cons_sb);
1749         prefetch(fp->tx_cons_sb);
1750         prefetch(&fp->status_blk->u_status_block.status_block_index);
1751         prefetch(&fp->status_blk->c_status_block.status_block_index);
1752         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1753
1754         return IRQ_HANDLED;
1755 }
1756
1757 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1758 {
1759         struct bnx2x *bp = netdev_priv(dev_instance);
1760         u16 status = bnx2x_ack_int(bp);
1761         u16 mask;
1762         int i;
1763
1764         /* Return here if interrupt is shared and it's not for us */
1765         if (unlikely(status == 0)) {
1766                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1767                 return IRQ_NONE;
1768         }
1769         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1770
1771         /* Return here if interrupt is disabled */
1772         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1773                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1774                 return IRQ_HANDLED;
1775         }
1776
1777 #ifdef BNX2X_STOP_ON_ERROR
1778         if (unlikely(bp->panic))
1779                 return IRQ_HANDLED;
1780 #endif
1781
1782         for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1783                 struct bnx2x_fastpath *fp = &bp->fp[i];
1784
1785                 mask = 0x2 << fp->sb_id;
1786                 if (status & mask) {
1787                         /* Handle Rx and Tx according to SB id */
1788                         prefetch(fp->rx_cons_sb);
1789                         prefetch(&fp->status_blk->u_status_block.
1790                                                 status_block_index);
1791                         prefetch(fp->tx_cons_sb);
1792                         prefetch(&fp->status_blk->c_status_block.
1793                                                 status_block_index);
1794                         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1795                         status &= ~mask;
1796                 }
1797         }
1798
1799 #ifdef BCM_CNIC
1800         mask = 0x2 << CNIC_SB_ID(bp);
1801         if (status & (mask | 0x1)) {
1802                 struct cnic_ops *c_ops = NULL;
1803
1804                 rcu_read_lock();
1805                 c_ops = rcu_dereference(bp->cnic_ops);
1806                 if (c_ops)
1807                         c_ops->cnic_handler(bp->cnic_data, NULL);
1808                 rcu_read_unlock();
1809
1810                 status &= ~mask;
1811         }
1812 #endif
1813
1814         if (unlikely(status & 0x1)) {
1815                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1816
1817                 status &= ~0x1;
1818                 if (!status)
1819                         return IRQ_HANDLED;
1820         }
1821
1822         if (status)
1823                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1824                    status);
1825
1826         return IRQ_HANDLED;
1827 }
1828
1829 /* end of fast path */
1830
1831 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1832
1833 /* Link */
1834
1835 /*
1836  * General service functions
1837  */
1838
1839 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1840 {
1841         u32 lock_status;
1842         u32 resource_bit = (1 << resource);
1843         int func = BP_FUNC(bp);
1844         u32 hw_lock_control_reg;
1845         int cnt;
1846
1847         /* Validating that the resource is within range */
1848         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1849                 DP(NETIF_MSG_HW,
1850                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1851                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1852                 return -EINVAL;
1853         }
1854
1855         if (func <= 5) {
1856                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1857         } else {
1858                 hw_lock_control_reg =
1859                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1860         }
1861
1862         /* Validating that the resource is not already taken */
1863         lock_status = REG_RD(bp, hw_lock_control_reg);
1864         if (lock_status & resource_bit) {
1865                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1866                    lock_status, resource_bit);
1867                 return -EEXIST;
1868         }
1869
1870         /* Try for 5 seconds, every 5ms */
1871         for (cnt = 0; cnt < 1000; cnt++) {
1872                 /* Try to acquire the lock */
1873                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1874                 lock_status = REG_RD(bp, hw_lock_control_reg);
1875                 if (lock_status & resource_bit)
1876                         return 0;
1877
1878                 msleep(5);
1879         }
1880         DP(NETIF_MSG_HW, "Timeout\n");
1881         return -EAGAIN;
1882 }
1883
1884 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1885 {
1886         u32 lock_status;
1887         u32 resource_bit = (1 << resource);
1888         int func = BP_FUNC(bp);
1889         u32 hw_lock_control_reg;
1890
1891         /* Validating that the resource is within range */
1892         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1893                 DP(NETIF_MSG_HW,
1894                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1895                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1896                 return -EINVAL;
1897         }
1898
1899         if (func <= 5) {
1900                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1901         } else {
1902                 hw_lock_control_reg =
1903                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1904         }
1905
1906         /* Validating that the resource is currently taken */
1907         lock_status = REG_RD(bp, hw_lock_control_reg);
1908         if (!(lock_status & resource_bit)) {
1909                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1910                    lock_status, resource_bit);
1911                 return -EFAULT;
1912         }
1913
1914         REG_WR(bp, hw_lock_control_reg, resource_bit);
1915         return 0;
1916 }
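
/*
 * Typical usage of the acquire/release pair above (see e.g.
 * bnx2x_set_gpio() below):
 *
 *	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *	... read-modify-write the shared register ...
 *	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *
 * Acquisition writes the resource bit to hw_lock_control_reg + 4 and polls
 * the lock status; if the bit is not granted within roughly 5 seconds the
 * caller gets -EAGAIN.
 */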
1917
1918 /* HW Lock for shared dual port PHYs */
1919 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1920 {
1921         mutex_lock(&bp->port.phy_mutex);
1922
1923         if (bp->port.need_hw_lock)
1924                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1925 }
1926
1927 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1928 {
1929         if (bp->port.need_hw_lock)
1930                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1931
1932         mutex_unlock(&bp->port.phy_mutex);
1933 }
1934
1935 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1936 {
1937         /* The GPIO should be swapped if swap register is set and active */
1938         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1939                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1940         int gpio_shift = gpio_num +
1941                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1942         u32 gpio_mask = (1 << gpio_shift);
1943         u32 gpio_reg;
1944         int value;
1945
1946         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1947                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1948                 return -EINVAL;
1949         }
1950
1951         /* read GPIO value */
1952         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1953
1954         /* get the requested pin value */
1955         if ((gpio_reg & gpio_mask) == gpio_mask)
1956                 value = 1;
1957         else
1958                 value = 0;
1959
1960         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
1961
1962         return value;
1963 }
1964
1965 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1966 {
1967         /* The GPIO should be swapped if swap register is set and active */
1968         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1969                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1970         int gpio_shift = gpio_num +
1971                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1972         u32 gpio_mask = (1 << gpio_shift);
1973         u32 gpio_reg;
1974
1975         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1976                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1977                 return -EINVAL;
1978         }
1979
1980         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1981         /* read GPIO and mask except the float bits */
1982         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1983
1984         switch (mode) {
1985         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1986                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1987                    gpio_num, gpio_shift);
1988                 /* clear FLOAT and set CLR */
1989                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1990                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1991                 break;
1992
1993         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1994                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1995                    gpio_num, gpio_shift);
1996                 /* clear FLOAT and set SET */
1997                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1998                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1999                 break;
2000
2001         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2002                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2003                    gpio_num, gpio_shift);
2004                 /* set FLOAT */
2005                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2006                 break;
2007
2008         default:
2009                 break;
2010         }
2011
2012         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2013         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2014
2015         return 0;
2016 }
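
/*
 * MISC_REG_GPIO keeps separate FLOAT, CLR and SET bit groups per pin (at
 * MISC_REGISTERS_GPIO_FLOAT_POS, _CLR_POS and _SET_POS respectively), so
 * driving a pin low, for instance, means clearing its FLOAT bit and
 * setting its CLR bit as done in the OUTPUT_LOW case above.
 */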
2017
2018 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2019 {
2020         /* The GPIO should be swapped if swap register is set and active */
2021         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2022                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2023         int gpio_shift = gpio_num +
2024                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2025         u32 gpio_mask = (1 << gpio_shift);
2026         u32 gpio_reg;
2027
2028         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2029                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2030                 return -EINVAL;
2031         }
2032
2033         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2034         /* read GPIO int */
2035         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2036
2037         switch (mode) {
2038         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2039                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2040                                    "output low\n", gpio_num, gpio_shift);
2041                 /* clear SET and set CLR */
2042                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2043                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2044                 break;
2045
2046         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2047                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2048                                    "output high\n", gpio_num, gpio_shift);
2049                 /* clear CLR and set SET */
2050                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2051                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2052                 break;
2053
2054         default:
2055                 break;
2056         }
2057
2058         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2059         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2060
2061         return 0;
2062 }
2063
2064 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2065 {
2066         u32 spio_mask = (1 << spio_num);
2067         u32 spio_reg;
2068
2069         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2070             (spio_num > MISC_REGISTERS_SPIO_7)) {
2071                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2072                 return -EINVAL;
2073         }
2074
2075         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2076         /* read SPIO and mask except the float bits */
2077         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2078
2079         switch (mode) {
2080         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2081                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2082                 /* clear FLOAT and set CLR */
2083                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2084                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2085                 break;
2086
2087         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2088                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2089                 /* clear FLOAT and set SET */
2090                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2091                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2092                 break;
2093
2094         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2095                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2096                 /* set FLOAT */
2097                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2098                 break;
2099
2100         default:
2101                 break;
2102         }
2103
2104         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2105         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2106
2107         return 0;
2108 }
2109
2110 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2111 {
2112         switch (bp->link_vars.ieee_fc &
2113                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2114         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2115                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2116                                           ADVERTISED_Pause);
2117                 break;
2118
2119         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2120                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2121                                          ADVERTISED_Pause);
2122                 break;
2123
2124         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2125                 bp->port.advertising |= ADVERTISED_Asym_Pause;
2126                 break;
2127
2128         default:
2129                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2130                                           ADVERTISED_Pause);
2131                 break;
2132         }
2133 }
2134
2135 static void bnx2x_link_report(struct bnx2x *bp)
2136 {
2137         if (bp->flags & MF_FUNC_DIS) {
2138                 netif_carrier_off(bp->dev);
2139                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2140                 return;
2141         }
2142
2143         if (bp->link_vars.link_up) {
2144                 u16 line_speed;
2145
2146                 if (bp->state == BNX2X_STATE_OPEN)
2147                         netif_carrier_on(bp->dev);
2148                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2149
2150                 line_speed = bp->link_vars.line_speed;
2151                 if (IS_E1HMF(bp)) {
2152                         u16 vn_max_rate;
2153
2154                         vn_max_rate =
2155                                 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2156                                  FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2157                         if (vn_max_rate < line_speed)
2158                                 line_speed = vn_max_rate;
2159                 }
2160                 printk("%d Mbps ", line_speed);
2161
2162                 if (bp->link_vars.duplex == DUPLEX_FULL)
2163                         printk("full duplex");
2164                 else
2165                         printk("half duplex");
2166
2167                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2168                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2169                                 printk(", receive ");
2170                                 if (bp->link_vars.flow_ctrl &
2171                                     BNX2X_FLOW_CTRL_TX)
2172                                         printk("& transmit ");
2173                         } else {
2174                                 printk(", transmit ");
2175                         }
2176                         printk("flow control ON");
2177                 }
2178                 printk("\n");
2179
2180         } else { /* link_down */
2181                 netif_carrier_off(bp->dev);
2182                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2183         }
2184 }
2185
2186 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2187 {
2188         if (!BP_NOMCP(bp)) {
2189                 u8 rc;
2190
2191                 /* Initialize link parameters structure variables */
2192                 /* It is recommended to turn off RX FC for jumbo frames
2193                    for better performance */
2194                 if (bp->dev->mtu > 5000)
2195                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2196                 else
2197                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2198
2199                 bnx2x_acquire_phy_lock(bp);
2200
2201                 if (load_mode == LOAD_DIAG)
2202                         bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2203
2204                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2205
2206                 bnx2x_release_phy_lock(bp);
2207
2208                 bnx2x_calc_fc_adv(bp);
2209
2210                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2211                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2212                         bnx2x_link_report(bp);
2213                 }
2214
2215                 return rc;
2216         }
2217         BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2218         return -EINVAL;
2219 }
2220
2221 static void bnx2x_link_set(struct bnx2x *bp)
2222 {
2223         if (!BP_NOMCP(bp)) {
2224                 bnx2x_acquire_phy_lock(bp);
2225                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2226                 bnx2x_release_phy_lock(bp);
2227
2228                 bnx2x_calc_fc_adv(bp);
2229         } else
2230                 BNX2X_ERR("Bootcode is missing - can not set link\n");
2231 }
2232
2233 static void bnx2x__link_reset(struct bnx2x *bp)
2234 {
2235         if (!BP_NOMCP(bp)) {
2236                 bnx2x_acquire_phy_lock(bp);
2237                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2238                 bnx2x_release_phy_lock(bp);
2239         } else
2240                 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2241 }
2242
2243 static u8 bnx2x_link_test(struct bnx2x *bp)
2244 {
2245         u8 rc;
2246
2247         bnx2x_acquire_phy_lock(bp);
2248         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2249         bnx2x_release_phy_lock(bp);
2250
2251         return rc;
2252 }
2253
2254 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2255 {
2256         u32 r_param = bp->link_vars.line_speed / 8;
2257         u32 fair_periodic_timeout_usec;
2258         u32 t_fair;
2259
2260         memset(&(bp->cmng.rs_vars), 0,
2261                sizeof(struct rate_shaping_vars_per_port));
2262         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2263
2264         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2265         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2266
2267         /* this is the threshold below which no timer arming will occur;
2268            the 1.25 coefficient makes the threshold a little bigger
2269            than the real time, to compensate for timer inaccuracy */
2270         bp->cmng.rs_vars.rs_threshold =
2271                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2272
2273         /* resolution of fairness timer */
2274         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2275         /* for 10G it is 1000usec. for 1G it is 10000usec. */
2276         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2277
2278         /* this is the threshold below which we won't arm the timer anymore */
2279         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2280
2281         /* we multiply by 1e3/8 to get bytes/msec.
2282            We don't want the credits to exceed
2283            t_fair*FAIR_MEM (the algorithm resolution) */
2284         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2285         /* since each tick is 4 usec */
2286         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2287 }
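
/*
 * Worked example (assuming a 10G link and the 100 usec rate-shaping period
 * implied by the SDM-ticks comment above): line_speed = 10000 Mbps gives
 * r_param = 10000 / 8 = 1250 bytes/usec, so
 * rs_threshold = 100 * 1250 * 5/4 = 156250 bytes, and as noted above
 * t_fair = 1000 usec for 10G (10000 usec for 1G).
 */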
2288
2289 /* Calculates the sum of vn_min_rates.
2290    It's needed for further normalizing of the min_rates.
2291    Returns:
2292      sum of vn_min_rates.
2293        or
2294      0 - if all the min_rates are 0.
2295      In the latter case the fairness algorithm should be deactivated.
2296      If not all min_rates are zero then those that are zeroes will be set to 1.
2297  */
2298 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2299 {
2300         int all_zero = 1;
2301         int port = BP_PORT(bp);
2302         int vn;
2303
2304         bp->vn_weight_sum = 0;
2305         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2306                 int func = 2*vn + port;
2307                 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2308                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2309                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2310
2311                 /* Skip hidden vns */
2312                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2313                         continue;
2314
2315                 /* If min rate is zero - set it to 1 */
2316                 if (!vn_min_rate)
2317                         vn_min_rate = DEF_MIN_RATE;
2318                 else
2319                         all_zero = 0;
2320
2321                 bp->vn_weight_sum += vn_min_rate;
2322         }
2323
2324         /* ... only if all min rates are zeros - disable fairness */
2325         if (all_zero) {
2326                 bp->cmng.flags.cmng_enables &=
2327                                         ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2328                 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2329                    "  fairness will be disabled\n");
2330         } else
2331                 bp->cmng.flags.cmng_enables |=
2332                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2333 }
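
/*
 * Example: with per-vn min rates of 0, 2000, 3000 and 0 Mbps, the two zero
 * entries are counted as DEF_MIN_RATE each, all non-hidden vns contribute
 * to vn_weight_sum and fairness stays enabled; only if every configured vn
 * has a zero min rate is CMNG_FLAGS_PER_PORT_FAIRNESS_VN cleared.
 */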
2334
2335 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2336 {
2337         struct rate_shaping_vars_per_vn m_rs_vn;
2338         struct fairness_vars_per_vn m_fair_vn;
2339         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2340         u16 vn_min_rate, vn_max_rate;
2341         int i;
2342
2343         /* If function is hidden - set min and max to zeroes */
2344         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2345                 vn_min_rate = 0;
2346                 vn_max_rate = 0;
2347
2348         } else {
2349                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2350                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2351                 /* If min rate is zero - set it to 1 */
2352                 if (!vn_min_rate)
2353                         vn_min_rate = DEF_MIN_RATE;
2354                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2355                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2356         }
2357         DP(NETIF_MSG_IFUP,
2358            "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
2359            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2360
2361         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2362         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2363
2364         /* global vn counter - maximal Mbps for this vn */
2365         m_rs_vn.vn_counter.rate = vn_max_rate;
2366
2367         /* quota - number of bytes transmitted in this period */
2368         m_rs_vn.vn_counter.quota =
2369                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2370
2371         if (bp->vn_weight_sum) {
2372                 /* credit for each period of the fairness algorithm:
2373                    number of bytes in T_FAIR (the vns share the port rate).
2374                    vn_weight_sum should not be larger than 10000, thus
2375                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2376                    than zero */
2377                 m_fair_vn.vn_credit_delta =
2378                         max((u32)(vn_min_rate * (T_FAIR_COEF /
2379                                                  (8 * bp->vn_weight_sum))),
2380                             (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2381                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2382                    m_fair_vn.vn_credit_delta);
2383         }
2384
2385         /* Store it to internal memory */
2386         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2387                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2388                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2389                        ((u32 *)(&m_rs_vn))[i]);
2390
2391         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2392                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2393                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2394                        ((u32 *)(&m_fair_vn))[i]);
2395 }
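
/*
 * Example (assuming the 100 usec rate-shaping period): a
 * FUNC_MF_CFG_MAX_BW field of 25 yields vn_max_rate = 25 * 100 = 2500 Mbps
 * and a per-period quota of 2500 * 100 / 8 = 31250 bytes
 * (m_rs_vn.vn_counter.quota above).
 */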
2396
2397
2398 /* This function is called upon link interrupt */
2399 static void bnx2x_link_attn(struct bnx2x *bp)
2400 {
2401         /* Make sure that we are synced with the current statistics */
2402         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2403
2404         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2405
2406         if (bp->link_vars.link_up) {
2407
2408                 /* dropless flow control */
2409                 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2410                         int port = BP_PORT(bp);
2411                         u32 pause_enabled = 0;
2412
2413                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2414                                 pause_enabled = 1;
2415
2416                         REG_WR(bp, BAR_USTRORM_INTMEM +
2417                                USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2418                                pause_enabled);
2419                 }
2420
2421                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2422                         struct host_port_stats *pstats;
2423
2424                         pstats = bnx2x_sp(bp, port_stats);
2425                         /* reset old bmac stats */
2426                         memset(&(pstats->mac_stx[0]), 0,
2427                                sizeof(struct mac_stx));
2428                 }
2429                 if (bp->state == BNX2X_STATE_OPEN)
2430                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2431         }
2432
2433         /* indicate link status */
2434         bnx2x_link_report(bp);
2435
2436         if (IS_E1HMF(bp)) {
2437                 int port = BP_PORT(bp);
2438                 int func;
2439                 int vn;
2440
2441                 /* Set the attention towards other drivers on the same port */
2442                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2443                         if (vn == BP_E1HVN(bp))
2444                                 continue;
2445
2446                         func = ((vn << 1) | port);
2447                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2448                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2449                 }
2450
2451                 if (bp->link_vars.link_up) {
2452                         int i;
2453
2454                         /* Init rate shaping and fairness contexts */
2455                         bnx2x_init_port_minmax(bp);
2456
2457                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2458                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2459
2460                         /* Store it to internal memory */
2461                         for (i = 0;
2462                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
2463                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2464                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2465                                        ((u32 *)(&bp->cmng))[i]);
2466                 }
2467         }
2468 }
2469
2470 static void bnx2x__link_status_update(struct bnx2x *bp)
2471 {
2472         if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
2473                 return;
2474
2475         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2476
2477         if (bp->link_vars.link_up)
2478                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2479         else
2480                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2481
2482         bnx2x_calc_vn_weight_sum(bp);
2483
2484         /* indicate link status */
2485         bnx2x_link_report(bp);
2486 }
2487
2488 static void bnx2x_pmf_update(struct bnx2x *bp)
2489 {
2490         int port = BP_PORT(bp);
2491         u32 val;
2492
2493         bp->port.pmf = 1;
2494         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2495
2496         /* enable nig attention */
2497         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2498         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2499         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2500
2501         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2502 }
2503
2504 /* end of Link */
2505
2506 /* slow path */
2507
2508 /*
2509  * General service functions
2510  */
2511
2512 /* send the MCP a request, block until there is a reply */
2513 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2514 {
2515         int func = BP_FUNC(bp);
2516         u32 seq = ++bp->fw_seq;
2517         u32 rc = 0;
2518         u32 cnt = 1;
2519         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2520
2521         mutex_lock(&bp->fw_mb_mutex);
2522         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2523         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2524
2525         do {
2526                 /* let the FW do its magic ... */
2527                 msleep(delay);
2528
2529                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2530
2531                 /* Give the FW up to 5 seconds (500*10ms) */
2532         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2533
2534         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2535            cnt*delay, rc, seq);
2536
2537         /* is this a reply to our command? */
2538         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2539                 rc &= FW_MSG_CODE_MASK;
2540         else {
2541                 /* FW BUG! */
2542                 BNX2X_ERR("FW failed to respond!\n");
2543                 bnx2x_fw_dump(bp);
2544                 rc = 0;
2545         }
2546         mutex_unlock(&bp->fw_mb_mutex);
2547
2548         return rc;
2549 }
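
/*
 * The mailbox handshake above is a simple sequence-number protocol: the
 * driver writes (command | seq) into drv_mb_header and then polls
 * fw_mb_header (for up to ~5 seconds) until the FW echoes the same
 * sequence number.  On success only the FW_MSG_CODE_MASK part of the reply
 * is returned; a sequence mismatch is treated as a FW failure and 0 is
 * returned.
 */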
2550
2551 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2552 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2553 static void bnx2x_set_rx_mode(struct net_device *dev);
2554
2555 static void bnx2x_e1h_disable(struct bnx2x *bp)
2556 {
2557         int port = BP_PORT(bp);
2558
2559         netif_tx_disable(bp->dev);
2560
2561         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2562
2563         netif_carrier_off(bp->dev);
2564 }
2565
2566 static void bnx2x_e1h_enable(struct bnx2x *bp)
2567 {
2568         int port = BP_PORT(bp);
2569
2570         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2571
2572         /* Tx queues should only be re-enabled */
2573         netif_tx_wake_all_queues(bp->dev);
2574
2575         /*
2576          * Should not call netif_carrier_on here since it will be called if
2577          * the link is up when the link state is checked
2578          */
2579 }
2580
2581 static void bnx2x_update_min_max(struct bnx2x *bp)
2582 {
2583         int port = BP_PORT(bp);
2584         int vn, i;
2585
2586         /* Init rate shaping and fairness contexts */
2587         bnx2x_init_port_minmax(bp);
2588
2589         bnx2x_calc_vn_weight_sum(bp);
2590
2591         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2592                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2593
2594         if (bp->port.pmf) {
2595                 int func;
2596
2597                 /* Set the attention towards other drivers on the same port */
2598                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2599                         if (vn == BP_E1HVN(bp))
2600                                 continue;
2601
2602                         func = ((vn << 1) | port);
2603                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2604                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2605                 }
2606
2607                 /* Store it to internal memory */
2608                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2609                         REG_WR(bp, BAR_XSTRORM_INTMEM +
2610                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2611                                ((u32 *)(&bp->cmng))[i]);
2612         }
2613 }
2614
2615 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2616 {
2617         DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2618
2619         if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2620
2621                 /*
2622                  * This is the only place besides the function initialization
2623                  * where the bp->flags can change so it is done without any
2624                  * locks
2625                  */
2626                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2627                         DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2628                         bp->flags |= MF_FUNC_DIS;
2629
2630                         bnx2x_e1h_disable(bp);
2631                 } else {
2632                         DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2633                         bp->flags &= ~MF_FUNC_DIS;
2634
2635                         bnx2x_e1h_enable(bp);
2636                 }
2637                 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2638         }
2639         if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2640
2641                 bnx2x_update_min_max(bp);
2642                 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2643         }
2644
2645         /* Report results to MCP */
2646         if (dcc_event)
2647                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2648         else
2649                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2650 }
2651
2652 /* must be called under the spq lock */
2653 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2654 {
2655         struct eth_spe *next_spe = bp->spq_prod_bd;
2656
2657         if (bp->spq_prod_bd == bp->spq_last_bd) {
2658                 bp->spq_prod_bd = bp->spq;
2659                 bp->spq_prod_idx = 0;
2660                 DP(NETIF_MSG_TIMER, "end of spq\n");
2661         } else {
2662                 bp->spq_prod_bd++;
2663                 bp->spq_prod_idx++;
2664         }
2665         return next_spe;
2666 }
2667
2668 /* must be called under the spq lock */
2669 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2670 {
2671         int func = BP_FUNC(bp);
2672
2673         /* Make sure that BD data is updated before writing the producer */
2674         wmb();
2675
2676         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2677                bp->spq_prod_idx);
2678         mmiowb();
2679 }
2680
2681 /* the slow path queue is odd since completions arrive on the fastpath ring */
2682 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2683                          u32 data_hi, u32 data_lo, int common)
2684 {
2685         struct eth_spe *spe;
2686
2687         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2688            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2689            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2690            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2691            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2692
2693 #ifdef BNX2X_STOP_ON_ERROR
2694         if (unlikely(bp->panic))
2695                 return -EIO;
2696 #endif
2697
2698         spin_lock_bh(&bp->spq_lock);
2699
2700         if (!bp->spq_left) {
2701                 BNX2X_ERR("BUG! SPQ ring full!\n");
2702                 spin_unlock_bh(&bp->spq_lock);
2703                 bnx2x_panic();
2704                 return -EBUSY;
2705         }
2706
2707         spe = bnx2x_sp_get_next(bp);
2708
2709         /* CID needs the port number to be encoded in it */
2710         spe->hdr.conn_and_cmd_data =
2711                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2712                                      HW_CID(bp, cid)));
2713         spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2714         if (common)
2715                 spe->hdr.type |=
2716                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2717
2718         spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2719         spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2720
2721         bp->spq_left--;
2722
2723         bnx2x_sp_prod_update(bp);
2724         spin_unlock_bh(&bp->spq_lock);
2725         return 0;
2726 }
2727
2728 /* acquire split MCP access lock register */
2729 static int bnx2x_acquire_alr(struct bnx2x *bp)
2730 {
2731         u32 i, j, val;
2732         int rc = 0;
2733
2734         might_sleep();
2735         i = 100;
2736         for (j = 0; j < i*10; j++) {
2737                 val = (1UL << 31);
2738                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2739                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2740                 if (val & (1L << 31))
2741                         break;
2742
2743                 msleep(5);
2744         }
2745         if (!(val & (1L << 31))) {
2746                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2747                 rc = -EBUSY;
2748         }
2749
2750         return rc;
2751 }
2752
2753 /* release split MCP access lock register */
2754 static void bnx2x_release_alr(struct bnx2x *bp)
2755 {
2756         u32 val = 0;
2757
2758         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2759 }
2760
2761 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2762 {
2763         struct host_def_status_block *def_sb = bp->def_status_blk;
2764         u16 rc = 0;
2765
2766         barrier(); /* status block is written to by the chip */
2767         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2768                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2769                 rc |= 1;
2770         }
2771         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2772                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2773                 rc |= 2;
2774         }
2775         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2776                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2777                 rc |= 4;
2778         }
2779         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2780                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2781                 rc |= 8;
2782         }
2783         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2784                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2785                 rc |= 16;
2786         }
2787         return rc;
2788 }
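
/*
 * The returned bitmask tells the caller which default status block indices
 * changed: bit 0 - attention bits, bit 1 - CSTORM, bit 2 - USTORM,
 * bit 3 - XSTORM, bit 4 - TSTORM.
 */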
2789
2790 /*
2791  * slow path service functions
2792  */
2793
2794 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2795 {
2796         int port = BP_PORT(bp);
2797         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2798                        COMMAND_REG_ATTN_BITS_SET);
2799         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2800                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2801         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2802                                        NIG_REG_MASK_INTERRUPT_PORT0;
2803         u32 aeu_mask;
2804         u32 nig_mask = 0;
2805
2806         if (bp->attn_state & asserted)
2807                 BNX2X_ERR("IGU ERROR\n");
2808
2809         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2810         aeu_mask = REG_RD(bp, aeu_addr);
2811
2812         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2813            aeu_mask, asserted);
2814         aeu_mask &= ~(asserted & 0xff);
2815         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2816
2817         REG_WR(bp, aeu_addr, aeu_mask);
2818         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2819
2820         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2821         bp->attn_state |= asserted;
2822         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2823
2824         if (asserted & ATTN_HARD_WIRED_MASK) {
2825                 if (asserted & ATTN_NIG_FOR_FUNC) {
2826
2827                         bnx2x_acquire_phy_lock(bp);
2828
2829                         /* save nig interrupt mask */
2830                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2831                         REG_WR(bp, nig_int_mask_addr, 0);
2832
2833                         bnx2x_link_attn(bp);
2834
2835                         /* handle unicore attn? */
2836                 }
2837                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2838                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2839
2840                 if (asserted & GPIO_2_FUNC)
2841                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2842
2843                 if (asserted & GPIO_3_FUNC)
2844                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2845
2846                 if (asserted & GPIO_4_FUNC)
2847                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2848
2849                 if (port == 0) {
2850                         if (asserted & ATTN_GENERAL_ATTN_1) {
2851                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2852                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2853                         }
2854                         if (asserted & ATTN_GENERAL_ATTN_2) {
2855                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2856                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2857                         }
2858                         if (asserted & ATTN_GENERAL_ATTN_3) {
2859                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2860                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2861                         }
2862                 } else {
2863                         if (asserted & ATTN_GENERAL_ATTN_4) {
2864                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2865                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2866                         }
2867                         if (asserted & ATTN_GENERAL_ATTN_5) {
2868                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2869                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2870                         }
2871                         if (asserted & ATTN_GENERAL_ATTN_6) {
2872                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2873                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2874                         }
2875                 }
2876
2877         } /* if hardwired */
2878
2879         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2880            asserted, hc_addr);
2881         REG_WR(bp, hc_addr, asserted);
2882
2883         /* now set back the mask */
2884         if (asserted & ATTN_NIG_FOR_FUNC) {
2885                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2886                 bnx2x_release_phy_lock(bp);
2887         }
2888 }
2889
2890 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2891 {
2892         int port = BP_PORT(bp);
2893
2894         /* mark the failure */
2895         bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2896         bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2897         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2898                  bp->link_params.ext_phy_config);
2899
2900         /* log the failure */
2901         printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2902                " the driver to shut down the card to prevent permanent"
2903                " damage.  Please contact Dell Support for assistance\n",
2904                bp->dev->name);
2905 }
2906
2907 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2908 {
2909         int port = BP_PORT(bp);
2910         int reg_offset;
2911         u32 val, swap_val, swap_override;
2912
2913         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2914                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2915
2916         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2917
2918                 val = REG_RD(bp, reg_offset);
2919                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2920                 REG_WR(bp, reg_offset, val);
2921
2922                 BNX2X_ERR("SPIO5 hw attention\n");
2923
2924                 /* Fan failure attention */
2925                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2926                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2927                         /* Low power mode is controlled by GPIO 2 */
2928                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2929                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2930                         /* The PHY reset is controlled by GPIO 1 */
2931                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2932                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2933                         break;
2934
2935                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2936                         /* The PHY reset is controlled by GPIO 1 */
2937                         /* fake the port number to cancel the swap done in
2938                            set_gpio() */
2939                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2940                         swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2941                         port = (swap_val && swap_override) ^ 1;
2942                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2943                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2944                         break;
2945
2946                 default:
2947                         break;
2948                 }
2949                 bnx2x_fan_failure(bp);
2950         }
2951
2952         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2953                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2954                 bnx2x_acquire_phy_lock(bp);
2955                 bnx2x_handle_module_detect_int(&bp->link_params);
2956                 bnx2x_release_phy_lock(bp);
2957         }
2958
2959         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2960
2961                 val = REG_RD(bp, reg_offset);
2962                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2963                 REG_WR(bp, reg_offset, val);
2964
2965                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2966                           (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2967                 bnx2x_panic();
2968         }
2969 }
2970
2971 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2972 {
2973         u32 val;
2974
2975         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2976
2977                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2978                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2979                 /* DORQ discard attention */
2980                 if (val & 0x2)
2981                         BNX2X_ERR("FATAL error from DORQ\n");
2982         }
2983
2984         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2985
2986                 int port = BP_PORT(bp);
2987                 int reg_offset;
2988
2989                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2990                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2991
2992                 val = REG_RD(bp, reg_offset);
2993                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2994                 REG_WR(bp, reg_offset, val);
2995
2996                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2997                           (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
2998                 bnx2x_panic();
2999         }
3000 }
3001
3002 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3003 {
3004         u32 val;
3005
3006         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3007
3008                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3009                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3010                 /* CFC error attention */
3011                 if (val & 0x2)
3012                         BNX2X_ERR("FATAL error from CFC\n");
3013         }
3014
3015         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3016
3017                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3018                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3019                 /* RQ_USDMDP_FIFO_OVERFLOW */
3020                 if (val & 0x18000)
3021                         BNX2X_ERR("FATAL error from PXP\n");
3022         }
3023
3024         if (attn & HW_INTERRUT_ASSERT_SET_2) {
3025
3026                 int port = BP_PORT(bp);
3027                 int reg_offset;
3028
3029                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3030                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3031
3032                 val = REG_RD(bp, reg_offset);
3033                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3034                 REG_WR(bp, reg_offset, val);
3035
3036                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3037                           (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3038                 bnx2x_panic();
3039         }
3040 }
3041
3042 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3043 {
3044         u32 val;
3045
3046         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3047
3048                 if (attn & BNX2X_PMF_LINK_ASSERT) {
3049                         int func = BP_FUNC(bp);
3050
3051                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3052                         bp->mf_config = SHMEM_RD(bp,
3053                                            mf_cfg.func_mf_config[func].config);
3054                         val = SHMEM_RD(bp, func_mb[func].drv_status);
3055                         if (val & DRV_STATUS_DCC_EVENT_MASK)
3056                                 bnx2x_dcc_event(bp,
3057                                             (val & DRV_STATUS_DCC_EVENT_MASK));
3058                         bnx2x__link_status_update(bp);
3059                         if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3060                                 bnx2x_pmf_update(bp);
3061
3062                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3063
3064                         BNX2X_ERR("MC assert!\n");
3065                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3066                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3067                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3068                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3069                         bnx2x_panic();
3070
3071                 } else if (attn & BNX2X_MCP_ASSERT) {
3072
3073                         BNX2X_ERR("MCP assert!\n");
3074                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3075                         bnx2x_fw_dump(bp);
3076
3077                 } else
3078                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3079         }
3080
3081         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3082                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3083                 if (attn & BNX2X_GRC_TIMEOUT) {
3084                         val = CHIP_IS_E1H(bp) ?
3085                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3086                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
3087                 }
3088                 if (attn & BNX2X_GRC_RSV) {
3089                         val = CHIP_IS_E1H(bp) ?
3090                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3091                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
3092                 }
3093                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3094         }
3095 }
3096
3097 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3098 {
3099         struct attn_route attn;
3100         struct attn_route group_mask;
3101         int port = BP_PORT(bp);
3102         int index;
3103         u32 reg_addr;
3104         u32 val;
3105         u32 aeu_mask;
3106
3107         /* need to take HW lock because MCP or other port might also
3108            try to handle this event */
3109         bnx2x_acquire_alr(bp);
3110
3111         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3112         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3113         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3114         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3115         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3116            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3117
3118         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3119                 if (deasserted & (1 << index)) {
3120                         group_mask = bp->attn_group[index];
3121
3122                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3123                            index, group_mask.sig[0], group_mask.sig[1],
3124                            group_mask.sig[2], group_mask.sig[3]);
3125
3126                         bnx2x_attn_int_deasserted3(bp,
3127                                         attn.sig[3] & group_mask.sig[3]);
3128                         bnx2x_attn_int_deasserted1(bp,
3129                                         attn.sig[1] & group_mask.sig[1]);
3130                         bnx2x_attn_int_deasserted2(bp,
3131                                         attn.sig[2] & group_mask.sig[2]);
3132                         bnx2x_attn_int_deasserted0(bp,
3133                                         attn.sig[0] & group_mask.sig[0]);
3134
3135                         if ((attn.sig[0] & group_mask.sig[0] &
3136                                                 HW_PRTY_ASSERT_SET_0) ||
3137                             (attn.sig[1] & group_mask.sig[1] &
3138                                                 HW_PRTY_ASSERT_SET_1) ||
3139                             (attn.sig[2] & group_mask.sig[2] &
3140                                                 HW_PRTY_ASSERT_SET_2))
3141                                 BNX2X_ERR("FATAL HW block parity attention\n");
3142                 }
3143         }
3144
3145         bnx2x_release_alr(bp);
3146
3147         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3148
3149         val = ~deasserted;
3150         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3151            val, reg_addr);
3152         REG_WR(bp, reg_addr, val);
3153
3154         if (~bp->attn_state & deasserted)
3155                 BNX2X_ERR("IGU ERROR\n");
3156
3157         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3158                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
3159
3160         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3161         aeu_mask = REG_RD(bp, reg_addr);
3162
3163         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
3164            aeu_mask, deasserted);
3165         aeu_mask |= (deasserted & 0xff);
3166         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3167
3168         REG_WR(bp, reg_addr, aeu_mask);
3169         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3170
3171         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3172         bp->attn_state &= ~deasserted;
3173         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3174 }
3175
3176 static void bnx2x_attn_int(struct bnx2x *bp)
3177 {
3178         /* read local copy of bits */
3179         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3180                                                                 attn_bits);
3181         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3182                                                                 attn_bits_ack);
3183         u32 attn_state = bp->attn_state;
3184
3185         /* look for changed bits */
3186         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
3187         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
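        /*
         * A bit is "asserted" when the chip raised it (attn_bits) but it is
         * neither acknowledged nor recorded in the driver state yet; it is
         * "deasserted" when the chip dropped it while it was still acked and
         * recorded.  For example, with attn_bits = 0x5, attn_ack = 0x1 and
         * attn_state = 0x1: asserted = 0x4 (new attention), deasserted = 0x0.
         */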
3188
3189         DP(NETIF_MSG_HW,
3190            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
3191            attn_bits, attn_ack, asserted, deasserted);
3192
3193         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3194                 BNX2X_ERR("BAD attention state\n");
3195
3196         /* handle bits that were raised */
3197         if (asserted)
3198                 bnx2x_attn_int_asserted(bp, asserted);
3199
3200         if (deasserted)
3201                 bnx2x_attn_int_deasserted(bp, deasserted);
3202 }
3203
3204 static void bnx2x_sp_task(struct work_struct *work)
3205 {
3206         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3207         u16 status;
3208
3209
3210         /* Return here if interrupt is disabled */
3211         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3212                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3213                 return;
3214         }
3215
3216         status = bnx2x_update_dsb_idx(bp);
3217 /*      if (status == 0)                                     */
3218 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
3219
3220         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
3221
3222         /* HW attentions */
3223         if (status & 0x1)
3224                 bnx2x_attn_int(bp);
3225
3226         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3227                      IGU_INT_NOP, 1);
3228         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3229                      IGU_INT_NOP, 1);
3230         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3231                      IGU_INT_NOP, 1);
3232         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3233                      IGU_INT_NOP, 1);
3234         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3235                      IGU_INT_ENABLE, 1);
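        /*
         * Each storm's default status block index is acknowledged with
         * IGU_INT_NOP; only the final ack (TSTORM) passes IGU_INT_ENABLE,
         * re-enabling the slowpath interrupt that the interrupt handler
         * left disabled.
         */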
3236
3237 }
3238
3239 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3240 {
3241         struct net_device *dev = dev_instance;
3242         struct bnx2x *bp = netdev_priv(dev);
3243
3244         /* Return here if interrupt is disabled */
3245         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3246                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3247                 return IRQ_HANDLED;
3248         }
3249
3250         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
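        /*
         * Acknowledge with IGU_INT_DISABLE so no further slowpath interrupts
         * arrive while the (possibly sleeping) sp_task runs; the task
         * re-enables them with its final IGU_INT_ENABLE ack.
         */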
3251
3252 #ifdef BNX2X_STOP_ON_ERROR
3253         if (unlikely(bp->panic))
3254                 return IRQ_HANDLED;
3255 #endif
3256
3257 #ifdef BCM_CNIC
3258         {
3259                 struct cnic_ops *c_ops;
3260
3261                 rcu_read_lock();
3262                 c_ops = rcu_dereference(bp->cnic_ops);
3263                 if (c_ops)
3264                         c_ops->cnic_handler(bp->cnic_data, NULL);
3265                 rcu_read_unlock();
3266         }
3267 #endif
3268         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3269
3270         return IRQ_HANDLED;
3271 }
3272
3273 /* end of slow path */
3274
3275 /* Statistics */
3276
3277 /****************************************************************************
3278 * Macros
3279 ****************************************************************************/
3280
3281 /* sum[hi:lo] += add[hi:lo] */
3282 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3283         do { \
3284                 s_lo += a_lo; \
3285                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3286         } while (0)
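/*
 * The carry is detected by the unsigned wrap-around of the low word.
 * Example: s = 0x00000000:ffffffff, a = 0x00000000:00000001 makes s_lo
 * wrap to 0, so (s_lo < a_lo) holds, 1 is carried into s_hi and the
 * result is s = 0x00000001:00000000.
 */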
3287
3288 /* difference = minuend - subtrahend */
3289 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3290         do { \
3291                 if (m_lo < s_lo) { \
3292                         /* underflow */ \
3293                         d_hi = m_hi - s_hi; \
3294                         if (d_hi > 0) { \
3295                                 /* borrow 1 from the high word */ \
3296                                 d_hi--; \
3297                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3298                         } else { \
3299                                 /* m_hi <= s_hi */ \
3300                                 d_hi = 0; \
3301                                 d_lo = 0; \
3302                         } \
3303                 } else { \
3304                         /* m_lo >= s_lo */ \
3305                         if (m_hi < s_hi) { \
3306                                 d_hi = 0; \
3307                                 d_lo = 0; \
3308                         } else { \
3309                                 /* m_hi >= s_hi */ \
3310                                 d_hi = m_hi - s_hi; \
3311                                 d_lo = m_lo - s_lo; \
3312                         } \
3313                 } \
3314         } while (0)
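/*
 * Note that the difference saturates at zero: if the minuend is smaller
 * than the subtrahend (which would mean a 64-bit counter went backwards),
 * the result is 0:0 rather than a huge wrapped value.
 */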
3315
3316 #define UPDATE_STAT64(s, t) \
3317         do { \
3318                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3319                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3320                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3321                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3322                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3323                        pstats->mac_stx[1].t##_lo, diff.lo); \
3324         } while (0)
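/*
 * mac_stx[0] keeps the last raw MAC snapshot and mac_stx[1] accumulates
 * the running total: the delta since the previous snapshot is computed
 * with DIFF_64 and then added into the accumulator with ADD_64.
 */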
3325
3326 #define UPDATE_STAT64_NIG(s, t) \
3327         do { \
3328                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3329                         diff.lo, new->s##_lo, old->s##_lo); \
3330                 ADD_64(estats->t##_hi, diff.hi, \
3331                        estats->t##_lo, diff.lo); \
3332         } while (0)
3333
3334 /* sum[hi:lo] += add */
3335 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3336         do { \
3337                 s_lo += a; \
3338                 s_hi += (s_lo < a) ? 1 : 0; \
3339         } while (0)
3340
3341 #define UPDATE_EXTEND_STAT(s) \
3342         do { \
3343                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3344                               pstats->mac_stx[1].s##_lo, \
3345                               new->s); \
3346         } while (0)
3347
3348 #define UPDATE_EXTEND_TSTAT(s, t) \
3349         do { \
3350                 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3351                 old_tclient->s = tclient->s; \
3352                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3353         } while (0)
3354
3355 #define UPDATE_EXTEND_USTAT(s, t) \
3356         do { \
3357                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3358                 old_uclient->s = uclient->s; \
3359                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3360         } while (0)
3361
3362 #define UPDATE_EXTEND_XSTAT(s, t) \
3363         do { \
3364                 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3365                 old_xclient->s = xclient->s; \
3366                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3367         } while (0)
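/*
 * The UPDATE_EXTEND_{T,U,X}STAT macros all follow the same pattern: take
 * the 32-bit delta since the previously saved storm value (unsigned
 * subtraction keeps this wrap-safe), remember the new value, and extend
 * the delta into the 64-bit per-queue counter.
 */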
3368
3369 /* minuend -= subtrahend */
3370 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3371         do { \
3372                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3373         } while (0)
3374
3375 /* minuend[hi:lo] -= subtrahend */
3376 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3377         do { \
3378                 SUB_64(m_hi, 0, m_lo, s); \
3379         } while (0)
3380
3381 #define SUB_EXTEND_USTAT(s, t) \
3382         do { \
3383                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3384                 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3385         } while (0)
3386
3387 /*
3388  * General service functions
3389  */
3390
3391 static inline long bnx2x_hilo(u32 *hiref)
3392 {
3393         u32 lo = *(hiref + 1);
3394 #if (BITS_PER_LONG == 64)
3395         u32 hi = *hiref;
3396
3397         return HILO_U64(hi, lo);
3398 #else
3399         return lo;
3400 #endif
3401 }
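/*
 * On 32-bit builds a long cannot hold the full 64-bit counter, so only
 * the low word is returned; on 64-bit builds the two halves are combined.
 */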
3402
3403 /*
3404  * Init service functions
3405  */
3406
3407 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3408 {
3409         if (!bp->stats_pending) {
3410                 struct eth_query_ramrod_data ramrod_data = {0};
3411                 int i, rc;
3412
3413                 ramrod_data.drv_counter = bp->stats_counter++;
3414                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3415                 for_each_queue(bp, i)
3416                         ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3417
3418                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3419                                    ((u32 *)&ramrod_data)[1],
3420                                    ((u32 *)&ramrod_data)[0], 0);
3421                 if (rc == 0) {
3422                         /* stats ramrod has its own slot on the spq */
3423                         bp->spq_left++;
3424                         bp->stats_pending = 1;
3425                 }
3426         }
3427 }
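/*
 * The drv_counter posted above is reported back by each storm in its
 * per-client statistics; bnx2x_storm_stats_update() later compares that
 * value against bp->stats_counter to decide whether the snapshot in the
 * fw_stats buffer is fresh.
 */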
3428
3429 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3430 {
3431         struct dmae_command *dmae = &bp->stats_dmae;
3432         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3433
3434         *stats_comp = DMAE_COMP_VAL;
3435         if (CHIP_REV_IS_SLOW(bp))
3436                 return;
3437
3438         /* loader */
3439         if (bp->executer_idx) {
3440                 int loader_idx = PMF_DMAE_C(bp);
3441
3442                 memset(dmae, 0, sizeof(struct dmae_command));
3443
3444                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3445                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3446                                 DMAE_CMD_DST_RESET |
3447 #ifdef __BIG_ENDIAN
3448                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3449 #else
3450                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3451 #endif
3452                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3453                                                DMAE_CMD_PORT_0) |
3454                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3455                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3456                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3457                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3458                                      sizeof(struct dmae_command) *
3459                                      (loader_idx + 1)) >> 2;
3460                 dmae->dst_addr_hi = 0;
3461                 dmae->len = sizeof(struct dmae_command) >> 2;
3462                 if (CHIP_IS_E1(bp))
3463                         dmae->len--;
3464                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3465                 dmae->comp_addr_hi = 0;
3466                 dmae->comp_val = 1;
3467
3468                 *stats_comp = 0;
3469                 bnx2x_post_dmae(bp, dmae, loader_idx);
3470
3471         } else if (bp->func_stx) {
3472                 *stats_comp = 0;
3473                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3474         }
3475 }
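/*
 * *stats_comp is primed with DMAE_COMP_VAL before the early return so that
 * bnx2x_stats_comp() succeeds immediately on slow (emulation) chip
 * revisions, and cleared again right before the DMAE commands are posted;
 * the last command in the chain writes DMAE_COMP_VAL back once the
 * transfer is done.
 */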
3476
3477 static int bnx2x_stats_comp(struct bnx2x *bp)
3478 {
3479         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3480         int cnt = 10;
3481
3482         might_sleep();
3483         while (*stats_comp != DMAE_COMP_VAL) {
3484                 if (!cnt) {
3485                         BNX2X_ERR("timed out waiting for stats to finish\n");
3486                         break;
3487                 }
3488                 cnt--;
3489                 msleep(1);
3490         }
3491         return 1;
3492 }
3493
3494 /*
3495  * Statistics service functions
3496  */
3497
3498 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3499 {
3500         struct dmae_command *dmae;
3501         u32 opcode;
3502         int loader_idx = PMF_DMAE_C(bp);
3503         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3504
3505         /* sanity */
3506         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3507                 BNX2X_ERR("BUG!\n");
3508                 return;
3509         }
3510
3511         bp->executer_idx = 0;
3512
3513         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3514                   DMAE_CMD_C_ENABLE |
3515                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3516 #ifdef __BIG_ENDIAN
3517                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3518 #else
3519                   DMAE_CMD_ENDIANITY_DW_SWAP |
3520 #endif
3521                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3522                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3523
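        /*
         * The shmem port stats area is larger than a single DMAE read
         * (DMAE_LEN32_RD_MAX dwords), so it is pulled in two chunks; only
         * the second one signals completion via stats_comp.
         */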
3524         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3525         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3526         dmae->src_addr_lo = bp->port.port_stx >> 2;
3527         dmae->src_addr_hi = 0;
3528         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3529         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3530         dmae->len = DMAE_LEN32_RD_MAX;
3531         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3532         dmae->comp_addr_hi = 0;
3533         dmae->comp_val = 1;
3534
3535         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3536         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3537         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3538         dmae->src_addr_hi = 0;
3539         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3540                                    DMAE_LEN32_RD_MAX * 4);
3541         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3542                                    DMAE_LEN32_RD_MAX * 4);
3543         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3544         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3545         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3546         dmae->comp_val = DMAE_COMP_VAL;
3547
3548         *stats_comp = 0;
3549         bnx2x_hw_stats_post(bp);
3550         bnx2x_stats_comp(bp);
3551 }
3552
3553 static void bnx2x_port_stats_init(struct bnx2x *bp)
3554 {
3555         struct dmae_command *dmae;
3556         int port = BP_PORT(bp);
3557         int vn = BP_E1HVN(bp);
3558         u32 opcode;
3559         int loader_idx = PMF_DMAE_C(bp);
3560         u32 mac_addr;
3561         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3562
3563         /* sanity */
3564         if (!bp->link_vars.link_up || !bp->port.pmf) {
3565                 BNX2X_ERR("BUG!\n");
3566                 return;
3567         }
3568
3569         bp->executer_idx = 0;
3570
3571         /* MCP */
3572         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3573                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3574                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3575 #ifdef __BIG_ENDIAN
3576                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3577 #else
3578                   DMAE_CMD_ENDIANITY_DW_SWAP |
3579 #endif
3580                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3581                   (vn << DMAE_CMD_E1HVN_SHIFT));
3582
3583         if (bp->port.port_stx) {
3584
3585                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3586                 dmae->opcode = opcode;
3587                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3588                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3589                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3590                 dmae->dst_addr_hi = 0;
3591                 dmae->len = sizeof(struct host_port_stats) >> 2;
3592                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3593                 dmae->comp_addr_hi = 0;
3594                 dmae->comp_val = 1;
3595         }
3596
3597         if (bp->func_stx) {
3598
3599                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3600                 dmae->opcode = opcode;
3601                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3602                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3603                 dmae->dst_addr_lo = bp->func_stx >> 2;
3604                 dmae->dst_addr_hi = 0;
3605                 dmae->len = sizeof(struct host_func_stats) >> 2;
3606                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3607                 dmae->comp_addr_hi = 0;
3608                 dmae->comp_val = 1;
3609         }
3610
3611         /* MAC */
3612         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3613                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3614                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3615 #ifdef __BIG_ENDIAN
3616                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3617 #else
3618                   DMAE_CMD_ENDIANITY_DW_SWAP |
3619 #endif
3620                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3621                   (vn << DMAE_CMD_E1HVN_SHIFT));
3622
3623         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3624
3625                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3626                                    NIG_REG_INGRESS_BMAC0_MEM);
3627
3628                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3629                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3630                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3631                 dmae->opcode = opcode;
3632                 dmae->src_addr_lo = (mac_addr +
3633                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3634                 dmae->src_addr_hi = 0;
3635                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3636                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3637                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3638                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3639                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3640                 dmae->comp_addr_hi = 0;
3641                 dmae->comp_val = 1;
3642
3643                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3644                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3645                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3646                 dmae->opcode = opcode;
3647                 dmae->src_addr_lo = (mac_addr +
3648                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3649                 dmae->src_addr_hi = 0;
3650                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3651                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3652                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3653                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3654                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3655                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3656                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3657                 dmae->comp_addr_hi = 0;
3658                 dmae->comp_val = 1;
3659
3660         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3661
3662                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3663
3664                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3665                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3666                 dmae->opcode = opcode;
3667                 dmae->src_addr_lo = (mac_addr +
3668                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3669                 dmae->src_addr_hi = 0;
3670                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3671                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3672                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3673                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3674                 dmae->comp_addr_hi = 0;
3675                 dmae->comp_val = 1;
3676
3677                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3678                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3679                 dmae->opcode = opcode;
3680                 dmae->src_addr_lo = (mac_addr +
3681                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3682                 dmae->src_addr_hi = 0;
3683                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3684                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3685                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3686                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3687                 dmae->len = 1;
3688                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3689                 dmae->comp_addr_hi = 0;
3690                 dmae->comp_val = 1;
3691
3692                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3693                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3694                 dmae->opcode = opcode;
3695                 dmae->src_addr_lo = (mac_addr +
3696                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3697                 dmae->src_addr_hi = 0;
3698                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3699                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3700                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3701                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3702                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3703                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3704                 dmae->comp_addr_hi = 0;
3705                 dmae->comp_val = 1;
3706         }
3707
3708         /* NIG */
3709         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3710         dmae->opcode = opcode;
3711         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3712                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3713         dmae->src_addr_hi = 0;
3714         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3715         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3716         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3717         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3718         dmae->comp_addr_hi = 0;
3719         dmae->comp_val = 1;
3720
3721         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3722         dmae->opcode = opcode;
3723         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3724                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3725         dmae->src_addr_hi = 0;
3726         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3727                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3728         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3729                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3730         dmae->len = (2*sizeof(u32)) >> 2;
3731         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3732         dmae->comp_addr_hi = 0;
3733         dmae->comp_val = 1;
3734
3735         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3736         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3737                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3738                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3739 #ifdef __BIG_ENDIAN
3740                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3741 #else
3742                         DMAE_CMD_ENDIANITY_DW_SWAP |
3743 #endif
3744                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3745                         (vn << DMAE_CMD_E1HVN_SHIFT));
3746         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3747                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3748         dmae->src_addr_hi = 0;
3749         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3750                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3751         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3752                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3753         dmae->len = (2*sizeof(u32)) >> 2;
3754         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3755         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3756         dmae->comp_val = DMAE_COMP_VAL;
3757
3758         *stats_comp = 0;
3759 }
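/*
 * bnx2x_port_stats_init() only queues the DMAE commands (host port/func
 * stats out to shmem, MAC and NIG counters into the host buffers); nothing
 * is executed here.  The chain is fired later by bnx2x_hw_stats_post(),
 * and only the very last command writes DMAE_COMP_VAL to stats_comp.
 */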
3760
3761 static void bnx2x_func_stats_init(struct bnx2x *bp)
3762 {
3763         struct dmae_command *dmae = &bp->stats_dmae;
3764         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3765
3766         /* sanity */
3767         if (!bp->func_stx) {
3768                 BNX2X_ERR("BUG!\n");
3769                 return;
3770         }
3771
3772         bp->executer_idx = 0;
3773         memset(dmae, 0, sizeof(struct dmae_command));
3774
3775         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3776                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3777                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3778 #ifdef __BIG_ENDIAN
3779                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3780 #else
3781                         DMAE_CMD_ENDIANITY_DW_SWAP |
3782 #endif
3783                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3784                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3785         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3786         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3787         dmae->dst_addr_lo = bp->func_stx >> 2;
3788         dmae->dst_addr_hi = 0;
3789         dmae->len = sizeof(struct host_func_stats) >> 2;
3790         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3791         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3792         dmae->comp_val = DMAE_COMP_VAL;
3793
3794         *stats_comp = 0;
3795 }
3796
3797 static void bnx2x_stats_start(struct bnx2x *bp)
3798 {
3799         if (bp->port.pmf)
3800                 bnx2x_port_stats_init(bp);
3801
3802         else if (bp->func_stx)
3803                 bnx2x_func_stats_init(bp);
3804
3805         bnx2x_hw_stats_post(bp);
3806         bnx2x_storm_stats_post(bp);
3807 }
3808
3809 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3810 {
3811         bnx2x_stats_comp(bp);
3812         bnx2x_stats_pmf_update(bp);
3813         bnx2x_stats_start(bp);
3814 }
3815
3816 static void bnx2x_stats_restart(struct bnx2x *bp)
3817 {
3818         bnx2x_stats_comp(bp);
3819         bnx2x_stats_start(bp);
3820 }
3821
3822 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3823 {
3824         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3825         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3826         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3827         struct {
3828                 u32 lo;
3829                 u32 hi;
3830         } diff;
3831
3832         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3833         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3834         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3835         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3836         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3837         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3838         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3839         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3840         UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3841         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3842         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3843         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3844         UPDATE_STAT64(tx_stat_gt127,
3845                                 tx_stat_etherstatspkts65octetsto127octets);
3846         UPDATE_STAT64(tx_stat_gt255,
3847                                 tx_stat_etherstatspkts128octetsto255octets);
3848         UPDATE_STAT64(tx_stat_gt511,
3849                                 tx_stat_etherstatspkts256octetsto511octets);
3850         UPDATE_STAT64(tx_stat_gt1023,
3851                                 tx_stat_etherstatspkts512octetsto1023octets);
3852         UPDATE_STAT64(tx_stat_gt1518,
3853                                 tx_stat_etherstatspkts1024octetsto1522octets);
3854         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3855         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3856         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3857         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3858         UPDATE_STAT64(tx_stat_gterr,
3859                                 tx_stat_dot3statsinternalmactransmiterrors);
3860         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3861
3862         estats->pause_frames_received_hi =
3863                                 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3864         estats->pause_frames_received_lo =
3865                                 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3866
3867         estats->pause_frames_sent_hi =
3868                                 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3869         estats->pause_frames_sent_lo =
3870                                 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3871 }
3872
3873 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3874 {
3875         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3876         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3877         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3878
3879         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3880         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3881         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3882         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3883         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3884         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3885         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3886         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3887         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3888         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3889         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3890         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3891         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3892         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3893         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3894         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3895         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3896         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3897         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3898         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3899         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3900         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3901         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3902         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3903         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3904         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3905         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3906         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3907         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3908         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3909         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3910
3911         estats->pause_frames_received_hi =
3912                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3913         estats->pause_frames_received_lo =
3914                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3915         ADD_64(estats->pause_frames_received_hi,
3916                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3917                estats->pause_frames_received_lo,
3918                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3919
3920         estats->pause_frames_sent_hi =
3921                         pstats->mac_stx[1].tx_stat_outxonsent_hi;
3922         estats->pause_frames_sent_lo =
3923                         pstats->mac_stx[1].tx_stat_outxonsent_lo;
3924         ADD_64(estats->pause_frames_sent_hi,
3925                pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3926                estats->pause_frames_sent_lo,
3927                pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3928 }
3929
3930 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3931 {
3932         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3933         struct nig_stats *old = &(bp->port.old_nig_stats);
3934         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3935         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3936         struct {
3937                 u32 lo;
3938                 u32 hi;
3939         } diff;
3940         u32 nig_timer_max;
3941
3942         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3943                 bnx2x_bmac_stats_update(bp);
3944
3945         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3946                 bnx2x_emac_stats_update(bp);
3947
3948         else { /* unreached */
3949                 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3950                 return -1;
3951         }
3952
3953         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3954                       new->brb_discard - old->brb_discard);
3955         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3956                       new->brb_truncate - old->brb_truncate);
3957
3958         UPDATE_STAT64_NIG(egress_mac_pkt0,
3959                                         etherstatspkts1024octetsto1522octets);
3960         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3961
3962         memcpy(old, new, sizeof(struct nig_stats));
3963
3964         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3965                sizeof(struct mac_stx));
3966         estats->brb_drop_hi = pstats->brb_drop_hi;
3967         estats->brb_drop_lo = pstats->brb_drop_lo;
3968
3969         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3970
3971         nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3972         if (nig_timer_max != estats->nig_timer_max) {
3973                 estats->nig_timer_max = nig_timer_max;
3974                 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3975         }
3976
3977         return 0;
3978 }
3979
3980 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3981 {
3982         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3983         struct tstorm_per_port_stats *tport =
3984                                         &stats->tstorm_common.port_statistics;
3985         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3986         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3987         int i;
3988
3989         memcpy(&(fstats->total_bytes_received_hi),
3990                &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
3991                sizeof(struct host_func_stats) - 2*sizeof(u32));
3992         estats->error_bytes_received_hi = 0;
3993         estats->error_bytes_received_lo = 0;
3994         estats->etherstatsoverrsizepkts_hi = 0;
3995         estats->etherstatsoverrsizepkts_lo = 0;
3996         estats->no_buff_discard_hi = 0;
3997         estats->no_buff_discard_lo = 0;
3998
3999         for_each_queue(bp, i) {
4000                 struct bnx2x_fastpath *fp = &bp->fp[i];
4001                 int cl_id = fp->cl_id;
4002                 struct tstorm_per_client_stats *tclient =
4003                                 &stats->tstorm_common.client_statistics[cl_id];
4004                 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4005                 struct ustorm_per_client_stats *uclient =
4006                                 &stats->ustorm_common.client_statistics[cl_id];
4007                 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4008                 struct xstorm_per_client_stats *xclient =
4009                                 &stats->xstorm_common.client_statistics[cl_id];
4010                 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4011                 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4012                 u32 diff;
4013
4014                 /* are storm stats valid? */
4015                 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
4016                                                         bp->stats_counter) {
4017                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4018                            "  xstorm counter (%d) != stats_counter (%d)\n",
4019                            i, xclient->stats_counter, bp->stats_counter);
4020                         return -1;
4021                 }
4022                 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
4023                                                         bp->stats_counter) {
4024                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4025                            "  tstorm counter (%d) != stats_counter (%d)\n",
4026                            i, tclient->stats_counter, bp->stats_counter);
4027                         return -2;
4028                 }
4029                 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4030                                                         bp->stats_counter) {
4031                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4032                            "  ustorm counter (%d) != stats_counter (%d)\n",
4033                            i, uclient->stats_counter, bp->stats_counter);
4034                         return -4;
4035                 }
4036
4037                 qstats->total_bytes_received_hi =
4038                         le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4039                 qstats->total_bytes_received_lo =
4040                         le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4041
4042                 ADD_64(qstats->total_bytes_received_hi,
4043                        le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4044                        qstats->total_bytes_received_lo,
4045                        le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4046
4047                 ADD_64(qstats->total_bytes_received_hi,
4048                        le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4049                        qstats->total_bytes_received_lo,
4050                        le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4051
4052                 qstats->valid_bytes_received_hi =
4053                                         qstats->total_bytes_received_hi;
4054                 qstats->valid_bytes_received_lo =
4055                                         qstats->total_bytes_received_lo;
4056
4057                 qstats->error_bytes_received_hi =
4058                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
4059                 qstats->error_bytes_received_lo =
4060                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
4061
4062                 ADD_64(qstats->total_bytes_received_hi,
4063                        qstats->error_bytes_received_hi,
4064                        qstats->total_bytes_received_lo,
4065                        qstats->error_bytes_received_lo);
4066
4067                 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4068                                         total_unicast_packets_received);
4069                 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4070                                         total_multicast_packets_received);
4071                 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4072                                         total_broadcast_packets_received);
4073                 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4074                                         etherstatsoverrsizepkts);
4075                 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4076
4077                 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4078                                         total_unicast_packets_received);
4079                 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4080                                         total_multicast_packets_received);
4081                 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4082                                         total_broadcast_packets_received);
4083                 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4084                 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4085                 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4086
4087                 qstats->total_bytes_transmitted_hi =
4088                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4089                 qstats->total_bytes_transmitted_lo =
4090                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4091
4092                 ADD_64(qstats->total_bytes_transmitted_hi,
4093                        le32_to_cpu(xclient->multicast_bytes_sent.hi),
4094                        qstats->total_bytes_transmitted_lo,
4095                        le32_to_cpu(xclient->multicast_bytes_sent.lo));
4096
4097                 ADD_64(qstats->total_bytes_transmitted_hi,
4098                        le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4099                        qstats->total_bytes_transmitted_lo,
4100                        le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4101
4102                 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4103                                         total_unicast_packets_transmitted);
4104                 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4105                                         total_multicast_packets_transmitted);
4106                 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4107                                         total_broadcast_packets_transmitted);
4108
4109                 old_tclient->checksum_discard = tclient->checksum_discard;
4110                 old_tclient->ttl0_discard = tclient->ttl0_discard;
4111
4112                 ADD_64(fstats->total_bytes_received_hi,
4113                        qstats->total_bytes_received_hi,
4114                        fstats->total_bytes_received_lo,
4115                        qstats->total_bytes_received_lo);
4116                 ADD_64(fstats->total_bytes_transmitted_hi,
4117                        qstats->total_bytes_transmitted_hi,
4118                        fstats->total_bytes_transmitted_lo,
4119                        qstats->total_bytes_transmitted_lo);
4120                 ADD_64(fstats->total_unicast_packets_received_hi,
4121                        qstats->total_unicast_packets_received_hi,
4122                        fstats->total_unicast_packets_received_lo,
4123                        qstats->total_unicast_packets_received_lo);
4124                 ADD_64(fstats->total_multicast_packets_received_hi,
4125                        qstats->total_multicast_packets_received_hi,
4126                        fstats->total_multicast_packets_received_lo,
4127                        qstats->total_multicast_packets_received_lo);
4128                 ADD_64(fstats->total_broadcast_packets_received_hi,
4129                        qstats->total_broadcast_packets_received_hi,
4130                        fstats->total_broadcast_packets_received_lo,
4131                        qstats->total_broadcast_packets_received_lo);
4132                 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4133                        qstats->total_unicast_packets_transmitted_hi,
4134                        fstats->total_unicast_packets_transmitted_lo,
4135                        qstats->total_unicast_packets_transmitted_lo);
4136                 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4137                        qstats->total_multicast_packets_transmitted_hi,
4138                        fstats->total_multicast_packets_transmitted_lo,
4139                        qstats->total_multicast_packets_transmitted_lo);
4140                 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4141                        qstats->total_broadcast_packets_transmitted_hi,
4142                        fstats->total_broadcast_packets_transmitted_lo,
4143                        qstats->total_broadcast_packets_transmitted_lo);
4144                 ADD_64(fstats->valid_bytes_received_hi,
4145                        qstats->valid_bytes_received_hi,
4146                        fstats->valid_bytes_received_lo,
4147                        qstats->valid_bytes_received_lo);
4148
4149                 ADD_64(estats->error_bytes_received_hi,
4150                        qstats->error_bytes_received_hi,
4151                        estats->error_bytes_received_lo,
4152                        qstats->error_bytes_received_lo);
4153                 ADD_64(estats->etherstatsoverrsizepkts_hi,
4154                        qstats->etherstatsoverrsizepkts_hi,
4155                        estats->etherstatsoverrsizepkts_lo,
4156                        qstats->etherstatsoverrsizepkts_lo);
4157                 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4158                        estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4159         }
4160
4161         ADD_64(fstats->total_bytes_received_hi,
4162                estats->rx_stat_ifhcinbadoctets_hi,
4163                fstats->total_bytes_received_lo,
4164                estats->rx_stat_ifhcinbadoctets_lo);
4165
4166         memcpy(estats, &(fstats->total_bytes_received_hi),
4167                sizeof(struct host_func_stats) - 2*sizeof(u32));
4168
4169         ADD_64(estats->etherstatsoverrsizepkts_hi,
4170                estats->rx_stat_dot3statsframestoolong_hi,
4171                estats->etherstatsoverrsizepkts_lo,
4172                estats->rx_stat_dot3statsframestoolong_lo);
4173         ADD_64(estats->error_bytes_received_hi,
4174                estats->rx_stat_ifhcinbadoctets_hi,
4175                estats->error_bytes_received_lo,
4176                estats->rx_stat_ifhcinbadoctets_lo);
4177
4178         if (bp->port.pmf) {
4179                 estats->mac_filter_discard =
4180                                 le32_to_cpu(tport->mac_filter_discard);
4181                 estats->xxoverflow_discard =
4182                                 le32_to_cpu(tport->xxoverflow_discard);
4183                 estats->brb_truncate_discard =
4184                                 le32_to_cpu(tport->brb_truncate_discard);
4185                 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4186         }
4187
4188         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4189
4190         bp->stats_pending = 0;
4191
4192         return 0;
4193 }
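/*
 * The statistics kept in eth_q_stats/host_func_stats above are 64-bit
 * values stored as hi/lo pairs of 32-bit words.  ADD_64() (see bnx2x.h)
 * accumulates one pair into another with carry propagation, conceptually:
 *
 *	s_lo += a_lo;
 *	s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0);
 *
 * and the UPDATE_EXTEND_*STAT()/SUB_EXTEND_USTAT() macros extend a 32-bit
 * storm counter into its 64-bit host counterpart by adding (or
 * subtracting) the delta from the previously saved value, so wrap-around
 * of the firmware counters does not lose counts.
 */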
4194
4195 static void bnx2x_net_stats_update(struct bnx2x *bp)
4196 {
4197         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4198         struct net_device_stats *nstats = &bp->dev->stats;
4199         int i;
4200
4201         nstats->rx_packets =
4202                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4203                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4204                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4205
4206         nstats->tx_packets =
4207                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4208                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4209                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4210
4211         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4212
4213         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4214
4215         nstats->rx_dropped = estats->mac_discard;
4216         for_each_queue(bp, i)
4217                 nstats->rx_dropped +=
4218                         le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4219
4220         nstats->tx_dropped = 0;
4221
4222         nstats->multicast =
4223                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4224
4225         nstats->collisions =
4226                 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4227
4228         nstats->rx_length_errors =
4229                 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4230                 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4231         nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4232                                  bnx2x_hilo(&estats->brb_truncate_hi);
4233         nstats->rx_crc_errors =
4234                 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4235         nstats->rx_frame_errors =
4236                 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4237         nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4238         nstats->rx_missed_errors = estats->xxoverflow_discard;
4239
4240         nstats->rx_errors = nstats->rx_length_errors +
4241                             nstats->rx_over_errors +
4242                             nstats->rx_crc_errors +
4243                             nstats->rx_frame_errors +
4244                             nstats->rx_fifo_errors +
4245                             nstats->rx_missed_errors;
4246
4247         nstats->tx_aborted_errors =
4248                 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4249                 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4250         nstats->tx_carrier_errors =
4251                 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4252         nstats->tx_fifo_errors = 0;
4253         nstats->tx_heartbeat_errors = 0;
4254         nstats->tx_window_errors = 0;
4255
4256         nstats->tx_errors = nstats->tx_aborted_errors +
4257                             nstats->tx_carrier_errors +
4258             bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4259 }
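/*
 * bnx2x_hilo() (defined earlier in this file) recombines such a hi/lo
 * pair into the single scalar expected by the net_device_stats fields
 * above; on 64-bit kernels this is essentially ((u64)hi << 32) | lo.
 */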
4260
4261 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4262 {
4263         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4264         int i;
4265
4266         estats->driver_xoff = 0;
4267         estats->rx_err_discard_pkt = 0;
4268         estats->rx_skb_alloc_failed = 0;
4269         estats->hw_csum_err = 0;
4270         for_each_queue(bp, i) {
4271                 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4272
4273                 estats->driver_xoff += qstats->driver_xoff;
4274                 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4275                 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4276                 estats->hw_csum_err += qstats->hw_csum_err;
4277         }
4278 }
4279
4280 static void bnx2x_stats_update(struct bnx2x *bp)
4281 {
4282         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4283
4284         if (*stats_comp != DMAE_COMP_VAL)
4285                 return;
4286
4287         if (bp->port.pmf)
4288                 bnx2x_hw_stats_update(bp);
4289
4290         if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4291                 BNX2X_ERR("storm stats were not updated 3 times in a row\n");
4292                 bnx2x_panic();
4293                 return;
4294         }
4295
4296         bnx2x_net_stats_update(bp);
4297         bnx2x_drv_stats_update(bp);
4298
4299         if (bp->msglevel & NETIF_MSG_TIMER) {
4300                 struct bnx2x_fastpath *fp0_rx = bp->fp;
4301                 struct bnx2x_fastpath *fp0_tx = bp->fp;
4302                 struct tstorm_per_client_stats *old_tclient =
4303                                                         &bp->fp->old_tclient;
4304                 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4305                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4306                 struct net_device_stats *nstats = &bp->dev->stats;
4307                 int i;
4308
4309                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4310                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
4311                                   "  tx pkt (%lx)\n",
4312                        bnx2x_tx_avail(fp0_tx),
4313                        le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4314                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
4315                                   "  rx pkt (%lx)\n",
4316                        (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4317                              fp0_rx->rx_comp_cons),
4318                        le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4319                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
4320                                   "brb truncate %u\n",
4321                        (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4322                        qstats->driver_xoff,
4323                        estats->brb_drop_lo, estats->brb_truncate_lo);
4324                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
4325                         "packets_too_big_discard %lu  no_buff_discard %lu  "
4326                         "mac_discard %u  mac_filter_discard %u  "
4327                         "xxoverflow_discard %u  brb_truncate_discard %u  "
4328                         "ttl0_discard %u\n",
4329                        le32_to_cpu(old_tclient->checksum_discard),
4330                        bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4331                        bnx2x_hilo(&qstats->no_buff_discard_hi),
4332                        estats->mac_discard, estats->mac_filter_discard,
4333                        estats->xxoverflow_discard, estats->brb_truncate_discard,
4334                        le32_to_cpu(old_tclient->ttl0_discard));
4335
4336                 for_each_queue(bp, i) {
4337                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4338                                bnx2x_fp(bp, i, tx_pkt),
4339                                bnx2x_fp(bp, i, rx_pkt),
4340                                bnx2x_fp(bp, i, rx_calls));
4341                 }
4342         }
4343
4344         bnx2x_hw_stats_post(bp);
4345         bnx2x_storm_stats_post(bp);
4346 }
4347
4348 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4349 {
4350         struct dmae_command *dmae;
4351         u32 opcode;
4352         int loader_idx = PMF_DMAE_C(bp);
4353         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4354
4355         bp->executer_idx = 0;
4356
4357         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4358                   DMAE_CMD_C_ENABLE |
4359                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4360 #ifdef __BIG_ENDIAN
4361                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
4362 #else
4363                   DMAE_CMD_ENDIANITY_DW_SWAP |
4364 #endif
4365                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4366                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4367
4368         if (bp->port.port_stx) {
4369
4370                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4371                 if (bp->func_stx)
4372                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4373                 else
4374                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4375                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4376                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4377                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4378                 dmae->dst_addr_hi = 0;
4379                 dmae->len = sizeof(struct host_port_stats) >> 2;
4380                 if (bp->func_stx) {
4381                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4382                         dmae->comp_addr_hi = 0;
4383                         dmae->comp_val = 1;
4384                 } else {
4385                         dmae->comp_addr_lo =
4386                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4387                         dmae->comp_addr_hi =
4388                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4389                         dmae->comp_val = DMAE_COMP_VAL;
4390
4391                         *stats_comp = 0;
4392                 }
4393         }
4394
4395         if (bp->func_stx) {
4396
4397                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4398                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4399                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4400                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4401                 dmae->dst_addr_lo = bp->func_stx >> 2;
4402                 dmae->dst_addr_hi = 0;
4403                 dmae->len = sizeof(struct host_func_stats) >> 2;
4404                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4405                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4406                 dmae->comp_val = DMAE_COMP_VAL;
4407
4408                 *stats_comp = 0;
4409         }
4410 }
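/*
 * When both a port and a function stats area exist, the first DMAE
 * command above completes into a GRC "go" register (dmae_reg_go_c[])
 * rather than into host memory, which effectively chains the second
 * command behind it; only the last command in the chain writes
 * DMAE_COMP_VAL into stats_comp, which bnx2x_stats_comp() polls for.
 */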
4411
4412 static void bnx2x_stats_stop(struct bnx2x *bp)
4413 {
4414         int update = 0;
4415
4416         bnx2x_stats_comp(bp);
4417
4418         if (bp->port.pmf)
4419                 update = (bnx2x_hw_stats_update(bp) == 0);
4420
4421         update |= (bnx2x_storm_stats_update(bp) == 0);
4422
4423         if (update) {
4424                 bnx2x_net_stats_update(bp);
4425
4426                 if (bp->port.pmf)
4427                         bnx2x_port_stats_stop(bp);
4428
4429                 bnx2x_hw_stats_post(bp);
4430                 bnx2x_stats_comp(bp);
4431         }
4432 }
4433
4434 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4435 {
4436 }
4437
4438 static const struct {
4439         void (*action)(struct bnx2x *bp);
4440         enum bnx2x_stats_state next_state;
4441 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4442 /* state        event   */
4443 {
4444 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4445 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
4446 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4447 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4448 },
4449 {
4450 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
4451 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
4452 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
4453 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
4454 }
4455 };
4456
4457 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4458 {
4459         enum bnx2x_stats_state state = bp->stats_state;
4460
4461         bnx2x_stats_stm[state][event].action(bp);
4462         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4463
4464         /* Make sure the state has been "changed" */
4465         smp_wmb();
4466
4467         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4468                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4469                    state, event, bp->stats_state);
4470 }
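/*
 * The table above is driven through bnx2x_stats_handle(); for example,
 * the link-up path issues bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP),
 * the periodic timer issues STATS_EVENT_UPDATE (see bnx2x_timer() below)
 * and the shutdown path issues STATS_EVENT_STOP.  Each (current state,
 * event) pair selects both an action and the next state.
 */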
4471
4472 static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4473 {
4474         struct dmae_command *dmae;
4475         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4476
4477         /* sanity */
4478         if (!bp->port.pmf || !bp->port.port_stx) {
4479                 BNX2X_ERR("BUG!\n");
4480                 return;
4481         }
4482
4483         bp->executer_idx = 0;
4484
4485         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4486         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4487                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4488                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4489 #ifdef __BIG_ENDIAN
4490                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4491 #else
4492                         DMAE_CMD_ENDIANITY_DW_SWAP |
4493 #endif
4494                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4495                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4496         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4497         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4498         dmae->dst_addr_lo = bp->port.port_stx >> 2;
4499         dmae->dst_addr_hi = 0;
4500         dmae->len = sizeof(struct host_port_stats) >> 2;
4501         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4502         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4503         dmae->comp_val = DMAE_COMP_VAL;
4504
4505         *stats_comp = 0;
4506         bnx2x_hw_stats_post(bp);
4507         bnx2x_stats_comp(bp);
4508 }
4509
4510 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4511 {
4512         int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4513         int port = BP_PORT(bp);
4514         int func;
4515         u32 func_stx;
4516
4517         /* sanity */
4518         if (!bp->port.pmf || !bp->func_stx) {
4519                 BNX2X_ERR("BUG!\n");
4520                 return;
4521         }
4522
4523         /* save our func_stx */
4524         func_stx = bp->func_stx;
4525
4526         for (vn = VN_0; vn < vn_max; vn++) {
4527                 func = 2*vn + port;
4528
4529                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4530                 bnx2x_func_stats_init(bp);
4531                 bnx2x_hw_stats_post(bp);
4532                 bnx2x_stats_comp(bp);
4533         }
4534
4535         /* restore our func_stx */
4536         bp->func_stx = func_stx;
4537 }
4538
4539 static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4540 {
4541         struct dmae_command *dmae = &bp->stats_dmae;
4542         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4543
4544         /* sanity */
4545         if (!bp->func_stx) {
4546                 BNX2X_ERR("BUG!\n");
4547                 return;
4548         }
4549
4550         bp->executer_idx = 0;
4551         memset(dmae, 0, sizeof(struct dmae_command));
4552
4553         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4554                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4555                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4556 #ifdef __BIG_ENDIAN
4557                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4558 #else
4559                         DMAE_CMD_ENDIANITY_DW_SWAP |
4560 #endif
4561                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4562                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4563         dmae->src_addr_lo = bp->func_stx >> 2;
4564         dmae->src_addr_hi = 0;
4565         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4566         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4567         dmae->len = sizeof(struct host_func_stats) >> 2;
4568         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4569         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4570         dmae->comp_val = DMAE_COMP_VAL;
4571
4572         *stats_comp = 0;
4573         bnx2x_hw_stats_post(bp);
4574         bnx2x_stats_comp(bp);
4575 }
4576
4577 static void bnx2x_stats_init(struct bnx2x *bp)
4578 {
4579         int port = BP_PORT(bp);
4580         int func = BP_FUNC(bp);
4581         int i;
4582
4583         bp->stats_pending = 0;
4584         bp->executer_idx = 0;
4585         bp->stats_counter = 0;
4586
4587         /* port and func stats for management */
4588         if (!BP_NOMCP(bp)) {
4589                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4590                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4591
4592         } else {
4593                 bp->port.port_stx = 0;
4594                 bp->func_stx = 0;
4595         }
4596         DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
4597            bp->port.port_stx, bp->func_stx);
4598
4599         /* port stats */
4600         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4601         bp->port.old_nig_stats.brb_discard =
4602                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4603         bp->port.old_nig_stats.brb_truncate =
4604                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4605         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4606                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4607         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4608                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4609
4610         /* function stats */
4611         for_each_queue(bp, i) {
4612                 struct bnx2x_fastpath *fp = &bp->fp[i];
4613
4614                 memset(&fp->old_tclient, 0,
4615                        sizeof(struct tstorm_per_client_stats));
4616                 memset(&fp->old_uclient, 0,
4617                        sizeof(struct ustorm_per_client_stats));
4618                 memset(&fp->old_xclient, 0,
4619                        sizeof(struct xstorm_per_client_stats));
4620                 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
4621         }
4622
4623         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4624         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
4625
4626         bp->stats_state = STATS_STATE_DISABLED;
4627
4628         if (bp->port.pmf) {
4629                 if (bp->port.port_stx)
4630                         bnx2x_port_stats_base_init(bp);
4631
4632                 if (bp->func_stx)
4633                         bnx2x_func_stats_base_init(bp);
4634
4635         } else if (bp->func_stx)
4636                 bnx2x_func_stats_base_update(bp);
4637 }
4638
4639 static void bnx2x_timer(unsigned long data)
4640 {
4641         struct bnx2x *bp = (struct bnx2x *) data;
4642
4643         if (!netif_running(bp->dev))
4644                 return;
4645
4646         if (atomic_read(&bp->intr_sem) != 0)
4647                 goto timer_restart;
4648
4649         if (poll) {
4650                 struct bnx2x_fastpath *fp = &bp->fp[0];
4651                 int rc;
4652
4653                 bnx2x_tx_int(fp);
4654                 rc = bnx2x_rx_int(fp, 1000);
4655         }
4656
4657         if (!BP_NOMCP(bp)) {
4658                 int func = BP_FUNC(bp);
4659                 u32 drv_pulse;
4660                 u32 mcp_pulse;
4661
4662                 ++bp->fw_drv_pulse_wr_seq;
4663                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4664                 /* TBD - add SYSTEM_TIME */
4665                 drv_pulse = bp->fw_drv_pulse_wr_seq;
4666                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4667
4668                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4669                              MCP_PULSE_SEQ_MASK);
4670                 /* The delta between driver pulse and mcp response
4671                  * should be 1 (before mcp response) or 0 (after mcp response)
4672                  */
4673                 if ((drv_pulse != mcp_pulse) &&
4674                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4675                         /* someone lost a heartbeat... */
4676                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4677                                   drv_pulse, mcp_pulse);
4678                 }
4679         }
4680
4681         if (bp->state == BNX2X_STATE_OPEN)
4682                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4683
4684 timer_restart:
4685         mod_timer(&bp->timer, jiffies + bp->current_interval);
4686 }
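/*
 * Example of the heartbeat check above: if the driver has just written
 * pulse sequence 0x12, the MCP is expected to report either 0x12 (it has
 * already echoed this pulse) or 0x11 (it has not responded yet); any
 * other value means one side missed a beat and the error is logged.
 */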
4687
4688 /* end of Statistics */
4689
4690 /* nic init */
4691
4692 /*
4693  * nic init service functions
4694  */
4695
4696 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4697 {
4698         int port = BP_PORT(bp);
4699
4700         /* "CSTORM" */
4701         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4702                         CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4703                         CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4704         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4705                         CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4706                         CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
4707 }
4708
4709 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4710                           dma_addr_t mapping, int sb_id)
4711 {
4712         int port = BP_PORT(bp);
4713         int func = BP_FUNC(bp);
4714         int index;
4715         u64 section;
4716
4717         /* USTORM */
4718         section = ((u64)mapping) + offsetof(struct host_status_block,
4719                                             u_status_block);
4720         sb->u_status_block.status_block_id = sb_id;
4721
4722         REG_WR(bp, BAR_CSTRORM_INTMEM +
4723                CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4724         REG_WR(bp, BAR_CSTRORM_INTMEM +
4725                ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
4726                U64_HI(section));
4727         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4728                 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
4729
4730         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4731                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4732                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
4733
4734         /* CSTORM */
4735         section = ((u64)mapping) + offsetof(struct host_status_block,
4736                                             c_status_block);
4737         sb->c_status_block.status_block_id = sb_id;
4738
4739         REG_WR(bp, BAR_CSTRORM_INTMEM +
4740                CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
4741         REG_WR(bp, BAR_CSTRORM_INTMEM +
4742                ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
4743                U64_HI(section));
4744         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4745                 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
4746
4747         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4748                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4749                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
4750
4751         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4752 }
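/*
 * For each fastpath status block the USTORM section carries the Rx
 * completion-queue index and the CSTORM section the Tx completion-queue
 * index; both sections live in host memory at 'mapping' and their
 * addresses are programmed into CSTORM internal memory above.  All
 * indices start out with HC_DISABLE set and are enabled later by
 * bnx2x_update_coalesce() according to the configured timeouts.
 */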
4753
4754 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4755 {
4756         int func = BP_FUNC(bp);
4757
4758         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
4759                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4760                         sizeof(struct tstorm_def_status_block)/4);
4761         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4762                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4763                         sizeof(struct cstorm_def_status_block_u)/4);
4764         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4765                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4766                         sizeof(struct cstorm_def_status_block_c)/4);
4767         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
4768                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4769                         sizeof(struct xstorm_def_status_block)/4);
4770 }
4771
4772 static void bnx2x_init_def_sb(struct bnx2x *bp,
4773                               struct host_def_status_block *def_sb,
4774                               dma_addr_t mapping, int sb_id)
4775 {
4776         int port = BP_PORT(bp);
4777         int func = BP_FUNC(bp);
4778         int index, val, reg_offset;
4779         u64 section;
4780
4781         /* ATTN */
4782         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4783                                             atten_status_block);
4784         def_sb->atten_status_block.status_block_id = sb_id;
4785
4786         bp->attn_state = 0;
4787
4788         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4789                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4790
4791         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4792                 bp->attn_group[index].sig[0] = REG_RD(bp,
4793                                                      reg_offset + 0x10*index);
4794                 bp->attn_group[index].sig[1] = REG_RD(bp,
4795                                                reg_offset + 0x4 + 0x10*index);
4796                 bp->attn_group[index].sig[2] = REG_RD(bp,
4797                                                reg_offset + 0x8 + 0x10*index);
4798                 bp->attn_group[index].sig[3] = REG_RD(bp,
4799                                                reg_offset + 0xc + 0x10*index);
4800         }
4801
4802         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4803                              HC_REG_ATTN_MSG0_ADDR_L);
4804
4805         REG_WR(bp, reg_offset, U64_LO(section));
4806         REG_WR(bp, reg_offset + 4, U64_HI(section));
4807
4808         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4809
4810         val = REG_RD(bp, reg_offset);
4811         val |= sb_id;
4812         REG_WR(bp, reg_offset, val);
4813
4814         /* USTORM */
4815         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4816                                             u_def_status_block);
4817         def_sb->u_def_status_block.status_block_id = sb_id;
4818
4819         REG_WR(bp, BAR_CSTRORM_INTMEM +
4820                CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4821         REG_WR(bp, BAR_CSTRORM_INTMEM +
4822                ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
4823                U64_HI(section));
4824         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4825                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
4826
4827         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4828                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4829                          CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
4830
4831         /* CSTORM */
4832         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4833                                             c_def_status_block);
4834         def_sb->c_def_status_block.status_block_id = sb_id;
4835
4836         REG_WR(bp, BAR_CSTRORM_INTMEM +
4837                CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
4838         REG_WR(bp, BAR_CSTRORM_INTMEM +
4839                ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
4840                U64_HI(section));
4841         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4842                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
4843
4844         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4845                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4846                          CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
4847
4848         /* TSTORM */
4849         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4850                                             t_def_status_block);
4851         def_sb->t_def_status_block.status_block_id = sb_id;
4852
4853         REG_WR(bp, BAR_TSTRORM_INTMEM +
4854                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4855         REG_WR(bp, BAR_TSTRORM_INTMEM +
4856                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4857                U64_HI(section));
4858         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4859                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4860
4861         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4862                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4863                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4864
4865         /* XSTORM */
4866         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4867                                             x_def_status_block);
4868         def_sb->x_def_status_block.status_block_id = sb_id;
4869
4870         REG_WR(bp, BAR_XSTRORM_INTMEM +
4871                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4872         REG_WR(bp, BAR_XSTRORM_INTMEM +
4873                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4874                U64_HI(section));
4875         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4876                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4877
4878         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4879                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4880                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4881
4882         bp->stats_pending = 0;
4883         bp->set_mac_pending = 0;
4884
4885         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4886 }
4887
4888 static void bnx2x_update_coalesce(struct bnx2x *bp)
4889 {
4890         int port = BP_PORT(bp);
4891         int i;
4892
4893         for_each_queue(bp, i) {
4894                 int sb_id = bp->fp[i].sb_id;
4895
4896                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4897                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4898                         CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4899                                                       U_SB_ETH_RX_CQ_INDEX),
4900                         bp->rx_ticks/(4 * BNX2X_BTR));
4901                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4902                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4903                                                        U_SB_ETH_RX_CQ_INDEX),
4904                          (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
4905
4906                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4907                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4908                         CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4909                                                       C_SB_ETH_TX_CQ_INDEX),
4910                         bp->tx_ticks/(4 * BNX2X_BTR));
4911                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4912                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4913                                                        C_SB_ETH_TX_CQ_INDEX),
4914                          (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
4915         }
4916 }
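/*
 * bp->rx_ticks and bp->tx_ticks hold the interrupt coalescing timeouts
 * (driver defaults or values set via ethtool, in usec); they are scaled
 * by 4 * BNX2X_BTR before being written into the status-block timeout
 * fields, and a timeout of zero leaves the corresponding index disabled
 * (HC_DISABLE kept at 1).
 */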
4917
4918 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4919                                        struct bnx2x_fastpath *fp, int last)
4920 {
4921         int i;
4922
4923         for (i = 0; i < last; i++) {
4924                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4925                 struct sk_buff *skb = rx_buf->skb;
4926
4927                 if (skb == NULL) {
4928                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4929                         continue;
4930                 }
4931
4932                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4933                         pci_unmap_single(bp->pdev,
4934                                          pci_unmap_addr(rx_buf, mapping),
4935                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4936
4937                 dev_kfree_skb(skb);
4938                 rx_buf->skb = NULL;
4939         }
4940 }
4941
4942 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4943 {
4944         int func = BP_FUNC(bp);
4945         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4946                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4947         u16 ring_prod, cqe_ring_prod;
4948         int i, j;
4949
4950         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4951         DP(NETIF_MSG_IFUP,
4952            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4953
4954         if (bp->flags & TPA_ENABLE_FLAG) {
4955
4956                 for_each_queue(bp, j) {
4957                         struct bnx2x_fastpath *fp = &bp->fp[j];
4958
4959                         for (i = 0; i < max_agg_queues; i++) {
4960                                 fp->tpa_pool[i].skb =
4961                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4962                                 if (!fp->tpa_pool[i].skb) {
4963                                         BNX2X_ERR("Failed to allocate TPA "
4964                                                   "skb pool for queue[%d] - "
4965                                                   "disabling TPA on this "
4966                                                   "queue!\n", j);
4967                                         bnx2x_free_tpa_pool(bp, fp, i);
4968                                         fp->disable_tpa = 1;
4969                                         break;
4970                                 }
4971                                 pci_unmap_addr_set((struct sw_rx_bd *)
4972                                                         &fp->tpa_pool[i],
4973                                                    mapping, 0);
4974                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4975                         }
4976                 }
4977         }
4978
4979         for_each_queue(bp, j) {
4980                 struct bnx2x_fastpath *fp = &bp->fp[j];
4981
4982                 fp->rx_bd_cons = 0;
4983                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4984                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4985
4986                 /* "next page" elements initialization */
4987                 /* SGE ring */
4988                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4989                         struct eth_rx_sge *sge;
4990
4991                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4992                         sge->addr_hi =
4993                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4994                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4995                         sge->addr_lo =
4996                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4997                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4998                 }
4999
5000                 bnx2x_init_sge_ring_bit_mask(fp);
5001
5002                 /* RX BD ring */
5003                 for (i = 1; i <= NUM_RX_RINGS; i++) {
5004                         struct eth_rx_bd *rx_bd;
5005
5006                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5007                         rx_bd->addr_hi =
5008                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
5009                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5010                         rx_bd->addr_lo =
5011                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
5012                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5013                 }
5014
5015                 /* CQ ring */
5016                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5017                         struct eth_rx_cqe_next_page *nextpg;
5018
5019                         nextpg = (struct eth_rx_cqe_next_page *)
5020                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5021                         nextpg->addr_hi =
5022                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5023                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5024                         nextpg->addr_lo =
5025                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5026                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5027                 }
5028
5029                 /* Allocate SGEs and initialize the ring elements */
5030                 for (i = 0, ring_prod = 0;
5031                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
5032
5033                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5034                                 BNX2X_ERR("was only able to allocate "
5035                                           "%d rx sges\n", i);
5036                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5037                                 /* Cleanup already allocated elements */
5038                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
5039                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
5040                                 fp->disable_tpa = 1;
5041                                 ring_prod = 0;
5042                                 break;
5043                         }
5044                         ring_prod = NEXT_SGE_IDX(ring_prod);
5045                 }
5046                 fp->rx_sge_prod = ring_prod;
5047
5048                 /* Allocate BDs and initialize BD ring */
5049                 fp->rx_comp_cons = 0;
5050                 cqe_ring_prod = ring_prod = 0;
5051                 for (i = 0; i < bp->rx_ring_size; i++) {
5052                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5053                                 BNX2X_ERR("was only able to allocate "
5054                                           "%d rx skbs on queue[%d]\n", i, j);
5055                                 fp->eth_q_stats.rx_skb_alloc_failed++;
5056                                 break;
5057                         }
5058                         ring_prod = NEXT_RX_IDX(ring_prod);
5059                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5060                         WARN_ON(ring_prod <= i);
5061                 }
5062
5063                 fp->rx_bd_prod = ring_prod;
5064                 /* must not have more available CQEs than BDs */
5065                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5066                                        cqe_ring_prod);
5067                 fp->rx_pkt = fp->rx_calls = 0;
5068
5069                 /* Warning!
5070                  * this will generate an interrupt (to the TSTORM)
5071                  * must only be done after chip is initialized
5072                  */
5073                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5074                                      fp->rx_sge_prod);
5075                 if (j != 0)
5076                         continue;
5077
5078                 REG_WR(bp, BAR_USTRORM_INTMEM +
5079                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5080                        U64_LO(fp->rx_comp_mapping));
5081                 REG_WR(bp, BAR_USTRORM_INTMEM +
5082                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5083                        U64_HI(fp->rx_comp_mapping));
5084         }
5085 }
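/*
 * Each Rx queue is built from three rings: the BD ring holding the skb
 * buffer descriptors, the completion (CQ) ring written back by the chip,
 * and the SGE ring used only when TPA is enabled.  The loops above link
 * the pages of every ring into a circle by pointing the reserved
 * "next page" element at the end of each page to the start of the
 * following page; the producer indices are advanced with the
 * NEXT_*_IDX() helpers so those reserved elements are skipped.
 */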
5086
5087 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5088 {
5089         int i, j;
5090
5091         for_each_queue(bp, j) {
5092                 struct bnx2x_fastpath *fp = &bp->fp[j];
5093
5094                 for (i = 1; i <= NUM_TX_RINGS; i++) {
5095                         struct eth_tx_next_bd *tx_next_bd =
5096                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5097
5098                         tx_next_bd->addr_hi =
5099                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5100                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5101                         tx_next_bd->addr_lo =
5102                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5103                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5104                 }
5105
5106                 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5107                 fp->tx_db.data.zero_fill1 = 0;
5108                 fp->tx_db.data.prod = 0;
5109
5110                 fp->tx_pkt_prod = 0;
5111                 fp->tx_pkt_cons = 0;
5112                 fp->tx_bd_prod = 0;
5113                 fp->tx_bd_cons = 0;
5114                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5115                 fp->tx_pkt = 0;
5116         }
5117 }
5118
5119 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5120 {
5121         int func = BP_FUNC(bp);
5122
5123         spin_lock_init(&bp->spq_lock);
5124
5125         bp->spq_left = MAX_SPQ_PENDING;
5126         bp->spq_prod_idx = 0;
5127         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5128         bp->spq_prod_bd = bp->spq;
5129         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5130
5131         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5132                U64_LO(bp->spq_mapping));
5133         REG_WR(bp,
5134                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5135                U64_HI(bp->spq_mapping));
5136
5137         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5138                bp->spq_prod_idx);
5139 }
5140
5141 static void bnx2x_init_context(struct bnx2x *bp)
5142 {
5143         int i;
5144
5145         /* Rx */
5146         for_each_queue(bp, i) {
5147                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5148                 struct bnx2x_fastpath *fp = &bp->fp[i];
5149                 u8 cl_id = fp->cl_id;
5150
5151                 context->ustorm_st_context.common.sb_index_numbers =
5152                                                 BNX2X_RX_SB_INDEX_NUM;
5153                 context->ustorm_st_context.common.clientId = cl_id;
5154                 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5155                 context->ustorm_st_context.common.flags =
5156                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5157                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5158                 context->ustorm_st_context.common.statistics_counter_id =
5159                                                 cl_id;
5160                 context->ustorm_st_context.common.mc_alignment_log_size =
5161                                                 BNX2X_RX_ALIGN_SHIFT;
5162                 context->ustorm_st_context.common.bd_buff_size =
5163                                                 bp->rx_buf_size;
5164                 context->ustorm_st_context.common.bd_page_base_hi =
5165                                                 U64_HI(fp->rx_desc_mapping);
5166                 context->ustorm_st_context.common.bd_page_base_lo =
5167                                                 U64_LO(fp->rx_desc_mapping);
5168                 if (!fp->disable_tpa) {
5169                         context->ustorm_st_context.common.flags |=
5170                                 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5171                         context->ustorm_st_context.common.sge_buff_size =
5172                                 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5173                                          (u32)0xffff);
5174                         context->ustorm_st_context.common.sge_page_base_hi =
5175                                                 U64_HI(fp->rx_sge_mapping);
5176                         context->ustorm_st_context.common.sge_page_base_lo =
5177                                                 U64_LO(fp->rx_sge_mapping);
5178
5179                         context->ustorm_st_context.common.max_sges_for_packet =
5180                                 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5181                         context->ustorm_st_context.common.max_sges_for_packet =
5182                                 ((context->ustorm_st_context.common.
5183                                   max_sges_for_packet + PAGES_PER_SGE - 1) &
5184                                  (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5185                 }
5186
5187                 context->ustorm_ag_context.cdu_usage =
5188                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5189                                                CDU_REGION_NUMBER_UCM_AG,
5190                                                ETH_CONNECTION_TYPE);
5191
5192                 context->xstorm_ag_context.cdu_reserved =
5193                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5194                                                CDU_REGION_NUMBER_XCM_AG,
5195                                                ETH_CONNECTION_TYPE);
5196         }
5197
5198         /* Tx */
5199         for_each_queue(bp, i) {
5200                 struct bnx2x_fastpath *fp = &bp->fp[i];
5201                 struct eth_context *context =
5202                         bnx2x_sp(bp, context[i].eth);
5203
5204                 context->cstorm_st_context.sb_index_number =
5205                                                 C_SB_ETH_TX_CQ_INDEX;
5206                 context->cstorm_st_context.status_block_id = fp->sb_id;
5207
5208                 context->xstorm_st_context.tx_bd_page_base_hi =
5209                                                 U64_HI(fp->tx_desc_mapping);
5210                 context->xstorm_st_context.tx_bd_page_base_lo =
5211                                                 U64_LO(fp->tx_desc_mapping);
5212                 context->xstorm_st_context.statistics_data = (fp->cl_id |
5213                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5214         }
5215 }
5216
5217 static void bnx2x_init_ind_table(struct bnx2x *bp)
5218 {
5219         int func = BP_FUNC(bp);
5220         int i;
5221
5222         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5223                 return;
5224
5225         DP(NETIF_MSG_IFUP,
5226            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
5227         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5228                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5229                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5230                         bp->fp->cl_id + (i % bp->num_queues));
5231 }
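/*
 * The RSS indirection table simply spreads receive traffic round-robin
 * over the enabled clients: with, say, four queues and a base client id
 * of N, the TSTORM_INDIRECTION_TABLE_SIZE entries read N, N+1, N+2, N+3,
 * N, N+1, ... so the RSS hash of an incoming packet selects one of the
 * Rx queues.
 */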
5232
5233 static void bnx2x_set_client_config(struct bnx2x *bp)
5234 {
5235         struct tstorm_eth_client_config tstorm_client = {0};
5236         int port = BP_PORT(bp);
5237         int i;
5238
5239         tstorm_client.mtu = bp->dev->mtu;
5240         tstorm_client.config_flags =
5241                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5242                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5243 #ifdef BCM_VLAN
5244         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5245                 tstorm_client.config_flags |=
5246                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5247                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5248         }
5249 #endif
5250
5251         for_each_queue(bp, i) {
5252                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5253
5254                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5255                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5256                        ((u32 *)&tstorm_client)[0]);
5257                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5258                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5259                        ((u32 *)&tstorm_client)[1]);
5260         }
5261
5262         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5263            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5264 }
5265
5266 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5267 {
5268         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5269         int mode = bp->rx_mode;
5270         int mask = bp->rx_mode_cl_mask;
5271         int func = BP_FUNC(bp);
5272         int port = BP_PORT(bp);
5273         int i;
5274         /* All but management unicast packets should pass to the host as well */
5275         u32 llh_mask =
5276                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5277                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5278                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5279                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5280
5281         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
5282
5283         switch (mode) {
5284         case BNX2X_RX_MODE_NONE: /* no Rx */
5285                 tstorm_mac_filter.ucast_drop_all = mask;
5286                 tstorm_mac_filter.mcast_drop_all = mask;
5287                 tstorm_mac_filter.bcast_drop_all = mask;
5288                 break;
5289
5290         case BNX2X_RX_MODE_NORMAL:
5291                 tstorm_mac_filter.bcast_accept_all = mask;
5292                 break;
5293
5294         case BNX2X_RX_MODE_ALLMULTI:
5295                 tstorm_mac_filter.mcast_accept_all = mask;
5296                 tstorm_mac_filter.bcast_accept_all = mask;
5297                 break;
5298
5299         case BNX2X_RX_MODE_PROMISC:
5300                 tstorm_mac_filter.ucast_accept_all = mask;
5301                 tstorm_mac_filter.mcast_accept_all = mask;
5302                 tstorm_mac_filter.bcast_accept_all = mask;
5303                 /* pass management unicast packets as well */
5304                 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5305                 break;
5306
5307         default:
5308                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5309                 break;
5310         }
5311
5312         REG_WR(bp,
5313                (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5314                llh_mask);
5315
5316         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5317                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5318                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5319                        ((u32 *)&tstorm_mac_filter)[i]);
5320
5321 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5322                    ((u32 *)&tstorm_mac_filter)[i]); */
5323         }
5324
5325         if (mode != BNX2X_RX_MODE_NONE)
5326                 bnx2x_set_client_config(bp);
5327 }
5328
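     /* Chip-common internal memory init, run once per chip by the function
      * whose load_code is FW_MSG_CODE_DRV_LOAD_COMMON.
      */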
5329 static void bnx2x_init_internal_common(struct bnx2x *bp)
5330 {
5331         int i;
5332
5333         /* Zero this area manually, as its initialization is
5334            currently missing in the initTool */
5335         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5336                 REG_WR(bp, BAR_USTRORM_INTMEM +
5337                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
5338 }
5339
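     /* Per-port internal memory init: write BNX2X_BTR into the storms'
      * HC BTR locations for this port.
      */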
5340 static void bnx2x_init_internal_port(struct bnx2x *bp)
5341 {
5342         int port = BP_PORT(bp);
5343
5344         REG_WR(bp,
5345                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5346         REG_WR(bp,
5347                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5348         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5349         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5350 }
5351
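     /* Per-function internal memory init: RSS/TPA configuration, initial Rx
      * mode, per-client statistics reset, statistics context, CQE page bases
      * and aggregation size, dropless flow control thresholds (E1H only) and
      * rate shaping/fairness setup.
      */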
5352 static void bnx2x_init_internal_func(struct bnx2x *bp)
5353 {
5354         struct tstorm_eth_function_common_config tstorm_config = {0};
5355         struct stats_indication_flags stats_flags = {0};
5356         int port = BP_PORT(bp);
5357         int func = BP_FUNC(bp);
5358         int i, j;
5359         u32 offset;
5360         u16 max_agg_size;
5361
5362         if (is_multi(bp)) {
5363                 tstorm_config.config_flags = MULTI_FLAGS(bp);
5364                 tstorm_config.rss_result_mask = MULTI_MASK;
5365         }
5366
5367         /* Enable TPA if needed */
5368         if (bp->flags & TPA_ENABLE_FLAG)
5369                 tstorm_config.config_flags |=
5370                         TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5371
5372         if (IS_E1HMF(bp))
5373                 tstorm_config.config_flags |=
5374                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5375
5376         tstorm_config.leading_client_id = BP_L_ID(bp);
5377
5378         REG_WR(bp, BAR_TSTRORM_INTMEM +
5379                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5380                (*(u32 *)&tstorm_config));
5381
5382         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5383         bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
5384         bnx2x_set_storm_rx_mode(bp);
5385
5386         for_each_queue(bp, i) {
5387                 u8 cl_id = bp->fp[i].cl_id;
5388
5389                 /* reset xstorm per client statistics */
5390                 offset = BAR_XSTRORM_INTMEM +
5391                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5392                 for (j = 0;
5393                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5394                         REG_WR(bp, offset + j*4, 0);
5395
5396                 /* reset tstorm per client statistics */
5397                 offset = BAR_TSTRORM_INTMEM +
5398                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5399                 for (j = 0;
5400                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5401                         REG_WR(bp, offset + j*4, 0);
5402
5403                 /* reset ustorm per client statistics */
5404                 offset = BAR_USTRORM_INTMEM +
5405                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5406                 for (j = 0;
5407                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5408                         REG_WR(bp, offset + j*4, 0);
5409         }
5410
5411         /* Init statistics related context */
5412         stats_flags.collect_eth = 1;
5413
5414         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5415                ((u32 *)&stats_flags)[0]);
5416         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5417                ((u32 *)&stats_flags)[1]);
5418
5419         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5420                ((u32 *)&stats_flags)[0]);
5421         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5422                ((u32 *)&stats_flags)[1]);
5423
5424         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5425                ((u32 *)&stats_flags)[0]);
5426         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5427                ((u32 *)&stats_flags)[1]);
5428
5429         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5430                ((u32 *)&stats_flags)[0]);
5431         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5432                ((u32 *)&stats_flags)[1]);
5433
5434         REG_WR(bp, BAR_XSTRORM_INTMEM +
5435                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5436                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5437         REG_WR(bp, BAR_XSTRORM_INTMEM +
5438                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5439                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5440
5441         REG_WR(bp, BAR_TSTRORM_INTMEM +
5442                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5443                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5444         REG_WR(bp, BAR_TSTRORM_INTMEM +
5445                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5446                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5447
5448         REG_WR(bp, BAR_USTRORM_INTMEM +
5449                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5450                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5451         REG_WR(bp, BAR_USTRORM_INTMEM +
5452                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5453                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5454
5455         if (CHIP_IS_E1H(bp)) {
5456                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5457                         IS_E1HMF(bp));
5458                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5459                         IS_E1HMF(bp));
5460                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5461                         IS_E1HMF(bp));
5462                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5463                         IS_E1HMF(bp));
5464
5465                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5466                          bp->e1hov);
5467         }
5468
5469         /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
5470         max_agg_size =
5471                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5472                           SGE_PAGE_SIZE * PAGES_PER_SGE),
5473                     (u32)0xffff);
5474         for_each_queue(bp, i) {
5475                 struct bnx2x_fastpath *fp = &bp->fp[i];
5476
5477                 REG_WR(bp, BAR_USTRORM_INTMEM +
5478                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5479                        U64_LO(fp->rx_comp_mapping));
5480                 REG_WR(bp, BAR_USTRORM_INTMEM +
5481                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5482                        U64_HI(fp->rx_comp_mapping));
5483
5484                 /* Next page */
5485                 REG_WR(bp, BAR_USTRORM_INTMEM +
5486                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5487                        U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5488                 REG_WR(bp, BAR_USTRORM_INTMEM +
5489                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5490                        U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5491
5492                 REG_WR16(bp, BAR_USTRORM_INTMEM +
5493                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5494                          max_agg_size);
5495         }
5496
5497         /* dropless flow control */
5498         if (CHIP_IS_E1H(bp)) {
5499                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5500
5501                 rx_pause.bd_thr_low = 250;
5502                 rx_pause.cqe_thr_low = 250;
5503                 rx_pause.cos = 1;
5504                 rx_pause.sge_thr_low = 0;
5505                 rx_pause.bd_thr_high = 350;
5506                 rx_pause.cqe_thr_high = 350;
5507                 rx_pause.sge_thr_high = 0;
5508
5509                 for_each_queue(bp, i) {
5510                         struct bnx2x_fastpath *fp = &bp->fp[i];
5511
5512                         if (!fp->disable_tpa) {
5513                                 rx_pause.sge_thr_low = 150;
5514                                 rx_pause.sge_thr_high = 250;
5515                         }
5516
5517
5518                         offset = BAR_USTRORM_INTMEM +
5519                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5520                                                                    fp->cl_id);
5521                         for (j = 0;
5522                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5523                              j++)
5524                                 REG_WR(bp, offset + j*4,
5525                                        ((u32 *)&rx_pause)[j]);
5526                 }
5527         }
5528
5529         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5530
5531         /* Init rate shaping and fairness contexts */
5532         if (IS_E1HMF(bp)) {
5533                 int vn;
5534
5535                 /* During init there is no active link.
5536                    Until the link is up, set the link rate to 10Gbps */
5537                 bp->link_vars.line_speed = SPEED_10000;
5538                 bnx2x_init_port_minmax(bp);
5539
5540                 if (!BP_NOMCP(bp))
5541                         bp->mf_config =
5542                               SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
5543                 bnx2x_calc_vn_weight_sum(bp);
5544
5545                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5546                         bnx2x_init_vn_minmax(bp, 2*vn + port);
5547
5548                 /* Enable rate shaping and fairness */
5549                 bp->cmng.flags.cmng_enables |=
5550                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5551
5552         } else {
5553                 /* rate shaping and fairness are disabled */
5554                 DP(NETIF_MSG_IFUP,
5555                    "single function mode  minmax will be disabled\n");
5556         }
5557
5558
5559         /* Store it to internal memory */
5560         if (bp->port.pmf)
5561                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5562                         REG_WR(bp, BAR_XSTRORM_INTMEM +
5563                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5564                                ((u32 *)(&bp->cmng))[i]);
5565 }
5566
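     /* The cases below intentionally fall through: a COMMON load also does
      * the PORT and FUNCTION init, and a PORT load also does the FUNCTION
      * init.
      */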
5567 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5568 {
5569         switch (load_code) {
5570         case FW_MSG_CODE_DRV_LOAD_COMMON:
5571                 bnx2x_init_internal_common(bp);
5572                 /* no break */
5573
5574         case FW_MSG_CODE_DRV_LOAD_PORT:
5575                 bnx2x_init_internal_port(bp);
5576                 /* no break */
5577
5578         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5579                 bnx2x_init_internal_func(bp);
5580                 break;
5581
5582         default:
5583                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5584                 break;
5585         }
5586 }
5587
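     /* Full NIC init after a successful load request: set up the fastpath
      * and default status blocks, the Rx/Tx/slowpath rings, internal memory,
      * the RSS indirection table and statistics, then enable interrupts.
      */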
5588 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5589 {
5590         int i;
5591
5592         for_each_queue(bp, i) {
5593                 struct bnx2x_fastpath *fp = &bp->fp[i];
5594
5595                 fp->bp = bp;
5596                 fp->state = BNX2X_FP_STATE_CLOSED;
5597                 fp->index = i;
5598                 fp->cl_id = BP_L_ID(bp) + i;
5599 #ifdef BCM_CNIC
5600                 fp->sb_id = fp->cl_id + 1;
5601 #else
5602                 fp->sb_id = fp->cl_id;
5603 #endif
5604                 DP(NETIF_MSG_IFUP,
5605                    "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
5606                    i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5607                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5608                               fp->sb_id);
5609                 bnx2x_update_fpsb_idx(fp);
5610         }
5611
5612         /* ensure status block indices were read */
5613         rmb();
5614
5615
5616         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5617                           DEF_SB_ID);
5618         bnx2x_update_dsb_idx(bp);
5619         bnx2x_update_coalesce(bp);
5620         bnx2x_init_rx_rings(bp);
5621         bnx2x_init_tx_ring(bp);
5622         bnx2x_init_sp_ring(bp);
5623         bnx2x_init_context(bp);
5624         bnx2x_init_internal(bp, load_code);
5625         bnx2x_init_ind_table(bp);
5626         bnx2x_stats_init(bp);
5627
5628         /* At this point, we are ready for interrupts */
5629         atomic_set(&bp->intr_sem, 0);
5630
5631         /* flush all before enabling interrupts */
5632         mb();
5633         mmiowb();
5634
5635         bnx2x_int_enable(bp);
5636
5637         /* Check for SPIO5 */
5638         bnx2x_attn_int_deasserted0(bp,
5639                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5640                                    AEU_INPUTS_ATTN_BITS_SPIO5);
5641 }
5642
5643 /* end of nic init */
5644
5645 /*
5646  * gzip service functions
5647  */
5648
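     /* Allocate the DMA buffer and zlib stream used to decompress
      * firmware data.
      */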
5649 static int bnx2x_gunzip_init(struct bnx2x *bp)
5650 {
5651         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5652                                               &bp->gunzip_mapping);
5653         if (bp->gunzip_buf  == NULL)
5654                 goto gunzip_nomem1;
5655
5656         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5657         if (bp->strm  == NULL)
5658                 goto gunzip_nomem2;
5659
5660         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5661                                       GFP_KERNEL);
5662         if (bp->strm->workspace == NULL)
5663                 goto gunzip_nomem3;
5664
5665         return 0;
5666
5667 gunzip_nomem3:
5668         kfree(bp->strm);
5669         bp->strm = NULL;
5670
5671 gunzip_nomem2:
5672         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5673                             bp->gunzip_mapping);
5674         bp->gunzip_buf = NULL;
5675
5676 gunzip_nomem1:
5677         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5678                " decompression\n", bp->dev->name);
5679         return -ENOMEM;
5680 }
5681
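     /* Release the zlib stream and DMA buffer allocated by
      * bnx2x_gunzip_init().
      */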
5682 static void bnx2x_gunzip_end(struct bnx2x *bp)
5683 {
5684         kfree(bp->strm->workspace);
5685
5686         kfree(bp->strm);
5687         bp->strm = NULL;
5688
5689         if (bp->gunzip_buf) {
5690                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5691                                     bp->gunzip_mapping);
5692                 bp->gunzip_buf = NULL;
5693         }
5694 }
5695
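     /* Inflate a gzip'ed blob into bp->gunzip_buf; on success returns 0 and
      * bp->gunzip_outlen holds the output length in 32-bit words.
      */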
5696 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5697 {
5698         int n, rc;
5699
5700         /* check gzip header */
5701         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5702                 BNX2X_ERR("Bad gzip header\n");
5703                 return -EINVAL;
5704         }
5705
5706         n = 10;
5707
5708 #define FNAME                           0x8
5709
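             /* if FNAME is set, skip the NUL-terminated original file name */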
5710         if (zbuf[3] & FNAME)
5711                 while ((zbuf[n++] != 0) && (n < len));
5712
5713         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5714         bp->strm->avail_in = len - n;
5715         bp->strm->next_out = bp->gunzip_buf;
5716         bp->strm->avail_out = FW_BUF_SIZE;
5717
5718         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5719         if (rc != Z_OK)
5720                 return rc;
5721
5722         rc = zlib_inflate(bp->strm, Z_FINISH);
5723         if ((rc != Z_OK) && (rc != Z_STREAM_END))
5724                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5725                        bp->dev->name, bp->strm->msg);
5726
5727         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5728         if (bp->gunzip_outlen & 0x3)
5729                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5730                                     " gunzip_outlen (%d) not aligned\n",
5731                        bp->dev->name, bp->gunzip_outlen);
5732         bp->gunzip_outlen >>= 2;
5733
5734         zlib_inflateEnd(bp->strm);
5735
5736         if (rc == Z_STREAM_END)
5737                 return 0;
5738
5739         return rc;
5740 }
5741
5742 /* nic load/unload */
5743
5744 /*
5745  * General service functions
5746  */
5747
5748 /* send a NIG loopback debug packet */
5749 static void bnx2x_lb_pckt(struct bnx2x *bp)
5750 {
5751         u32 wb_write[3];
5752
5753         /* Ethernet source and destination addresses */
5754         wb_write[0] = 0x55555555;
5755         wb_write[1] = 0x55555555;
5756         wb_write[2] = 0x20;             /* SOP */
5757         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5758
5759         /* NON-IP protocol */