1 /* bnx2x_main.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2009 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h>  /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
51 #include <linux/io.h>
52
53
54 #include "bnx2x.h"
55 #include "bnx2x_init.h"
56 #include "bnx2x_dump.h"
57
58 #define DRV_MODULE_VERSION      "1.48.105"
59 #define DRV_MODULE_RELDATE      "2009/03/02"
60 #define BNX2X_BC_VER            0x040200
61
62 /* Time in jiffies before concluding the transmitter is hung */
63 #define TX_TIMEOUT              (5*HZ)
64
65 static char version[] __devinitdata =
66         "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
67         DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
68
69 MODULE_AUTHOR("Eliezer Tamir");
70 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
71 MODULE_LICENSE("GPL");
72 MODULE_VERSION(DRV_MODULE_VERSION);
73
74 static int multi_mode = 1;
75 module_param(multi_mode, int, 0);
76 MODULE_PARM_DESC(multi_mode, " Use per-CPU queues");
77
78 static int disable_tpa;
79 module_param(disable_tpa, int, 0);
80 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
81
82 static int int_mode;
83 module_param(int_mode, int, 0);
84 MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
85
86 static int poll;
87 module_param(poll, int, 0);
88 MODULE_PARM_DESC(poll, " Use polling (for debug)");
89
90 static int mrrs = -1;
91 module_param(mrrs, int, 0);
92 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
93
94 static int debug;
95 module_param(debug, int, 0);
96 MODULE_PARM_DESC(debug, " Default debug msglevel");
97
98 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
99
100 static struct workqueue_struct *bnx2x_wq;
101
102 enum bnx2x_board_type {
103         BCM57710 = 0,
104         BCM57711 = 1,
105         BCM57711E = 2,
106 };
107
108 /* indexed by board_type, above */
109 static struct {
110         char *name;
111 } board_info[] __devinitdata = {
112         { "Broadcom NetXtreme II BCM57710 XGb" },
113         { "Broadcom NetXtreme II BCM57711 XGb" },
114         { "Broadcom NetXtreme II BCM57711E XGb" }
115 };
116
117
118 static const struct pci_device_id bnx2x_pci_tbl[] = {
119         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
120                 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
121         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
122                 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
123         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
124                 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
125         { 0 }
126 };
127
128 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
129
130 /****************************************************************************
131 * General service functions
132 ****************************************************************************/
133
134 /* used only at init
135  * locking is done by mcp
136  */
137 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
138 {
139         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
140         pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
141         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
142                                PCICFG_VENDOR_ID_OFFSET);
143 }
144
145 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
146 {
147         u32 val;
148
149         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
150         pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
151         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
152                                PCICFG_VENDOR_ID_OFFSET);
153
154         return val;
155 }
156
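/* These two helpers drive the GRC address/data window in PCI config
 * space: the target register offset is written to PCICFG_GRC_ADDRESS and
 * the data is then moved through PCICFG_GRC_DATA, so a register can be
 * reached without going through the memory-mapped BAR.  The pre-DMAE
 * init paths below fall back to them, e.g.
 *
 *      for (i = 0; i < len32; i++)
 *              data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
 *
 * and the window is parked back at PCICFG_VENDOR_ID_OFFSET afterwards so
 * that ordinary config space accesses are not redirected.
 */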
157 static const u32 dmae_reg_go_c[] = {
158         DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
159         DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
160         DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
161         DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
162 };
163
164 /* copy command into DMAE command memory and set DMAE command go */
165 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
166                             int idx)
167 {
168         u32 cmd_offset;
169         int i;
170
171         cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
172         for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
173                 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
174
175                 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
176                    idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
177         }
178         REG_WR(bp, dmae_reg_go_c[idx], 1);
179 }
180
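/* The DMAE block provides 16 command channels (see dmae_reg_go_c[]
 * above).  bnx2x_post_dmae() copies the command image dword by dword into
 * the channel's slot in DMAE command memory and then writes 1 to that
 * channel's GO register to start the transfer; completion is reported by
 * the engine writing comp_val to comp_addr, which the callers below poll.
 */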
181 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
182                       u32 len32)
183 {
184         struct dmae_command *dmae = &bp->init_dmae;
185         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
186         int cnt = 200;
187
188         if (!bp->dmae_ready) {
189                 u32 *data = bnx2x_sp(bp, wb_data[0]);
190
191                 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
192                    "  using indirect\n", dst_addr, len32);
193                 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
194                 return;
195         }
196
197         mutex_lock(&bp->dmae_mutex);
198
199         memset(dmae, 0, sizeof(struct dmae_command));
200
201         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
202                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
203                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
204 #ifdef __BIG_ENDIAN
205                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
206 #else
207                         DMAE_CMD_ENDIANITY_DW_SWAP |
208 #endif
209                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
210                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
211         dmae->src_addr_lo = U64_LO(dma_addr);
212         dmae->src_addr_hi = U64_HI(dma_addr);
213         dmae->dst_addr_lo = dst_addr >> 2;
214         dmae->dst_addr_hi = 0;
215         dmae->len = len32;
216         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
217         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
218         dmae->comp_val = DMAE_COMP_VAL;
219
220         DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
221            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
222                     "dst_addr [%x:%08x (%08x)]\n"
223            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
224            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
225            dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
226            dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
227         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
228            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
229            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
230
231         *wb_comp = 0;
232
233         bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
234
235         udelay(5);
236
237         while (*wb_comp != DMAE_COMP_VAL) {
238                 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
239
240                 if (!cnt) {
241                         BNX2X_ERR("DMAE timeout!\n");
242                         break;
243                 }
244                 cnt--;
245                 /* adjust delay for emulation/FPGA */
246                 if (CHIP_REV_IS_SLOW(bp))
247                         msleep(100);
248                 else
249                         udelay(5);
250         }
251
252         mutex_unlock(&bp->dmae_mutex);
253 }
254
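/* Usage sketch: callers stage the dwords to transfer in a DMA-able buffer
 * and pass its bus address here together with the GRC byte offset of the
 * destination; this is roughly what bnx2x_wb_wr() below ends up doing
 * through the REG_WR_DMAE() wrapper:
 *
 *      u32 wb_write[2] = { val_hi, val_lo };       (copied to a DMA area)
 *      bnx2x_write_dmae(bp, staging_dma_addr, reg, 2);
 *
 * "staging_dma_addr" is a placeholder for wherever the caller mapped the
 * data.  Note that dst_addr is a byte offset in GRC space and is converted
 * to a dword address (dst_addr >> 2) for the DMAE engine.
 */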
255 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
256 {
257         struct dmae_command *dmae = &bp->init_dmae;
258         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
259         int cnt = 200;
260
261         if (!bp->dmae_ready) {
262                 u32 *data = bnx2x_sp(bp, wb_data[0]);
263                 int i;
264
265                 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
266                    "  using indirect\n", src_addr, len32);
267                 for (i = 0; i < len32; i++)
268                         data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
269                 return;
270         }
271
272         mutex_lock(&bp->dmae_mutex);
273
274         memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
275         memset(dmae, 0, sizeof(struct dmae_command));
276
277         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
278                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
279                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
280 #ifdef __BIG_ENDIAN
281                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
282 #else
283                         DMAE_CMD_ENDIANITY_DW_SWAP |
284 #endif
285                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
286                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
287         dmae->src_addr_lo = src_addr >> 2;
288         dmae->src_addr_hi = 0;
289         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
290         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
291         dmae->len = len32;
292         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
293         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
294         dmae->comp_val = DMAE_COMP_VAL;
295
296         DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
297            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
298                     "dst_addr [%x:%08x (%08x)]\n"
299            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
300            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
301            dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
302            dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
303
304         *wb_comp = 0;
305
306         bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
307
308         udelay(5);
309
310         while (*wb_comp != DMAE_COMP_VAL) {
311
312                 if (!cnt) {
313                         BNX2X_ERR("DMAE timeout!\n");
314                         break;
315                 }
316                 cnt--;
317                 /* adjust delay for emulation/FPGA */
318                 if (CHIP_REV_IS_SLOW(bp))
319                         msleep(100);
320                 else
321                         udelay(5);
322         }
323         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
324            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
325            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
326
327         mutex_unlock(&bp->dmae_mutex);
328 }
329
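/* Both DMAE paths above poll the write-back completion word for up to
 * 200 iterations: roughly 200 * 5us = 1ms on real silicon and
 * 200 * 100ms = 20s when CHIP_REV_IS_SLOW() (emulation/FPGA) stretches
 * the per-iteration delay.  On timeout only an error is logged; the
 * caller is not informed that the transfer may not have completed.
 */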
330 /* used only for slowpath so not inlined */
331 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
332 {
333         u32 wb_write[2];
334
335         wb_write[0] = val_hi;
336         wb_write[1] = val_lo;
337         REG_WR_DMAE(bp, reg, wb_write, 2);
338 }
339
340 #ifdef USE_WB_RD
341 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
342 {
343         u32 wb_data[2];
344
345         REG_RD_DMAE(bp, reg, wb_data, 2);
346
347         return HILO_U64(wb_data[0], wb_data[1]);
348 }
349 #endif
350
351 static int bnx2x_mc_assert(struct bnx2x *bp)
352 {
353         char last_idx;
354         int i, rc = 0;
355         u32 row0, row1, row2, row3;
356
357         /* XSTORM */
358         last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
359                            XSTORM_ASSERT_LIST_INDEX_OFFSET);
360         if (last_idx)
361                 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
362
363         /* print the asserts */
364         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
365
366                 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
367                               XSTORM_ASSERT_LIST_OFFSET(i));
368                 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
369                               XSTORM_ASSERT_LIST_OFFSET(i) + 4);
370                 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
371                               XSTORM_ASSERT_LIST_OFFSET(i) + 8);
372                 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
373                               XSTORM_ASSERT_LIST_OFFSET(i) + 12);
374
375                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
376                         BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
377                                   " 0x%08x 0x%08x 0x%08x\n",
378                                   i, row3, row2, row1, row0);
379                         rc++;
380                 } else {
381                         break;
382                 }
383         }
384
385         /* TSTORM */
386         last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
387                            TSTORM_ASSERT_LIST_INDEX_OFFSET);
388         if (last_idx)
389                 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
390
391         /* print the asserts */
392         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
393
394                 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
395                               TSTORM_ASSERT_LIST_OFFSET(i));
396                 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
397                               TSTORM_ASSERT_LIST_OFFSET(i) + 4);
398                 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
399                               TSTORM_ASSERT_LIST_OFFSET(i) + 8);
400                 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
401                               TSTORM_ASSERT_LIST_OFFSET(i) + 12);
402
403                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
404                         BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
405                                   " 0x%08x 0x%08x 0x%08x\n",
406                                   i, row3, row2, row1, row0);
407                         rc++;
408                 } else {
409                         break;
410                 }
411         }
412
413         /* CSTORM */
414         last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
415                            CSTORM_ASSERT_LIST_INDEX_OFFSET);
416         if (last_idx)
417                 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
418
419         /* print the asserts */
420         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
421
422                 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
423                               CSTORM_ASSERT_LIST_OFFSET(i));
424                 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
425                               CSTORM_ASSERT_LIST_OFFSET(i) + 4);
426                 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
427                               CSTORM_ASSERT_LIST_OFFSET(i) + 8);
428                 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
429                               CSTORM_ASSERT_LIST_OFFSET(i) + 12);
430
431                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
432                         BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
433                                   " 0x%08x 0x%08x 0x%08x\n",
434                                   i, row3, row2, row1, row0);
435                         rc++;
436                 } else {
437                         break;
438                 }
439         }
440
441         /* USTORM */
442         last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
443                            USTORM_ASSERT_LIST_INDEX_OFFSET);
444         if (last_idx)
445                 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
446
447         /* print the asserts */
448         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
449
450                 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
451                               USTORM_ASSERT_LIST_OFFSET(i));
452                 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
453                               USTORM_ASSERT_LIST_OFFSET(i) + 4);
454                 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
455                               USTORM_ASSERT_LIST_OFFSET(i) + 8);
456                 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
457                               USTORM_ASSERT_LIST_OFFSET(i) + 12);
458
459                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
460                         BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
461                                   " 0x%08x 0x%08x 0x%08x\n",
462                                   i, row3, row2, row1, row0);
463                         rc++;
464                 } else {
465                         break;
466                 }
467         }
468
469         return rc;
470 }
471
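/* bnx2x_mc_assert() scans the assert list of each storm processor
 * (XSTORM, TSTORM, CSTORM, USTORM) until it reaches an entry whose first
 * dword still holds COMMON_ASM_INVALID_ASSERT_OPCODE, printing every
 * populated 16-byte record on the way; the return value is the number of
 * asserts found across all four storms.
 */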
472 static void bnx2x_fw_dump(struct bnx2x *bp)
473 {
474         u32 mark, offset;
475         __be32 data[9];
476         int word;
477
478         mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
479         mark = ((mark + 0x3) & ~0x3);
480         printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
481
482         for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
483                 for (word = 0; word < 8; word++)
484                         data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
485                                                   offset + 4*word));
486                 data[8] = 0x0;
487                 printk(KERN_CONT "%s", (char *)data);
488         }
489         for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
490                 for (word = 0; word < 8; word++)
491                         data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
492                                                   offset + 4*word));
493                 data[8] = 0x0;
494                 printk(KERN_CONT "%s", (char *)data);
495         }
496         printk("\n" KERN_ERR PFX "end of fw dump\n");
497 }
498
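/* bnx2x_fw_dump() prints the management firmware trace from the MCP
 * scratchpad.  The value read from scratchpad offset 0xf104 ("mark") is
 * used as the current position in the cyclic trace area, so the dump is
 * made in two passes - from mark up to 0xF900 and then from 0xF108 back
 * up to mark - to cover the whole buffer.  Each step copies eight
 * big-endian dwords and prints them as a NUL-terminated string.
 */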
499 static void bnx2x_panic_dump(struct bnx2x *bp)
500 {
501         int i;
502         u16 j, start, end;
503
504         bp->stats_state = STATS_STATE_DISABLED;
505         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
506
507         BNX2X_ERR("begin crash dump -----------------\n");
508
509         /* Indices */
510         /* Common */
511         BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
512                   "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
513                   "  spq_prod_idx(%u)\n",
514                   bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
515                   bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
516
517         /* Rx */
518         for_each_rx_queue(bp, i) {
519                 struct bnx2x_fastpath *fp = &bp->fp[i];
520
521                 BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
522                           "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
523                           "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
524                           i, fp->rx_bd_prod, fp->rx_bd_cons,
525                           le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
526                           fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
527                 BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
528                           "  fp_u_idx(%x) *sb_u_idx(%x)\n",
529                           fp->rx_sge_prod, fp->last_max_sge,
530                           le16_to_cpu(fp->fp_u_idx),
531                           fp->status_blk->u_status_block.status_block_index);
532         }
533
534         /* Tx */
535         for_each_tx_queue(bp, i) {
536                 struct bnx2x_fastpath *fp = &bp->fp[i];
537                 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
538
539                 BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
540                           "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
541                           i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
542                           fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
543                 BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
544                           "  bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
545                           fp->status_blk->c_status_block.status_block_index,
546                           hw_prods->packets_prod, hw_prods->bds_prod);
547         }
548
549         /* Rings */
550         /* Rx */
551         for_each_rx_queue(bp, i) {
552                 struct bnx2x_fastpath *fp = &bp->fp[i];
553
554                 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
555                 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
556                 for (j = start; j != end; j = RX_BD(j + 1)) {
557                         u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
558                         struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
559
560                         BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
561                                   i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
562                 }
563
564                 start = RX_SGE(fp->rx_sge_prod);
565                 end = RX_SGE(fp->last_max_sge);
566                 for (j = start; j != end; j = RX_SGE(j + 1)) {
567                         u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
568                         struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
569
570                         BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
571                                   i, j, rx_sge[1], rx_sge[0], sw_page->page);
572                 }
573
574                 start = RCQ_BD(fp->rx_comp_cons - 10);
575                 end = RCQ_BD(fp->rx_comp_cons + 503);
576                 for (j = start; j != end; j = RCQ_BD(j + 1)) {
577                         u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
578
579                         BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
580                                   i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
581                 }
582         }
583
584         /* Tx */
585         for_each_tx_queue(bp, i) {
586                 struct bnx2x_fastpath *fp = &bp->fp[i];
587
588                 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
589                 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
590                 for (j = start; j != end; j = TX_BD(j + 1)) {
591                         struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
592
593                         BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
594                                   i, j, sw_bd->skb, sw_bd->first_bd);
595                 }
596
597                 start = TX_BD(fp->tx_bd_cons - 10);
598                 end = TX_BD(fp->tx_bd_cons + 254);
599                 for (j = start; j != end; j = TX_BD(j + 1)) {
600                         u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
601
602                         BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
603                                   i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
604                 }
605         }
606
607         bnx2x_fw_dump(bp);
608         bnx2x_mc_assert(bp);
609         BNX2X_ERR("end crash dump -----------------\n");
610 }
611
612 static void bnx2x_int_enable(struct bnx2x *bp)
613 {
614         int port = BP_PORT(bp);
615         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
616         u32 val = REG_RD(bp, addr);
617         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
618         int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
619
620         if (msix) {
621                 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
622                          HC_CONFIG_0_REG_INT_LINE_EN_0);
623                 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
624                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
625         } else if (msi) {
626                 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
627                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
628                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
629                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
630         } else {
631                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
632                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
633                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
634                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
635
636                 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
637                    val, port, addr);
638
639                 REG_WR(bp, addr, val);
640
641                 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
642         }
643
644         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
645            val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
646
647         REG_WR(bp, addr, val);
648
649         if (CHIP_IS_E1H(bp)) {
650                 /* init leading/trailing edge */
651                 if (IS_E1HMF(bp)) {
652                         val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
653                         if (bp->port.pmf)
654                                 /* enable nig and gpio3 attention */
655                                 val |= 0x1100;
656                 } else
657                         val = 0xffff;
658
659                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
660                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
661         }
662 }
663
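/* bnx2x_int_enable() programs the per-port HC configuration register for
 * the interrupt mode in use: MSI-X clears the single-ISR and INTA# bits
 * and enables MSI/MSI-X delivery, MSI keeps single-ISR set, and the
 * legacy INTA# case first enables everything, writes the register, and
 * then clears the MSI/MSI-X bit for the final write so that only the
 * interrupt line remains active.  On E1H the leading/trailing edge
 * registers are also set up for attention bits.
 */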
664 static void bnx2x_int_disable(struct bnx2x *bp)
665 {
666         int port = BP_PORT(bp);
667         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
668         u32 val = REG_RD(bp, addr);
669
670         val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
671                  HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
672                  HC_CONFIG_0_REG_INT_LINE_EN_0 |
673                  HC_CONFIG_0_REG_ATTN_BIT_EN_0);
674
675         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
676            val, port, addr);
677
678         /* flush all outstanding writes */
679         mmiowb();
680
681         REG_WR(bp, addr, val);
682         if (REG_RD(bp, addr) != val)
683                 BNX2X_ERR("BUG! proper val not read from IGU!\n");
684
685 }
686
687 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
688 {
689         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
690         int i, offset;
691
692         /* disable interrupt handling */
693         atomic_inc(&bp->intr_sem);
694         if (disable_hw)
695                 /* prevent the HW from sending interrupts */
696                 bnx2x_int_disable(bp);
697
698         /* make sure all ISRs are done */
699         if (msix) {
700                 synchronize_irq(bp->msix_table[0].vector);
701                 offset = 1;
702                 for_each_queue(bp, i)
703                         synchronize_irq(bp->msix_table[i + offset].vector);
704         } else
705                 synchronize_irq(bp->pdev->irq);
706
707         /* make sure sp_task is not running */
708         cancel_delayed_work(&bp->sp_task);
709         flush_workqueue(bnx2x_wq);
710 }
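/* bnx2x_int_disable_sync() quiesces interrupt processing: bumping
 * intr_sem makes the interrupt handlers bail out early, the HC can be
 * told to stop generating interrupts altogether, synchronize_irq() waits
 * for handlers already running on other CPUs, and the slowpath work is
 * cancelled and the workqueue flushed so nothing re-arms the hardware
 * afterwards.
 */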
711
712 /* fast path */
713
714 /*
715  * General service functions
716  */
717
718 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
719                                 u8 storm, u16 index, u8 op, u8 update)
720 {
721         u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
722                        COMMAND_REG_INT_ACK);
723         struct igu_ack_register igu_ack;
724
725         igu_ack.status_block_index = index;
726         igu_ack.sb_id_and_flags =
727                         ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
728                          (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
729                          (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
730                          (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
731
732         DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
733            (*(u32 *)&igu_ack), hc_addr);
734         REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
735 }
736
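/* bnx2x_ack_sb() packs the status block id, storm id, new index, update
 * flag and interrupt mode ("op") into a single igu_ack_register dword and
 * writes it to the per-port INT_ACK command register; depending on "op"
 * this both acknowledges the status block and re-enables (or leaves
 * disabled) the interrupt for it.
 */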
737 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
738 {
739         struct host_status_block *fpsb = fp->status_blk;
740         u16 rc = 0;
741
742         barrier(); /* status block is written to by the chip */
743         if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
744                 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
745                 rc |= 1;
746         }
747         if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
748                 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
749                 rc |= 2;
750         }
751         return rc;
752 }
753
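/* The return value of bnx2x_update_fpsb_idx() is a two-bit mask: bit 0 is
 * set when the CSTORM index (Tx path) advanced and bit 1 when the USTORM
 * index (Rx path) advanced, so any non-zero value means the fastpath has
 * new work to look at.
 */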
754 static u16 bnx2x_ack_int(struct bnx2x *bp)
755 {
756         u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
757                        COMMAND_REG_SIMD_MASK);
758         u32 result = REG_RD(bp, hc_addr);
759
760         DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
761            result, hc_addr);
762
763         return result;
764 }
765
766
767 /*
768  * fast path service functions
769  */
770
771 static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
772 {
773         u16 tx_cons_sb;
774
775         /* Tell compiler that status block fields can change */
776         barrier();
777         tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
778         return (fp->tx_pkt_cons != tx_cons_sb);
779 }
780
781 static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
782 {
783         /* Tell compiler that consumer and producer can change */
784         barrier();
785         return (fp->tx_pkt_prod != fp->tx_pkt_cons);
786 }
787
788 /* free skb in the packet ring at pos idx
789  * return idx of last bd freed
790  */
791 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
792                              u16 idx)
793 {
794         struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
795         struct eth_tx_bd *tx_bd;
796         struct sk_buff *skb = tx_buf->skb;
797         u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
798         int nbd;
799
800         DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
801            idx, tx_buf, skb);
802
803         /* unmap first bd */
804         DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
805         tx_bd = &fp->tx_desc_ring[bd_idx];
806         pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
807                          BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
808
809         nbd = le16_to_cpu(tx_bd->nbd) - 1;
810         new_cons = nbd + tx_buf->first_bd;
811 #ifdef BNX2X_STOP_ON_ERROR
812         if (nbd > (MAX_SKB_FRAGS + 2)) {
813                 BNX2X_ERR("BAD nbd!\n");
814                 bnx2x_panic();
815         }
816 #endif
817
818         /* Skip a parse bd and the TSO split header bd
819            since they have no mapping */
820         if (nbd)
821                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
822
823         if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
824                                            ETH_TX_BD_FLAGS_TCP_CSUM |
825                                            ETH_TX_BD_FLAGS_SW_LSO)) {
826                 if (--nbd)
827                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
828                 tx_bd = &fp->tx_desc_ring[bd_idx];
829                 /* is this a TSO split header bd? */
830                 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
831                         if (--nbd)
832                                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
833                 }
834         }
835
836         /* now free frags */
837         while (nbd > 0) {
838
839                 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
840                 tx_bd = &fp->tx_desc_ring[bd_idx];
841                 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
842                                BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
843                 if (--nbd)
844                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
845         }
846
847         /* release skb */
848         WARN_ON(!skb);
849         dev_kfree_skb(skb);
850         tx_buf->first_bd = 0;
851         tx_buf->skb = NULL;
852
853         return new_cons;
854 }
855
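/* BD accounting in bnx2x_free_tx_pkt(): the first BD of a packet carries
 * the total BD count (nbd).  The parse BD (present whenever checksum/LSO
 * flags are set) and an optional TSO split-header BD are skipped without
 * unmapping, since they carry no data buffer of their own; all remaining
 * BDs are unmapped as page fragments.  The returned index is used by
 * bnx2x_tx_int() below as the new tx_bd_cons.
 */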
856 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
857 {
858         s16 used;
859         u16 prod;
860         u16 cons;
861
862         barrier(); /* Tell compiler that prod and cons can change */
863         prod = fp->tx_bd_prod;
864         cons = fp->tx_bd_cons;
865
866         /* NUM_TX_RINGS = number of "next-page" entries
867            It will be used as a threshold */
868         used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
869
870 #ifdef BNX2X_STOP_ON_ERROR
871         WARN_ON(used < 0);
872         WARN_ON(used > fp->bp->tx_ring_size);
873         WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
874 #endif
875
876         return (s16)(fp->bp->tx_ring_size) - used;
877 }
878
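/* bnx2x_tx_avail() counts the NUM_TX_RINGS "next page" BDs as used, so
 * the value it returns is a conservative estimate of the free
 * descriptors.  bnx2x_tx_int() below only wakes a stopped queue once this
 * reports at least MAX_SKB_FRAGS + 3 free BDs, roughly the worst case for
 * a maximally fragmented skb plus its parse/split-header BDs.
 */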
879 static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
880 {
881         struct bnx2x *bp = fp->bp;
882         struct netdev_queue *txq;
883         u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
884         int done = 0;
885
886 #ifdef BNX2X_STOP_ON_ERROR
887         if (unlikely(bp->panic))
888                 return;
889 #endif
890
891         txq = netdev_get_tx_queue(bp->dev, fp->index);
892         hw_cons = le16_to_cpu(*fp->tx_cons_sb);
893         sw_cons = fp->tx_pkt_cons;
894
895         while (sw_cons != hw_cons) {
896                 u16 pkt_cons;
897
898                 pkt_cons = TX_BD(sw_cons);
899
900                 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
901
902                 DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
903                    hw_cons, sw_cons, pkt_cons);
904
905 /*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
906                         rmb();
907                         prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
908                 }
909 */
910                 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
911                 sw_cons++;
912                 done++;
913         }
914
915         fp->tx_pkt_cons = sw_cons;
916         fp->tx_bd_cons = bd_cons;
917
918         /* TBD need a thresh? */
919         if (unlikely(netif_tx_queue_stopped(txq))) {
920
921                 __netif_tx_lock(txq, smp_processor_id());
922
923                 /* Need to make the tx_bd_cons update visible to start_xmit()
924                  * before checking for netif_tx_queue_stopped().  Without the
925                  * memory barrier, there is a small possibility that
926                  * start_xmit() will miss it and cause the queue to be stopped
927                  * forever.
928                  */
929                 smp_mb();
930
931                 if ((netif_tx_queue_stopped(txq)) &&
932                     (bp->state == BNX2X_STATE_OPEN) &&
933                     (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
934                         netif_tx_wake_queue(txq);
935
936                 __netif_tx_unlock(txq);
937         }
938 }
939
940
941 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
942                            union eth_rx_cqe *rr_cqe)
943 {
944         struct bnx2x *bp = fp->bp;
945         int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
946         int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
947
948         DP(BNX2X_MSG_SP,
949            "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
950            fp->index, cid, command, bp->state,
951            rr_cqe->ramrod_cqe.ramrod_type);
952
953         bp->spq_left++;
954
955         if (fp->index) {
956                 switch (command | fp->state) {
957                 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
958                                                 BNX2X_FP_STATE_OPENING):
959                         DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
960                            cid);
961                         fp->state = BNX2X_FP_STATE_OPEN;
962                         break;
963
964                 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
965                         DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
966                            cid);
967                         fp->state = BNX2X_FP_STATE_HALTED;
968                         break;
969
970                 default:
971                         BNX2X_ERR("unexpected MC reply (%d)  "
972                                   "fp->state is %x\n", command, fp->state);
973                         break;
974                 }
975                 mb(); /* force bnx2x_wait_ramrod() to see the change */
976                 return;
977         }
978
979         switch (command | bp->state) {
980         case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
981                 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
982                 bp->state = BNX2X_STATE_OPEN;
983                 break;
984
985         case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
986                 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
987                 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
988                 fp->state = BNX2X_FP_STATE_HALTED;
989                 break;
990
991         case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
992                 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
993                 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
994                 break;
995
996
997         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
998         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
999                 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
1000                 bp->set_mac_pending = 0;
1001                 break;
1002
1003         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
1004                 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
1005                 break;
1006
1007         default:
1008                 BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
1009                           command, bp->state);
1010                 break;
1011         }
1012         mb(); /* force bnx2x_wait_ramrod() to see the change */
1013 }
1014
1015 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1016                                      struct bnx2x_fastpath *fp, u16 index)
1017 {
1018         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1019         struct page *page = sw_buf->page;
1020         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1021
1022         /* Skip "next page" elements */
1023         if (!page)
1024                 return;
1025
1026         pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
1027                        SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1028         __free_pages(page, PAGES_PER_SGE_SHIFT);
1029
1030         sw_buf->page = NULL;
1031         sge->addr_hi = 0;
1032         sge->addr_lo = 0;
1033 }
1034
1035 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1036                                            struct bnx2x_fastpath *fp, int last)
1037 {
1038         int i;
1039
1040         for (i = 0; i < last; i++)
1041                 bnx2x_free_rx_sge(bp, fp, i);
1042 }
1043
1044 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1045                                      struct bnx2x_fastpath *fp, u16 index)
1046 {
1047         struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1048         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1049         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1050         dma_addr_t mapping;
1051
1052         if (unlikely(page == NULL))
1053                 return -ENOMEM;
1054
1055         mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
1056                                PCI_DMA_FROMDEVICE);
1057         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1058                 __free_pages(page, PAGES_PER_SGE_SHIFT);
1059                 return -ENOMEM;
1060         }
1061
1062         sw_buf->page = page;
1063         pci_unmap_addr_set(sw_buf, mapping, mapping);
1064
1065         sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1066         sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1067
1068         return 0;
1069 }
1070
1071 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1072                                      struct bnx2x_fastpath *fp, u16 index)
1073 {
1074         struct sk_buff *skb;
1075         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1076         struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1077         dma_addr_t mapping;
1078
1079         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1080         if (unlikely(skb == NULL))
1081                 return -ENOMEM;
1082
1083         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
1084                                  PCI_DMA_FROMDEVICE);
1085         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1086                 dev_kfree_skb(skb);
1087                 return -ENOMEM;
1088         }
1089
1090         rx_buf->skb = skb;
1091         pci_unmap_addr_set(rx_buf, mapping, mapping);
1092
1093         rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1094         rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1095
1096         return 0;
1097 }
1098
1099 /* note that we are not allocating a new skb,
1100  * we are just moving one from cons to prod
1101  * we are not creating a new mapping,
1102  * so there is no need to check for dma_mapping_error().
1103  */
1104 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1105                                struct sk_buff *skb, u16 cons, u16 prod)
1106 {
1107         struct bnx2x *bp = fp->bp;
1108         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1109         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1110         struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1111         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1112
1113         pci_dma_sync_single_for_device(bp->pdev,
1114                                        pci_unmap_addr(cons_rx_buf, mapping),
1115                                        RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1116
1117         prod_rx_buf->skb = cons_rx_buf->skb;
1118         pci_unmap_addr_set(prod_rx_buf, mapping,
1119                            pci_unmap_addr(cons_rx_buf, mapping));
1120         *prod_bd = *cons_bd;
1121 }
1122
1123 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1124                                              u16 idx)
1125 {
1126         u16 last_max = fp->last_max_sge;
1127
1128         if (SUB_S16(idx, last_max) > 0)
1129                 fp->last_max_sge = idx;
1130 }
1131
1132 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1133 {
1134         int i, j;
1135
1136         for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1137                 int idx = RX_SGE_CNT * i - 1;
1138
1139                 for (j = 0; j < 2; j++) {
1140                         SGE_MASK_CLEAR_BIT(fp, idx);
1141                         idx--;
1142                 }
1143         }
1144 }
1145
1146 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1147                                   struct eth_fast_path_rx_cqe *fp_cqe)
1148 {
1149         struct bnx2x *bp = fp->bp;
1150         u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1151                                      le16_to_cpu(fp_cqe->len_on_bd)) >>
1152                       SGE_PAGE_SHIFT;
1153         u16 last_max, last_elem, first_elem;
1154         u16 delta = 0;
1155         u16 i;
1156
1157         if (!sge_len)
1158                 return;
1159
1160         /* First mark all used pages */
1161         for (i = 0; i < sge_len; i++)
1162                 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1163
1164         DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1165            sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1166
1167         /* Here we assume that the last SGE index is the biggest */
1168         prefetch((void *)(fp->sge_mask));
1169         bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1170
1171         last_max = RX_SGE(fp->last_max_sge);
1172         last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1173         first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1174
1175         /* If ring is not full */
1176         if (last_elem + 1 != first_elem)
1177                 last_elem++;
1178
1179         /* Now update the prod */
1180         for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1181                 if (likely(fp->sge_mask[i]))
1182                         break;
1183
1184                 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1185                 delta += RX_SGE_MASK_ELEM_SZ;
1186         }
1187
1188         if (delta > 0) {
1189                 fp->rx_sge_prod += delta;
1190                 /* clear page-end entries */
1191                 bnx2x_clear_sge_mask_next_elems(fp);
1192         }
1193
1194         DP(NETIF_MSG_RX_STATUS,
1195            "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
1196            fp->last_max_sge, fp->rx_sge_prod);
1197 }
1198
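/* SGE recycling works against the sge_mask bitmap: bits for pages handed
 * to the stack are cleared above, and rx_sge_prod is only advanced over
 * 64-bit mask words that have become completely empty, re-marking them as
 * available as it goes.  This keeps the producer from overtaking pages
 * that the hardware may still report in later completions.
 */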
1199 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1200 {
1201         /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1202         memset(fp->sge_mask, 0xff,
1203                (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1204
1205         /* Clear the two last indices in the page to 1:
1206            these are the indices that correspond to the "next" element,
1207            hence will never be indicated and should be removed from
1208            the calculations. */
1209         bnx2x_clear_sge_mask_next_elems(fp);
1210 }
1211
1212 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1213                             struct sk_buff *skb, u16 cons, u16 prod)
1214 {
1215         struct bnx2x *bp = fp->bp;
1216         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1217         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1218         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1219         dma_addr_t mapping;
1220
1221         /* move empty skb from pool to prod and map it */
1222         prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1223         mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1224                                  bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1225         pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1226
1227         /* move partial skb from cons to pool (don't unmap yet) */
1228         fp->tpa_pool[queue] = *cons_rx_buf;
1229
1230         /* mark bin state as start - print error if current state != stop */
1231         if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1232                 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1233
1234         fp->tpa_state[queue] = BNX2X_TPA_START;
1235
1236         /* point prod_bd to new skb */
1237         prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1238         prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1239
1240 #ifdef BNX2X_STOP_ON_ERROR
1241         fp->tpa_queue_used |= (1 << queue);
1242 #ifdef __powerpc64__
1243         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1244 #else
1245         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1246 #endif
1247            fp->tpa_queue_used);
1248 #endif
1249 }
1250
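/* TPA (LRO) aggregation flow: bnx2x_tpa_start() parks the skb of the BD
 * that opened an aggregation in tpa_pool[queue] and puts the spare skb
 * previously held in that pool entry onto the ring in its place, so the
 * ring slot keeps receiving while the aggregation grows.  bnx2x_tpa_stop()
 * later takes the skb back out of the pool, attaches the SGE pages as
 * fragments via bnx2x_fill_frag_skb() and passes it up the stack.
 */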
1251 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1252                                struct sk_buff *skb,
1253                                struct eth_fast_path_rx_cqe *fp_cqe,
1254                                u16 cqe_idx)
1255 {
1256         struct sw_rx_page *rx_pg, old_rx_pg;
1257         u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1258         u32 i, frag_len, frag_size, pages;
1259         int err;
1260         int j;
1261
1262         frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1263         pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1264
1265         /* This is needed in order to enable forwarding support */
1266         if (frag_size)
1267                 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1268                                                max(frag_size, (u32)len_on_bd));
1269
1270 #ifdef BNX2X_STOP_ON_ERROR
1271         if (pages >
1272             min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
1273                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1274                           pages, cqe_idx);
1275                 BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
1276                           fp_cqe->pkt_len, len_on_bd);
1277                 bnx2x_panic();
1278                 return -EINVAL;
1279         }
1280 #endif
1281
1282         /* Run through the SGL and compose the fragmented skb */
1283         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1284                 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1285
1286                 /* FW gives the indices of the SGE as if the ring is an array
1287                    (meaning that "next" element will consume 2 indices) */
1288                 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1289                 rx_pg = &fp->rx_page_ring[sge_idx];
1290                 old_rx_pg = *rx_pg;
1291
1292                 /* If we fail to allocate a substitute page, we simply stop
1293                    where we are and drop the whole packet */
1294                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1295                 if (unlikely(err)) {
1296                         fp->eth_q_stats.rx_skb_alloc_failed++;
1297                         return err;
1298                 }
1299
1300                 /* Unmap the page as we are going to pass it to the stack */
1301                 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1302                               SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1303
1304                 /* Add one frag and update the appropriate fields in the skb */
1305                 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1306
1307                 skb->data_len += frag_len;
1308                 skb->truesize += frag_len;
1309                 skb->len += frag_len;
1310
1311                 frag_size -= frag_len;
1312         }
1313
1314         return 0;
1315 }
1316
1317 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1318                            u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1319                            u16 cqe_idx)
1320 {
1321         struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1322         struct sk_buff *skb = rx_buf->skb;
1323         /* alloc new skb */
1324         struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1325
1326         /* Unmap skb in the pool anyway, as we are going to change
1327            pool entry status to BNX2X_TPA_STOP even if new skb allocation
1328            fails. */
1329         pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1330                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1331
1332         if (likely(new_skb)) {
1333                 /* fix ip xsum and give it to the stack */
1334                 /* (no need to map the new skb) */
1335 #ifdef BCM_VLAN
1336                 int is_vlan_cqe =
1337                         (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1338                          PARSING_FLAGS_VLAN);
1339                 int is_not_hwaccel_vlan_cqe =
1340                         (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1341 #endif
1342
1343                 prefetch(skb);
1344                 prefetch(((char *)(skb)) + 128);
1345
1346 #ifdef BNX2X_STOP_ON_ERROR
1347                 if (pad + len > bp->rx_buf_size) {
1348                         BNX2X_ERR("skb_put is about to fail...  "
1349                                   "pad %d  len %d  rx_buf_size %d\n",
1350                                   pad, len, bp->rx_buf_size);
1351                         bnx2x_panic();
1352                         return;
1353                 }
1354 #endif
1355
1356                 skb_reserve(skb, pad);
1357                 skb_put(skb, len);
1358
1359                 skb->protocol = eth_type_trans(skb, bp->dev);
1360                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1361
1362                 {
1363                         struct iphdr *iph;
1364
1365                         iph = (struct iphdr *)skb->data;
1366 #ifdef BCM_VLAN
1367                         /* If there is no Rx VLAN offloading -
1368                            take the VLAN tag into account */
1369                         if (unlikely(is_not_hwaccel_vlan_cqe))
1370                                 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1371 #endif
1372                         iph->check = 0;
1373                         iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1374                 }
1375
1376                 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1377                                          &cqe->fast_path_cqe, cqe_idx)) {
1378 #ifdef BCM_VLAN
1379                         if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1380                             (!is_not_hwaccel_vlan_cqe))
1381                                 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1382                                                 le16_to_cpu(cqe->fast_path_cqe.
1383                                                             vlan_tag));
1384                         else
1385 #endif
1386                                 netif_receive_skb(skb);
1387                 } else {
1388                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1389                            " - dropping packet!\n");
1390                         dev_kfree_skb(skb);
1391                 }
1392
1393
1394                 /* put new skb in bin */
1395                 fp->tpa_pool[queue].skb = new_skb;
1396
1397         } else {
1398                 /* else drop the packet and keep the buffer in the bin */
1399                 DP(NETIF_MSG_RX_STATUS,
1400                    "Failed to allocate new skb - dropping packet!\n");
1401                 fp->eth_q_stats.rx_skb_alloc_failed++;
1402         }
1403
1404         fp->tpa_state[queue] = BNX2X_TPA_STOP;
1405 }
1406
1407 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1408                                         struct bnx2x_fastpath *fp,
1409                                         u16 bd_prod, u16 rx_comp_prod,
1410                                         u16 rx_sge_prod)
1411 {
1412         struct ustorm_eth_rx_producers rx_prods = {0};
1413         int i;
1414
1415         /* Update producers */
1416         rx_prods.bd_prod = bd_prod;
1417         rx_prods.cqe_prod = rx_comp_prod;
1418         rx_prods.sge_prod = rx_sge_prod;
1419
1420         /*
1421          * Make sure that the BD and SGE data is updated before updating the
1422          * producers since FW might read the BD/SGE right after the producer
1423          * is updated.
1424          * This is only applicable for weak-ordered memory model archs such
1425          * as IA-64. The following barrier is also mandatory since the FW
1426          * assumes BDs must have buffers.
1427          */
1428         wmb();
1429
1430         for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1431                 REG_WR(bp, BAR_USTRORM_INTMEM +
1432                        USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1433                        ((u32 *)&rx_prods)[i]);
1434
1435         mmiowb(); /* keep prod updates ordered */
1436
1437         DP(NETIF_MSG_RX_STATUS,
1438            "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
1439            fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1440 }
1441
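/*
 * RX completion handler (called from the NAPI poll routine).  Roughly:
 *   - walk the completion queue from sw_comp_cons up to the HW index
 *     reported in the status block,
 *   - hand slowpath CQEs to bnx2x_sp_event(),
 *   - for TPA start/stop CQEs delegate to bnx2x_tpa_start()/bnx2x_tpa_stop(),
 *   - otherwise either copy small packets into a fresh skb (jumbo MTU case)
 *     or replace the ring buffer and pass the skb up the stack,
 *   - stop after 'budget' packets and write the new producers back to the FW.
 */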
1442 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1443 {
1444         struct bnx2x *bp = fp->bp;
1445         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1446         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1447         int rx_pkt = 0;
1448
1449 #ifdef BNX2X_STOP_ON_ERROR
1450         if (unlikely(bp->panic))
1451                 return 0;
1452 #endif
1453
1454         /* The CQ "next element" is the same size as a regular element,
1455            so it is safe to step over it here */
1456         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1457         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1458                 hw_comp_cons++;
1459
1460         bd_cons = fp->rx_bd_cons;
1461         bd_prod = fp->rx_bd_prod;
1462         bd_prod_fw = bd_prod;
1463         sw_comp_cons = fp->rx_comp_cons;
1464         sw_comp_prod = fp->rx_comp_prod;
1465
1466         /* Memory barrier necessary as speculative reads of the rx
1467          * buffer can be ahead of the index in the status block
1468          */
1469         rmb();
1470
1471         DP(NETIF_MSG_RX_STATUS,
1472            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1473            fp->index, hw_comp_cons, sw_comp_cons);
1474
1475         while (sw_comp_cons != hw_comp_cons) {
1476                 struct sw_rx_bd *rx_buf = NULL;
1477                 struct sk_buff *skb;
1478                 union eth_rx_cqe *cqe;
1479                 u8 cqe_fp_flags;
1480                 u16 len, pad;
1481
1482                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1483                 bd_prod = RX_BD(bd_prod);
1484                 bd_cons = RX_BD(bd_cons);
1485
1486                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1487                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1488
1489                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1490                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1491                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1492                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1493                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1494                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1495
1496                 /* is this a slowpath msg? */
1497                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1498                         bnx2x_sp_event(fp, cqe);
1499                         goto next_cqe;
1500
1501                 /* this is an rx packet */
1502                 } else {
1503                         rx_buf = &fp->rx_buf_ring[bd_cons];
1504                         skb = rx_buf->skb;
1505                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1506                         pad = cqe->fast_path_cqe.placement_offset;
1507
1508                         /* If CQE is marked both TPA_START and TPA_END
1509                            it is a non-TPA CQE */
1510                         if ((!fp->disable_tpa) &&
1511                             (TPA_TYPE(cqe_fp_flags) !=
1512                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1513                                 u16 queue = cqe->fast_path_cqe.queue_index;
1514
1515                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1516                                         DP(NETIF_MSG_RX_STATUS,
1517                                            "calling tpa_start on queue %d\n",
1518                                            queue);
1519
1520                                         bnx2x_tpa_start(fp, queue, skb,
1521                                                         bd_cons, bd_prod);
1522                                         goto next_rx;
1523                                 }
1524
1525                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1526                                         DP(NETIF_MSG_RX_STATUS,
1527                                            "calling tpa_stop on queue %d\n",
1528                                            queue);
1529
1530                                         if (!BNX2X_RX_SUM_FIX(cqe))
1531                                                 BNX2X_ERR("STOP on non-TCP "
1532                                                           "data\n");
1533
1534                                         /* This is the size of the linear
1535                                            data on this skb */
1536                                         len = le16_to_cpu(cqe->fast_path_cqe.
1537                                                                 len_on_bd);
1538                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1539                                                     len, cqe, comp_ring_cons);
1540 #ifdef BNX2X_STOP_ON_ERROR
1541                                         if (bp->panic)
1542                                                 return -EINVAL;
1543 #endif
1544
1545                                         bnx2x_update_sge_prod(fp,
1546                                                         &cqe->fast_path_cqe);
1547                                         goto next_cqe;
1548                                 }
1549                         }
1550
1551                         pci_dma_sync_single_for_device(bp->pdev,
1552                                         pci_unmap_addr(rx_buf, mapping),
1553                                                        pad + RX_COPY_THRESH,
1554                                                        PCI_DMA_FROMDEVICE);
1555                         prefetch(skb);
1556                         prefetch(((char *)(skb)) + 128);
1557
1558                         /* is this an error packet? */
1559                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1560                                 DP(NETIF_MSG_RX_ERR,
1561                                    "ERROR  flags %x  rx packet %u\n",
1562                                    cqe_fp_flags, sw_comp_cons);
1563                                 fp->eth_q_stats.rx_err_discard_pkt++;
1564                                 goto reuse_rx;
1565                         }
1566
1567                         /* Since we don't have a jumbo ring,
1568                          * copy small packets if mtu > 1500
1569                          */
1570                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1571                             (len <= RX_COPY_THRESH)) {
1572                                 struct sk_buff *new_skb;
1573
1574                                 new_skb = netdev_alloc_skb(bp->dev,
1575                                                            len + pad);
1576                                 if (new_skb == NULL) {
1577                                         DP(NETIF_MSG_RX_ERR,
1578                                            "ERROR  packet dropped "
1579                                            "because of alloc failure\n");
1580                                         fp->eth_q_stats.rx_skb_alloc_failed++;
1581                                         goto reuse_rx;
1582                                 }
1583
1584                                 /* aligned copy */
1585                                 skb_copy_from_linear_data_offset(skb, pad,
1586                                                     new_skb->data + pad, len);
1587                                 skb_reserve(new_skb, pad);
1588                                 skb_put(new_skb, len);
1589
1590                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1591
1592                                 skb = new_skb;
1593
1594                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1595                                 pci_unmap_single(bp->pdev,
1596                                         pci_unmap_addr(rx_buf, mapping),
1597                                                  bp->rx_buf_size,
1598                                                  PCI_DMA_FROMDEVICE);
1599                                 skb_reserve(skb, pad);
1600                                 skb_put(skb, len);
1601
1602                         } else {
1603                                 DP(NETIF_MSG_RX_ERR,
1604                                    "ERROR  packet dropped because "
1605                                    "of alloc failure\n");
1606                                 fp->eth_q_stats.rx_skb_alloc_failed++;
1607 reuse_rx:
1608                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1609                                 goto next_rx;
1610                         }
1611
1612                         skb->protocol = eth_type_trans(skb, bp->dev);
1613
1614                         skb->ip_summed = CHECKSUM_NONE;
1615                         if (bp->rx_csum) {
1616                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1617                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1618                                 else
1619                                         fp->eth_q_stats.hw_csum_err++;
1620                         }
1621                 }
1622
1623                 skb_record_rx_queue(skb, fp->index);
1624 #ifdef BCM_VLAN
1625                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1626                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1627                      PARSING_FLAGS_VLAN))
1628                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1629                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1630                 else
1631 #endif
1632                         netif_receive_skb(skb);
1633
1634
1635 next_rx:
1636                 rx_buf->skb = NULL;
1637
1638                 bd_cons = NEXT_RX_IDX(bd_cons);
1639                 bd_prod = NEXT_RX_IDX(bd_prod);
1640                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1641                 rx_pkt++;
1642 next_cqe:
1643                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1644                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1645
1646                 if (rx_pkt == budget)
1647                         break;
1648         } /* while */
1649
1650         fp->rx_bd_cons = bd_cons;
1651         fp->rx_bd_prod = bd_prod_fw;
1652         fp->rx_comp_cons = sw_comp_cons;
1653         fp->rx_comp_prod = sw_comp_prod;
1654
1655         /* Update producers */
1656         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1657                              fp->rx_sge_prod);
1658
1659         fp->rx_pkt += rx_pkt;
1660         fp->rx_calls++;
1661
1662         return rx_pkt;
1663 }
1664
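/*
 * Per-queue MSI-X handler: acknowledge the fastpath status block with
 * interrupts disabled and let the NAPI poll routine do the actual RX/TX
 * completion work; nothing is processed in hard-IRQ context here.
 */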
1665 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1666 {
1667         struct bnx2x_fastpath *fp = fp_cookie;
1668         struct bnx2x *bp = fp->bp;
1669         int index = fp->index;
1670
1671         /* Return here if interrupt is disabled */
1672         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1673                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1674                 return IRQ_HANDLED;
1675         }
1676
1677         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1678            index, fp->sb_id);
1679         bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1680
1681 #ifdef BNX2X_STOP_ON_ERROR
1682         if (unlikely(bp->panic))
1683                 return IRQ_HANDLED;
1684 #endif
1685
1686         prefetch(fp->rx_cons_sb);
1687         prefetch(fp->tx_cons_sb);
1688         prefetch(&fp->status_blk->c_status_block.status_block_index);
1689         prefetch(&fp->status_blk->u_status_block.status_block_index);
1690
1691         napi_schedule(&bnx2x_fp(bp, index, napi));
1692
1693         return IRQ_HANDLED;
1694 }
1695
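/*
 * INTA/MSI handler (single vector shared by fast and slow path).  The
 * acknowledged status word is a bitmask: the bit at (0x2 << fp->sb_id)
 * signals fastpath work for queue 0 (handled via NAPI), bit 0 signals
 * slowpath work which is deferred to the sp_task workqueue, and any
 * leftover bits are only logged.
 */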
1696 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1697 {
1698         struct bnx2x *bp = netdev_priv(dev_instance);
1699         u16 status = bnx2x_ack_int(bp);
1700         u16 mask;
1701
1702         /* Return here if interrupt is shared and it's not for us */
1703         if (unlikely(status == 0)) {
1704                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1705                 return IRQ_NONE;
1706         }
1707         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1708
1709         /* Return here if interrupt is disabled */
1710         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1711                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1712                 return IRQ_HANDLED;
1713         }
1714
1715 #ifdef BNX2X_STOP_ON_ERROR
1716         if (unlikely(bp->panic))
1717                 return IRQ_HANDLED;
1718 #endif
1719
1720         mask = 0x2 << bp->fp[0].sb_id;
1721         if (status & mask) {
1722                 struct bnx2x_fastpath *fp = &bp->fp[0];
1723
1724                 prefetch(fp->rx_cons_sb);
1725                 prefetch(fp->tx_cons_sb);
1726                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1727                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1728
1729                 napi_schedule(&bnx2x_fp(bp, 0, napi));
1730
1731                 status &= ~mask;
1732         }
1733
1734
1735         if (unlikely(status & 0x1)) {
1736                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1737
1738                 status &= ~0x1;
1739                 if (!status)
1740                         return IRQ_HANDLED;
1741         }
1742
1743         if (status)
1744                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1745                    status);
1746
1747         return IRQ_HANDLED;
1748 }
1749
1750 /* end of fast path */
1751
1752 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1753
1754 /* Link */
1755
1756 /*
1757  * General service functions
1758  */
1759
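/*
 * HW resource lock: each function owns a DRIVER_CONTROL register; writing
 * the resource bit to <reg + 4> attempts to take the lock, and reading the
 * register back tells whether the bit actually stuck.  The acquire side
 * polls for up to ~5 seconds.  A minimal usage sketch (this is how the PHY
 * lock wrappers below use it for the MDIO resource):
 *
 *	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
 *	... access the shared PHY ...
 *	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
 */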
1760 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1761 {
1762         u32 lock_status;
1763         u32 resource_bit = (1 << resource);
1764         int func = BP_FUNC(bp);
1765         u32 hw_lock_control_reg;
1766         int cnt;
1767
1768         /* Validating that the resource is within range */
1769         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1770                 DP(NETIF_MSG_HW,
1771                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1772                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1773                 return -EINVAL;
1774         }
1775
1776         if (func <= 5) {
1777                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1778         } else {
1779                 hw_lock_control_reg =
1780                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1781         }
1782
1783         /* Validating that the resource is not already taken */
1784         lock_status = REG_RD(bp, hw_lock_control_reg);
1785         if (lock_status & resource_bit) {
1786                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1787                    lock_status, resource_bit);
1788                 return -EEXIST;
1789         }
1790
1791         /* Try for 5 seconds, polling every 5ms */
1792         for (cnt = 0; cnt < 1000; cnt++) {
1793                 /* Try to acquire the lock */
1794                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1795                 lock_status = REG_RD(bp, hw_lock_control_reg);
1796                 if (lock_status & resource_bit)
1797                         return 0;
1798
1799                 msleep(5);
1800         }
1801         DP(NETIF_MSG_HW, "Timeout\n");
1802         return -EAGAIN;
1803 }
1804
1805 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1806 {
1807         u32 lock_status;
1808         u32 resource_bit = (1 << resource);
1809         int func = BP_FUNC(bp);
1810         u32 hw_lock_control_reg;
1811
1812         /* Validating that the resource is within range */
1813         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1814                 DP(NETIF_MSG_HW,
1815                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1816                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1817                 return -EINVAL;
1818         }
1819
1820         if (func <= 5) {
1821                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1822         } else {
1823                 hw_lock_control_reg =
1824                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1825         }
1826
1827         /* Validating that the resource is currently taken */
1828         lock_status = REG_RD(bp, hw_lock_control_reg);
1829         if (!(lock_status & resource_bit)) {
1830                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1831                    lock_status, resource_bit);
1832                 return -EFAULT;
1833         }
1834
1835         REG_WR(bp, hw_lock_control_reg, resource_bit);
1836         return 0;
1837 }
1838
1839 /* HW Lock for shared dual port PHYs */
1840 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1841 {
1842         mutex_lock(&bp->port.phy_mutex);
1843
1844         if (bp->port.need_hw_lock)
1845                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1846 }
1847
1848 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1849 {
1850         if (bp->port.need_hw_lock)
1851                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1852
1853         mutex_unlock(&bp->port.phy_mutex);
1854 }
1855
1856 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1857 {
1858         /* The GPIO should be swapped if swap register is set and active */
1859         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1860                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1861         int gpio_shift = gpio_num +
1862                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1863         u32 gpio_mask = (1 << gpio_shift);
1864         u32 gpio_reg;
1865         int value;
1866
1867         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1868                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1869                 return -EINVAL;
1870         }
1871
1872         /* read GPIO value */
1873         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1874
1875         /* get the requested pin value */
1876         if ((gpio_reg & gpio_mask) == gpio_mask)
1877                 value = 1;
1878         else
1879                 value = 0;
1880
1881         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
1882
1883         return value;
1884 }
1885
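/*
 * GPIO output control.  The pin number is shifted by the port (after the
 * NIG swap straps are taken into account) and the FLOAT/SET/CLR bit groups
 * in MISC_REG_GPIO are updated under the GPIO HW lock.  For example, the
 * fan-failure handling further down drives the PHY reset pin low with:
 *
 *	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 *		       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
 */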
1886 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1887 {
1888         /* The GPIO should be swapped if swap register is set and active */
1889         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1890                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1891         int gpio_shift = gpio_num +
1892                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1893         u32 gpio_mask = (1 << gpio_shift);
1894         u32 gpio_reg;
1895
1896         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1897                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1898                 return -EINVAL;
1899         }
1900
1901         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1902         /* read GPIO and mask off everything except the float bits */
1903         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1904
1905         switch (mode) {
1906         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1907                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1908                    gpio_num, gpio_shift);
1909                 /* clear FLOAT and set CLR */
1910                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1911                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1912                 break;
1913
1914         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1915                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1916                    gpio_num, gpio_shift);
1917                 /* clear FLOAT and set SET */
1918                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1919                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1920                 break;
1921
1922         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1923                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1924                    gpio_num, gpio_shift);
1925                 /* set FLOAT */
1926                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1927                 break;
1928
1929         default:
1930                 break;
1931         }
1932
1933         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1934         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1935
1936         return 0;
1937 }
1938
1939 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1940 {
1941         /* The GPIO should be swapped if swap register is set and active */
1942         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1943                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1944         int gpio_shift = gpio_num +
1945                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1946         u32 gpio_mask = (1 << gpio_shift);
1947         u32 gpio_reg;
1948
1949         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1950                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1951                 return -EINVAL;
1952         }
1953
1954         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1955         /* read GPIO int */
1956         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1957
1958         switch (mode) {
1959         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1960                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1961                                    "output low\n", gpio_num, gpio_shift);
1962                 /* clear SET and set CLR */
1963                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1964                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1965                 break;
1966
1967         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1968                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1969                                    "output high\n", gpio_num, gpio_shift);
1970                 /* clear CLR and set SET */
1971                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1972                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1973                 break;
1974
1975         default:
1976                 break;
1977         }
1978
1979         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1980         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1981
1982         return 0;
1983 }
1984
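/*
 * SPIO control, analogous to bnx2x_set_gpio() above but for the shared
 * (per-chip rather than per-port) SPIO pins 4..7 in MISC_REG_SPIO.
 */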
1985 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1986 {
1987         u32 spio_mask = (1 << spio_num);
1988         u32 spio_reg;
1989
1990         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1991             (spio_num > MISC_REGISTERS_SPIO_7)) {
1992                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1993                 return -EINVAL;
1994         }
1995
1996         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1997         /* read SPIO and mask off everything except the float bits */
1998         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1999
2000         switch (mode) {
2001         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2002                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2003                 /* clear FLOAT and set CLR */
2004                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2005                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2006                 break;
2007
2008         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2009                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2010                 /* clear FLOAT and set SET */
2011                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2012                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2013                 break;
2014
2015         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2016                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2017                 /* set FLOAT */
2018                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2019                 break;
2020
2021         default:
2022                 break;
2023         }
2024
2025         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2026         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2027
2028         return 0;
2029 }
2030
2031 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2032 {
2033         switch (bp->link_vars.ieee_fc &
2034                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2035         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2036                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2037                                           ADVERTISED_Pause);
2038                 break;
2039
2040         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2041                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2042                                          ADVERTISED_Pause);
2043                 break;
2044
2045         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2046                 bp->port.advertising |= ADVERTISED_Asym_Pause;
2047                 break;
2048
2049         default:
2050                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2051                                           ADVERTISED_Pause);
2052                 break;
2053         }
2054 }
2055
2056 static void bnx2x_link_report(struct bnx2x *bp)
2057 {
2058         if (bp->link_vars.link_up) {
2059                 if (bp->state == BNX2X_STATE_OPEN)
2060                         netif_carrier_on(bp->dev);
2061                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2062
2063                 printk("%d Mbps ", bp->link_vars.line_speed);
2064
2065                 if (bp->link_vars.duplex == DUPLEX_FULL)
2066                         printk("full duplex");
2067                 else
2068                         printk("half duplex");
2069
2070                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2071                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2072                                 printk(", receive ");
2073                                 if (bp->link_vars.flow_ctrl &
2074                                     BNX2X_FLOW_CTRL_TX)
2075                                         printk("& transmit ");
2076                         } else {
2077                                 printk(", transmit ");
2078                         }
2079                         printk("flow control ON");
2080                 }
2081                 printk("\n");
2082
2083         } else { /* link_down */
2084                 netif_carrier_off(bp->dev);
2085                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2086         }
2087 }
2088
2089 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2090 {
2091         if (!BP_NOMCP(bp)) {
2092                 u8 rc;
2093
2094                 /* Initialize link parameters structure variables */
2095                 /* It is recommended to turn off RX FC for jumbo frames
2096                    for better performance */
2097                 if (IS_E1HMF(bp))
2098                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2099                 else if (bp->dev->mtu > 5000)
2100                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2101                 else
2102                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2103
2104                 bnx2x_acquire_phy_lock(bp);
2105
2106                 if (load_mode == LOAD_DIAG)
2107                         bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2108
2109                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2110
2111                 bnx2x_release_phy_lock(bp);
2112
2113                 bnx2x_calc_fc_adv(bp);
2114
2115                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2116                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2117                         bnx2x_link_report(bp);
2118                 }
2119
2120                 return rc;
2121         }
2122         BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2123         return -EINVAL;
2124 }
2125
2126 static void bnx2x_link_set(struct bnx2x *bp)
2127 {
2128         if (!BP_NOMCP(bp)) {
2129                 bnx2x_acquire_phy_lock(bp);
2130                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2131                 bnx2x_release_phy_lock(bp);
2132
2133                 bnx2x_calc_fc_adv(bp);
2134         } else
2135                 BNX2X_ERR("Bootcode is missing - can not set link\n");
2136 }
2137
2138 static void bnx2x__link_reset(struct bnx2x *bp)
2139 {
2140         if (!BP_NOMCP(bp)) {
2141                 bnx2x_acquire_phy_lock(bp);
2142                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2143                 bnx2x_release_phy_lock(bp);
2144         } else
2145                 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2146 }
2147
2148 static u8 bnx2x_link_test(struct bnx2x *bp)
2149 {
2150         u8 rc;
2151
2152         bnx2x_acquire_phy_lock(bp);
2153         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2154         bnx2x_release_phy_lock(bp);
2155
2156         return rc;
2157 }
2158
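/*
 * Per-port rate-shaping/fairness timer setup.  r_param is the line rate in
 * bytes per microsecond (line_speed is in Mbps, hence the /8), and the
 * timeouts are converted to 4 usec SDM ticks.  A rough worked example,
 * assuming a 10G link: r_param = 10000 / 8 = 1250, and with the 100 usec
 * periodic timeout mentioned below the rs_threshold comes out as
 * 100 * 1250 * 5 / 4 = 156250 bytes.
 */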
2159 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2160 {
2161         u32 r_param = bp->link_vars.line_speed / 8;
2162         u32 fair_periodic_timeout_usec;
2163         u32 t_fair;
2164
2165         memset(&(bp->cmng.rs_vars), 0,
2166                sizeof(struct rate_shaping_vars_per_port));
2167         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2168
2169         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2170         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2171
2172         /* this is the threshold below which no timer arming will occur.
2173            The 1.25 coefficient makes the threshold a little bigger than
2174            the real time, to compensate for timer inaccuracy */
2175         bp->cmng.rs_vars.rs_threshold =
2176                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2177
2178         /* resolution of fairness timer */
2179         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2180         /* for 10G it is 1000usec. for 1G it is 10000usec. */
2181         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2182
2183         /* this is the threshold below which we won't arm the timer anymore */
2184         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2185
2186         /* we multiply by 1e3/8 to get bytes/msec.
2187            We don't want the credits to exceed a credit
2188            of t_fair*FAIR_MEM (the algorithm resolution) */
2189         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2190         /* since each tick is 4 usec */
2191         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2192 }
2193
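/*
 * Per-VN (per-function) min/max rate setup for E1H multi-function mode.
 * The min/max bandwidth fields from the MF configuration are multiplied by
 * 100 to get Mbps, and the rate-shaping quota is the number of bytes the VN
 * may send in one periodic interval.  Rough worked example, assuming a
 * 2500 Mbps vn_max_rate and the 100 usec period used above:
 * quota = 2500 * 100 / 8 = 31250 bytes per period.
 */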
2194 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2195 {
2196         struct rate_shaping_vars_per_vn m_rs_vn;
2197         struct fairness_vars_per_vn m_fair_vn;
2198         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2199         u16 vn_min_rate, vn_max_rate;
2200         int i;
2201
2202         /* If function is hidden - set min and max to zeroes */
2203         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2204                 vn_min_rate = 0;
2205                 vn_max_rate = 0;
2206
2207         } else {
2208                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2209                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2210                 /* If fairness is enabled (not all min rates are zero) and
2211                    the current min rate is zero, set it to DEF_MIN_RATE.
2212                    This is a requirement of the algorithm. */
2213                 if (bp->vn_weight_sum && (vn_min_rate == 0))
2214                         vn_min_rate = DEF_MIN_RATE;
2215                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2216                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2217         }
2218
2219         DP(NETIF_MSG_IFUP,
2220            "func %d: vn_min_rate=%d  vn_max_rate=%d  vn_weight_sum=%d\n",
2221            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2222
2223         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2224         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2225
2226         /* global vn counter - maximal Mbps for this vn */
2227         m_rs_vn.vn_counter.rate = vn_max_rate;
2228
2229         /* quota - number of bytes transmitted in this period */
2230         m_rs_vn.vn_counter.quota =
2231                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2232
2233         if (bp->vn_weight_sum) {
2234                 /* credit for each period of the fairness algorithm:
2235                    number of bytes in T_FAIR (the VNs share the port rate).
2236                    vn_weight_sum should not be larger than 10000, thus
2237                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2238                    than zero */
2239                 m_fair_vn.vn_credit_delta =
2240                         max((u32)(vn_min_rate * (T_FAIR_COEF /
2241                                                  (8 * bp->vn_weight_sum))),
2242                             (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2243                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2244                    m_fair_vn.vn_credit_delta);
2245         }
2246
2247         /* Store it to internal memory */
2248         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2249                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2250                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2251                        ((u32 *)(&m_rs_vn))[i]);
2252
2253         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2254                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2255                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2256                        ((u32 *)(&m_fair_vn))[i]);
2257 }
2258
2259
2260 /* This function is called upon link interrupt */
2261 static void bnx2x_link_attn(struct bnx2x *bp)
2262 {
2263         /* Make sure that we are synced with the current statistics */
2264         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2265
2266         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2267
2268         if (bp->link_vars.link_up) {
2269
2270                 /* dropless flow control */
2271                 if (CHIP_IS_E1H(bp)) {
2272                         int port = BP_PORT(bp);
2273                         u32 pause_enabled = 0;
2274
2275                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2276                                 pause_enabled = 1;
2277
2278                         REG_WR(bp, BAR_USTRORM_INTMEM +
2279                                USTORM_PAUSE_ENABLED_OFFSET(port),
2280                                pause_enabled);
2281                 }
2282
2283                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2284                         struct host_port_stats *pstats;
2285
2286                         pstats = bnx2x_sp(bp, port_stats);
2287                         /* reset old bmac stats */
2288                         memset(&(pstats->mac_stx[0]), 0,
2289                                sizeof(struct mac_stx));
2290                 }
2291                 if ((bp->state == BNX2X_STATE_OPEN) ||
2292                     (bp->state == BNX2X_STATE_DISABLED))
2293                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2294         }
2295
2296         /* indicate link status */
2297         bnx2x_link_report(bp);
2298
2299         if (IS_E1HMF(bp)) {
2300                 int port = BP_PORT(bp);
2301                 int func;
2302                 int vn;
2303
2304                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2305                         if (vn == BP_E1HVN(bp))
2306                                 continue;
2307
2308                         func = ((vn << 1) | port);
2309
2310                         /* Set the attention towards other drivers
2311                            on the same port */
2312                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2313                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2314                 }
2315
2316                 if (bp->link_vars.link_up) {
2317                         int i;
2318
2319                         /* Init rate shaping and fairness contexts */
2320                         bnx2x_init_port_minmax(bp);
2321
2322                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2323                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2324
2325                         /* Store it to internal memory */
2326                         for (i = 0;
2327                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
2328                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2329                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2330                                        ((u32 *)(&bp->cmng))[i]);
2331                 }
2332         }
2333 }
2334
2335 static void bnx2x__link_status_update(struct bnx2x *bp)
2336 {
2337         if (bp->state != BNX2X_STATE_OPEN)
2338                 return;
2339
2340         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2341
2342         if (bp->link_vars.link_up)
2343                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2344         else
2345                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2346
2347         /* indicate link status */
2348         bnx2x_link_report(bp);
2349 }
2350
2351 static void bnx2x_pmf_update(struct bnx2x *bp)
2352 {
2353         int port = BP_PORT(bp);
2354         u32 val;
2355
2356         bp->port.pmf = 1;
2357         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2358
2359         /* enable nig attention */
2360         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2361         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2362         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2363
2364         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2365 }
2366
2367 /* end of Link */
2368
2369 /* slow path */
2370
2371 /*
2372  * General service functions
2373  */
2374
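/*
 * Slow path queue (SPQ) posting.  Each entry is a single BD: the header
 * carries the ramrod command and HW CID, the two data words typically hold
 * the high/low halves of a DMA address, and the producer index is then
 * written to XSTORM internal memory so the FW picks the entry up.
 * Completions do not come back on this ring; they arrive as slowpath CQEs
 * on the fastpath ring and are dispatched from bnx2x_sp_event().
 */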
2375 /* the slow path queue is odd since completions arrive on the fastpath ring */
2376 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2377                          u32 data_hi, u32 data_lo, int common)
2378 {
2379         int func = BP_FUNC(bp);
2380
2381         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2382            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2383            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2384            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2385            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2386
2387 #ifdef BNX2X_STOP_ON_ERROR
2388         if (unlikely(bp->panic))
2389                 return -EIO;
2390 #endif
2391
2392         spin_lock_bh(&bp->spq_lock);
2393
2394         if (!bp->spq_left) {
2395                 BNX2X_ERR("BUG! SPQ ring full!\n");
2396                 spin_unlock_bh(&bp->spq_lock);
2397                 bnx2x_panic();
2398                 return -EBUSY;
2399         }
2400
2401         /* CID needs the port number to be encoded in it */
2402         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2403                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2404                                      HW_CID(bp, cid)));
2405         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2406         if (common)
2407                 bp->spq_prod_bd->hdr.type |=
2408                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2409
2410         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2411         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2412
2413         bp->spq_left--;
2414
2415         if (bp->spq_prod_bd == bp->spq_last_bd) {
2416                 bp->spq_prod_bd = bp->spq;
2417                 bp->spq_prod_idx = 0;
2418                 DP(NETIF_MSG_TIMER, "end of spq\n");
2419
2420         } else {
2421                 bp->spq_prod_bd++;
2422                 bp->spq_prod_idx++;
2423         }
2424
2425         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2426                bp->spq_prod_idx);
2427
2428         spin_unlock_bh(&bp->spq_lock);
2429         return 0;
2430 }
2431
2432 /* acquire split MCP access lock register */
2433 static int bnx2x_acquire_alr(struct bnx2x *bp)
2434 {
2435         u32 i, j, val;
2436         int rc = 0;
2437
2438         might_sleep();
2439         i = 100;
2440         for (j = 0; j < i*10; j++) {
2441                 val = (1UL << 31);
2442                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2443                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2444                 if (val & (1L << 31))
2445                         break;
2446
2447                 msleep(5);
2448         }
2449         if (!(val & (1L << 31))) {
2450                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2451                 rc = -EBUSY;
2452         }
2453
2454         return rc;
2455 }
2456
2457 /* release split MCP access lock register */
2458 static void bnx2x_release_alr(struct bnx2x *bp)
2459 {
2460         u32 val = 0;
2461
2462         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2463 }
2464
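/*
 * Compare the cached default-status-block indices against what the chip
 * last wrote and return a bitmask of the blocks that changed:
 * 1 = attention bits, 2 = CSTORM, 4 = USTORM, 8 = XSTORM, 16 = TSTORM.
 */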
2465 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2466 {
2467         struct host_def_status_block *def_sb = bp->def_status_blk;
2468         u16 rc = 0;
2469
2470         barrier(); /* status block is written to by the chip */
2471         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2472                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2473                 rc |= 1;
2474         }
2475         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2476                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2477                 rc |= 2;
2478         }
2479         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2480                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2481                 rc |= 4;
2482         }
2483         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2484                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2485                 rc |= 8;
2486         }
2487         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2488                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2489                 rc |= 16;
2490         }
2491         return rc;
2492 }
2493
2494 /*
2495  * slow path service functions
2496  */
2497
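/*
 * Newly asserted attention bits: mask them in the per-port AEU mask (under
 * the ATT_MASK HW lock), record them in bp->attn_state, service the
 * hard-wired sources (NIG/link, SW timer, GPIOs, general attentions) and
 * finally echo the bits to the HC ATTN_BITS_SET command register.
 */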
2498 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2499 {
2500         int port = BP_PORT(bp);
2501         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2502                        COMMAND_REG_ATTN_BITS_SET);
2503         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2504                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2505         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2506                                        NIG_REG_MASK_INTERRUPT_PORT0;
2507         u32 aeu_mask;
2508         u32 nig_mask = 0;
2509
2510         if (bp->attn_state & asserted)
2511                 BNX2X_ERR("IGU ERROR\n");
2512
2513         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2514         aeu_mask = REG_RD(bp, aeu_addr);
2515
2516         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2517            aeu_mask, asserted);
2518         aeu_mask &= ~(asserted & 0xff);
2519         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2520
2521         REG_WR(bp, aeu_addr, aeu_mask);
2522         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2523
2524         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2525         bp->attn_state |= asserted;
2526         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2527
2528         if (asserted & ATTN_HARD_WIRED_MASK) {
2529                 if (asserted & ATTN_NIG_FOR_FUNC) {
2530
2531                         bnx2x_acquire_phy_lock(bp);
2532
2533                         /* save nig interrupt mask */
2534                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2535                         REG_WR(bp, nig_int_mask_addr, 0);
2536
2537                         bnx2x_link_attn(bp);
2538
2539                         /* handle unicore attn? */
2540                 }
2541                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2542                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2543
2544                 if (asserted & GPIO_2_FUNC)
2545                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2546
2547                 if (asserted & GPIO_3_FUNC)
2548                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2549
2550                 if (asserted & GPIO_4_FUNC)
2551                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2552
2553                 if (port == 0) {
2554                         if (asserted & ATTN_GENERAL_ATTN_1) {
2555                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2556                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2557                         }
2558                         if (asserted & ATTN_GENERAL_ATTN_2) {
2559                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2560                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2561                         }
2562                         if (asserted & ATTN_GENERAL_ATTN_3) {
2563                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2564                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2565                         }
2566                 } else {
2567                         if (asserted & ATTN_GENERAL_ATTN_4) {
2568                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2569                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2570                         }
2571                         if (asserted & ATTN_GENERAL_ATTN_5) {
2572                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2573                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2574                         }
2575                         if (asserted & ATTN_GENERAL_ATTN_6) {
2576                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2577                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2578                         }
2579                 }
2580
2581         } /* if hardwired */
2582
2583         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2584            asserted, hc_addr);
2585         REG_WR(bp, hc_addr, asserted);
2586
2587         /* now set back the mask */
2588         if (asserted & ATTN_NIG_FOR_FUNC) {
2589                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2590                 bnx2x_release_phy_lock(bp);
2591         }
2592 }
2593
2594 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2595 {
2596         int port = BP_PORT(bp);
2597         int reg_offset;
2598         u32 val;
2599
2600         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2601                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2602
2603         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2604
2605                 val = REG_RD(bp, reg_offset);
2606                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2607                 REG_WR(bp, reg_offset, val);
2608
2609                 BNX2X_ERR("SPIO5 hw attention\n");
2610
2611                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2612                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2613                         /* Fan failure attention */
2614
2615                         /* The PHY reset is controlled by GPIO 1 */
2616                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2617                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2618                         /* Low power mode is controlled by GPIO 2 */
2619                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2620                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2621                         /* mark the failure */
2622                         bp->link_params.ext_phy_config &=
2623                                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2624                         bp->link_params.ext_phy_config |=
2625                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2626                         SHMEM_WR(bp,
2627                                  dev_info.port_hw_config[port].
2628                                                         external_phy_config,
2629                                  bp->link_params.ext_phy_config);
2630                         /* log the failure */
2631                         printk(KERN_ERR PFX "Fan Failure on Network"
2632                                " Controller %s has caused the driver to"
2633                                " shut down the card to prevent permanent"
2634                                " damage.  Please contact Dell Support for"
2635                                " assistance\n", bp->dev->name);
2636                         break;
2637
2638                 default:
2639                         break;
2640                 }
2641         }
2642
2643         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2644                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2645                 bnx2x_acquire_phy_lock(bp);
2646                 bnx2x_handle_module_detect_int(&bp->link_params);
2647                 bnx2x_release_phy_lock(bp);
2648         }
2649
2650         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2651
2652                 val = REG_RD(bp, reg_offset);
2653                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2654                 REG_WR(bp, reg_offset, val);
2655
2656                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2657                           (attn & HW_INTERRUT_ASSERT_SET_0));
2658                 bnx2x_panic();
2659         }
2660 }
2661
2662 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2663 {
2664         u32 val;
2665
2666         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2667
2668                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2669                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2670                 /* DORQ discard attention */
2671                 if (val & 0x2)
2672                         BNX2X_ERR("FATAL error from DORQ\n");
2673         }
2674
2675         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2676
2677                 int port = BP_PORT(bp);
2678                 int reg_offset;
2679
2680                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2681                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2682
2683                 val = REG_RD(bp, reg_offset);
2684                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2685                 REG_WR(bp, reg_offset, val);
2686
2687                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2688                           (attn & HW_INTERRUT_ASSERT_SET_1));
2689                 bnx2x_panic();
2690         }
2691 }
2692
2693 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2694 {
2695         u32 val;
2696
2697         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2698
2699                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2700                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2701                 /* CFC error attention */
2702                 if (val & 0x2)
2703                         BNX2X_ERR("FATAL error from CFC\n");
2704         }
2705
2706         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2707
2708                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2709                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2710                 /* RQ_USDMDP_FIFO_OVERFLOW */
2711                 if (val & 0x18000)
2712                         BNX2X_ERR("FATAL error from PXP\n");
2713         }
2714
2715         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2716
2717                 int port = BP_PORT(bp);
2718                 int reg_offset;
2719
2720                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2721                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2722
2723                 val = REG_RD(bp, reg_offset);
2724                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2725                 REG_WR(bp, reg_offset, val);
2726
2727                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2728                           (attn & HW_INTERRUT_ASSERT_SET_2));
2729                 bnx2x_panic();
2730         }
2731 }
2732
2733 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2734 {
2735         u32 val;
2736
2737         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2738
2739                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2740                         int func = BP_FUNC(bp);
2741
2742                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2743                         bnx2x__link_status_update(bp);
2744                         if (SHMEM_RD(bp, func_mb[func].drv_status) &
2745                                                         DRV_STATUS_PMF)
2746                                 bnx2x_pmf_update(bp);
2747
2748                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2749
2750                         BNX2X_ERR("MC assert!\n");
2751                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2752                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2753                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2754                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2755                         bnx2x_panic();
2756
2757                 } else if (attn & BNX2X_MCP_ASSERT) {
2758
2759                         BNX2X_ERR("MCP assert!\n");
2760                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2761                         bnx2x_fw_dump(bp);
2762
2763                 } else
2764                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2765         }
2766
2767         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2768                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2769                 if (attn & BNX2X_GRC_TIMEOUT) {
2770                         val = CHIP_IS_E1H(bp) ?
2771                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2772                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2773                 }
2774                 if (attn & BNX2X_GRC_RSV) {
2775                         val = CHIP_IS_E1H(bp) ?
2776                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2777                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2778                 }
2779                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2780         }
2781 }
2782
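/*
 * Deasserted attention bits: under the MCP access lock, read the four
 * "after invert" AEU registers and, for every dynamic attention group that
 * deasserted, dispatch the group-masked signals to the per-register
 * handlers above, then clear the bits in HC and unmask them in the AEU.
 */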
2783 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2784 {
2785         struct attn_route attn;
2786         struct attn_route group_mask;
2787         int port = BP_PORT(bp);
2788         int index;
2789         u32 reg_addr;
2790         u32 val;
2791         u32 aeu_mask;
2792
2793         /* need to take HW lock because MCP or other port might also
2794            try to handle this event */
2795         bnx2x_acquire_alr(bp);
2796
2797         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2798         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2799         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2800         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2801         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2802            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2803
2804         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2805                 if (deasserted & (1 << index)) {
2806                         group_mask = bp->attn_group[index];
2807
2808                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2809                            index, group_mask.sig[0], group_mask.sig[1],
2810                            group_mask.sig[2], group_mask.sig[3]);
2811
2812                         bnx2x_attn_int_deasserted3(bp,
2813                                         attn.sig[3] & group_mask.sig[3]);
2814                         bnx2x_attn_int_deasserted1(bp,
2815                                         attn.sig[1] & group_mask.sig[1]);
2816                         bnx2x_attn_int_deasserted2(bp,
2817                                         attn.sig[2] & group_mask.sig[2]);
2818                         bnx2x_attn_int_deasserted0(bp,
2819                                         attn.sig[0] & group_mask.sig[0]);
2820
2821                         if ((attn.sig[0] & group_mask.sig[0] &
2822                                                 HW_PRTY_ASSERT_SET_0) ||
2823                             (attn.sig[1] & group_mask.sig[1] &
2824                                                 HW_PRTY_ASSERT_SET_1) ||
2825                             (attn.sig[2] & group_mask.sig[2] &
2826                                                 HW_PRTY_ASSERT_SET_2))
2827                                 BNX2X_ERR("FATAL HW block parity attention\n");
2828                 }
2829         }
2830
2831         bnx2x_release_alr(bp);
2832
2833         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2834
2835         val = ~deasserted;
2836         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2837            val, reg_addr);
2838         REG_WR(bp, reg_addr, val);
2839
2840         if (~bp->attn_state & deasserted)
2841                 BNX2X_ERR("IGU ERROR\n");
2842
2843         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2844                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2845
2846         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2847         aeu_mask = REG_RD(bp, reg_addr);
2848
2849         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2850            aeu_mask, deasserted);
2851         aeu_mask |= (deasserted & 0xff);
2852         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2853
2854         REG_WR(bp, reg_addr, aeu_mask);
2855         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2856
2857         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2858         bp->attn_state &= ~deasserted;
2859         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2860 }
2861
2862 static void bnx2x_attn_int(struct bnx2x *bp)
2863 {
2864         /* read local copy of bits */
2865         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2866                                                                 attn_bits);
2867         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2868                                                                 attn_bits_ack);
2869         u32 attn_state = bp->attn_state;
2870
2871         /* look for changed bits */
2872         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2873         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
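        /* In other words: "asserted" are attention lines the HW now reports
         * set that we have neither acked nor recorded in attn_state, while
         * "deasserted" are lines we still track as set but which the HW has
         * cleared and already acked.  Only these changed bits are handled.
         */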
2874
2875         DP(NETIF_MSG_HW,
2876            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2877            attn_bits, attn_ack, asserted, deasserted);
2878
2879         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2880                 BNX2X_ERR("BAD attention state\n");
2881
2882         /* handle bits that were raised */
2883         if (asserted)
2884                 bnx2x_attn_int_asserted(bp, asserted);
2885
2886         if (deasserted)
2887                 bnx2x_attn_int_deasserted(bp, deasserted);
2888 }
2889
2890 static void bnx2x_sp_task(struct work_struct *work)
2891 {
2892         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2893         u16 status;
2894
2895
2896         /* Return here if interrupt is disabled */
2897         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2898                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2899                 return;
2900         }
2901
2902         status = bnx2x_update_dsb_idx(bp);
2903 /*      if (status == 0)                                     */
2904 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2905
2906         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2907
2908         /* HW attentions */
2909         if (status & 0x1)
2910                 bnx2x_attn_int(bp);
2911
2912         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2913                      IGU_INT_NOP, 1);
2914         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2915                      IGU_INT_NOP, 1);
2916         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2917                      IGU_INT_NOP, 1);
2918         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2919                      IGU_INT_NOP, 1);
2920         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2921                      IGU_INT_ENABLE, 1);
2922
2923 }
2924
2925 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2926 {
2927         struct net_device *dev = dev_instance;
2928         struct bnx2x *bp = netdev_priv(dev);
2929
2930         /* Return here if interrupt is disabled */
2931         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2932                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2933                 return IRQ_HANDLED;
2934         }
2935
2936         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2937
2938 #ifdef BNX2X_STOP_ON_ERROR
2939         if (unlikely(bp->panic))
2940                 return IRQ_HANDLED;
2941 #endif
2942
2943         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2944
2945         return IRQ_HANDLED;
2946 }
2947
2948 /* end of slow path */
2949
2950 /* Statistics */
2951
2952 /****************************************************************************
2953 * Macros
2954 ****************************************************************************/
2955
2956 /* sum[hi:lo] += add[hi:lo] */
2957 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2958         do { \
2959                 s_lo += a_lo; \
2960                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2961         } while (0)
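/* For example, adding a[hi:lo] = 0x0:0x1 to s[hi:lo] = 0x0:0xffffffff wraps
 * s_lo to 0; the (s_lo < a_lo) test detects the wrap and carries 1 into s_hi,
 * giving the expected 64-bit sum 0x1:0x00000000.
 */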
2962
2963 /* difference = minuend - subtrahend */
2964 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2965         do { \
2966                 if (m_lo < s_lo) { \
2967                         /* underflow */ \
2968                         d_hi = m_hi - s_hi; \
2969                         if (d_hi > 0) { \
2970                                 /* we can 'loan' 1 */ \
2971                                 d_hi--; \
2972                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2973                         } else { \
2974                                 /* m_hi <= s_hi */ \
2975                                 d_hi = 0; \
2976                                 d_lo = 0; \
2977                         } \
2978                 } else { \
2979                         /* m_lo >= s_lo */ \
2980                         if (m_hi < s_hi) { \
2981                                 d_hi = 0; \
2982                                 d_lo = 0; \
2983                         } else { \
2984                                 /* m_hi >= s_hi */ \
2985                                 d_hi = m_hi - s_hi; \
2986                                 d_lo = m_lo - s_lo; \
2987                         } \
2988                 } \
2989         } while (0)
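/* Note that when the subtrahend exceeds the minuend the result is clamped to
 * 0:0 rather than being allowed to wrap, so 64-bit counters derived with
 * DIFF_64 never go "negative".
 */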
2990
2991 #define UPDATE_STAT64(s, t) \
2992         do { \
2993                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2994                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2995                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2996                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2997                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2998                        pstats->mac_stx[1].t##_lo, diff.lo); \
2999         } while (0)
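/* mac_stx[0] keeps the latest raw MAC snapshot (so the next delta can be
 * computed), while mac_stx[1] accumulates the running totals that
 * bnx2x_hw_stats_update() later copies into bp->eth_stats.
 */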
3000
3001 #define UPDATE_STAT64_NIG(s, t) \
3002         do { \
3003                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3004                         diff.lo, new->s##_lo, old->s##_lo); \
3005                 ADD_64(estats->t##_hi, diff.hi, \
3006                        estats->t##_lo, diff.lo); \
3007         } while (0)
3008
3009 /* sum[hi:lo] += add */
3010 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3011         do { \
3012                 s_lo += a; \
3013                 s_hi += (s_lo < a) ? 1 : 0; \
3014         } while (0)
3015
3016 #define UPDATE_EXTEND_STAT(s) \
3017         do { \
3018                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3019                               pstats->mac_stx[1].s##_lo, \
3020                               new->s); \
3021         } while (0)
3022
3023 #define UPDATE_EXTEND_TSTAT(s, t) \
3024         do { \
3025                 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3026                 old_tclient->s = tclient->s; \
3027                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3028         } while (0)
3029
3030 #define UPDATE_EXTEND_USTAT(s, t) \
3031         do { \
3032                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3033                 old_uclient->s = uclient->s; \
3034                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3035         } while (0)
3036
3037 #define UPDATE_EXTEND_XSTAT(s, t) \
3038         do { \
3039                 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3040                 old_xclient->s = xclient->s; \
3041                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3042         } while (0)
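/* The TSTAT/USTAT/XSTAT variants above are identical except for which storm
 * client structure (tstorm, ustorm or xstorm) supplies the 32-bit firmware
 * counter that is extended into the 64-bit queue statistic.
 */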
3043
3044 /* minuend -= subtrahend */
3045 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3046         do { \
3047                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3048         } while (0)
3049
3050 /* minuend[hi:lo] -= subtrahend */
3051 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3052         do { \
3053                 SUB_64(m_hi, 0, m_lo, s); \
3054         } while (0)
3055
3056 #define SUB_EXTEND_USTAT(s, t) \
3057         do { \
3058                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3059                 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3060         } while (0)
3061
3062 /*
3063  * General service functions
3064  */
3065
3066 static inline long bnx2x_hilo(u32 *hiref)
3067 {
3068         u32 lo = *(hiref + 1);
3069 #if (BITS_PER_LONG == 64)
3070         u32 hi = *hiref;
3071
3072         return HILO_U64(hi, lo);
3073 #else
3074         return lo;
3075 #endif
3076 }
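/* On 32-bit kernels only the low 32 bits are returned; the callers in
 * bnx2x_net_stats_update() store the result in unsigned long netdev counters,
 * so the high word would not fit there anyway.
 */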
3077
3078 /*
3079  * Init service functions
3080  */
3081
3082 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3083 {
3084         if (!bp->stats_pending) {
3085                 struct eth_query_ramrod_data ramrod_data = {0};
3086                 int i, rc;
3087
3088                 ramrod_data.drv_counter = bp->stats_counter++;
3089                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3090                 for_each_queue(bp, i)
3091                         ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3092
3093                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3094                                    ((u32 *)&ramrod_data)[1],
3095                                    ((u32 *)&ramrod_data)[0], 0);
3096                 if (rc == 0) {
3097                         /* stats ramrod has its own slot on the spq */
3098                         bp->spq_left++;
3099                         bp->stats_pending = 1;
3100                 }
3101         }
3102 }
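/* The spq_left++ above compensates the slow path queue accounting: the
 * statistics ramrod apparently uses a dedicated SPQ slot rather than one of
 * the shared entries that bnx2x_sp_post() normally charges for.
 */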
3103
3104 static void bnx2x_stats_init(struct bnx2x *bp)
3105 {
3106         int port = BP_PORT(bp);
3107         int i;
3108
3109         bp->stats_pending = 0;
3110         bp->executer_idx = 0;
3111         bp->stats_counter = 0;
3112
3113         /* port stats */
3114         if (!BP_NOMCP(bp))
3115                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3116         else
3117                 bp->port.port_stx = 0;
3118         DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3119
3120         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3121         bp->port.old_nig_stats.brb_discard =
3122                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3123         bp->port.old_nig_stats.brb_truncate =
3124                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3125         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3126                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3127         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3128                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3129
3130         /* function stats */
3131         for_each_queue(bp, i) {
3132                 struct bnx2x_fastpath *fp = &bp->fp[i];
3133
3134                 memset(&fp->old_tclient, 0,
3135                        sizeof(struct tstorm_per_client_stats));
3136                 memset(&fp->old_uclient, 0,
3137                        sizeof(struct ustorm_per_client_stats));
3138                 memset(&fp->old_xclient, 0,
3139                        sizeof(struct xstorm_per_client_stats));
3140                 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3141         }
3142
3143         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3144         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3145
3146         bp->stats_state = STATS_STATE_DISABLED;
3147         if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3148                 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3149 }
3150
3151 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3152 {
3153         struct dmae_command *dmae = &bp->stats_dmae;
3154         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3155
3156         *stats_comp = DMAE_COMP_VAL;
3157         if (CHIP_REV_IS_SLOW(bp))
3158                 return;
3159
3160         /* loader */
3161         if (bp->executer_idx) {
3162                 int loader_idx = PMF_DMAE_C(bp);
3163
3164                 memset(dmae, 0, sizeof(struct dmae_command));
3165
3166                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3167                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3168                                 DMAE_CMD_DST_RESET |
3169 #ifdef __BIG_ENDIAN
3170                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3171 #else
3172                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3173 #endif
3174                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3175                                                DMAE_CMD_PORT_0) |
3176                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3177                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3178                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3179                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3180                                      sizeof(struct dmae_command) *
3181                                      (loader_idx + 1)) >> 2;
3182                 dmae->dst_addr_hi = 0;
3183                 dmae->len = sizeof(struct dmae_command) >> 2;
3184                 if (CHIP_IS_E1(bp))
3185                         dmae->len--;
3186                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3187                 dmae->comp_addr_hi = 0;
3188                 dmae->comp_val = 1;
3189
3190                 *stats_comp = 0;
3191                 bnx2x_post_dmae(bp, dmae, loader_idx);
3192
3193         } else if (bp->func_stx) {
3194                 *stats_comp = 0;
3195                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3196         }
3197 }
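/* When several DMAE sub-commands were queued (executer_idx != 0) the command
 * built above acts as a "loader": it DMAs the first queued sub-command from
 * the slowpath buffer into DMAE command memory slot loader_idx + 1 and, on
 * completion, writes 1 to that slot's GO register, which appears to start the
 * copied command.  The final sub-command of a chain completes into stats_comp
 * with DMAE_COMP_VAL, which is what bnx2x_stats_comp() polls for.
 */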
3198
3199 static int bnx2x_stats_comp(struct bnx2x *bp)
3200 {
3201         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3202         int cnt = 10;
3203
3204         might_sleep();
3205         while (*stats_comp != DMAE_COMP_VAL) {
3206                 if (!cnt) {
3207                         BNX2X_ERR("timeout waiting for stats to finish\n");
3208                         break;
3209                 }
3210                 cnt--;
3211                 msleep(1);
3212         }
3213         return 1;
3214 }
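/* Best effort: the loop above polls for roughly 10 ms (10 x msleep(1)) and
 * the function always returns 1, so callers proceed even if the DMAE
 * completion never arrived; a timeout is only reported via BNX2X_ERR().
 */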
3215
3216 /*
3217  * Statistics service functions
3218  */
3219
3220 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3221 {
3222         struct dmae_command *dmae;
3223         u32 opcode;
3224         int loader_idx = PMF_DMAE_C(bp);
3225         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3226
3227         /* sanity */
3228         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3229                 BNX2X_ERR("BUG!\n");
3230                 return;
3231         }
3232
3233         bp->executer_idx = 0;
3234
3235         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3236                   DMAE_CMD_C_ENABLE |
3237                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3238 #ifdef __BIG_ENDIAN
3239                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3240 #else
3241                   DMAE_CMD_ENDIANITY_DW_SWAP |
3242 #endif
3243                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3244                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3245
3246         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3247         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3248         dmae->src_addr_lo = bp->port.port_stx >> 2;
3249         dmae->src_addr_hi = 0;
3250         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3251         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3252         dmae->len = DMAE_LEN32_RD_MAX;
3253         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3254         dmae->comp_addr_hi = 0;
3255         dmae->comp_val = 1;
3256
3257         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3258         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3259         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3260         dmae->src_addr_hi = 0;
3261         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3262                                    DMAE_LEN32_RD_MAX * 4);
3263         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3264                                    DMAE_LEN32_RD_MAX * 4);
3265         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3266         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3267         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3268         dmae->comp_val = DMAE_COMP_VAL;
3269
3270         *stats_comp = 0;
3271         bnx2x_hw_stats_post(bp);
3272         bnx2x_stats_comp(bp);
3273 }
3274
3275 static void bnx2x_port_stats_init(struct bnx2x *bp)
3276 {
3277         struct dmae_command *dmae;
3278         int port = BP_PORT(bp);
3279         int vn = BP_E1HVN(bp);
3280         u32 opcode;
3281         int loader_idx = PMF_DMAE_C(bp);
3282         u32 mac_addr;
3283         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3284
3285         /* sanity */
3286         if (!bp->link_vars.link_up || !bp->port.pmf) {
3287                 BNX2X_ERR("BUG!\n");
3288                 return;
3289         }
3290
3291         bp->executer_idx = 0;
3292
3293         /* MCP */
3294         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3295                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3296                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3297 #ifdef __BIG_ENDIAN
3298                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3299 #else
3300                   DMAE_CMD_ENDIANITY_DW_SWAP |
3301 #endif
3302                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3303                   (vn << DMAE_CMD_E1HVN_SHIFT));
3304
3305         if (bp->port.port_stx) {
3306
3307                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3308                 dmae->opcode = opcode;
3309                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3310                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3311                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3312                 dmae->dst_addr_hi = 0;
3313                 dmae->len = sizeof(struct host_port_stats) >> 2;
3314                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3315                 dmae->comp_addr_hi = 0;
3316                 dmae->comp_val = 1;
3317         }
3318
3319         if (bp->func_stx) {
3320
3321                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3322                 dmae->opcode = opcode;
3323                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3324                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3325                 dmae->dst_addr_lo = bp->func_stx >> 2;
3326                 dmae->dst_addr_hi = 0;
3327                 dmae->len = sizeof(struct host_func_stats) >> 2;
3328                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3329                 dmae->comp_addr_hi = 0;
3330                 dmae->comp_val = 1;
3331         }
3332
3333         /* MAC */
3334         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3335                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3336                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3337 #ifdef __BIG_ENDIAN
3338                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3339 #else
3340                   DMAE_CMD_ENDIANITY_DW_SWAP |
3341 #endif
3342                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3343                   (vn << DMAE_CMD_E1HVN_SHIFT));
3344
3345         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3346
3347                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3348                                    NIG_REG_INGRESS_BMAC0_MEM);
3349
3350                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3351                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3352                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3353                 dmae->opcode = opcode;
3354                 dmae->src_addr_lo = (mac_addr +
3355                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3356                 dmae->src_addr_hi = 0;
3357                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3358                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3359                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3360                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3361                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3362                 dmae->comp_addr_hi = 0;
3363                 dmae->comp_val = 1;
3364
3365                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3366                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3367                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3368                 dmae->opcode = opcode;
3369                 dmae->src_addr_lo = (mac_addr +
3370                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3371                 dmae->src_addr_hi = 0;
3372                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3373                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3374                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3375                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3376                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3377                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3378                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3379                 dmae->comp_addr_hi = 0;
3380                 dmae->comp_val = 1;
3381
3382         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3383
3384                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3385
3386                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3387                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3388                 dmae->opcode = opcode;
3389                 dmae->src_addr_lo = (mac_addr +
3390                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3391                 dmae->src_addr_hi = 0;
3392                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3393                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3394                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3395                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3396                 dmae->comp_addr_hi = 0;
3397                 dmae->comp_val = 1;
3398
3399                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3400                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3401                 dmae->opcode = opcode;
3402                 dmae->src_addr_lo = (mac_addr +
3403                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3404                 dmae->src_addr_hi = 0;
3405                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3406                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3407                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3408                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3409                 dmae->len = 1;
3410                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3411                 dmae->comp_addr_hi = 0;
3412                 dmae->comp_val = 1;
3413
3414                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3415                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3416                 dmae->opcode = opcode;
3417                 dmae->src_addr_lo = (mac_addr +
3418                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3419                 dmae->src_addr_hi = 0;
3420                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3421                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3422                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3423                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3424                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3425                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3426                 dmae->comp_addr_hi = 0;
3427                 dmae->comp_val = 1;
3428         }
3429
3430         /* NIG */
3431         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3432         dmae->opcode = opcode;
3433         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3434                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3435         dmae->src_addr_hi = 0;
3436         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3437         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3438         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3439         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3440         dmae->comp_addr_hi = 0;
3441         dmae->comp_val = 1;
3442
3443         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3444         dmae->opcode = opcode;
3445         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3446                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3447         dmae->src_addr_hi = 0;
3448         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3449                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3450         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3451                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3452         dmae->len = (2*sizeof(u32)) >> 2;
3453         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3454         dmae->comp_addr_hi = 0;
3455         dmae->comp_val = 1;
3456
3457         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3458         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3459                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3460                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3461 #ifdef __BIG_ENDIAN
3462                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3463 #else
3464                         DMAE_CMD_ENDIANITY_DW_SWAP |
3465 #endif
3466                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3467                         (vn << DMAE_CMD_E1HVN_SHIFT));
3468         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3469                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3470         dmae->src_addr_hi = 0;
3471         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3472                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3473         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3474                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3475         dmae->len = (2*sizeof(u32)) >> 2;
3476         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3477         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3478         dmae->comp_val = DMAE_COMP_VAL;
3479
3480         *stats_comp = 0;
3481 }
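/* Note that this function only builds the DMAE program in the slowpath
 * buffer (advancing bp->executer_idx); nothing is sent to the chip until
 * bnx2x_hw_stats_post() is called, e.g. from bnx2x_stats_start().
 */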
3482
3483 static void bnx2x_func_stats_init(struct bnx2x *bp)
3484 {
3485         struct dmae_command *dmae = &bp->stats_dmae;
3486         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3487
3488         /* sanity */
3489         if (!bp->func_stx) {
3490                 BNX2X_ERR("BUG!\n");
3491                 return;
3492         }
3493
3494         bp->executer_idx = 0;
3495         memset(dmae, 0, sizeof(struct dmae_command));
3496
3497         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3498                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3499                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3500 #ifdef __BIG_ENDIAN
3501                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3502 #else
3503                         DMAE_CMD_ENDIANITY_DW_SWAP |
3504 #endif
3505                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3506                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3507         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3508         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3509         dmae->dst_addr_lo = bp->func_stx >> 2;
3510         dmae->dst_addr_hi = 0;
3511         dmae->len = sizeof(struct host_func_stats) >> 2;
3512         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3513         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3514         dmae->comp_val = DMAE_COMP_VAL;
3515
3516         *stats_comp = 0;
3517 }
3518
3519 static void bnx2x_stats_start(struct bnx2x *bp)
3520 {
3521         if (bp->port.pmf)
3522                 bnx2x_port_stats_init(bp);
3523
3524         else if (bp->func_stx)
3525                 bnx2x_func_stats_init(bp);
3526
3527         bnx2x_hw_stats_post(bp);
3528         bnx2x_storm_stats_post(bp);
3529 }
3530
3531 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3532 {
3533         bnx2x_stats_comp(bp);
3534         bnx2x_stats_pmf_update(bp);
3535         bnx2x_stats_start(bp);
3536 }
3537
3538 static void bnx2x_stats_restart(struct bnx2x *bp)
3539 {
3540         bnx2x_stats_comp(bp);
3541         bnx2x_stats_start(bp);
3542 }
3543
3544 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3545 {
3546         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3547         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3548         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3549         struct {
3550                 u32 lo;
3551                 u32 hi;
3552         } diff;
3553
3554         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3555         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3556         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3557         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3558         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3559         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3560         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3561         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3562         UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3563         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3564         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3565         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3566         UPDATE_STAT64(tx_stat_gt127,
3567                                 tx_stat_etherstatspkts65octetsto127octets);
3568         UPDATE_STAT64(tx_stat_gt255,
3569                                 tx_stat_etherstatspkts128octetsto255octets);
3570         UPDATE_STAT64(tx_stat_gt511,
3571                                 tx_stat_etherstatspkts256octetsto511octets);
3572         UPDATE_STAT64(tx_stat_gt1023,
3573                                 tx_stat_etherstatspkts512octetsto1023octets);
3574         UPDATE_STAT64(tx_stat_gt1518,
3575                                 tx_stat_etherstatspkts1024octetsto1522octets);
3576         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3577         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3578         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3579         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3580         UPDATE_STAT64(tx_stat_gterr,
3581                                 tx_stat_dot3statsinternalmactransmiterrors);
3582         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3583
3584         estats->pause_frames_received_hi =
3585                                 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3586         estats->pause_frames_received_lo =
3587                                 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3588
3589         estats->pause_frames_sent_hi =
3590                                 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3591         estats->pause_frames_sent_lo =
3592                                 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3593 }
3594
3595 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3596 {
3597         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3598         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3599         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3600
3601         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3602         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3603         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3604         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3605         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3606         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3607         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3608         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3609         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3610         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3611         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3612         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3613         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3614         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3615         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3616         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3617         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3618         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3619         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3620         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3621         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3622         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3623         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3624         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3625         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3626         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3627         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3628         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3629         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3630         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3631         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3632
3633         estats->pause_frames_received_hi =
3634                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3635         estats->pause_frames_received_lo =
3636                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3637         ADD_64(estats->pause_frames_received_hi,
3638                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3639                estats->pause_frames_received_lo,
3640                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3641
3642         estats->pause_frames_sent_hi =
3643                         pstats->mac_stx[1].tx_stat_outxonsent_hi;
3644         estats->pause_frames_sent_lo =
3645                         pstats->mac_stx[1].tx_stat_outxonsent_lo;
3646         ADD_64(estats->pause_frames_sent_hi,
3647                pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3648                estats->pause_frames_sent_lo,
3649                pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3650 }
3651
3652 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3653 {
3654         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3655         struct nig_stats *old = &(bp->port.old_nig_stats);
3656         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3657         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3658         struct {
3659                 u32 lo;
3660                 u32 hi;
3661         } diff;
3662         u32 nig_timer_max;
3663
3664         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3665                 bnx2x_bmac_stats_update(bp);
3666
3667         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3668                 bnx2x_emac_stats_update(bp);
3669
3670         else { /* unreached */
3671                 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3672                 return -1;
3673         }
3674
3675         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3676                       new->brb_discard - old->brb_discard);
3677         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3678                       new->brb_truncate - old->brb_truncate);
3679
3680         UPDATE_STAT64_NIG(egress_mac_pkt0,
3681                                         etherstatspkts1024octetsto1522octets);
3682         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3683
3684         memcpy(old, new, sizeof(struct nig_stats));
3685
3686         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3687                sizeof(struct mac_stx));
3688         estats->brb_drop_hi = pstats->brb_drop_hi;
3689         estats->brb_drop_lo = pstats->brb_drop_lo;
3690
3691         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3692
3693         nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3694         if (nig_timer_max != estats->nig_timer_max) {
3695                 estats->nig_timer_max = nig_timer_max;
3696                 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3697         }
3698
3699         return 0;
3700 }
3701
3702 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3703 {
3704         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3705         struct tstorm_per_port_stats *tport =
3706                                         &stats->tstorm_common.port_statistics;
3707         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3708         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3709         int i;
3710
3711         memset(&(fstats->total_bytes_received_hi), 0,
3712                sizeof(struct host_func_stats) - 2*sizeof(u32));
3713         estats->error_bytes_received_hi = 0;
3714         estats->error_bytes_received_lo = 0;
3715         estats->etherstatsoverrsizepkts_hi = 0;
3716         estats->etherstatsoverrsizepkts_lo = 0;
3717         estats->no_buff_discard_hi = 0;
3718         estats->no_buff_discard_lo = 0;
3719
3720         for_each_queue(bp, i) {
3721                 struct bnx2x_fastpath *fp = &bp->fp[i];
3722                 int cl_id = fp->cl_id;
3723                 struct tstorm_per_client_stats *tclient =
3724                                 &stats->tstorm_common.client_statistics[cl_id];
3725                 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3726                 struct ustorm_per_client_stats *uclient =
3727                                 &stats->ustorm_common.client_statistics[cl_id];
3728                 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3729                 struct xstorm_per_client_stats *xclient =
3730                                 &stats->xstorm_common.client_statistics[cl_id];
3731                 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3732                 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3733                 u32 diff;
3734
3735                 /* are storm stats valid? */
3736                 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3737                                                         bp->stats_counter) {
3738                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3739                            "  xstorm counter (%d) != stats_counter (%d)\n",
3740                            i, xclient->stats_counter, bp->stats_counter);
3741                         return -1;
3742                 }
3743                 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3744                                                         bp->stats_counter) {
3745                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3746                            "  tstorm counter (%d) != stats_counter (%d)\n",
3747                            i, tclient->stats_counter, bp->stats_counter);
3748                         return -2;
3749                 }
3750                 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3751                                                         bp->stats_counter) {
3752                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3753                            "  ustorm counter (%d) != stats_counter (%d)\n",
3754                            i, uclient->stats_counter, bp->stats_counter);
3755                         return -4;
3756                 }
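                /* Each storm echoes the drv_counter posted in the last
                 * statistics ramrod; a client whose counter is not exactly
                 * one behind bp->stats_counter means this snapshot is stale,
                 * so the update is rejected and bnx2x_stats_update() counts
                 * the failure (panicking after a few consecutive misses).
                 */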
3757
3758                 qstats->total_bytes_received_hi =
3759                 qstats->valid_bytes_received_hi =
3760                                 le32_to_cpu(tclient->total_rcv_bytes.hi);
3761                 qstats->total_bytes_received_lo =
3762                 qstats->valid_bytes_received_lo =
3763                                 le32_to_cpu(tclient->total_rcv_bytes.lo);
3764
3765                 qstats->error_bytes_received_hi =
3766                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
3767                 qstats->error_bytes_received_lo =
3768                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
3769
3770                 ADD_64(qstats->total_bytes_received_hi,
3771                        qstats->error_bytes_received_hi,
3772                        qstats->total_bytes_received_lo,
3773                        qstats->error_bytes_received_lo);
3774
3775                 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3776                                         total_unicast_packets_received);
3777                 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3778                                         total_multicast_packets_received);
3779                 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3780                                         total_broadcast_packets_received);
3781                 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3782                                         etherstatsoverrsizepkts);
3783                 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3784
3785                 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3786                                         total_unicast_packets_received);
3787                 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3788                                         total_multicast_packets_received);
3789                 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3790                                         total_broadcast_packets_received);
3791                 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3792                 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3793                 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3794
3795                 qstats->total_bytes_transmitted_hi =
3796                                 le32_to_cpu(xclient->total_sent_bytes.hi);
3797                 qstats->total_bytes_transmitted_lo =
3798                                 le32_to_cpu(xclient->total_sent_bytes.lo);
3799
3800                 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3801                                         total_unicast_packets_transmitted);
3802                 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3803                                         total_multicast_packets_transmitted);
3804                 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3805                                         total_broadcast_packets_transmitted);
3806
3807                 old_tclient->checksum_discard = tclient->checksum_discard;
3808                 old_tclient->ttl0_discard = tclient->ttl0_discard;
3809
3810                 ADD_64(fstats->total_bytes_received_hi,
3811                        qstats->total_bytes_received_hi,
3812                        fstats->total_bytes_received_lo,
3813                        qstats->total_bytes_received_lo);
3814                 ADD_64(fstats->total_bytes_transmitted_hi,
3815                        qstats->total_bytes_transmitted_hi,
3816                        fstats->total_bytes_transmitted_lo,
3817                        qstats->total_bytes_transmitted_lo);
3818                 ADD_64(fstats->total_unicast_packets_received_hi,
3819                        qstats->total_unicast_packets_received_hi,
3820                        fstats->total_unicast_packets_received_lo,
3821                        qstats->total_unicast_packets_received_lo);
3822                 ADD_64(fstats->total_multicast_packets_received_hi,
3823                        qstats->total_multicast_packets_received_hi,
3824                        fstats->total_multicast_packets_received_lo,
3825                        qstats->total_multicast_packets_received_lo);
3826                 ADD_64(fstats->total_broadcast_packets_received_hi,
3827                        qstats->total_broadcast_packets_received_hi,
3828                        fstats->total_broadcast_packets_received_lo,
3829                        qstats->total_broadcast_packets_received_lo);
3830                 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3831                        qstats->total_unicast_packets_transmitted_hi,
3832                        fstats->total_unicast_packets_transmitted_lo,
3833                        qstats->total_unicast_packets_transmitted_lo);
3834                 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3835                        qstats->total_multicast_packets_transmitted_hi,
3836                        fstats->total_multicast_packets_transmitted_lo,
3837                        qstats->total_multicast_packets_transmitted_lo);
3838                 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3839                        qstats->total_broadcast_packets_transmitted_hi,
3840                        fstats->total_broadcast_packets_transmitted_lo,
3841                        qstats->total_broadcast_packets_transmitted_lo);
3842                 ADD_64(fstats->valid_bytes_received_hi,
3843                        qstats->valid_bytes_received_hi,
3844                        fstats->valid_bytes_received_lo,
3845                        qstats->valid_bytes_received_lo);
3846
3847                 ADD_64(estats->error_bytes_received_hi,
3848                        qstats->error_bytes_received_hi,
3849                        estats->error_bytes_received_lo,
3850                        qstats->error_bytes_received_lo);
3851                 ADD_64(estats->etherstatsoverrsizepkts_hi,
3852                        qstats->etherstatsoverrsizepkts_hi,
3853                        estats->etherstatsoverrsizepkts_lo,
3854                        qstats->etherstatsoverrsizepkts_lo);
3855                 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3856                        estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3857         }
3858
3859         ADD_64(fstats->total_bytes_received_hi,
3860                estats->rx_stat_ifhcinbadoctets_hi,
3861                fstats->total_bytes_received_lo,
3862                estats->rx_stat_ifhcinbadoctets_lo);
3863
3864         memcpy(estats, &(fstats->total_bytes_received_hi),
3865                sizeof(struct host_func_stats) - 2*sizeof(u32));
3866
3867         ADD_64(estats->etherstatsoverrsizepkts_hi,
3868                estats->rx_stat_dot3statsframestoolong_hi,
3869                estats->etherstatsoverrsizepkts_lo,
3870                estats->rx_stat_dot3statsframestoolong_lo);
3871         ADD_64(estats->error_bytes_received_hi,
3872                estats->rx_stat_ifhcinbadoctets_hi,
3873                estats->error_bytes_received_lo,
3874                estats->rx_stat_ifhcinbadoctets_lo);
3875
3876         if (bp->port.pmf) {
3877                 estats->mac_filter_discard =
3878                                 le32_to_cpu(tport->mac_filter_discard);
3879                 estats->xxoverflow_discard =
3880                                 le32_to_cpu(tport->xxoverflow_discard);
3881                 estats->brb_truncate_discard =
3882                                 le32_to_cpu(tport->brb_truncate_discard);
3883                 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3884         }
3885
3886         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3887
3888         bp->stats_pending = 0;
3889
3890         return 0;
3891 }
3892
3893 static void bnx2x_net_stats_update(struct bnx2x *bp)
3894 {
3895         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3896         struct net_device_stats *nstats = &bp->dev->stats;
3897         int i;
3898
3899         nstats->rx_packets =
3900                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3901                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3902                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3903
3904         nstats->tx_packets =
3905                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3906                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3907                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3908
3909         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
3910
3911         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3912
3913         nstats->rx_dropped = estats->mac_discard;
3914         for_each_queue(bp, i)
3915                 nstats->rx_dropped +=
3916                         le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3917
3918         nstats->tx_dropped = 0;
3919
3920         nstats->multicast =
3921                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
3922
3923         nstats->collisions =
3924                 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
3925
3926         nstats->rx_length_errors =
3927                 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3928                 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3929         nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3930                                  bnx2x_hilo(&estats->brb_truncate_hi);
3931         nstats->rx_crc_errors =
3932                 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3933         nstats->rx_frame_errors =
3934                 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3935         nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
3936         nstats->rx_missed_errors = estats->xxoverflow_discard;
3937
3938         nstats->rx_errors = nstats->rx_length_errors +
3939                             nstats->rx_over_errors +
3940                             nstats->rx_crc_errors +
3941                             nstats->rx_frame_errors +
3942                             nstats->rx_fifo_errors +
3943                             nstats->rx_missed_errors;
3944
3945         nstats->tx_aborted_errors =
3946                 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3947                 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3948         nstats->tx_carrier_errors =
3949                 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
3950         nstats->tx_fifo_errors = 0;
3951         nstats->tx_heartbeat_errors = 0;
3952         nstats->tx_window_errors = 0;
3953
3954         nstats->tx_errors = nstats->tx_aborted_errors +
3955                             nstats->tx_carrier_errors +
3956             bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3957 }
3958
3959 static void bnx2x_drv_stats_update(struct bnx2x *bp)
3960 {
3961         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3962         int i;
3963
3964         estats->driver_xoff = 0;
3965         estats->rx_err_discard_pkt = 0;
3966         estats->rx_skb_alloc_failed = 0;
3967         estats->hw_csum_err = 0;
3968         for_each_queue(bp, i) {
3969                 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
3970
3971                 estats->driver_xoff += qstats->driver_xoff;
3972                 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
3973                 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
3974                 estats->hw_csum_err += qstats->hw_csum_err;
3975         }
3976 }
3977
3978 static void bnx2x_stats_update(struct bnx2x *bp)
3979 {
3980         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3981
3982         if (*stats_comp != DMAE_COMP_VAL)
3983                 return;
3984
3985         if (bp->port.pmf)
3986                 bnx2x_hw_stats_update(bp);
3987
3988         if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
3989                 BNX2X_ERR("storm stats were not updated 3 times in a row\n");
3990                 bnx2x_panic();
3991                 return;
3992         }
3993
3994         bnx2x_net_stats_update(bp);
3995         bnx2x_drv_stats_update(bp);
3996
3997         if (bp->msglevel & NETIF_MSG_TIMER) {
3998                 struct tstorm_per_client_stats *old_tclient =
3999                                                         &bp->fp->old_tclient;
4000                 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4001                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4002                 struct net_device_stats *nstats = &bp->dev->stats;
4003                 int i;
4004
4005                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4006                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
4007                                   "  tx pkt (%lx)\n",
4008                        bnx2x_tx_avail(bp->fp),
4009                        le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
4010                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
4011                                   "  rx pkt (%lx)\n",
4012                        (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
4013                              bp->fp->rx_comp_cons),
4014                        le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
4015                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
4016                                   "brb truncate %u\n",
4017                        (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4018                        qstats->driver_xoff,
4019                        estats->brb_drop_lo, estats->brb_truncate_lo);
4020                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
4021                         "packets_too_big_discard %lu  no_buff_discard %lu  "
4022                         "mac_discard %u  mac_filter_discard %u  "
4023                         "xxovrflow_discard %u  brb_truncate_discard %u  "
4024                         "ttl0_discard %u\n",
4025                        le32_to_cpu(old_tclient->checksum_discard),
4026                        bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4027                        bnx2x_hilo(&qstats->no_buff_discard_hi),
4028                        estats->mac_discard, estats->mac_filter_discard,
4029                        estats->xxoverflow_discard, estats->brb_truncate_discard,
4030                        le32_to_cpu(old_tclient->ttl0_discard));
4031
4032                 for_each_queue(bp, i) {
4033                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4034                                bnx2x_fp(bp, i, tx_pkt),
4035                                bnx2x_fp(bp, i, rx_pkt),
4036                                bnx2x_fp(bp, i, rx_calls));
4037                 }
4038         }
4039
4040         bnx2x_hw_stats_post(bp);
4041         bnx2x_storm_stats_post(bp);
4042 }
4043
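/* Prepare the DMAE command(s) that copy the host port and function
 * statistics buffers out to their port_stx/func_stx locations before
 * statistics collection is disabled; they are posted later by
 * bnx2x_hw_stats_post().
 */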
4044 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4045 {
4046         struct dmae_command *dmae;
4047         u32 opcode;
4048         int loader_idx = PMF_DMAE_C(bp);
4049         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4050
4051         bp->executer_idx = 0;
4052
4053         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4054                   DMAE_CMD_C_ENABLE |
4055                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4056 #ifdef __BIG_ENDIAN
4057                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
4058 #else
4059                   DMAE_CMD_ENDIANITY_DW_SWAP |
4060 #endif
4061                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4062                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4063
4064         if (bp->port.port_stx) {
4065
4066                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4067                 if (bp->func_stx)
4068                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4069                 else
4070                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4071                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4072                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4073                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4074                 dmae->dst_addr_hi = 0;
4075                 dmae->len = sizeof(struct host_port_stats) >> 2;
4076                 if (bp->func_stx) {
4077                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4078                         dmae->comp_addr_hi = 0;
4079                         dmae->comp_val = 1;
4080                 } else {
4081                         dmae->comp_addr_lo =
4082                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4083                         dmae->comp_addr_hi =
4084                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4085                         dmae->comp_val = DMAE_COMP_VAL;
4086
4087                         *stats_comp = 0;
4088                 }
4089         }
4090
4091         if (bp->func_stx) {
4092
4093                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4094                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4095                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4096                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4097                 dmae->dst_addr_lo = bp->func_stx >> 2;
4098                 dmae->dst_addr_hi = 0;
4099                 dmae->len = sizeof(struct host_func_stats) >> 2;
4100                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4101                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4102                 dmae->comp_val = DMAE_COMP_VAL;
4103
4104                 *stats_comp = 0;
4105         }
4106 }
4107
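/* Perform one last statistics update and, on the PMF, flush the port
 * statistics out before the state machine moves to STATS_STATE_DISABLED.
 */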
4108 static void bnx2x_stats_stop(struct bnx2x *bp)
4109 {
4110         int update = 0;
4111
4112         bnx2x_stats_comp(bp);
4113
4114         if (bp->port.pmf)
4115                 update = (bnx2x_hw_stats_update(bp) == 0);
4116
4117         update |= (bnx2x_storm_stats_update(bp) == 0);
4118
4119         if (update) {
4120                 bnx2x_net_stats_update(bp);
4121
4122                 if (bp->port.pmf)
4123                         bnx2x_port_stats_stop(bp);
4124
4125                 bnx2x_hw_stats_post(bp);
4126                 bnx2x_stats_comp(bp);
4127         }
4128 }
4129
4130 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4131 {
4132 }
4133
4134 static const struct {
4135         void (*action)(struct bnx2x *bp);
4136         enum bnx2x_stats_state next_state;
4137 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4138 /* state        event   */
4139 {
4140 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4141 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
4142 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4143 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4144 },
4145 {
4146 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
4147 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
4148 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
4149 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
4150 }
4151 };
4152
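/* Statistics state machine dispatcher: run the action registered for
 * the current (state, event) pair and advance to the next state.
 */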
4153 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4154 {
4155         enum bnx2x_stats_state state = bp->stats_state;
4156
4157         bnx2x_stats_stm[state][event].action(bp);
4158         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4159
4160         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4161                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4162                    state, event, bp->stats_state);
4163 }
4164
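/* Periodic timer (re-armed every current_interval): optionally polls the
 * Rx/Tx rings of queue 0 in poll mode, exchanges the driver/MCP heartbeat
 * pulse through shared memory, and kicks a statistics update while the
 * device is up.
 */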
4165 static void bnx2x_timer(unsigned long data)
4166 {
4167         struct bnx2x *bp = (struct bnx2x *) data;
4168
4169         if (!netif_running(bp->dev))
4170                 return;
4171
4172         if (atomic_read(&bp->intr_sem) != 0)
4173                 goto timer_restart;
4174
4175         if (poll) {
4176                 struct bnx2x_fastpath *fp = &bp->fp[0];
4177                 int rc;
4178
4179                 bnx2x_tx_int(fp);
4180                 rc = bnx2x_rx_int(fp, 1000);
4181         }
4182
4183         if (!BP_NOMCP(bp)) {
4184                 int func = BP_FUNC(bp);
4185                 u32 drv_pulse;
4186                 u32 mcp_pulse;
4187
4188                 ++bp->fw_drv_pulse_wr_seq;
4189                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4190                 /* TBD - add SYSTEM_TIME */
4191                 drv_pulse = bp->fw_drv_pulse_wr_seq;
4192                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4193
4194                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4195                              MCP_PULSE_SEQ_MASK);
4196                 /* The delta between driver pulse and mcp response
4197                  * should be 1 (before mcp response) or 0 (after mcp response)
4198                  */
4199                 if ((drv_pulse != mcp_pulse) &&
4200                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4201                         /* someone lost a heartbeat... */
4202                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4203                                   drv_pulse, mcp_pulse);
4204                 }
4205         }
4206
4207         if ((bp->state == BNX2X_STATE_OPEN) ||
4208             (bp->state == BNX2X_STATE_DISABLED))
4209                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4210
4211 timer_restart:
4212         mod_timer(&bp->timer, jiffies + bp->current_interval);
4213 }
4214
4215 /* end of Statistics */
4216
4217 /* nic init */
4218
4219 /*
4220  * nic init service functions
4221  */
4222
4223 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4224 {
4225         int port = BP_PORT(bp);
4226
4227         bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
4228                         USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4229                         sizeof(struct ustorm_status_block)/4);
4230         bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
4231                         CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4232                         sizeof(struct cstorm_status_block)/4);
4233 }
4234
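/* Initialize a per-queue (non-default) host status block: point the
 * USTORM and CSTORM sections at their DMA addresses, assign the owning
 * function, start with all HC indices disabled and ACK the block to
 * enable its IGU interrupt.
 */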
4235 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4236                           dma_addr_t mapping, int sb_id)
4237 {
4238         int port = BP_PORT(bp);
4239         int func = BP_FUNC(bp);
4240         int index;
4241         u64 section;
4242
4243         /* USTORM */
4244         section = ((u64)mapping) + offsetof(struct host_status_block,
4245                                             u_status_block);
4246         sb->u_status_block.status_block_id = sb_id;
4247
4248         REG_WR(bp, BAR_USTRORM_INTMEM +
4249                USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4250         REG_WR(bp, BAR_USTRORM_INTMEM +
4251                ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4252                U64_HI(section));
4253         REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4254                 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4255
4256         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4257                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4258                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4259
4260         /* CSTORM */
4261         section = ((u64)mapping) + offsetof(struct host_status_block,
4262                                             c_status_block);
4263         sb->c_status_block.status_block_id = sb_id;
4264
4265         REG_WR(bp, BAR_CSTRORM_INTMEM +
4266                CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4267         REG_WR(bp, BAR_CSTRORM_INTMEM +
4268                ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4269                U64_HI(section));
4270         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4271                 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4272
4273         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4274                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4275                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4276
4277         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4278 }
4279
4280 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4281 {
4282         int func = BP_FUNC(bp);
4283
4284         bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR +
4285                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4286                         sizeof(struct tstorm_def_status_block)/4);
4287         bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
4288                         USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4289                         sizeof(struct ustorm_def_status_block)/4);
4290         bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
4291                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4292                         sizeof(struct cstorm_def_status_block)/4);
4293         bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR +
4294                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4295                         sizeof(struct xstorm_def_status_block)/4);
4296 }
4297
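/* Initialize the default status block: record the attention group masks
 * from the AEU registers, program the attention and per-STORM section
 * addresses for this function, disable all HC indices, and ACK the block
 * to enable its IGU interrupt.
 */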
4298 static void bnx2x_init_def_sb(struct bnx2x *bp,
4299                               struct host_def_status_block *def_sb,
4300                               dma_addr_t mapping, int sb_id)
4301 {
4302         int port = BP_PORT(bp);
4303         int func = BP_FUNC(bp);
4304         int index, val, reg_offset;
4305         u64 section;
4306
4307         /* ATTN */
4308         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4309                                             atten_status_block);
4310         def_sb->atten_status_block.status_block_id = sb_id;
4311
4312         bp->attn_state = 0;
4313
4314         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4315                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4316
4317         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4318                 bp->attn_group[index].sig[0] = REG_RD(bp,
4319                                                      reg_offset + 0x10*index);
4320                 bp->attn_group[index].sig[1] = REG_RD(bp,
4321                                                reg_offset + 0x4 + 0x10*index);
4322                 bp->attn_group[index].sig[2] = REG_RD(bp,
4323                                                reg_offset + 0x8 + 0x10*index);
4324                 bp->attn_group[index].sig[3] = REG_RD(bp,
4325                                                reg_offset + 0xc + 0x10*index);
4326         }
4327
4328         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4329                              HC_REG_ATTN_MSG0_ADDR_L);
4330
4331         REG_WR(bp, reg_offset, U64_LO(section));
4332         REG_WR(bp, reg_offset + 4, U64_HI(section));
4333
4334         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4335
4336         val = REG_RD(bp, reg_offset);
4337         val |= sb_id;
4338         REG_WR(bp, reg_offset, val);
4339
4340         /* USTORM */
4341         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4342                                             u_def_status_block);
4343         def_sb->u_def_status_block.status_block_id = sb_id;
4344
4345         REG_WR(bp, BAR_USTRORM_INTMEM +
4346                USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4347         REG_WR(bp, BAR_USTRORM_INTMEM +
4348                ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4349                U64_HI(section));
4350         REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4351                 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4352
4353         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4354                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4355                          USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4356
4357         /* CSTORM */
4358         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4359                                             c_def_status_block);
4360         def_sb->c_def_status_block.status_block_id = sb_id;
4361
4362         REG_WR(bp, BAR_CSTRORM_INTMEM +
4363                CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4364         REG_WR(bp, BAR_CSTRORM_INTMEM +
4365                ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4366                U64_HI(section));
4367         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4368                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4369
4370         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4371                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4372                          CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4373
4374         /* TSTORM */
4375         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4376                                             t_def_status_block);
4377         def_sb->t_def_status_block.status_block_id = sb_id;
4378
4379         REG_WR(bp, BAR_TSTRORM_INTMEM +
4380                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4381         REG_WR(bp, BAR_TSTRORM_INTMEM +
4382                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4383                U64_HI(section));
4384         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4385                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4386
4387         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4388                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4389                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4390
4391         /* XSTORM */
4392         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4393                                             x_def_status_block);
4394         def_sb->x_def_status_block.status_block_id = sb_id;
4395
4396         REG_WR(bp, BAR_XSTRORM_INTMEM +
4397                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4398         REG_WR(bp, BAR_XSTRORM_INTMEM +
4399                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4400                U64_HI(section));
4401         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4402                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4403
4404         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4405                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4406                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4407
4408         bp->stats_pending = 0;
4409         bp->set_mac_pending = 0;
4410
4411         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4412 }
4413
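/* Program the host coalescing timeouts for the Rx and Tx completion
 * queue indices of every queue; a zero tick value disables host
 * coalescing on that index.
 */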
4414 static void bnx2x_update_coalesce(struct bnx2x *bp)
4415 {
4416         int port = BP_PORT(bp);
4417         int i;
4418
4419         for_each_queue(bp, i) {
4420                 int sb_id = bp->fp[i].sb_id;
4421
4422                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4423                 REG_WR8(bp, BAR_USTRORM_INTMEM +
4424                         USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4425                                                     U_SB_ETH_RX_CQ_INDEX),
4426                         bp->rx_ticks/12);
4427                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4428                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4429                                                      U_SB_ETH_RX_CQ_INDEX),
4430                          bp->rx_ticks ? 0 : 1);
4431
4432                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4433                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4434                         CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4435                                                     C_SB_ETH_TX_CQ_INDEX),
4436                         bp->tx_ticks/12);
4437                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4438                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4439                                                      C_SB_ETH_TX_CQ_INDEX),
4440                          bp->tx_ticks ? 0 : 1);
4441         }
4442 }
4443
4444 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4445                                        struct bnx2x_fastpath *fp, int last)
4446 {
4447         int i;
4448
4449         for (i = 0; i < last; i++) {
4450                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4451                 struct sk_buff *skb = rx_buf->skb;
4452
4453                 if (skb == NULL) {
4454                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4455                         continue;
4456                 }
4457
4458                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4459                         pci_unmap_single(bp->pdev,
4460                                          pci_unmap_addr(rx_buf, mapping),
4461                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4462
4463                 dev_kfree_skb(skb);
4464                 rx_buf->skb = NULL;
4465         }
4466 }
4467
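/* Set up the Rx rings of every queue: pre-allocate the TPA skb pool
 * (when TPA is enabled), link the "next page" elements of the SGE, BD
 * and CQ rings, fill the rings with buffers and publish the initial
 * producers to the chip.
 */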
4468 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4469 {
4470         int func = BP_FUNC(bp);
4471         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4472                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4473         u16 ring_prod, cqe_ring_prod;
4474         int i, j;
4475
4476         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4477         DP(NETIF_MSG_IFUP,
4478            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4479
4480         if (bp->flags & TPA_ENABLE_FLAG) {
4481
4482                 for_each_rx_queue(bp, j) {
4483                         struct bnx2x_fastpath *fp = &bp->fp[j];
4484
4485                         for (i = 0; i < max_agg_queues; i++) {
4486                                 fp->tpa_pool[i].skb =
4487                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4488                                 if (!fp->tpa_pool[i].skb) {
4489                                         BNX2X_ERR("Failed to allocate TPA "
4490                                                   "skb pool for queue[%d] - "
4491                                                   "disabling TPA on this "
4492                                                   "queue!\n", j);
4493                                         bnx2x_free_tpa_pool(bp, fp, i);
4494                                         fp->disable_tpa = 1;
4495                                         break;
4496                                 }
4497                                 pci_unmap_addr_set(&fp->tpa_pool[i],
4498                                                    mapping,
4499                                                    0);
4500                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4501                         }
4502                 }
4503         }
4504
4505         for_each_rx_queue(bp, j) {
4506                 struct bnx2x_fastpath *fp = &bp->fp[j];
4507
4508                 fp->rx_bd_cons = 0;
4509                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4510                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4511
4512                 /* "next page" elements initialization */
4513                 /* SGE ring */
4514                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4515                         struct eth_rx_sge *sge;
4516
4517                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4518                         sge->addr_hi =
4519                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4520                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4521                         sge->addr_lo =
4522                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4523                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4524                 }
4525
4526                 bnx2x_init_sge_ring_bit_mask(fp);
4527
4528                 /* RX BD ring */
4529                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4530                         struct eth_rx_bd *rx_bd;
4531
4532                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4533                         rx_bd->addr_hi =
4534                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4535                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4536                         rx_bd->addr_lo =
4537                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4538                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4539                 }
4540
4541                 /* CQ ring */
4542                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4543                         struct eth_rx_cqe_next_page *nextpg;
4544
4545                         nextpg = (struct eth_rx_cqe_next_page *)
4546                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4547                         nextpg->addr_hi =
4548                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4549                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4550                         nextpg->addr_lo =
4551                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4552                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4553                 }
4554
4555                 /* Allocate SGEs and initialize the ring elements */
4556                 for (i = 0, ring_prod = 0;
4557                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4558
4559                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4560                                 BNX2X_ERR("was only able to allocate "
4561                                           "%d rx sges\n", i);
4562                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4563                                 /* Cleanup already allocated elements */
4564                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4565                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4566                                 fp->disable_tpa = 1;
4567                                 ring_prod = 0;
4568                                 break;
4569                         }
4570                         ring_prod = NEXT_SGE_IDX(ring_prod);
4571                 }
4572                 fp->rx_sge_prod = ring_prod;
4573
4574                 /* Allocate BDs and initialize BD ring */
4575                 fp->rx_comp_cons = 0;
4576                 cqe_ring_prod = ring_prod = 0;
4577                 for (i = 0; i < bp->rx_ring_size; i++) {
4578                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4579                                 BNX2X_ERR("was only able to allocate "
4580                                           "%d rx skbs on queue[%d]\n", i, j);
4581                                 fp->eth_q_stats.rx_skb_alloc_failed++;
4582                                 break;
4583                         }
4584                         ring_prod = NEXT_RX_IDX(ring_prod);
4585                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4586                         WARN_ON(ring_prod <= i);
4587                 }
4588
4589                 fp->rx_bd_prod = ring_prod;
4590                 /* must not have more available CQEs than BDs */
4591                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4592                                        cqe_ring_prod);
4593                 fp->rx_pkt = fp->rx_calls = 0;
4594
4595                 /* Warning!
4596                  * This will generate an interrupt (to the TSTORM),
4597                  * so it must only be done after the chip is initialized.
4598                  */
4599                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4600                                      fp->rx_sge_prod);
4601                 if (j != 0)
4602                         continue;
4603
4604                 REG_WR(bp, BAR_USTRORM_INTMEM +
4605                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4606                        U64_LO(fp->rx_comp_mapping));
4607                 REG_WR(bp, BAR_USTRORM_INTMEM +
4608                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4609                        U64_HI(fp->rx_comp_mapping));
4610         }
4611 }
4612
4613 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4614 {
4615         int i, j;
4616
4617         for_each_tx_queue(bp, j) {
4618                 struct bnx2x_fastpath *fp = &bp->fp[j];
4619
4620                 for (i = 1; i <= NUM_TX_RINGS; i++) {
4621                         struct eth_tx_bd *tx_bd =
4622                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4623
4624                         tx_bd->addr_hi =
4625                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4626                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4627                         tx_bd->addr_lo =
4628                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4629                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4630                 }
4631
4632                 fp->tx_pkt_prod = 0;
4633                 fp->tx_pkt_cons = 0;
4634                 fp->tx_bd_prod = 0;
4635                 fp->tx_bd_cons = 0;
4636                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4637                 fp->tx_pkt = 0;
4638         }
4639 }
4640
4641 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4642 {
4643         int func = BP_FUNC(bp);
4644
4645         spin_lock_init(&bp->spq_lock);
4646
4647         bp->spq_left = MAX_SPQ_PENDING;
4648         bp->spq_prod_idx = 0;
4649         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4650         bp->spq_prod_bd = bp->spq;
4651         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4652
4653         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4654                U64_LO(bp->spq_mapping));
4655         REG_WR(bp,
4656                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4657                U64_HI(bp->spq_mapping));
4658
4659         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4660                bp->spq_prod_idx);
4661 }
4662
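/* Fill the per-connection ETH context: USTORM Rx parameters (client id,
 * status block, buffer sizes, BD/SGE ring bases, optional TPA flags) and
 * XSTORM/CSTORM Tx parameters (Tx BD ring base, doorbell data address,
 * statistics id and Tx completion index).
 */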
4663 static void bnx2x_init_context(struct bnx2x *bp)
4664 {
4665         int i;
4666
4667         for_each_queue(bp, i) {
4668                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4669                 struct bnx2x_fastpath *fp = &bp->fp[i];
4670                 u8 cl_id = fp->cl_id;
4671                 u8 sb_id = fp->sb_id;
4672
4673                 context->ustorm_st_context.common.sb_index_numbers =
4674                                                 BNX2X_RX_SB_INDEX_NUM;
4675                 context->ustorm_st_context.common.clientId = cl_id;
4676                 context->ustorm_st_context.common.status_block_id = sb_id;
4677                 context->ustorm_st_context.common.flags =
4678                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4679                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4680                 context->ustorm_st_context.common.statistics_counter_id =
4681                                                 cl_id;
4682                 context->ustorm_st_context.common.mc_alignment_log_size =
4683                                                 BNX2X_RX_ALIGN_SHIFT;
4684                 context->ustorm_st_context.common.bd_buff_size =
4685                                                 bp->rx_buf_size;
4686                 context->ustorm_st_context.common.bd_page_base_hi =
4687                                                 U64_HI(fp->rx_desc_mapping);
4688                 context->ustorm_st_context.common.bd_page_base_lo =
4689                                                 U64_LO(fp->rx_desc_mapping);
4690                 if (!fp->disable_tpa) {
4691                         context->ustorm_st_context.common.flags |=
4692                                 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4693                                  USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4694                         context->ustorm_st_context.common.sge_buff_size =
4695                                 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4696                                          (u32)0xffff);
4697                         context->ustorm_st_context.common.sge_page_base_hi =
4698                                                 U64_HI(fp->rx_sge_mapping);
4699                         context->ustorm_st_context.common.sge_page_base_lo =
4700                                                 U64_LO(fp->rx_sge_mapping);
4701                 }
4702
4703                 context->ustorm_ag_context.cdu_usage =
4704                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4705                                                CDU_REGION_NUMBER_UCM_AG,
4706                                                ETH_CONNECTION_TYPE);
4707
4708                 context->xstorm_st_context.tx_bd_page_base_hi =
4709                                                 U64_HI(fp->tx_desc_mapping);
4710                 context->xstorm_st_context.tx_bd_page_base_lo =
4711                                                 U64_LO(fp->tx_desc_mapping);
4712                 context->xstorm_st_context.db_data_addr_hi =
4713                                                 U64_HI(fp->tx_prods_mapping);
4714                 context->xstorm_st_context.db_data_addr_lo =
4715                                                 U64_LO(fp->tx_prods_mapping);
4716                 context->xstorm_st_context.statistics_data = (cl_id |
4717                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4718                 context->cstorm_st_context.sb_index_number =
4719                                                 C_SB_ETH_TX_CQ_INDEX;
4720                 context->cstorm_st_context.status_block_id = sb_id;
4721
4722                 context->xstorm_ag_context.cdu_reserved =
4723                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4724                                                CDU_REGION_NUMBER_XCM_AG,
4725                                                ETH_CONNECTION_TYPE);
4726         }
4727 }
4728
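/* Program the TSTORM RSS indirection table, spreading the entries over
 * the Rx queues in a round-robin fashion (skipped when RSS is disabled).
 */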
4729 static void bnx2x_init_ind_table(struct bnx2x *bp)
4730 {
4731         int func = BP_FUNC(bp);
4732         int i;
4733
4734         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4735                 return;
4736
4737         DP(NETIF_MSG_IFUP,
4738            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
4739         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4740                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4741                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4742                         bp->fp->cl_id + (i % bp->num_rx_queues));
4743 }
4744
4745 static void bnx2x_set_client_config(struct bnx2x *bp)
4746 {
4747         struct tstorm_eth_client_config tstorm_client = {0};
4748         int port = BP_PORT(bp);
4749         int i;
4750
4751         tstorm_client.mtu = bp->dev->mtu;
4752         tstorm_client.config_flags =
4753                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4754                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
4755 #ifdef BCM_VLAN
4756         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4757                 tstorm_client.config_flags |=
4758                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
4759                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4760         }
4761 #endif
4762
4763         if (bp->flags & TPA_ENABLE_FLAG) {
4764                 tstorm_client.max_sges_for_packet =
4765                         SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4766                 tstorm_client.max_sges_for_packet =
4767                         ((tstorm_client.max_sges_for_packet +
4768                           PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4769                         PAGES_PER_SGE_SHIFT;
4770
4771                 tstorm_client.config_flags |=
4772                                 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4773         }
4774
4775         for_each_queue(bp, i) {
4776                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4777
4778                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4779                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4780                        ((u32 *)&tstorm_client)[0]);
4781                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4782                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4783                        ((u32 *)&tstorm_client)[1]);
4784         }
4785
4786         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4787            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4788 }
4789
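/* Translate the driver Rx mode (none/normal/allmulti/promisc) into the
 * TSTORM MAC filtering masks for this function and, unless Rx is fully
 * disabled, refresh the per-client configuration as well.
 */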
4790 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4791 {
4792         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4793         int mode = bp->rx_mode;
4794         int mask = (1 << BP_L_ID(bp));
4795         int func = BP_FUNC(bp);
4796         int i;
4797
4798         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
4799
4800         switch (mode) {
4801         case BNX2X_RX_MODE_NONE: /* no Rx */
4802                 tstorm_mac_filter.ucast_drop_all = mask;
4803                 tstorm_mac_filter.mcast_drop_all = mask;
4804                 tstorm_mac_filter.bcast_drop_all = mask;
4805                 break;
4806
4807         case BNX2X_RX_MODE_NORMAL:
4808                 tstorm_mac_filter.bcast_accept_all = mask;
4809                 break;
4810
4811         case BNX2X_RX_MODE_ALLMULTI:
4812                 tstorm_mac_filter.mcast_accept_all = mask;
4813                 tstorm_mac_filter.bcast_accept_all = mask;
4814                 break;
4815
4816         case BNX2X_RX_MODE_PROMISC:
4817                 tstorm_mac_filter.ucast_accept_all = mask;
4818                 tstorm_mac_filter.mcast_accept_all = mask;
4819                 tstorm_mac_filter.bcast_accept_all = mask;
4820                 break;
4821
4822         default:
4823                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4824                 break;
4825         }
4826
4827         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4828                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4829                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4830                        ((u32 *)&tstorm_mac_filter)[i]);
4831
4832 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4833                    ((u32 *)&tstorm_mac_filter)[i]); */
4834         }
4835
4836         if (mode != BNX2X_RX_MODE_NONE)
4837                 bnx2x_set_client_config(bp);
4838 }
4839
4840 static void bnx2x_init_internal_common(struct bnx2x *bp)
4841 {
4842         int i;
4843
4844         if (bp->flags & TPA_ENABLE_FLAG) {
4845                 struct tstorm_eth_tpa_exist tpa = {0};
4846
4847                 tpa.tpa_exist = 1;
4848
4849                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4850                        ((u32 *)&tpa)[0]);
4851                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4852                        ((u32 *)&tpa)[1]);
4853         }
4854
4855         /* Zero this manually as its initialization is
4856            currently missing in the initTool */
4857         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4858                 REG_WR(bp, BAR_USTRORM_INTMEM +
4859                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4860 }
4861
4862 static void bnx2x_init_internal_port(struct bnx2x *bp)
4863 {
4864         int port = BP_PORT(bp);
4865
4866         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4867         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4868         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4869         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4870 }
4871
4872 /* Calculates the sum of vn_min_rates.
4873    It's needed for further normalizing of the min_rates.
4874    Returns:
4875      sum of vn_min_rates.
4876        or
4877      0 - if all the min_rates are 0.
4878      In the latter case the fairness algorithm should be deactivated.
4879      If not all min_rates are zero then those that are zero will be set to 1.
4880  */
4881 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4882 {
4883         int all_zero = 1;
4884         int port = BP_PORT(bp);
4885         int vn;
4886
4887         bp->vn_weight_sum = 0;
4888         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4889                 int func = 2*vn + port;
4890                 u32 vn_cfg =
4891                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4892                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4893                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4894
4895                 /* Skip hidden vns */
4896                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4897                         continue;
4898
4899                 /* If min rate is zero - set it to 1 */
4900                 if (!vn_min_rate)
4901                         vn_min_rate = DEF_MIN_RATE;
4902                 else
4903                         all_zero = 0;
4904
4905                 bp->vn_weight_sum += vn_min_rate;
4906         }
4907
4908         /* ... only if all min rates are zero - disable fairness */
4909         if (all_zero)
4910                 bp->vn_weight_sum = 0;
4911 }
4912
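/* Per-function internal RAM initialization: RSS/E1HOV configuration,
 * initial Rx mode, per-client statistics reset, statistics query
 * addresses, maximal TPA aggregation size, dropless flow control
 * thresholds (E1H) and the rate shaping/fairness (cmng) context.
 */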
4913 static void bnx2x_init_internal_func(struct bnx2x *bp)
4914 {
4915         struct tstorm_eth_function_common_config tstorm_config = {0};
4916         struct stats_indication_flags stats_flags = {0};
4917         int port = BP_PORT(bp);
4918         int func = BP_FUNC(bp);
4919         int i, j;
4920         u32 offset;
4921         u16 max_agg_size;
4922
4923         if (is_multi(bp)) {
4924                 tstorm_config.config_flags = MULTI_FLAGS(bp);
4925                 tstorm_config.rss_result_mask = MULTI_MASK;
4926         }
4927         if (IS_E1HMF(bp))
4928                 tstorm_config.config_flags |=
4929                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
4930
4931         tstorm_config.leading_client_id = BP_L_ID(bp);
4932
4933         REG_WR(bp, BAR_TSTRORM_INTMEM +
4934                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4935                (*(u32 *)&tstorm_config));
4936
4937         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4938         bnx2x_set_storm_rx_mode(bp);
4939
4940         for_each_queue(bp, i) {
4941                 u8 cl_id = bp->fp[i].cl_id;
4942
4943                 /* reset xstorm per client statistics */
4944                 offset = BAR_XSTRORM_INTMEM +
4945                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4946                 for (j = 0;
4947                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4948                         REG_WR(bp, offset + j*4, 0);
4949
4950                 /* reset tstorm per client statistics */
4951                 offset = BAR_TSTRORM_INTMEM +
4952                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4953                 for (j = 0;
4954                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4955                         REG_WR(bp, offset + j*4, 0);
4956
4957                 /* reset ustorm per client statistics */
4958                 offset = BAR_USTRORM_INTMEM +
4959                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4960                 for (j = 0;
4961                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4962                         REG_WR(bp, offset + j*4, 0);
4963         }
4964
4965         /* Init statistics related context */
4966         stats_flags.collect_eth = 1;
4967
4968         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4969                ((u32 *)&stats_flags)[0]);
4970         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4971                ((u32 *)&stats_flags)[1]);
4972
4973         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4974                ((u32 *)&stats_flags)[0]);
4975         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4976                ((u32 *)&stats_flags)[1]);
4977
4978         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
4979                ((u32 *)&stats_flags)[0]);
4980         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
4981                ((u32 *)&stats_flags)[1]);
4982
4983         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4984                ((u32 *)&stats_flags)[0]);
4985         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4986                ((u32 *)&stats_flags)[1]);
4987
4988         REG_WR(bp, BAR_XSTRORM_INTMEM +
4989                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4990                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4991         REG_WR(bp, BAR_XSTRORM_INTMEM +
4992                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4993                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4994
4995         REG_WR(bp, BAR_TSTRORM_INTMEM +
4996                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4997                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4998         REG_WR(bp, BAR_TSTRORM_INTMEM +
4999                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5000                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5001
5002         REG_WR(bp, BAR_USTRORM_INTMEM +
5003                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5004                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5005         REG_WR(bp, BAR_USTRORM_INTMEM +
5006                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5007                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5008
5009         if (CHIP_IS_E1H(bp)) {
5010                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5011                         IS_E1HMF(bp));
5012                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5013                         IS_E1HMF(bp));
5014                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5015                         IS_E1HMF(bp));
5016                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5017                         IS_E1HMF(bp));
5018
5019                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5020                          bp->e1hov);
5021         }
5022
5023         /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
5024         max_agg_size =
5025                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5026                           SGE_PAGE_SIZE * PAGES_PER_SGE),
5027                     (u32)0xffff);
5028         for_each_rx_queue(bp, i) {
5029                 struct bnx2x_fastpath *fp = &bp->fp[i];
5030
5031                 REG_WR(bp, BAR_USTRORM_INTMEM +
5032                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5033                        U64_LO(fp->rx_comp_mapping));
5034                 REG_WR(bp, BAR_USTRORM_INTMEM +
5035                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5036                        U64_HI(fp->rx_comp_mapping));
5037
5038                 REG_WR16(bp, BAR_USTRORM_INTMEM +
5039                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5040                          max_agg_size);
5041         }
5042
5043         /* dropless flow control */
5044         if (CHIP_IS_E1H(bp)) {
5045                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5046
5047                 rx_pause.bd_thr_low = 250;
5048                 rx_pause.cqe_thr_low = 250;
5049                 rx_pause.cos = 1;
5050                 rx_pause.sge_thr_low = 0;
5051                 rx_pause.bd_thr_high = 350;
5052                 rx_pause.cqe_thr_high = 350;
5053                 rx_pause.sge_thr_high = 0;
5054
5055                 for_each_rx_queue(bp, i) {
5056                         struct bnx2x_fastpath *fp = &bp->fp[i];
5057
5058                         if (!fp->disable_tpa) {
5059                                 rx_pause.sge_thr_low = 150;
5060                                 rx_pause.sge_thr_high = 250;
5061                         }
5062
5063
5064                         offset = BAR_USTRORM_INTMEM +
5065                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5066                                                                    fp->cl_id);
5067                         for (j = 0;
5068                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5069                              j++)
5070                                 REG_WR(bp, offset + j*4,
5071                                        ((u32 *)&rx_pause)[j]);
5072                 }
5073         }
5074
5075         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5076
5077         /* Init rate shaping and fairness contexts */
5078         if (IS_E1HMF(bp)) {
5079                 int vn;
5080
5081                 /* During init there is no active link;
5082                    until the link is up, set the link rate to 10Gbps */
5083                 bp->link_vars.line_speed = SPEED_10000;
5084                 bnx2x_init_port_minmax(bp);
5085
5086                 bnx2x_calc_vn_weight_sum(bp);
5087
5088                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5089                         bnx2x_init_vn_minmax(bp, 2*vn + port);
5090
5091                 /* Enable rate shaping and fairness */
5092                 bp->cmng.flags.cmng_enables =
5093                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5094                 if (bp->vn_weight_sum)
5095                         bp->cmng.flags.cmng_enables |=
5096                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5097                 else
5098                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5099                            "  fairness will be disabled\n");
5100         } else {
5101                 /* rate shaping and fairness are disabled */
5102                 DP(NETIF_MSG_IFUP,
5103                    "single function mode  minmax will be disabled\n");
5104         }
5105
5106
5107         /* Store it to internal memory */
5108         if (bp->port.pmf)
5109                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5110                         REG_WR(bp, BAR_XSTRORM_INTMEM +
5111                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5112                                ((u32 *)(&bp->cmng))[i]);
5113 }
5114
5115 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5116 {
5117         switch (load_code) {
5118         case FW_MSG_CODE_DRV_LOAD_COMMON:
5119                 bnx2x_init_internal_common(bp);
5120                 /* no break */
5121
5122         case FW_MSG_CODE_DRV_LOAD_PORT:
5123                 bnx2x_init_internal_port(bp);
5124                 /* no break */
5125
5126         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5127                 bnx2x_init_internal_func(bp);
5128                 break;
5129
5130         default:
5131                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5132                 break;
5133         }
5134 }
5135
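/* Top-level NIC initialization after firmware load: set up the per-queue
 * and default status blocks, coalescing, Rx/Tx/slow-path rings,
 * connection contexts, internal memories, the indirection table and the
 * statistics machinery, then enable interrupts.
 */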
5136 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5137 {
5138         int i;
5139
5140         for_each_queue(bp, i) {
5141                 struct bnx2x_fastpath *fp = &bp->fp[i];
5142
5143                 fp->bp = bp;
5144                 fp->state = BNX2X_FP_STATE_CLOSED;
5145                 fp->index = i;
5146                 fp->cl_id = BP_L_ID(bp) + i;
5147                 fp->sb_id = fp->cl_id;
5148                 DP(NETIF_MSG_IFUP,
5149                    "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
5150                    i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5151                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5152                               fp->sb_id);
5153                 bnx2x_update_fpsb_idx(fp);
5154         }
5155
5156         /* ensure status block indices were read */
5157         rmb();
5158
5159
5160         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5161                           DEF_SB_ID);
5162         bnx2x_update_dsb_idx(bp);
5163         bnx2x_update_coalesce(bp);
5164         bnx2x_init_rx_rings(bp);
5165         bnx2x_init_tx_ring(bp);
5166         bnx2x_init_sp_ring(bp);
5167         bnx2x_init_context(bp);
5168         bnx2x_init_internal(bp, load_code);
5169         bnx2x_init_ind_table(bp);
5170         bnx2x_stats_init(bp);
5171
5172         /* At this point, we are ready for interrupts */
5173         atomic_set(&bp->intr_sem, 0);
5174
5175         /* flush all before enabling interrupts */
5176         mb();
5177         mmiowb();
5178
5179         bnx2x_int_enable(bp);
5180 }
5181
5182 /* end of nic init */
5183
5184 /*
5185  * gzip service functions
5186  */
5187
5188 static int bnx2x_gunzip_init(struct bnx2x *bp)
5189 {
5190         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5191                                               &bp->gunzip_mapping);
5192         if (bp->gunzip_buf  == NULL)
5193                 goto gunzip_nomem1;
5194
5195         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5196         if (bp->strm  == NULL)
5197                 goto gunzip_nomem2;
5198
5199         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5200                                       GFP_KERNEL);
5201         if (bp->strm->workspace == NULL)
5202                 goto gunzip_nomem3;
5203
5204         return 0;
5205
5206 gunzip_nomem3:
5207         kfree(bp->strm);
5208         bp->strm = NULL;
5209
5210 gunzip_nomem2:
5211         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5212                             bp->gunzip_mapping);
5213         bp->gunzip_buf = NULL;
5214
5215 gunzip_nomem1:
5216         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5217                " un-compression\n", bp->dev->name);
5218         return -ENOMEM;
5219 }
5220
5221 static void bnx2x_gunzip_end(struct bnx2x *bp)
5222 {
5223         kfree(bp->strm->workspace);
5224
5225         kfree(bp->strm);
5226         bp->strm = NULL;
5227
5228         if (bp->gunzip_buf) {
5229                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5230                                     bp->gunzip_mapping);
5231                 bp->gunzip_buf = NULL;
5232         }
5233 }
5234
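/* Decompress a gzip-wrapped firmware blob into the pre-allocated gunzip
 * buffer: validate the gzip header, skip an optional embedded file name,
 * and inflate the raw deflate stream with zlib.
 */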
5235 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
5236 {
5237         int n, rc;
5238
5239         /* check gzip header */
5240         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
5241                 return -EINVAL;
5242
5243         n = 10;
5244
5245 #define FNAME                           0x8
5246
5247         if (zbuf[3] & FNAME)
5248                 while ((zbuf[n++] != 0) && (n < len));
5249
5250         bp->strm->next_in = zbuf + n;
5251         bp->strm->avail_in = len - n;
5252         bp->strm->next_out = bp->gunzip_buf;
5253         bp->strm->avail_out = FW_BUF_SIZE;
5254
5255         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5256         if (rc != Z_OK)
5257                 return rc;
5258
5259         rc = zlib_inflate(bp->strm, Z_FINISH);
5260         if ((rc != Z_OK) && (rc != Z_STREAM_END))
5261                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5262                        bp->dev->name, bp->strm->msg);
5263
5264         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5265         if (bp->gunzip_outlen & 0x3)
5266                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5267                                     " gunzip_outlen (%d) not aligned\n",
5268                        bp->dev->name, bp->gunzip_outlen);
5269         bp->gunzip_outlen >>= 2;
5270
5271         zlib_inflateEnd(bp->strm);
5272
5273         if (rc == Z_STREAM_END)
5274                 return 0;
5275
5276         return rc;
5277 }
5278
5279 /* nic load/unload */
5280
5281 /*
5282  * General service functions
5283  */
5284
5285 /* send a NIG loopback debug packet */
5286 static void bnx2x_lb_pckt(struct bnx2x *bp)
5287 {
5288         u32 wb_write[3];
5289
5290         /* Ethernet source and destination addresses */
5291         wb_write[0] = 0x55555555;
5292         wb_write[1] = 0x55555555;
5293         wb_write[2] = 0x20;             /* SOP */
5294         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5295
5296         /* NON-IP protocol */
5297         wb_write[0] = 0x09000000;
5298         wb_write[1] = 0x55555555;
5299         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
5300         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5301 }
5302
5303 /* Some of the internal memories
5304  * are not directly readable from the driver;
5305  * to test them we send debug packets.
5306  */
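/*
 * Rough sequence below: isolate the parser from its neighbour blocks and
 * zero its CFC search credits, send one loopback packet and verify the NIG
 * and PRS counters see it; reset and re-init BRB/PRS, repeat with ten
 * packets, then restore one CFC search credit and check the parser keeps
 * advancing (2, then 3 packets); finally drain the NIG EOP FIFO, reset
 * BRB/PRS/NIG and re-enable the neighbour block inputs.
 */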
5307 static int bnx2x_int_mem_test(struct bnx2x *bp)
5308 {
5309         int factor;
5310         int count, i;
5311         u32 val = 0;
5312
5313         if (CHIP_REV_IS_FPGA(bp))
5314                 factor = 120;
5315         else if (CHIP_REV_IS_EMUL(bp))
5316                 factor = 200;
5317         else
5318                 factor = 1;
5319
5320         DP(NETIF_MSG_HW, "start part1\n");
5321
5322         /* Disable inputs of parser neighbor blocks */
5323         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5324         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5325         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5326         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5327
5328         /* Write 0 to parser credits for CFC search request */
5329         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5330
5331         /* send Ethernet packet */
5332         bnx2x_lb_pckt(bp);
5333
5334         /* TODO: should the NIG statistics be reset here? */
5335         /* Wait until NIG register shows 1 packet of size 0x10 */
5336         count = 1000 * factor;
5337         while (count) {
5338
5339                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5340                 val = *bnx2x_sp(bp, wb_data[0]);
5341                 if (val == 0x10)
5342                         break;
5343
5344                 msleep(10);
5345                 count--;
5346         }
5347         if (val != 0x10) {
5348                 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5349                 return -1;
5350         }
5351
5352         /* Wait until PRS register shows 1 packet */
5353         count = 1000 * factor;
5354         while (count) {
5355                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5356                 if (val == 1)
5357                         break;
5358
5359                 msleep(10);
5360                 count--;
5361         }
5362         if (val != 0x1) {
5363                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5364                 return -2;
5365         }
5366
5367         /* Reset and init BRB, PRS */
5368         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5369         msleep(50);
5370         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5371         msleep(50);
5372         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5373         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5374
5375         DP(NETIF_MSG_HW, "part2\n");
5376
5377         /* Disable inputs of parser neighbor blocks */
5378         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5379         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5380         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5381         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5382
5383         /* Write 0 to parser credits for CFC search request */
5384         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5385
5386         /* send 10 Ethernet packets */
5387         for (i = 0; i < 10; i++)
5388                 bnx2x_lb_pckt(bp);
5389
5390         /* Wait until NIG register shows 10 + 1
5391            packets of size 11*0x10 = 0xb0 */
5392         count = 1000 * factor;
5393         while (count) {
5394
5395                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5396                 val = *bnx2x_sp(bp, wb_data[0]);
5397                 if (val == 0xb0)
5398                         break;
5399
5400                 msleep(10);
5401                 count--;
5402         }
5403         if (val != 0xb0) {
5404                 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5405                 return -3;
5406         }
5407
5408         /* Wait until PRS register shows 2 packets */
5409         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5410         if (val != 2)
5411                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5412
5413         /* Write 1 to parser credits for CFC search request */
5414         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5415
5416         /* Wait until PRS register shows 3 packets */
5417         msleep(10 * factor);
5418         /* Wait until NIG register shows 1 packet of size 0x10 */
5419         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5420         if (val != 3)
5421                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5422
5423         /* clear NIG EOP FIFO */
5424         for (i = 0; i < 11; i++)
5425                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5426         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5427         if (val != 1) {
5428                 BNX2X_ERR("clear of NIG failed\n");
5429                 return -4;
5430         }
5431
5432         /* Reset and init BRB, PRS, NIG */
5433         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5434         msleep(50);
5435         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5436         msleep(50);
5437         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5438         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5439 #ifndef BCM_ISCSI
5440         /* set NIC mode */
5441         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5442 #endif
5443
5444         /* Enable inputs of parser neighbor blocks */
5445         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5446         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5447         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5448         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5449
5450         DP(NETIF_MSG_HW, "done\n");
5451
5452         return 0; /* OK */
5453 }
5454
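/*
 * Writing 0 to a block's *_INT_MASK register leaves all of its attention
 * bits unmasked; set bits mask the corresponding attentions (the PBF write
 * below keeps bits 3 and 4 masked with 0x18).
 */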
5455 static void enable_blocks_attention(struct bnx2x *bp)
5456 {
5457         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5458         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5459         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5460         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5461         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5462         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5463         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5464         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5465         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5466 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5467 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5468         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5469         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5470         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5471 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5472 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5473         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5474         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5475         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5476         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5477 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5478 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5479         if (CHIP_REV_IS_FPGA(bp))
5480                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5481         else
5482                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5483         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5484         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5485         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5486 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5487 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5488         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5489         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5490 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5491         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3, 4 masked */
5492 }
5493
5494
5495 static void bnx2x_reset_common(struct bnx2x *bp)
5496 {
5497         /* reset_common */
5498         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5499                0xd3ffff7f);
5500         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5501 }
5502
5503 static int bnx2x_init_common(struct bnx2x *bp)
5504 {
5505         u32 val, i;
5506
5507         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
5508
5509         bnx2x_reset_common(bp);
5510         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5511         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5512
5513         bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5514         if (CHIP_IS_E1H(bp))
5515                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5516
5517         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5518         msleep(30);
5519         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5520
5521         bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5522         if (CHIP_IS_E1(bp)) {
5523                 /* enable HW interrupt from PXP on USDM overflow
5524                    bit 16 on INT_MASK_0 */
5525                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5526         }
5527
5528         bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5529         bnx2x_init_pxp(bp);
5530
5531 #ifdef __BIG_ENDIAN
5532         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5533         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5534         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5535         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5536         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5537         /* make sure this value is 0 */
5538         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5539
5540 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5541         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5542         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5543         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5544         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5545 #endif
5546
5547         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5548 #ifdef BCM_ISCSI
5549         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5550         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5551         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5552 #endif
5553
5554         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5555                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5556
5557         /* let the HW do its magic ... */
5558         msleep(100);
5559         /* finish PXP init */
5560         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5561         if (val != 1) {
5562                 BNX2X_ERR("PXP2 CFG failed\n");
5563                 return -EBUSY;
5564         }
5565         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5566         if (val != 1) {
5567                 BNX2X_ERR("PXP2 RD_INIT failed\n");
5568                 return -EBUSY;
5569         }
5570
5571         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5572         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5573
5574         bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5575
5576         /* clean the DMAE memory */
5577         bp->dmae_ready = 1;
5578         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5579
5580         bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5581         bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5582         bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5583         bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5584
5585         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5586         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5587         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5588         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5589
5590         bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5591         /* soft reset pulse */
5592         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5593         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5594
5595 #ifdef BCM_ISCSI
5596         bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5597 #endif
5598
5599         bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5600         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5601         if (!CHIP_REV_IS_SLOW(bp)) {
5602                 /* enable hw interrupt from doorbell Q */
5603                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5604         }
5605
5606         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5607         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5608         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5609         /* set NIC mode */
5610         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5611         if (CHIP_IS_E1H(bp))
5612                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5613
5614         bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5615         bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5616         bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5617         bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5618
5619         bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5620         bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5621         bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5622         bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5623
5624         bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5625         bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5626         bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5627         bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5628
5629         /* sync semi rtc */
5630         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5631                0x80000000);
5632         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5633                0x80000000);
5634
5635         bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5636         bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5637         bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5638
5639         REG_WR(bp, SRC_REG_SOFT_RST, 1);
5640         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5641                 REG_WR(bp, i, 0xc0cac01a);
5642                 /* TODO: replace with something meaningful */
5643         }
5644         bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5645         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5646
5647         if (sizeof(union cdu_context) != 1024)
5648                 /* we currently assume that a context is 1024 bytes */
5649                 printk(KERN_ALERT PFX "please adjust the size of"
5650                        " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5651
5652         bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
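        /*
         * The low bits of the value written below carry the context size
         * (1024, matching the sizeof(union cdu_context) check above); the
         * meaning of the other two fields (4 << 24 and 0 << 12) is not
         * documented here.
         */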
5653         val = (4 << 24) + (0 << 12) + 1024;
5654         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5655         if (CHIP_IS_E1(bp)) {
5656                 /* !!! fix pxp client credit until excel update */
5657                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5658                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5659         }
5660
5661         bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5662         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5663         /* enable context validation interrupt from CFC */
5664         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5665
5666         /* set the thresholds to prevent CFC/CDU race */
5667         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5668
5669         bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5670         bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5671
5672         /* PXPCS COMMON comes here */
5673         /* Reset PCIE errors for debug */
5674         REG_WR(bp, 0x2814, 0xffffffff);
5675         REG_WR(bp, 0x3820, 0xffffffff);
5676
5677         /* EMAC0 COMMON comes here */
5678         /* EMAC1 COMMON comes here */
5679         /* DBU COMMON comes here */
5680         /* DBG COMMON comes here */
5681
5682         bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5683         if (CHIP_IS_E1H(bp)) {
5684                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5685                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5686         }
5687
5688         if (CHIP_REV_IS_SLOW(bp))
5689                 msleep(200);
5690
5691         /* finish CFC init */
5692         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5693         if (val != 1) {
5694                 BNX2X_ERR("CFC LL_INIT failed\n");
5695                 return -EBUSY;
5696         }
5697         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5698         if (val != 1) {
5699                 BNX2X_ERR("CFC AC_INIT failed\n");
5700                 return -EBUSY;
5701         }
5702         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5703         if (val != 1) {
5704                 BNX2X_ERR("CFC CAM_INIT failed\n");
5705                 return -EBUSY;
5706         }
5707         REG_WR(bp, CFC_REG_DEBUG0, 0);
5708
5709         /* read the NIG statistic
5710            to see if this is the first bring-up since power-up */
5711         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5712         val = *bnx2x_sp(bp, wb_data[0]);
5713
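        /*
         * A zero BRB octet count means no traffic has passed through the
         * NIG since power-up, so the self test (which resets BRB/PRS/NIG
         * and injects packets) is only run on that first bring-up, and
         * only on E1 devices.
         */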
5714         /* do internal memory self test */
5715         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5716                 BNX2X_ERR("internal mem self test failed\n");
5717                 return -EBUSY;
5718         }
5719
5720         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5721         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5722         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5723         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5724                 bp->port.need_hw_lock = 1;
5725                 break;
5726
5727         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5728                 /* Fan failure is indicated by SPIO 5 */
5729                 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5730                                MISC_REGISTERS_SPIO_INPUT_HI_Z);
5731
5732                 /* set to active low mode */
5733                 val = REG_RD(bp, MISC_REG_SPIO_INT);
5734                 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5735                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5736                 REG_WR(bp, MISC_REG_SPIO_INT, val);
5737
5738                 /* enable interrupt to signal the IGU */
5739                 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5740                 val |= (1 << MISC_REGISTERS_SPIO_5);
5741                 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5742                 break;
5743
5744         default:
5745                 break;
5746         }
5747
5748         /* clear PXP2 attentions */
5749         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5750
5751         enable_blocks_attention(bp);
5752
5753         if (!BP_NOMCP(bp)) {
5754                 bnx2x_acquire_phy_lock(bp);
5755                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5756                 bnx2x_release_phy_lock(bp);
5757         } else
5758                 BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
5759
5760         return 0;
5761 }
5762
5763 static int bnx2x_init_port(struct bnx2x *bp)
5764 {
5765         int port = BP_PORT(bp);
5766         u32 low, high;
5767         u32 val;
5768
5769         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
5770
5771         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5772
5773         /* Port PXP comes here */
5774         /* Port PXP2 comes here */
5775 #ifdef BCM_ISCSI
5776         /* Port0  1
5777          * Port1  385 */
5778         i++;