1 /* bnx2x_main.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2009 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h>  /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
51 #include <linux/io.h>
52
53 #include "bnx2x_reg.h"
54 #include "bnx2x_fw_defs.h"
55 #include "bnx2x_hsi.h"
56 #include "bnx2x_link.h"
57 #include "bnx2x.h"
58 #include "bnx2x_init.h"
59
60 #define DRV_MODULE_VERSION      "1.45.26"
61 #define DRV_MODULE_RELDATE      "2009/01/26"
62 #define BNX2X_BC_VER            0x040200
63
64 /* Time in jiffies before concluding the transmitter is hung */
65 #define TX_TIMEOUT              (5*HZ)
66
67 static char version[] __devinitdata =
68         "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
69         DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
70
71 MODULE_AUTHOR("Eliezer Tamir");
72 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
73 MODULE_LICENSE("GPL");
74 MODULE_VERSION(DRV_MODULE_VERSION);
75
76 static int disable_tpa;
77 static int use_inta;
78 static int poll;
79 static int debug;
80 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
81 static int use_multi;
82
83 module_param(disable_tpa, int, 0);
84 module_param(use_inta, int, 0);
85 module_param(poll, int, 0);
86 module_param(debug, int, 0);
87 MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
88 MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
89 MODULE_PARM_DESC(poll, "use polling (for debug)");
90 MODULE_PARM_DESC(debug, "default debug msglevel");
91
92 #ifdef BNX2X_MULTI
93 module_param(use_multi, int, 0);
94 MODULE_PARM_DESC(use_multi, "use per-CPU queues");
95 #endif
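
/* Example module load (a hedged illustration only; assumes DRV_MODULE_NAME
 * is "bnx2x"):
 *   modprobe bnx2x disable_tpa=1 use_inta=1
 */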
96 static struct workqueue_struct *bnx2x_wq;
97
98 enum bnx2x_board_type {
99         BCM57710 = 0,
100         BCM57711 = 1,
101         BCM57711E = 2,
102 };
103
104 /* indexed by board_type, above */
105 static struct {
106         char *name;
107 } board_info[] __devinitdata = {
108         { "Broadcom NetXtreme II BCM57710 XGb" },
109         { "Broadcom NetXtreme II BCM57711 XGb" },
110         { "Broadcom NetXtreme II BCM57711E XGb" }
111 };
112
113
114 static const struct pci_device_id bnx2x_pci_tbl[] = {
115         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
116                 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
117         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
118                 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
119         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
120                 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
121         { 0 }
122 };
123
124 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
125
126 /****************************************************************************
127 * General service functions
128 ****************************************************************************/
129
130 /* used only at init
131  * locking is done by mcp
132  */
133 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
134 {
135         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
136         pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
137         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
138                                PCICFG_VENDOR_ID_OFFSET);
139 }
140
141 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
142 {
143         u32 val;
144
145         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
146         pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
147         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
148                                PCICFG_VENDOR_ID_OFFSET);
149
150         return val;
151 }
152
153 static const u32 dmae_reg_go_c[] = {
154         DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
155         DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
156         DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
157         DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
158 };
159
160 /* copy command into DMAE command memory and set DMAE command go */
161 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
162                             int idx)
163 {
164         u32 cmd_offset;
165         int i;
166
167         cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
168         for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
169                 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
170
171                 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
172                    idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
173         }
174         REG_WR(bp, dmae_reg_go_c[idx], 1);
175 }
176
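/* DMA len32 dwords from host memory at dma_addr to GRC address dst_addr.
 * Falls back to indirect writes while DMAE is not ready, and polls the
 * wb_comp word for the DMAE completion value (with a bounded timeout).
 */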
177 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
178                       u32 len32)
179 {
180         struct dmae_command *dmae = &bp->init_dmae;
181         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
182         int cnt = 200;
183
184         if (!bp->dmae_ready) {
185                 u32 *data = bnx2x_sp(bp, wb_data[0]);
186
187                 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
188                    "  using indirect\n", dst_addr, len32);
189                 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
190                 return;
191         }
192
193         mutex_lock(&bp->dmae_mutex);
194
195         memset(dmae, 0, sizeof(struct dmae_command));
196
197         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
198                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
199                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
200 #ifdef __BIG_ENDIAN
201                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
202 #else
203                         DMAE_CMD_ENDIANITY_DW_SWAP |
204 #endif
205                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
206                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
207         dmae->src_addr_lo = U64_LO(dma_addr);
208         dmae->src_addr_hi = U64_HI(dma_addr);
209         dmae->dst_addr_lo = dst_addr >> 2;
210         dmae->dst_addr_hi = 0;
211         dmae->len = len32;
212         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
213         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
214         dmae->comp_val = DMAE_COMP_VAL;
215
216         DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
217            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
218                     "dst_addr [%x:%08x (%08x)]\n"
219            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
220            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
221            dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
222            dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
223         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
224            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
225            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
226
227         *wb_comp = 0;
228
229         bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
230
231         udelay(5);
232
233         while (*wb_comp != DMAE_COMP_VAL) {
234                 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
235
236                 if (!cnt) {
237                         BNX2X_ERR("dmae timeout!\n");
238                         break;
239                 }
240                 cnt--;
241                 /* adjust delay for emulation/FPGA */
242                 if (CHIP_REV_IS_SLOW(bp))
243                         msleep(100);
244                 else
245                         udelay(5);
246         }
247
248         mutex_unlock(&bp->dmae_mutex);
249 }
250
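/* DMA len32 dwords from GRC address src_addr into the slowpath wb_data
 * buffer. Falls back to indirect register reads while DMAE is not ready.
 */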
251 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
252 {
253         struct dmae_command *dmae = &bp->init_dmae;
254         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
255         int cnt = 200;
256
257         if (!bp->dmae_ready) {
258                 u32 *data = bnx2x_sp(bp, wb_data[0]);
259                 int i;
260
261                 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
262                    "  using indirect\n", src_addr, len32);
263                 for (i = 0; i < len32; i++)
264                         data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
265                 return;
266         }
267
268         mutex_lock(&bp->dmae_mutex);
269
270         memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
271         memset(dmae, 0, sizeof(struct dmae_command));
272
273         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
274                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
275                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
276 #ifdef __BIG_ENDIAN
277                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
278 #else
279                         DMAE_CMD_ENDIANITY_DW_SWAP |
280 #endif
281                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
282                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
283         dmae->src_addr_lo = src_addr >> 2;
284         dmae->src_addr_hi = 0;
285         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
286         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
287         dmae->len = len32;
288         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
289         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
290         dmae->comp_val = DMAE_COMP_VAL;
291
292         DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
293            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
294                     "dst_addr [%x:%08x (%08x)]\n"
295            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
296            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
297            dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
298            dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
299
300         *wb_comp = 0;
301
302         bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
303
304         udelay(5);
305
306         while (*wb_comp != DMAE_COMP_VAL) {
307
308                 if (!cnt) {
309                         BNX2X_ERR("dmae timeout!\n");
310                         break;
311                 }
312                 cnt--;
313                 /* adjust delay for emulation/FPGA */
314                 if (CHIP_REV_IS_SLOW(bp))
315                         msleep(100);
316                 else
317                         udelay(5);
318         }
319         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
320            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
321            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
322
323         mutex_unlock(&bp->dmae_mutex);
324 }
325
326 /* used only for slowpath so not inlined */
327 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
328 {
329         u32 wb_write[2];
330
331         wb_write[0] = val_hi;
332         wb_write[1] = val_lo;
333         REG_WR_DMAE(bp, reg, wb_write, 2);
334 }
335
336 #ifdef USE_WB_RD
337 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
338 {
339         u32 wb_data[2];
340
341         REG_RD_DMAE(bp, reg, wb_data, 2);
342
343         return HILO_U64(wb_data[0], wb_data[1]);
344 }
345 #endif
346
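/* Scan the XSTORM/TSTORM/CSTORM/USTORM assert lists and print any valid
 * entries; returns the number of asserts found.
 */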
347 static int bnx2x_mc_assert(struct bnx2x *bp)
348 {
349         char last_idx;
350         int i, rc = 0;
351         u32 row0, row1, row2, row3;
352
353         /* XSTORM */
354         last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
355                            XSTORM_ASSERT_LIST_INDEX_OFFSET);
356         if (last_idx)
357                 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
358
359         /* print the asserts */
360         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
361
362                 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
363                               XSTORM_ASSERT_LIST_OFFSET(i));
364                 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
365                               XSTORM_ASSERT_LIST_OFFSET(i) + 4);
366                 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
367                               XSTORM_ASSERT_LIST_OFFSET(i) + 8);
368                 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
369                               XSTORM_ASSERT_LIST_OFFSET(i) + 12);
370
371                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
372                         BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
373                                   " 0x%08x 0x%08x 0x%08x\n",
374                                   i, row3, row2, row1, row0);
375                         rc++;
376                 } else {
377                         break;
378                 }
379         }
380
381         /* TSTORM */
382         last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
383                            TSTORM_ASSERT_LIST_INDEX_OFFSET);
384         if (last_idx)
385                 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
386
387         /* print the asserts */
388         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
389
390                 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
391                               TSTORM_ASSERT_LIST_OFFSET(i));
392                 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
393                               TSTORM_ASSERT_LIST_OFFSET(i) + 4);
394                 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
395                               TSTORM_ASSERT_LIST_OFFSET(i) + 8);
396                 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
397                               TSTORM_ASSERT_LIST_OFFSET(i) + 12);
398
399                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
400                         BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
401                                   " 0x%08x 0x%08x 0x%08x\n",
402                                   i, row3, row2, row1, row0);
403                         rc++;
404                 } else {
405                         break;
406                 }
407         }
408
409         /* CSTORM */
410         last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
411                            CSTORM_ASSERT_LIST_INDEX_OFFSET);
412         if (last_idx)
413                 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
414
415         /* print the asserts */
416         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
417
418                 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
419                               CSTORM_ASSERT_LIST_OFFSET(i));
420                 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
421                               CSTORM_ASSERT_LIST_OFFSET(i) + 4);
422                 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
423                               CSTORM_ASSERT_LIST_OFFSET(i) + 8);
424                 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
425                               CSTORM_ASSERT_LIST_OFFSET(i) + 12);
426
427                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
428                         BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
429                                   " 0x%08x 0x%08x 0x%08x\n",
430                                   i, row3, row2, row1, row0);
431                         rc++;
432                 } else {
433                         break;
434                 }
435         }
436
437         /* USTORM */
438         last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
439                            USTORM_ASSERT_LIST_INDEX_OFFSET);
440         if (last_idx)
441                 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
442
443         /* print the asserts */
444         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
445
446                 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
447                               USTORM_ASSERT_LIST_OFFSET(i));
448                 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
449                               USTORM_ASSERT_LIST_OFFSET(i) + 4);
450                 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
451                               USTORM_ASSERT_LIST_OFFSET(i) + 8);
452                 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
453                               USTORM_ASSERT_LIST_OFFSET(i) + 12);
454
455                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
456                         BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
457                                   " 0x%08x 0x%08x 0x%08x\n",
458                                   i, row3, row2, row1, row0);
459                         rc++;
460                 } else {
461                         break;
462                 }
463         }
464
465         return rc;
466 }
467
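/* Print the firmware dump area of the MCP scratchpad to the kernel log */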
468 static void bnx2x_fw_dump(struct bnx2x *bp)
469 {
470         u32 mark, offset;
471         u32 data[9];
472         int word;
473
474         mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
475         mark = ((mark + 0x3) & ~0x3);
476         printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);
477
478         for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
479                 for (word = 0; word < 8; word++)
480                         data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
481                                                   offset + 4*word));
482                 data[8] = 0x0;
483                 printk(KERN_CONT "%s", (char *)data);
484         }
485         for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
486                 for (word = 0; word < 8; word++)
487                         data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
488                                                   offset + 4*word));
489                 data[8] = 0x0;
490                 printk(KERN_CONT "%s", (char *)data);
491         }
492         printk("\n" KERN_ERR PFX "end of fw dump\n");
493 }
494
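/* Dump driver state on a fatal error: per-queue indices and ring contents,
 * default status block indices, the firmware dump and the STORM asserts.
 */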
495 static void bnx2x_panic_dump(struct bnx2x *bp)
496 {
497         int i;
498         u16 j, start, end;
499
500         bp->stats_state = STATS_STATE_DISABLED;
501         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
502
503         BNX2X_ERR("begin crash dump -----------------\n");
504
505         for_each_queue(bp, i) {
506                 struct bnx2x_fastpath *fp = &bp->fp[i];
507                 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
508
509                 BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
510                           "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
511                           i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
512                           fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
513                 BNX2X_ERR("          rx_bd_prod(%x)  rx_bd_cons(%x)"
514                           "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
515                           "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
516                           fp->rx_bd_prod, fp->rx_bd_cons,
517                           le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
518                           fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
519                 BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
520                           "  fp_c_idx(%x)  *sb_c_idx(%x)  fp_u_idx(%x)"
521                           "  *sb_u_idx(%x)  bd data(%x,%x)\n",
522                           fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
523                           fp->status_blk->c_status_block.status_block_index,
524                           fp->fp_u_idx,
525                           fp->status_blk->u_status_block.status_block_index,
526                           hw_prods->packets_prod, hw_prods->bds_prod);
527
528                 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
529                 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
530                 for (j = start; j < end; j++) {
531                         struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
532
533                         BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
534                                   sw_bd->skb, sw_bd->first_bd);
535                 }
536
537                 start = TX_BD(fp->tx_bd_cons - 10);
538                 end = TX_BD(fp->tx_bd_cons + 254);
539                 for (j = start; j < end; j++) {
540                         u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
541
542                         BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
543                                   j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
544                 }
545
546                 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
547                 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
548                 for (j = start; j < end; j++) {
549                         u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
550                         struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
551
552                         BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
553                                   j, rx_bd[1], rx_bd[0], sw_bd->skb);
554                 }
555
556                 start = RX_SGE(fp->rx_sge_prod);
557                 end = RX_SGE(fp->last_max_sge);
558                 for (j = start; j < end; j++) {
559                         u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
560                         struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
561
562                         BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
563                                   j, rx_sge[1], rx_sge[0], sw_page->page);
564                 }
565
566                 start = RCQ_BD(fp->rx_comp_cons - 10);
567                 end = RCQ_BD(fp->rx_comp_cons + 503);
568                 for (j = start; j < end; j++) {
569                         u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
570
571                         BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
572                                   j, cqe[0], cqe[1], cqe[2], cqe[3]);
573                 }
574         }
575
576         BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
577                   "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
578                   "  spq_prod_idx(%u)\n",
579                   bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
580                   bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
581
582         bnx2x_fw_dump(bp);
583         bnx2x_mc_assert(bp);
584         BNX2X_ERR("end crash dump -----------------\n");
585 }
586
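/* Enable host interrupts in the HC config register for this port (MSI-X or
 * INT#A mode); on E1H also program the leading/trailing edge registers.
 */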
587 static void bnx2x_int_enable(struct bnx2x *bp)
588 {
589         int port = BP_PORT(bp);
590         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
591         u32 val = REG_RD(bp, addr);
592         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
593
594         if (msix) {
595                 val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
596                 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
597                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
598         } else {
599                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
600                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
601                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
602                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
603
604                 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
605                    val, port, addr, msix);
606
607                 REG_WR(bp, addr, val);
608
609                 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
610         }
611
612         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
613            val, port, addr, msix);
614
615         REG_WR(bp, addr, val);
616
617         if (CHIP_IS_E1H(bp)) {
618                 /* init leading/trailing edge */
619                 if (IS_E1HMF(bp)) {
620                         val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
621                         if (bp->port.pmf)
622                                 /* enable nig attention */
623                                 val |= 0x0100;
624                 } else
625                         val = 0xffff;
626
627                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
628                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
629         }
630 }
631
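/* Mask all interrupt sources in the HC config register for this port */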
632 static void bnx2x_int_disable(struct bnx2x *bp)
633 {
634         int port = BP_PORT(bp);
635         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
636         u32 val = REG_RD(bp, addr);
637
638         val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
639                  HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
640                  HC_CONFIG_0_REG_INT_LINE_EN_0 |
641                  HC_CONFIG_0_REG_ATTN_BIT_EN_0);
642
643         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
644            val, port, addr);
645
646         REG_WR(bp, addr, val);
647         if (REG_RD(bp, addr) != val)
648                 BNX2X_ERR("BUG! proper val not read from IGU!\n");
649 }
650
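/* Stop interrupt handling: bump intr_sem, optionally mask the HW, wait for
 * in-flight ISRs to finish and make sure sp_task is not running.
 */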
651 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
652 {
653         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
654         int i;
655
656         /* disable interrupt handling */
657         atomic_inc(&bp->intr_sem);
658         if (disable_hw)
659                 /* prevent the HW from sending interrupts */
660                 bnx2x_int_disable(bp);
661
662         /* make sure all ISRs are done */
663         if (msix) {
664                 for_each_queue(bp, i)
665                         synchronize_irq(bp->msix_table[i].vector);
666
667                 /* one more for the Slow Path IRQ */
668                 synchronize_irq(bp->msix_table[i].vector);
669         } else
670                 synchronize_irq(bp->pdev->irq);
671
672         /* make sure sp_task is not running */
673         cancel_delayed_work(&bp->sp_task);
674         flush_workqueue(bnx2x_wq);
675 }
676
677 /* fast path */
678
679 /*
680  * General service functions
681  */
682
683 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
684                                 u8 storm, u16 index, u8 op, u8 update)
685 {
686         u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
687                        COMMAND_REG_INT_ACK);
688         struct igu_ack_register igu_ack;
689
690         igu_ack.status_block_index = index;
691         igu_ack.sb_id_and_flags =
692                         ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
693                          (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
694                          (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
695                          (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
696
697         DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
698            (*(u32 *)&igu_ack), hc_addr);
699         REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
700 }
701
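/* Refresh the cached CSTORM/USTORM indices from the fastpath status block;
 * returns a bitmask of the indices that changed.
 */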
702 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
703 {
704         struct host_status_block *fpsb = fp->status_blk;
705         u16 rc = 0;
706
707         barrier(); /* status block is written to by the chip */
708         if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
709                 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
710                 rc |= 1;
711         }
712         if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
713                 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
714                 rc |= 2;
715         }
716         return rc;
717 }
718
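/* Read the HC SIMD mask register for this port, acknowledging the current
 * interrupt and returning its status bits.
 */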
719 static u16 bnx2x_ack_int(struct bnx2x *bp)
720 {
721         u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
722                        COMMAND_REG_SIMD_MASK);
723         u32 result = REG_RD(bp, hc_addr);
724
725         DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
726            result, hc_addr);
727
728         return result;
729 }
730
731
732 /*
733  * fast path service functions
734  */
735
736 static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
737 {
738         u16 tx_cons_sb;
739
740         /* Tell compiler that status block fields can change */
741         barrier();
742         tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
743         return (fp->tx_pkt_cons != tx_cons_sb);
744 }
745
746 static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
747 {
748         /* Tell compiler that consumer and producer can change */
749         barrier();
750         return (fp->tx_pkt_prod != fp->tx_pkt_cons);
751
752 }
753
754 /* free skb in the packet ring at pos idx
755  * return idx of last bd freed
756  */
757 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
758                              u16 idx)
759 {
760         struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
761         struct eth_tx_bd *tx_bd;
762         struct sk_buff *skb = tx_buf->skb;
763         u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
764         int nbd;
765
766         DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
767            idx, tx_buf, skb);
768
769         /* unmap first bd */
770         DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
771         tx_bd = &fp->tx_desc_ring[bd_idx];
772         pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
773                          BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
774
775         nbd = le16_to_cpu(tx_bd->nbd) - 1;
776         new_cons = nbd + tx_buf->first_bd;
777 #ifdef BNX2X_STOP_ON_ERROR
778         if (nbd > (MAX_SKB_FRAGS + 2)) {
779                 BNX2X_ERR("BAD nbd!\n");
780                 bnx2x_panic();
781         }
782 #endif
783
784         /* Skip a parse bd and the TSO split header bd
785            since they have no mapping */
786         if (nbd)
787                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
788
789         if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
790                                            ETH_TX_BD_FLAGS_TCP_CSUM |
791                                            ETH_TX_BD_FLAGS_SW_LSO)) {
792                 if (--nbd)
793                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
794                 tx_bd = &fp->tx_desc_ring[bd_idx];
795                 /* is this a TSO split header bd? */
796                 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
797                         if (--nbd)
798                                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
799                 }
800         }
801
802         /* now free frags */
803         while (nbd > 0) {
804
805                 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
806                 tx_bd = &fp->tx_desc_ring[bd_idx];
807                 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
808                                BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
809                 if (--nbd)
810                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
811         }
812
813         /* release skb */
814         WARN_ON(!skb);
815         dev_kfree_skb(skb);
816         tx_buf->first_bd = 0;
817         tx_buf->skb = NULL;
818
819         return new_cons;
820 }
821
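/* Number of Tx BDs available to start_xmit(), accounting for the
 * "next-page" BDs that never carry data.
 */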
822 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
823 {
824         s16 used;
825         u16 prod;
826         u16 cons;
827
828         barrier(); /* Tell compiler that prod and cons can change */
829         prod = fp->tx_bd_prod;
830         cons = fp->tx_bd_cons;
831
832         /* NUM_TX_RINGS = number of "next-page" entries
833            It will be used as a threshold */
834         used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
835
836 #ifdef BNX2X_STOP_ON_ERROR
837         WARN_ON(used < 0);
838         WARN_ON(used > fp->bp->tx_ring_size);
839         WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
840 #endif
841
842         return (s16)(fp->bp->tx_ring_size) - used;
843 }
844
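/* Reclaim up to 'work' completed Tx packets (per the status block consumer)
 * and wake the netdev queue if it was stopped and enough BDs are free.
 */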
845 static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
846 {
847         struct bnx2x *bp = fp->bp;
848         u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
849         int done = 0;
850
851 #ifdef BNX2X_STOP_ON_ERROR
852         if (unlikely(bp->panic))
853                 return;
854 #endif
855
856         hw_cons = le16_to_cpu(*fp->tx_cons_sb);
857         sw_cons = fp->tx_pkt_cons;
858
859         while (sw_cons != hw_cons) {
860                 u16 pkt_cons;
861
862                 pkt_cons = TX_BD(sw_cons);
863
864                 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
865
866                 DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
867                    hw_cons, sw_cons, pkt_cons);
868
869 /*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
870                         rmb();
871                         prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
872                 }
873 */
874                 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
875                 sw_cons++;
876                 done++;
877
878                 if (done == work)
879                         break;
880         }
881
882         fp->tx_pkt_cons = sw_cons;
883         fp->tx_bd_cons = bd_cons;
884
885         /* Need to make the tx_cons update visible to start_xmit()
886          * before checking for netif_queue_stopped().  Without the
887          * memory barrier, there is a small possibility that start_xmit()
888          * will miss it and cause the queue to be stopped forever.
889          */
890         smp_mb();
891
892         /* TBD need a thresh? */
893         if (unlikely(netif_queue_stopped(bp->dev))) {
894
895                 netif_tx_lock(bp->dev);
896
897                 if (netif_queue_stopped(bp->dev) &&
898                     (bp->state == BNX2X_STATE_OPEN) &&
899                     (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
900                         netif_wake_queue(bp->dev);
901
902                 netif_tx_unlock(bp->dev);
903         }
904 }
905
906
907 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
908                            union eth_rx_cqe *rr_cqe)
909 {
910         struct bnx2x *bp = fp->bp;
911         int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
912         int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
913
914         DP(BNX2X_MSG_SP,
915            "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
916            FP_IDX(fp), cid, command, bp->state,
917            rr_cqe->ramrod_cqe.ramrod_type);
918
919         bp->spq_left++;
920
921         if (FP_IDX(fp)) {
922                 switch (command | fp->state) {
923                 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
924                                                 BNX2X_FP_STATE_OPENING):
925                         DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
926                            cid);
927                         fp->state = BNX2X_FP_STATE_OPEN;
928                         break;
929
930                 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
931                         DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
932                            cid);
933                         fp->state = BNX2X_FP_STATE_HALTED;
934                         break;
935
936                 default:
937                         BNX2X_ERR("unexpected MC reply (%d)  "
938                                   "fp->state is %x\n", command, fp->state);
939                         break;
940                 }
941                 mb(); /* force bnx2x_wait_ramrod() to see the change */
942                 return;
943         }
944
945         switch (command | bp->state) {
946         case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
947                 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
948                 bp->state = BNX2X_STATE_OPEN;
949                 break;
950
951         case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
952                 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
953                 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
954                 fp->state = BNX2X_FP_STATE_HALTED;
955                 break;
956
957         case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
958                 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
959                 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
960                 break;
961
962
963         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
964         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
965                 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
966                 bp->set_mac_pending = 0;
967                 break;
968
969         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
970                 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
971                 break;
972
973         default:
974                 BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
975                           command, bp->state);
976                 break;
977         }
978         mb(); /* force bnx2x_wait_ramrod() to see the change */
979 }
980
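/* Unmap and free the SGE page at 'index' (no-op for "next page" entries) */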
981 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
982                                      struct bnx2x_fastpath *fp, u16 index)
983 {
984         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
985         struct page *page = sw_buf->page;
986         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
987
988         /* Skip "next page" elements */
989         if (!page)
990                 return;
991
992         pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
993                        SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
994         __free_pages(page, PAGES_PER_SGE_SHIFT);
995
996         sw_buf->page = NULL;
997         sge->addr_hi = 0;
998         sge->addr_lo = 0;
999 }
1000
1001 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1002                                            struct bnx2x_fastpath *fp, int last)
1003 {
1004         int i;
1005
1006         for (i = 0; i < last; i++)
1007                 bnx2x_free_rx_sge(bp, fp, i);
1008 }
1009
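/* Allocate and map a new SGE page and write its DMA address into the SGE
 * ring at 'index'.
 */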
1010 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1011                                      struct bnx2x_fastpath *fp, u16 index)
1012 {
1013         struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1014         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1015         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1016         dma_addr_t mapping;
1017
1018         if (unlikely(page == NULL))
1019                 return -ENOMEM;
1020
1021         mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
1022                                PCI_DMA_FROMDEVICE);
1023         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1024                 __free_pages(page, PAGES_PER_SGE_SHIFT);
1025                 return -ENOMEM;
1026         }
1027
1028         sw_buf->page = page;
1029         pci_unmap_addr_set(sw_buf, mapping, mapping);
1030
1031         sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1032         sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1033
1034         return 0;
1035 }
1036
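/* Allocate and map a new Rx skb and write its DMA address into the Rx BD
 * ring at 'index'.
 */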
1037 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1038                                      struct bnx2x_fastpath *fp, u16 index)
1039 {
1040         struct sk_buff *skb;
1041         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1042         struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1043         dma_addr_t mapping;
1044
1045         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1046         if (unlikely(skb == NULL))
1047                 return -ENOMEM;
1048
1049         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
1050                                  PCI_DMA_FROMDEVICE);
1051         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1052                 dev_kfree_skb(skb);
1053                 return -ENOMEM;
1054         }
1055
1056         rx_buf->skb = skb;
1057         pci_unmap_addr_set(rx_buf, mapping, mapping);
1058
1059         rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1060         rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1061
1062         return 0;
1063 }
1064
1065 /* note that we are not allocating a new skb,
1066  * we are just moving one from cons to prod
1067  * we are not creating a new mapping,
1068  * so there is no need to check for dma_mapping_error().
1069  */
1070 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1071                                struct sk_buff *skb, u16 cons, u16 prod)
1072 {
1073         struct bnx2x *bp = fp->bp;
1074         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1075         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1076         struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1077         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1078
1079         pci_dma_sync_single_for_device(bp->pdev,
1080                                        pci_unmap_addr(cons_rx_buf, mapping),
1081                                        bp->rx_offset + RX_COPY_THRESH,
1082                                        PCI_DMA_FROMDEVICE);
1083
1084         prod_rx_buf->skb = cons_rx_buf->skb;
1085         pci_unmap_addr_set(prod_rx_buf, mapping,
1086                            pci_unmap_addr(cons_rx_buf, mapping));
1087         *prod_bd = *cons_bd;
1088 }
1089
1090 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1091                                              u16 idx)
1092 {
1093         u16 last_max = fp->last_max_sge;
1094
1095         if (SUB_S16(idx, last_max) > 0)
1096                 fp->last_max_sge = idx;
1097 }
1098
1099 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1100 {
1101         int i, j;
1102
1103         for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1104                 int idx = RX_SGE_CNT * i - 1;
1105
1106                 for (j = 0; j < 2; j++) {
1107                         SGE_MASK_CLEAR_BIT(fp, idx);
1108                         idx--;
1109                 }
1110         }
1111 }
1112
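/* Clear the mask bits for the SGE pages consumed by this TPA CQE and
 * advance the SGE producer over fully consumed mask elements.
 */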
1113 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1114                                   struct eth_fast_path_rx_cqe *fp_cqe)
1115 {
1116         struct bnx2x *bp = fp->bp;
1117         u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1118                                      le16_to_cpu(fp_cqe->len_on_bd)) >>
1119                       SGE_PAGE_SHIFT;
1120         u16 last_max, last_elem, first_elem;
1121         u16 delta = 0;
1122         u16 i;
1123
1124         if (!sge_len)
1125                 return;
1126
1127         /* First mark all used pages */
1128         for (i = 0; i < sge_len; i++)
1129                 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1130
1131         DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1132            sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1133
1134         /* Here we assume that the last SGE index is the biggest */
1135         prefetch((void *)(fp->sge_mask));
1136         bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1137
1138         last_max = RX_SGE(fp->last_max_sge);
1139         last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1140         first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1141
1142         /* If ring is not full */
1143         if (last_elem + 1 != first_elem)
1144                 last_elem++;
1145
1146         /* Now update the prod */
1147         for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1148                 if (likely(fp->sge_mask[i]))
1149                         break;
1150
1151                 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1152                 delta += RX_SGE_MASK_ELEM_SZ;
1153         }
1154
1155         if (delta > 0) {
1156                 fp->rx_sge_prod += delta;
1157                 /* clear page-end entries */
1158                 bnx2x_clear_sge_mask_next_elems(fp);
1159         }
1160
1161         DP(NETIF_MSG_RX_STATUS,
1162            "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
1163            fp->last_max_sge, fp->rx_sge_prod);
1164 }
1165
1166 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1167 {
1168         /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1169         memset(fp->sge_mask, 0xff,
1170                (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1171
1172         /* Clear the two last indices in the page to 1:
1173            these are the indices that correspond to the "next" element,
1174            hence will never be indicated and should be removed from
1175            the calculations. */
1176         bnx2x_clear_sge_mask_next_elems(fp);
1177 }
1178
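/* Start a TPA aggregation: take the spare skb from the per-queue TPA pool
 * for the producer BD and park the consumer's partial skb in the pool.
 */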
1179 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1180                             struct sk_buff *skb, u16 cons, u16 prod)
1181 {
1182         struct bnx2x *bp = fp->bp;
1183         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1184         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1185         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1186         dma_addr_t mapping;
1187
1188         /* move empty skb from pool to prod and map it */
1189         prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1190         mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1191                                  bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1192         pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1193
1194         /* move partial skb from cons to pool (don't unmap yet) */
1195         fp->tpa_pool[queue] = *cons_rx_buf;
1196
1197         /* mark bin state as start - print error if current state != stop */
1198         if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1199                 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1200
1201         fp->tpa_state[queue] = BNX2X_TPA_START;
1202
1203         /* point prod_bd to new skb */
1204         prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1205         prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1206
1207 #ifdef BNX2X_STOP_ON_ERROR
1208         fp->tpa_queue_used |= (1 << queue);
1209 #ifdef __powerpc64__
1210         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1211 #else
1212         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1213 #endif
1214            fp->tpa_queue_used);
1215 #endif
1216 }
1217
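/* Attach the SGE pages listed in the CQE to the aggregated skb as page
 * fragments, replenishing each consumed SGE entry as we go.
 */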
1218 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1219                                struct sk_buff *skb,
1220                                struct eth_fast_path_rx_cqe *fp_cqe,
1221                                u16 cqe_idx)
1222 {
1223         struct sw_rx_page *rx_pg, old_rx_pg;
1224         u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1225         u32 i, frag_len, frag_size, pages;
1226         int err;
1227         int j;
1228
1229         frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1230         pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1231
1232         /* This is needed in order to enable forwarding support */
1233         if (frag_size)
1234                 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1235                                                max(frag_size, (u32)len_on_bd));
1236
1237 #ifdef BNX2X_STOP_ON_ERROR
1238         if (pages >
1239             min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
1240                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1241                           pages, cqe_idx);
1242                 BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
1243                           fp_cqe->pkt_len, len_on_bd);
1244                 bnx2x_panic();
1245                 return -EINVAL;
1246         }
1247 #endif
1248
1249         /* Run through the SGL and compose the fragmented skb */
1250         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1251                 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1252
1253                 /* FW gives the indices of the SGE as if the ring is an array
1254                    (meaning that "next" element will consume 2 indices) */
1255                 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1256                 rx_pg = &fp->rx_page_ring[sge_idx];
1257                 old_rx_pg = *rx_pg;
1258
1259                 /* If we fail to allocate a substitute page, we simply stop
1260                    where we are and drop the whole packet */
1261                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1262                 if (unlikely(err)) {
1263                         bp->eth_stats.rx_skb_alloc_failed++;
1264                         return err;
1265                 }
1266
1267                 /* Unmap the page as we are going to pass it to the stack */
1268                 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1269                               SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1270
1271                 /* Add one frag and update the appropriate fields in the skb */
1272                 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1273
1274                 skb->data_len += frag_len;
1275                 skb->truesize += frag_len;
1276                 skb->len += frag_len;
1277
1278                 frag_size -= frag_len;
1279         }
1280
1281         return 0;
1282 }
1283
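/* Complete a TPA aggregation: fix the IP checksum, attach the SGE fragments
 * and pass the skb up the stack (dropping it on allocation failure), then
 * mark the pool entry BNX2X_TPA_STOP again.
 */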
1284 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1285                            u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1286                            u16 cqe_idx)
1287 {
1288         struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1289         struct sk_buff *skb = rx_buf->skb;
1290         /* alloc new skb */
1291         struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1292
1293         /* Unmap skb in the pool anyway, as we are going to change
1294            pool entry status to BNX2X_TPA_STOP even if new skb allocation
1295            fails. */
1296         pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1297                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1298
1299         if (likely(new_skb)) {
1300                 /* fix ip xsum and give it to the stack */
1301                 /* (no need to map the new skb) */
1302 #ifdef BCM_VLAN
1303                 int is_vlan_cqe =
1304                         (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1305                          PARSING_FLAGS_VLAN);
1306                 int is_not_hwaccel_vlan_cqe =
1307                         (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1308 #endif
1309
1310                 prefetch(skb);
1311                 prefetch(((char *)(skb)) + 128);
1312
1313 #ifdef BNX2X_STOP_ON_ERROR
1314                 if (pad + len > bp->rx_buf_size) {
1315                         BNX2X_ERR("skb_put is about to fail...  "
1316                                   "pad %d  len %d  rx_buf_size %d\n",
1317                                   pad, len, bp->rx_buf_size);
1318                         bnx2x_panic();
1319                         return;
1320                 }
1321 #endif
1322
1323                 skb_reserve(skb, pad);
1324                 skb_put(skb, len);
1325
1326                 skb->protocol = eth_type_trans(skb, bp->dev);
1327                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1328
1329                 {
1330                         struct iphdr *iph;
1331
1332                         iph = (struct iphdr *)skb->data;
1333 #ifdef BCM_VLAN
1334                         /* If there is no Rx VLAN offloading -
1335                            take the VLAN tag into account */
1336                         if (unlikely(is_not_hwaccel_vlan_cqe))
1337                                 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1338 #endif
1339                         iph->check = 0;
1340                         iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1341                 }
1342
1343                 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1344                                          &cqe->fast_path_cqe, cqe_idx)) {
1345 #ifdef BCM_VLAN
1346                         if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1347                             (!is_not_hwaccel_vlan_cqe))
1348                                 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1349                                                 le16_to_cpu(cqe->fast_path_cqe.
1350                                                             vlan_tag));
1351                         else
1352 #endif
1353                                 netif_receive_skb(skb);
1354                 } else {
1355                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1356                            " - dropping packet!\n");
1357                         dev_kfree_skb(skb);
1358                 }
1359
1360
1361                 /* put new skb in bin */
1362                 fp->tpa_pool[queue].skb = new_skb;
1363
1364         } else {
1365                 /* else drop the packet and keep the buffer in the bin */
1366                 DP(NETIF_MSG_RX_STATUS,
1367                    "Failed to allocate new skb - dropping packet!\n");
1368                 bp->eth_stats.rx_skb_alloc_failed++;
1369         }
1370
1371         fp->tpa_state[queue] = BNX2X_TPA_STOP;
1372 }
1373
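/* Publish the Rx BD, CQE and SGE producers to the TSTORM so the FW can use
 * the newly posted buffers.
 */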
1374 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1375                                         struct bnx2x_fastpath *fp,
1376                                         u16 bd_prod, u16 rx_comp_prod,
1377                                         u16 rx_sge_prod)
1378 {
1379         struct tstorm_eth_rx_producers rx_prods = {0};
1380         int i;
1381
1382         /* Update producers */
1383         rx_prods.bd_prod = bd_prod;
1384         rx_prods.cqe_prod = rx_comp_prod;
1385         rx_prods.sge_prod = rx_sge_prod;
1386
1387         /*
1388          * Make sure that the BD and SGE data is updated before updating the
1389          * producers since FW might read the BD/SGE right after the producer
1390          * is updated.
1391          * This is only applicable for weak-ordered memory model archs such
1392          * as IA-64. The following barrier is also mandatory since FW
1393          * assumes BDs must have buffers.
1394          */
1395         wmb();
1396
1397         for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
1398                 REG_WR(bp, BAR_TSTRORM_INTMEM +
1399                        TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
1400                        ((u32 *)&rx_prods)[i]);
1401
1402         mmiowb(); /* keep prod updates ordered */
1403
1404         DP(NETIF_MSG_RX_STATUS,
1405            "Wrote: bd_prod %u  cqe_prod %u  sge_prod %u\n",
1406            bd_prod, rx_comp_prod, rx_sge_prod);
1407 }
1408
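/* Rx fastpath poll: process up to 'budget' completions from the RCQ,
 * handling slowpath events, TPA start/stop and regular packets.
 */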
1409 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1410 {
1411         struct bnx2x *bp = fp->bp;
1412         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1413         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1414         int rx_pkt = 0;
1415
1416 #ifdef BNX2X_STOP_ON_ERROR
1417         if (unlikely(bp->panic))
1418                 return 0;
1419 #endif
1420
1421         /* CQ "next element" is of the size of the regular element,
1422            that's why it's ok here */
1423         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1424         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1425                 hw_comp_cons++;
1426
1427         bd_cons = fp->rx_bd_cons;
1428         bd_prod = fp->rx_bd_prod;
1429         bd_prod_fw = bd_prod;
1430         sw_comp_cons = fp->rx_comp_cons;
1431         sw_comp_prod = fp->rx_comp_prod;
1432
1433         /* Memory barrier necessary as speculative reads of the rx
1434          * buffer can be ahead of the index in the status block
1435          */
1436         rmb();
1437
1438         DP(NETIF_MSG_RX_STATUS,
1439            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1440            FP_IDX(fp), hw_comp_cons, sw_comp_cons);
1441
1442         while (sw_comp_cons != hw_comp_cons) {
1443                 struct sw_rx_bd *rx_buf = NULL;
1444                 struct sk_buff *skb;
1445                 union eth_rx_cqe *cqe;
1446                 u8 cqe_fp_flags;
1447                 u16 len, pad;
1448
1449                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1450                 bd_prod = RX_BD(bd_prod);
1451                 bd_cons = RX_BD(bd_cons);
1452
1453                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1454                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1455
1456                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1457                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1458                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1459                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1460                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1461                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1462
1463                 /* is this a slowpath msg? */
1464                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1465                         bnx2x_sp_event(fp, cqe);
1466                         goto next_cqe;
1467
1468                 /* this is an rx packet */
1469                 } else {
1470                         rx_buf = &fp->rx_buf_ring[bd_cons];
1471                         skb = rx_buf->skb;
1472                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1473                         pad = cqe->fast_path_cqe.placement_offset;
1474
1475                         /* If CQE is marked both TPA_START and TPA_END
1476                            it is a non-TPA CQE */
1477                         if ((!fp->disable_tpa) &&
1478                             (TPA_TYPE(cqe_fp_flags) !=
1479                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1480                                 u16 queue = cqe->fast_path_cqe.queue_index;
1481
1482                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1483                                         DP(NETIF_MSG_RX_STATUS,
1484                                            "calling tpa_start on queue %d\n",
1485                                            queue);
1486
1487                                         bnx2x_tpa_start(fp, queue, skb,
1488                                                         bd_cons, bd_prod);
1489                                         goto next_rx;
1490                                 }
1491
1492                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1493                                         DP(NETIF_MSG_RX_STATUS,
1494                                            "calling tpa_stop on queue %d\n",
1495                                            queue);
1496
1497                                         if (!BNX2X_RX_SUM_FIX(cqe))
1498                                                 BNX2X_ERR("STOP on non-TCP "
1499                                                           "data\n");
1500
1501                                         /* This is the size of the linear
1502                                            data on this skb */
1503                                         len = le16_to_cpu(cqe->fast_path_cqe.
1504                                                                 len_on_bd);
1505                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1506                                                     len, cqe, comp_ring_cons);
1507 #ifdef BNX2X_STOP_ON_ERROR
1508                                         if (bp->panic)
1509                                                 return -EINVAL;
1510 #endif
1511
1512                                         bnx2x_update_sge_prod(fp,
1513                                                         &cqe->fast_path_cqe);
1514                                         goto next_cqe;
1515                                 }
1516                         }
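
                        /* The TPA path above parks the current skb in the
                           per-queue tpa pool on TPA_START and hands the
                           aggregated skb up the stack on TPA_END (see
                           bnx2x_tpa_stop() earlier in this file) */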
1517
1518                         pci_dma_sync_single_for_device(bp->pdev,
1519                                         pci_unmap_addr(rx_buf, mapping),
1520                                                        pad + RX_COPY_THRESH,
1521                                                        PCI_DMA_FROMDEVICE);
1522                         prefetch(skb);
1523                         prefetch(((char *)(skb)) + 128);
1524
1525                         /* is this an error packet? */
1526                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1527                                 DP(NETIF_MSG_RX_ERR,
1528                                    "ERROR  flags %x  rx packet %u\n",
1529                                    cqe_fp_flags, sw_comp_cons);
1530                                 bp->eth_stats.rx_err_discard_pkt++;
1531                                 goto reuse_rx;
1532                         }
1533
1534                         /* Since we don't have a jumbo ring,
1535                          * copy small packets if mtu > 1500
1536                          */
1537                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1538                             (len <= RX_COPY_THRESH)) {
1539                                 struct sk_buff *new_skb;
1540
1541                                 new_skb = netdev_alloc_skb(bp->dev,
1542                                                            len + pad);
1543                                 if (new_skb == NULL) {
1544                                         DP(NETIF_MSG_RX_ERR,
1545                                            "ERROR  packet dropped "
1546                                            "because of alloc failure\n");
1547                                         bp->eth_stats.rx_skb_alloc_failed++;
1548                                         goto reuse_rx;
1549                                 }
1550
1551                                 /* aligned copy */
1552                                 skb_copy_from_linear_data_offset(skb, pad,
1553                                                     new_skb->data + pad, len);
1554                                 skb_reserve(new_skb, pad);
1555                                 skb_put(new_skb, len);
1556
1557                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1558
1559                                 skb = new_skb;
1560
1561                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1562                                 pci_unmap_single(bp->pdev,
1563                                         pci_unmap_addr(rx_buf, mapping),
1564                                                  bp->rx_buf_size,
1565                                                  PCI_DMA_FROMDEVICE);
1566                                 skb_reserve(skb, pad);
1567                                 skb_put(skb, len);
1568
1569                         } else {
1570                                 DP(NETIF_MSG_RX_ERR,
1571                                    "ERROR  packet dropped because "
1572                                    "of alloc failure\n");
1573                                 bp->eth_stats.rx_skb_alloc_failed++;
1574 reuse_rx:
1575                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1576                                 goto next_rx;
1577                         }
1578
1579                         skb->protocol = eth_type_trans(skb, bp->dev);
1580
1581                         skb->ip_summed = CHECKSUM_NONE;
1582                         if (bp->rx_csum) {
1583                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1584                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1585                                 else
1586                                         bp->eth_stats.hw_csum_err++;
1587                         }
1588                 }
1589
1590 #ifdef BCM_VLAN
1591                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1592                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1593                      PARSING_FLAGS_VLAN))
1594                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1595                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1596                 else
1597 #endif
1598                         netif_receive_skb(skb);
1599
1600
1601 next_rx:
1602                 rx_buf->skb = NULL;
1603
1604                 bd_cons = NEXT_RX_IDX(bd_cons);
1605                 bd_prod = NEXT_RX_IDX(bd_prod);
1606                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1607                 rx_pkt++;
1608 next_cqe:
1609                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1610                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1611
1612                 if (rx_pkt == budget)
1613                         break;
1614         } /* while */
1615
1616         fp->rx_bd_cons = bd_cons;
1617         fp->rx_bd_prod = bd_prod_fw;
1618         fp->rx_comp_cons = sw_comp_cons;
1619         fp->rx_comp_prod = sw_comp_prod;
1620
1621         /* Update producers */
1622         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1623                              fp->rx_sge_prod);
1624
1625         fp->rx_pkt += rx_pkt;
1626         fp->rx_calls++;
1627
1628         return rx_pkt;
1629 }
1630
1631 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1632 {
1633         struct bnx2x_fastpath *fp = fp_cookie;
1634         struct bnx2x *bp = fp->bp;
1635         int index = FP_IDX(fp);
1636
1637         /* Return here if interrupt is disabled */
1638         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1639                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1640                 return IRQ_HANDLED;
1641         }
1642
1643         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1644            index, FP_SB_ID(fp));
1645         bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
1646
1647 #ifdef BNX2X_STOP_ON_ERROR
1648         if (unlikely(bp->panic))
1649                 return IRQ_HANDLED;
1650 #endif
1651
1652         prefetch(fp->rx_cons_sb);
1653         prefetch(fp->tx_cons_sb);
1654         prefetch(&fp->status_blk->c_status_block.status_block_index);
1655         prefetch(&fp->status_blk->u_status_block.status_block_index);
1656
1657         netif_rx_schedule(&bnx2x_fp(bp, index, napi));
1658
1659         return IRQ_HANDLED;
1660 }
1661
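/*
 * INT#A handler.  In this mode status bit 0 indicates slowpath (default
 * status block) work, which is deferred to the sp_task workqueue, while
 * bit (1 + sb_id) of fastpath 0 schedules its NAPI context (hence the
 * "0x2 << sb_id" mask below).
 */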
1662 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1663 {
1664         struct net_device *dev = dev_instance;
1665         struct bnx2x *bp = netdev_priv(dev);
1666         u16 status = bnx2x_ack_int(bp);
1667         u16 mask;
1668
1669         /* Return here if interrupt is shared and it's not for us */
1670         if (unlikely(status == 0)) {
1671                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1672                 return IRQ_NONE;
1673         }
1674         DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);
1675
1676         /* Return here if interrupt is disabled */
1677         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1678                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1679                 return IRQ_HANDLED;
1680         }
1681
1682 #ifdef BNX2X_STOP_ON_ERROR
1683         if (unlikely(bp->panic))
1684                 return IRQ_HANDLED;
1685 #endif
1686
1687         mask = 0x2 << bp->fp[0].sb_id;
1688         if (status & mask) {
1689                 struct bnx2x_fastpath *fp = &bp->fp[0];
1690
1691                 prefetch(fp->rx_cons_sb);
1692                 prefetch(fp->tx_cons_sb);
1693                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1694                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1695
1696                 netif_rx_schedule(&bnx2x_fp(bp, 0, napi));
1697
1698                 status &= ~mask;
1699         }
1700
1701
1702         if (unlikely(status & 0x1)) {
1703                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1704
1705                 status &= ~0x1;
1706                 if (!status)
1707                         return IRQ_HANDLED;
1708         }
1709
1710         if (status)
1711                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1712                    status);
1713
1714         return IRQ_HANDLED;
1715 }
1716
1717 /* end of fast path */
1718
1719 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1720
1721 /* Link */
1722
1723 /*
1724  * General service functions
1725  */
1726
1727 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1728 {
1729         u32 lock_status;
1730         u32 resource_bit = (1 << resource);
1731         int func = BP_FUNC(bp);
1732         u32 hw_lock_control_reg;
1733         int cnt;
1734
1735         /* Validating that the resource is within range */
1736         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1737                 DP(NETIF_MSG_HW,
1738                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1739                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1740                 return -EINVAL;
1741         }
1742
1743         if (func <= 5) {
1744                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1745         } else {
1746                 hw_lock_control_reg =
1747                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1748         }
1749
1750         /* Validating that the resource is not already taken */
1751         lock_status = REG_RD(bp, hw_lock_control_reg);
1752         if (lock_status & resource_bit) {
1753                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1754                    lock_status, resource_bit);
1755                 return -EEXIST;
1756         }
1757
1758         /* Try for 5 seconds, polling every 5ms */
1759         for (cnt = 0; cnt < 1000; cnt++) {
1760                 /* Try to acquire the lock */
1761                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1762                 lock_status = REG_RD(bp, hw_lock_control_reg);
1763                 if (lock_status & resource_bit)
1764                         return 0;
1765
1766                 msleep(5);
1767         }
1768         DP(NETIF_MSG_HW, "Timeout\n");
1769         return -EAGAIN;
1770 }
1771
1772 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1773 {
1774         u32 lock_status;
1775         u32 resource_bit = (1 << resource);
1776         int func = BP_FUNC(bp);
1777         u32 hw_lock_control_reg;
1778
1779         /* Validating that the resource is within range */
1780         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1781                 DP(NETIF_MSG_HW,
1782                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1783                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1784                 return -EINVAL;
1785         }
1786
1787         if (func <= 5) {
1788                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1789         } else {
1790                 hw_lock_control_reg =
1791                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1792         }
1793
1794         /* Validating that the resource is currently taken */
1795         lock_status = REG_RD(bp, hw_lock_control_reg);
1796         if (!(lock_status & resource_bit)) {
1797                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1798                    lock_status, resource_bit);
1799                 return -EFAULT;
1800         }
1801
1802         REG_WR(bp, hw_lock_control_reg, resource_bit);
1803         return 0;
1804 }
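
/*
 * Typical acquire/modify/release pairing around a shared MISC resource,
 * as done by bnx2x_set_gpio()/bnx2x_set_spio() below (sketch):
 *
 *	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *	gpio_reg = REG_RD(bp, MISC_REG_GPIO);
 *	... modify gpio_reg ...
 *	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
 *	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 */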
1805
1806 /* HW Lock for shared dual port PHYs */
1807 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1808 {
1809         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1810
1811         mutex_lock(&bp->port.phy_mutex);
1812
1813         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1814             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1815                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1816 }
1817
1818 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1819 {
1820         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1821
1822         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1823             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1824                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1825
1826         mutex_unlock(&bp->port.phy_mutex);
1827 }
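
/*
 * Callers bracket every PHY/MDIO access with the pair above; see e.g.
 * bnx2x_initial_phy_init(), bnx2x_link_set() and bnx2x_link_test()
 * further down in this file.
 */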
1828
1829 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1830 {
1831         /* The GPIO should be swapped if swap register is set and active */
1832         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1833                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1834         int gpio_shift = gpio_num +
1835                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1836         u32 gpio_mask = (1 << gpio_shift);
1837         u32 gpio_reg;
1838
1839         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1840                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1841                 return -EINVAL;
1842         }
1843
1844         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1845         /* read GPIO and mask except the float bits */
1846         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1847
1848         switch (mode) {
1849         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1850                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1851                    gpio_num, gpio_shift);
1852                 /* clear FLOAT and set CLR */
1853                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1854                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1855                 break;
1856
1857         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1858                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1859                    gpio_num, gpio_shift);
1860                 /* clear FLOAT and set SET */
1861                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1862                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1863                 break;
1864
1865         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1866                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1867                    gpio_num, gpio_shift);
1868                 /* set FLOAT */
1869                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1870                 break;
1871
1872         default:
1873                 break;
1874         }
1875
1876         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1877         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1878
1879         return 0;
1880 }
1881
1882 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1883 {
1884         u32 spio_mask = (1 << spio_num);
1885         u32 spio_reg;
1886
1887         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1888             (spio_num > MISC_REGISTERS_SPIO_7)) {
1889                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1890                 return -EINVAL;
1891         }
1892
1893         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1894         /* read SPIO and mask except the float bits */
1895         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1896
1897         switch (mode) {
1898         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1899                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1900                 /* clear FLOAT and set CLR */
1901                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1902                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1903                 break;
1904
1905         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1906                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1907                 /* clear FLOAT and set SET */
1908                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1909                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1910                 break;
1911
1912         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1913                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1914                 /* set FLOAT */
1915                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1916                 break;
1917
1918         default:
1919                 break;
1920         }
1921
1922         REG_WR(bp, MISC_REG_SPIO, spio_reg);
1923         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1924
1925         return 0;
1926 }
1927
1928 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
1929 {
1930         switch (bp->link_vars.ieee_fc &
1931                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
1932         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1933                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1934                                           ADVERTISED_Pause);
1935                 break;
1936         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1937                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
1938                                          ADVERTISED_Pause);
1939                 break;
1940         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1941                 bp->port.advertising |= ADVERTISED_Asym_Pause;
1942                 break;
1943         default:
1944                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1945                                           ADVERTISED_Pause);
1946                 break;
1947         }
1948 }
1949
1950 static void bnx2x_link_report(struct bnx2x *bp)
1951 {
1952         if (bp->link_vars.link_up) {
1953                 if (bp->state == BNX2X_STATE_OPEN)
1954                         netif_carrier_on(bp->dev);
1955                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
1956
1957                 printk("%d Mbps ", bp->link_vars.line_speed);
1958
1959                 if (bp->link_vars.duplex == DUPLEX_FULL)
1960                         printk("full duplex");
1961                 else
1962                         printk("half duplex");
1963
1964                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
1965                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
1966                                 printk(", receive ");
1967                                 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1968                                         printk("& transmit ");
1969                         } else {
1970                                 printk(", transmit ");
1971                         }
1972                         printk("flow control ON");
1973                 }
1974                 printk("\n");
1975
1976         } else { /* link_down */
1977                 netif_carrier_off(bp->dev);
1978                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
1979         }
1980 }
1981
1982 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1983 {
1984         if (!BP_NOMCP(bp)) {
1985                 u8 rc;
1986
1987                 /* Initialize link parameters structure variables */
1988                 /* It is recommended to turn off RX FC for jumbo frames
1989                    for better performance */
1990                 if (IS_E1HMF(bp))
1991                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1992                 else if (bp->dev->mtu > 5000)
1993                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
1994                 else
1995                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1996
1997                 bnx2x_acquire_phy_lock(bp);
1998                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1999                 bnx2x_release_phy_lock(bp);
2000
2001                 bnx2x_calc_fc_adv(bp);
2002
2003                 if (bp->link_vars.link_up)
2004                         bnx2x_link_report(bp);
2005
2006
2007                 return rc;
2008         }
2009         BNX2X_ERR("Bootcode is missing - not initializing link\n");
2010         return -EINVAL;
2011 }
2012
2013 static void bnx2x_link_set(struct bnx2x *bp)
2014 {
2015         if (!BP_NOMCP(bp)) {
2016                 bnx2x_acquire_phy_lock(bp);
2017                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2018                 bnx2x_release_phy_lock(bp);
2019
2020                 bnx2x_calc_fc_adv(bp);
2021         } else
2022                 BNX2X_ERR("Bootcode is missing - not setting link\n");
2023 }
2024
2025 static void bnx2x__link_reset(struct bnx2x *bp)
2026 {
2027         if (!BP_NOMCP(bp)) {
2028                 bnx2x_acquire_phy_lock(bp);
2029                 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
2030                 bnx2x_release_phy_lock(bp);
2031         } else
2032                 BNX2X_ERR("Bootcode is missing - not resetting link\n");
2033 }
2034
2035 static u8 bnx2x_link_test(struct bnx2x *bp)
2036 {
2037         u8 rc;
2038
2039         bnx2x_acquire_phy_lock(bp);
2040         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2041         bnx2x_release_phy_lock(bp);
2042
2043         return rc;
2044 }
2045
2046 /* Calculates the sum of vn_min_rates.
2047    It's needed for further normalizing of the min_rates.
2048
2049    Returns:
2050      sum of vn_min_rates
2051        or
2052      0 - if all the min_rates are 0.
2053      In the latter case the fairness algorithm should be deactivated.
2054      If not all min_rates are zero then those that are zero will
2055      be set to 1.
2056  */
2057 static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2058 {
2059         int i, port = BP_PORT(bp);
2060         u32 wsum = 0;
2061         int all_zero = 1;
2062
2063         for (i = 0; i < E1HVN_MAX; i++) {
2064                 u32 vn_cfg =
2065                         SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2066                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2067                                      FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2068                 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2069                         /* If min rate is zero - set it to 1 */
2070                         if (!vn_min_rate)
2071                                 vn_min_rate = DEF_MIN_RATE;
2072                         else
2073                                 all_zero = 0;
2074
2075                         wsum += vn_min_rate;
2076                 }
2077         }
2078
2079         /* ... only if all min rates are zeros - disable FAIRNESS */
2080         if (all_zero)
2081                 return 0;
2082
2083         return wsum;
2084 }
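
/*
 * Illustrative example (numbers made up): with per-vn min BW fields of
 * 10, 0, 30 and 20 and no hidden functions, the per-vn rates above become
 * 1000, DEF_MIN_RATE, 3000 and 2000 and wsum is their sum; only when every
 * field is zero does the function return 0 and fairness stays disabled.
 */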
2085
2086 static void bnx2x_init_port_minmax(struct bnx2x *bp,
2087                                    int en_fness,
2088                                    u16 port_rate,
2089                                    struct cmng_struct_per_port *m_cmng_port)
2090 {
2091         u32 r_param = port_rate / 8;
2092         int port = BP_PORT(bp);
2093         int i;
2094
2095         memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2096
2097         /* Enable minmax only if we are in e1hmf mode */
2098         if (IS_E1HMF(bp)) {
2099                 u32 fair_periodic_timeout_usec;
2100                 u32 t_fair;
2101
2102                 /* Enable rate shaping and fairness */
2103                 m_cmng_port->flags.cmng_vn_enable = 1;
2104                 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2105                 m_cmng_port->flags.rate_shaping_enable = 1;
2106
2107                 if (!en_fness)
2108                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
2109                            " fairness will be disabled\n");
2110
2111                 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2112                 m_cmng_port->rs_vars.rs_periodic_timeout =
2113                                                 RS_PERIODIC_TIMEOUT_USEC / 4;
2114
2115                 /* this is the threshold below which no timer arming will occur.
2116                    The 1.25 coefficient makes the threshold a little bigger
2117                    than the real time, to compensate for timer inaccuracy */
2118                 m_cmng_port->rs_vars.rs_threshold =
2119                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2120
2121                 /* resolution of fairness timer */
2122                 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2123                 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2124                 t_fair = T_FAIR_COEF / port_rate;
2125
2126                 /* this is the threshold below which we won't arm
2127                    the timer anymore */
2128                 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2129
2130                 /* we multiply by 1e3/8 to get bytes/msec.
2131                    We don't want the credit to exceed
2132                    T_FAIR*FAIR_MEM (the algorithm resolution) */
2133                 m_cmng_port->fair_vars.upper_bound =
2134                                                 r_param * t_fair * FAIR_MEM;
2135                 /* since each tick is 4 usec */
2136                 m_cmng_port->fair_vars.fairness_timeout =
2137                                                 fair_periodic_timeout_usec / 4;
2138
2139         } else {
2140                 /* Disable rate shaping and fairness */
2141                 m_cmng_port->flags.cmng_vn_enable = 0;
2142                 m_cmng_port->flags.fairness_enable = 0;
2143                 m_cmng_port->flags.rate_shaping_enable = 0;
2144
2145                 DP(NETIF_MSG_IFUP,
2146                    "Single function mode  minmax will be disabled\n");
2147         }
2148
2149         /* Store it to internal memory */
2150         for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2151                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2152                        XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2153                        ((u32 *)(m_cmng_port))[i]);
2154 }
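
/*
 * Worked example of the arithmetic above (illustrative, 10G port so
 * port_rate = 10000 Mbps): r_param = 10000/8 = 1250 bytes/usec, t_fair
 * works out to 1000 usec (per the T_FAIR_COEF comment), and both periodic
 * timeouts are converted from usec to 4-usec SDM ticks before being stored.
 */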
2155
2156 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2157                                    u32 wsum, u16 port_rate,
2158                                  struct cmng_struct_per_port *m_cmng_port)
2159 {
2160         struct rate_shaping_vars_per_vn m_rs_vn;
2161         struct fairness_vars_per_vn m_fair_vn;
2162         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2163         u16 vn_min_rate, vn_max_rate;
2164         int i;
2165
2166         /* If function is hidden - set min and max to zeroes */
2167         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2168                 vn_min_rate = 0;
2169                 vn_max_rate = 0;
2170
2171         } else {
2172                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2173                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2174                 /* If FAIRNESS is enabled (not all min rates are zeroes) and
2175                    if current min rate is zero - set it to 1.
2176                    This is a requirement of the algorithm. */
2177                 if ((vn_min_rate == 0) && wsum)
2178                         vn_min_rate = DEF_MIN_RATE;
2179                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2180                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2181         }
2182
2183         DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d  vn_max_rate=%d  "
2184            "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2185
2186         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2187         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2188
2189         /* global vn counter - maximal Mbps for this vn */
2190         m_rs_vn.vn_counter.rate = vn_max_rate;
2191
2192         /* quota - number of bytes transmitted in this period */
2193         m_rs_vn.vn_counter.quota =
2194                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
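        /* vn_max_rate is in Mbps, i.e. bits per usec, so the product above
           divided by 8 gives bytes per rate-shaping period */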
2195
2196 #ifdef BNX2X_PER_PROT_QOS
2197         /* per protocol counter */
2198         for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2199                 /* maximal Mbps for this protocol */
2200                 m_rs_vn.protocol_counters[protocol].rate =
2201                                                 protocol_max_rate[protocol];
2202                 /* the quota in each timer period -
2203                    number of bytes transmitted in this period */
2204                 m_rs_vn.protocol_counters[protocol].quota =
2205                         (u32)(rs_periodic_timeout_usec *
2206                           ((double)m_rs_vn.
2207                                    protocol_counters[protocol].rate/8));
2208         }
2209 #endif
2210
2211         if (wsum) {
2212                 /* credit for each period of the fairness algorithm:
2213                    number of bytes in T_FAIR (the vn's share of the port rate).
2214                    wsum should not be larger than 10000, thus
2215                    T_FAIR_COEF / (8 * wsum) will always be greater than zero */
2216                 m_fair_vn.vn_credit_delta =
2217                         max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2218                             (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2219                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2220                    m_fair_vn.vn_credit_delta);
2221         }
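
                /* i.e. each vn gets a slice of the T_FAIR byte budget
                   proportional to vn_min_rate / wsum, floored at twice the
                   port fair_threshold set in bnx2x_init_port_minmax() */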
2222
2223 #ifdef BNX2X_PER_PROT_QOS
2224         do {
2225                 u32 protocolWeightSum = 0;
2226
2227                 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2228                         protocolWeightSum +=
2229                                         drvInit.protocol_min_rate[protocol];
2230                 /* per protocol counter -
2231                    NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2232                 if (protocolWeightSum > 0) {
2233                         for (protocol = 0;
2234                              protocol < NUM_OF_PROTOCOLS; protocol++)
2235                                 /* credit for each period of the
2236                                    fairness algorithm - number of bytes in
2237                                    T_FAIR (the protocol's share of the vn rate) */
2238                                 m_fair_vn.protocol_credit_delta[protocol] =
2239                                         (u32)((vn_min_rate / 8) * t_fair *
2240                                         protocol_min_rate / protocolWeightSum);
2241                 }
2242         } while (0);
2243 #endif
2244
2245         /* Store it to internal memory */
2246         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2247                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2248                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2249                        ((u32 *)(&m_rs_vn))[i]);
2250
2251         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2252                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2253                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2254                        ((u32 *)(&m_fair_vn))[i]);
2255 }
2256
2257 /* This function is called upon link interrupt */
2258 static void bnx2x_link_attn(struct bnx2x *bp)
2259 {
2260         int vn;
2261
2262         /* Make sure that we are synced with the current statistics */
2263         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2264
2265         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2266
2267         if (bp->link_vars.link_up) {
2268
2269                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2270                         struct host_port_stats *pstats;
2271
2272                         pstats = bnx2x_sp(bp, port_stats);
2273                         /* reset old bmac stats */
2274                         memset(&(pstats->mac_stx[0]), 0,
2275                                sizeof(struct mac_stx));
2276                 }
2277                 if ((bp->state == BNX2X_STATE_OPEN) ||
2278                     (bp->state == BNX2X_STATE_DISABLED))
2279                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2280         }
2281
2282         /* indicate link status */
2283         bnx2x_link_report(bp);
2284
2285         if (IS_E1HMF(bp)) {
2286                 int func;
2287
2288                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2289                         if (vn == BP_E1HVN(bp))
2290                                 continue;
2291
2292                         func = ((vn << 1) | BP_PORT(bp));
2293
2294                         /* Set the attention towards other drivers
2295                            on the same port */
2296                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2297                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2298                 }
2299         }
2300
2301         if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2302                 struct cmng_struct_per_port m_cmng_port;
2303                 u32 wsum;
2304                 int port = BP_PORT(bp);
2305
2306                 /* Init RATE SHAPING and FAIRNESS contexts */
2307                 wsum = bnx2x_calc_vn_wsum(bp);
2308                 bnx2x_init_port_minmax(bp, (int)wsum,
2309                                         bp->link_vars.line_speed,
2310                                         &m_cmng_port);
2311                 if (IS_E1HMF(bp))
2312                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2313                                 bnx2x_init_vn_minmax(bp, 2*vn + port,
2314                                         wsum, bp->link_vars.line_speed,
2315                                                      &m_cmng_port);
2316         }
2317 }
2318
2319 static void bnx2x__link_status_update(struct bnx2x *bp)
2320 {
2321         if (bp->state != BNX2X_STATE_OPEN)
2322                 return;
2323
2324         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2325
2326         if (bp->link_vars.link_up)
2327                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2328         else
2329                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2330
2331         /* indicate link status */
2332         bnx2x_link_report(bp);
2333 }
2334
2335 static void bnx2x_pmf_update(struct bnx2x *bp)
2336 {
2337         int port = BP_PORT(bp);
2338         u32 val;
2339
2340         bp->port.pmf = 1;
2341         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2342
2343         /* enable nig attention */
2344         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2345         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2346         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2347
2348         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2349 }
2350
2351 /* end of Link */
2352
2353 /* slow path */
2354
2355 /*
2356  * General service functions
2357  */
2358
2359 /* the slow path queue is odd since completions arrive on the fastpath ring */
2360 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2361                          u32 data_hi, u32 data_lo, int common)
2362 {
2363         int func = BP_FUNC(bp);
2364
2365         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2366            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2367            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2368            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2369            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2370
2371 #ifdef BNX2X_STOP_ON_ERROR
2372         if (unlikely(bp->panic))
2373                 return -EIO;
2374 #endif
2375
2376         spin_lock_bh(&bp->spq_lock);
2377
2378         if (!bp->spq_left) {
2379                 BNX2X_ERR("BUG! SPQ ring full!\n");
2380                 spin_unlock_bh(&bp->spq_lock);
2381                 bnx2x_panic();
2382                 return -EBUSY;
2383         }
2384
2385         /* CID needs the port number to be encoded in it */
2386         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2387                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2388                                      HW_CID(bp, cid)));
2389         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2390         if (common)
2391                 bp->spq_prod_bd->hdr.type |=
2392                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2393
2394         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2395         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2396
2397         bp->spq_left--;
2398
2399         if (bp->spq_prod_bd == bp->spq_last_bd) {
2400                 bp->spq_prod_bd = bp->spq;
2401                 bp->spq_prod_idx = 0;
2402                 DP(NETIF_MSG_TIMER, "end of spq\n");
2403
2404         } else {
2405                 bp->spq_prod_bd++;
2406                 bp->spq_prod_idx++;
2407         }
2408
2409         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2410                bp->spq_prod_idx);
2411
2412         spin_unlock_bh(&bp->spq_lock);
2413         return 0;
2414 }
2415
2416 /* acquire split MCP access lock register */
2417 static int bnx2x_acquire_alr(struct bnx2x *bp)
2418 {
2419         u32 i, j, val;
2420         int rc = 0;
2421
2422         might_sleep();
2423         i = 100;
2424         for (j = 0; j < i*10; j++) {
2425                 val = (1UL << 31);
2426                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2427                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2428                 if (val & (1L << 31))
2429                         break;
2430
2431                 msleep(5);
2432         }
2433         if (!(val & (1L << 31))) {
2434                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2435                 rc = -EBUSY;
2436         }
2437
2438         return rc;
2439 }
2440
2441 /* release split MCP access lock register */
2442 static void bnx2x_release_alr(struct bnx2x *bp)
2443 {
2444         u32 val = 0;
2445
2446         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2447 }
2448
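/*
 * Returns a bitmask of which default status block indices advanced:
 * bit 0 - attention bits, bit 1 - CSTORM, bit 2 - USTORM,
 * bit 3 - XSTORM, bit 4 - TSTORM.
 */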
2449 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2450 {
2451         struct host_def_status_block *def_sb = bp->def_status_blk;
2452         u16 rc = 0;
2453
2454         barrier(); /* status block is written to by the chip */
2455         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2456                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2457                 rc |= 1;
2458         }
2459         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2460                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2461                 rc |= 2;
2462         }
2463         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2464                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2465                 rc |= 4;
2466         }
2467         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2468                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2469                 rc |= 8;
2470         }
2471         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2472                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2473                 rc |= 16;
2474         }
2475         return rc;
2476 }
2477
2478 /*
2479  * slow path service functions
2480  */
2481
2482 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2483 {
2484         int port = BP_PORT(bp);
2485         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2486                        COMMAND_REG_ATTN_BITS_SET);
2487         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2488                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2489         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2490                                        NIG_REG_MASK_INTERRUPT_PORT0;
2491         u32 aeu_mask;
2492
2493         if (bp->attn_state & asserted)
2494                 BNX2X_ERR("IGU ERROR\n");
2495
2496         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2497         aeu_mask = REG_RD(bp, aeu_addr);
2498
2499         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2500            aeu_mask, asserted);
2501         aeu_mask &= ~(asserted & 0xff);
2502         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2503
2504         REG_WR(bp, aeu_addr, aeu_mask);
2505         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2506
2507         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2508         bp->attn_state |= asserted;
2509         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2510
2511         if (asserted & ATTN_HARD_WIRED_MASK) {
2512                 if (asserted & ATTN_NIG_FOR_FUNC) {
2513
2514                         bnx2x_acquire_phy_lock(bp);
2515
2516                         /* save nig interrupt mask */
2517                         bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2518                         REG_WR(bp, nig_int_mask_addr, 0);
2519
2520                         bnx2x_link_attn(bp);
2521
2522                         /* handle unicore attn? */
2523                 }
2524                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2525                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2526
2527                 if (asserted & GPIO_2_FUNC)
2528                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2529
2530                 if (asserted & GPIO_3_FUNC)
2531                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2532
2533                 if (asserted & GPIO_4_FUNC)
2534                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2535
2536                 if (port == 0) {
2537                         if (asserted & ATTN_GENERAL_ATTN_1) {
2538                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2539                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2540                         }
2541                         if (asserted & ATTN_GENERAL_ATTN_2) {
2542                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2543                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2544                         }
2545                         if (asserted & ATTN_GENERAL_ATTN_3) {
2546                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2547                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2548                         }
2549                 } else {
2550                         if (asserted & ATTN_GENERAL_ATTN_4) {
2551                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2552                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2553                         }
2554                         if (asserted & ATTN_GENERAL_ATTN_5) {
2555                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2556                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2557                         }
2558                         if (asserted & ATTN_GENERAL_ATTN_6) {
2559                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2560                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2561                         }
2562                 }
2563
2564         } /* if hardwired */
2565
2566         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2567            asserted, hc_addr);
2568         REG_WR(bp, hc_addr, asserted);
2569
2570         /* now set back the mask */
2571         if (asserted & ATTN_NIG_FOR_FUNC) {
2572                 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2573                 bnx2x_release_phy_lock(bp);
2574         }
2575 }
2576
2577 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2578 {
2579         int port = BP_PORT(bp);
2580         int reg_offset;
2581         u32 val;
2582
2583         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2584                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2585
2586         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2587
2588                 val = REG_RD(bp, reg_offset);
2589                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2590                 REG_WR(bp, reg_offset, val);
2591
2592                 BNX2X_ERR("SPIO5 hw attention\n");
2593
2594                 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2595                 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
2596                 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2597                         /* Fan failure attention */
2598
2599                         /* The PHY reset is controlled by GPIO 1 */
2600                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2601                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2602                         /* Low power mode is controlled by GPIO 2 */
2603                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2604                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2605                         /* mark the failure */
2606                         bp->link_params.ext_phy_config &=
2607                                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2608                         bp->link_params.ext_phy_config |=
2609                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2610                         SHMEM_WR(bp,
2611                                  dev_info.port_hw_config[port].
2612                                                         external_phy_config,
2613                                  bp->link_params.ext_phy_config);
2614                         /* log the failure */
2615                         printk(KERN_ERR PFX "Fan Failure on Network"
2616                                " Controller %s has caused the driver to"
2617                                " shut down the card to prevent permanent"
2618                                " damage.  Please contact Dell Support for"
2619                                " assistance\n", bp->dev->name);
2620                         break;
2621
2622                 default:
2623                         break;
2624                 }
2625         }
2626
2627         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2628
2629                 val = REG_RD(bp, reg_offset);
2630                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2631                 REG_WR(bp, reg_offset, val);
2632
2633                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2634                           (attn & HW_INTERRUT_ASSERT_SET_0));
2635                 bnx2x_panic();
2636         }
2637 }
2638
2639 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2640 {
2641         u32 val;
2642
2643         if (attn & BNX2X_DOORQ_ASSERT) {
2644
2645                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2646                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2647                 /* DORQ discard attention */
2648                 if (val & 0x2)
2649                         BNX2X_ERR("FATAL error from DORQ\n");
2650         }
2651
2652         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2653
2654                 int port = BP_PORT(bp);
2655                 int reg_offset;
2656
2657                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2658                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2659
2660                 val = REG_RD(bp, reg_offset);
2661                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2662                 REG_WR(bp, reg_offset, val);
2663
2664                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2665                           (attn & HW_INTERRUT_ASSERT_SET_1));
2666                 bnx2x_panic();
2667         }
2668 }
2669
2670 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2671 {
2672         u32 val;
2673
2674         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2675
2676                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2677                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2678                 /* CFC error attention */
2679                 if (val & 0x2)
2680                         BNX2X_ERR("FATAL error from CFC\n");
2681         }
2682
2683         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2684
2685                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2686                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2687                 /* RQ_USDMDP_FIFO_OVERFLOW */
2688                 if (val & 0x18000)
2689                         BNX2X_ERR("FATAL error from PXP\n");
2690         }
2691
2692         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2693
2694                 int port = BP_PORT(bp);
2695                 int reg_offset;
2696
2697                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2698                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2699
2700                 val = REG_RD(bp, reg_offset);
2701                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2702                 REG_WR(bp, reg_offset, val);
2703
2704                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2705                           (attn & HW_INTERRUT_ASSERT_SET_2));
2706                 bnx2x_panic();
2707         }
2708 }
2709
2710 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2711 {
2712         u32 val;
2713
2714         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2715
2716                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2717                         int func = BP_FUNC(bp);
2718
2719                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2720                         bnx2x__link_status_update(bp);
2721                         if (SHMEM_RD(bp, func_mb[func].drv_status) &
2722                                                         DRV_STATUS_PMF)
2723                                 bnx2x_pmf_update(bp);
2724
2725                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2726
2727                         BNX2X_ERR("MC assert!\n");
2728                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2729                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2730                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2731                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2732                         bnx2x_panic();
2733
2734                 } else if (attn & BNX2X_MCP_ASSERT) {
2735
2736                         BNX2X_ERR("MCP assert!\n");
2737                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2738                         bnx2x_fw_dump(bp);
2739
2740                 } else
2741                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2742         }
2743
2744         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2745                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2746                 if (attn & BNX2X_GRC_TIMEOUT) {
2747                         val = CHIP_IS_E1H(bp) ?
2748                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2749                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2750                 }
2751                 if (attn & BNX2X_GRC_RSV) {
2752                         val = CHIP_IS_E1H(bp) ?
2753                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2754                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2755                 }
2756                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2757         }
2758 }
2759
2760 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2761 {
2762         struct attn_route attn;
2763         struct attn_route group_mask;
2764         int port = BP_PORT(bp);
2765         int index;
2766         u32 reg_addr;
2767         u32 val;
2768         u32 aeu_mask;
2769
2770         /* need to take the HW lock because the MCP or the other port
2771            might also try to handle this event */
2772         bnx2x_acquire_alr(bp);
2773
2774         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2775         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2776         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2777         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2778         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2779            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2780
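        /* each deasserted bit selects one AEU attention group; only the
         * attention sources enabled in that group's masks are handled below
         */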
2781         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2782                 if (deasserted & (1 << index)) {
2783                         group_mask = bp->attn_group[index];
2784
2785                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2786                            index, group_mask.sig[0], group_mask.sig[1],
2787                            group_mask.sig[2], group_mask.sig[3]);
2788
2789                         bnx2x_attn_int_deasserted3(bp,
2790                                         attn.sig[3] & group_mask.sig[3]);
2791                         bnx2x_attn_int_deasserted1(bp,
2792                                         attn.sig[1] & group_mask.sig[1]);
2793                         bnx2x_attn_int_deasserted2(bp,
2794                                         attn.sig[2] & group_mask.sig[2]);
2795                         bnx2x_attn_int_deasserted0(bp,
2796                                         attn.sig[0] & group_mask.sig[0]);
2797
2798                         if ((attn.sig[0] & group_mask.sig[0] &
2799                                                 HW_PRTY_ASSERT_SET_0) ||
2800                             (attn.sig[1] & group_mask.sig[1] &
2801                                                 HW_PRTY_ASSERT_SET_1) ||
2802                             (attn.sig[2] & group_mask.sig[2] &
2803                                                 HW_PRTY_ASSERT_SET_2))
2804                                 BNX2X_ERR("FATAL HW block parity attention\n");
2805                 }
2806         }
2807
2808         bnx2x_release_alr(bp);
2809
2810         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2811
2812         val = ~deasserted;
2813         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2814            val, reg_addr);
2815         REG_WR(bp, reg_addr, val);
2816
2817         if (~bp->attn_state & deasserted)
2818                 BNX2X_ERR("IGU ERROR\n");
2819
2820         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2821                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2822
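        /* unmask the AEU attention lines for the bits that just deasserted;
         * the mask register is shared per port, hence the read-modify-write
         * under the HW lock
         */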
2823         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2824         aeu_mask = REG_RD(bp, reg_addr);
2825
2826         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2827            aeu_mask, deasserted);
2828         aeu_mask |= (deasserted & 0xff);
2829         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2830
2831         REG_WR(bp, reg_addr, aeu_mask);
2832         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2833
2834         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2835         bp->attn_state &= ~deasserted;
2836         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2837 }
2838
2839 static void bnx2x_attn_int(struct bnx2x *bp)
2840 {
2841         /* read local copy of bits */
2842         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2843                                                                 attn_bits);
2844         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2845                                                                 attn_bits_ack);
2846         u32 attn_state = bp->attn_state;
2847
2848         /* look for changed bits */
2849         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2850         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
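        /* asserted:   set in HW, not yet acked and not in the recorded state
         * deasserted: cleared in HW while still acked and recorded as set
         */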
2851
2852         DP(NETIF_MSG_HW,
2853            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2854            attn_bits, attn_ack, asserted, deasserted);
2855
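        /* a bit on which attn_bits and attn_ack agree but attn_state differs
         * means the driver's recorded attention state is out of sync
         */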
2856         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2857                 BNX2X_ERR("BAD attention state\n");
2858
2859         /* handle bits that were raised */
2860         if (asserted)
2861                 bnx2x_attn_int_asserted(bp, asserted);
2862
2863         if (deasserted)
2864                 bnx2x_attn_int_deasserted(bp, deasserted);
2865 }
2866
2867 static void bnx2x_sp_task(struct work_struct *work)
2868 {
2869         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2870         u16 status;
2871
2872
2873         /* Return here if interrupt is disabled */
2874         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2875                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2876                 return;
2877         }
2878
2879         status = bnx2x_update_dsb_idx(bp);
2880 /*      if (status == 0)                                     */
2881 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2882
2883         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2884
2885         /* HW attentions */
2886         if (status & 0x1)
2887                 bnx2x_attn_int(bp);
2888
2889         /* CStorm events: query_stats, port delete ramrod */
2890         if (status & 0x2)
2891                 bp->stats_pending = 0;
2892
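        /* acknowledge the default status block indices; only the final ack
         * (IGU_INT_ENABLE) re-enables the IGU interrupt line
         */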
2893         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2894                      IGU_INT_NOP, 1);
2895         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2896                      IGU_INT_NOP, 1);
2897         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2898                      IGU_INT_NOP, 1);
2899         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2900                      IGU_INT_NOP, 1);
2901         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2902                      IGU_INT_ENABLE, 1);
2903
2904 }
2905
2906 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2907 {
2908         struct net_device *dev = dev_instance;
2909         struct bnx2x *bp = netdev_priv(dev);
2910
2911         /* Return here if interrupt is disabled */
2912         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2913                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2914                 return IRQ_HANDLED;
2915         }
2916
2917         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
2918
2919 #ifdef BNX2X_STOP_ON_ERROR
2920         if (unlikely(bp->panic))
2921                 return IRQ_HANDLED;
2922 #endif
2923
2924         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2925
2926         return IRQ_HANDLED;
2927 }
2928
2929 /* end of slow path */
2930
2931 /* Statistics */
2932
2933 /****************************************************************************
2934 * Macros
2935 ****************************************************************************/
2936
2937 /* sum[hi:lo] += add[hi:lo] */
2938 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2939         do { \
2940                 s_lo += a_lo; \
2941                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2942         } while (0)
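/* The carry test relies on unsigned 32-bit wrap-around: if s_lo wrapped when
 * a_lo was added, the new s_lo is smaller than a_lo and 1 is carried into
 * s_hi, e.g. 0x00000000ffffffff + 0x0000000000000001 = 0x0000000100000000.
 */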
2943
2944 /* difference = minuend - subtrahend */
2945 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2946         do { \
2947                 if (m_lo < s_lo) { \
2948                         /* underflow */ \
2949                         d_hi = m_hi - s_hi; \
2950                         if (d_hi > 0) { \
2951                                 /* we can borrow 1 */ \
2952                                 d_hi--; \
2953                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2954                         } else { \
2955                                 /* m_hi <= s_hi */ \
2956                                 d_hi = 0; \
2957                                 d_lo = 0; \
2958                         } \
2959                 } else { \
2960                         /* m_lo >= s_lo */ \
2961                         if (m_hi < s_hi) { \
2962                                 d_hi = 0; \
2963                                 d_lo = 0; \
2964                         } else { \
2965                                 /* m_hi >= s_hi */ \
2966                                 d_hi = m_hi - s_hi; \
2967                                 d_lo = m_lo - s_lo; \
2968                         } \
2969                 } \
2970         } while (0)
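/* Borrow example: m = 0x0000000200000000, s = 0x0000000000000001 underflows
 * the low word, one is borrowed from the high word and the result is
 * d = 0x00000001ffffffff; a difference that would go negative is forced to
 * zero.
 */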
2971
2972 #define UPDATE_STAT64(s, t) \
2973         do { \
2974                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2975                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2976                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2977                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2978                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2979                        pstats->mac_stx[1].t##_lo, diff.lo); \
2980         } while (0)
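/* mac_stx[0] holds the most recent raw MAC snapshot, while mac_stx[1]
 * accumulates the running 64-bit totals by adding the delta between
 * consecutive snapshots on every update.
 */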
2981
2982 #define UPDATE_STAT64_NIG(s, t) \
2983         do { \
2984                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2985                         diff.lo, new->s##_lo, old->s##_lo); \
2986                 ADD_64(estats->t##_hi, diff.hi, \
2987                        estats->t##_lo, diff.lo); \
2988         } while (0)
2989
2990 /* sum[hi:lo] += add */
2991 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2992         do { \
2993                 s_lo += a; \
2994                 s_hi += (s_lo < a) ? 1 : 0; \
2995         } while (0)
2996
2997 #define UPDATE_EXTEND_STAT(s) \
2998         do { \
2999                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3000                               pstats->mac_stx[1].s##_lo, \
3001                               new->s); \
3002         } while (0)
3003
3004 #define UPDATE_EXTEND_TSTAT(s, t) \
3005         do { \
3006                 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
3007                 old_tclient->s = le32_to_cpu(tclient->s); \
3008                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
3009         } while (0)
3010
3011 #define UPDATE_EXTEND_XSTAT(s, t) \
3012         do { \
3013                 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
3014                 old_xclient->s = le32_to_cpu(xclient->s); \
3015                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
3016         } while (0)
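/* The per-client TSTORM/XSTORM counters are 32-bit little-endian values in
 * the status block; the unsigned 32-bit subtraction yields the correct delta
 * even if the counter wrapped once since the previous update, and the delta
 * is then folded into the 64-bit driver counter.
 */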
3017
3018 /*
3019  * General service functions
3020  */
3021
3022 static inline long bnx2x_hilo(u32 *hiref)
3023 {
3024         u32 lo = *(hiref + 1);
3025 #if (BITS_PER_LONG == 64)
3026         u32 hi = *hiref;
3027
3028         return HILO_U64(hi, lo);
3029 #else
3030         return lo;
3031 #endif
3032 }
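/* On 64-bit kernels bnx2x_hilo() returns the full hi:lo value; on 32-bit
 * kernels a long cannot hold it, so only the low 32 bits are reported.
 */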
3033
3034 /*
3035  * Init service functions
3036  */
3037
3038 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3039 {
3040         if (!bp->stats_pending) {
3041                 struct eth_query_ramrod_data ramrod_data = {0};
3042                 int rc;
3043
3044                 ramrod_data.drv_counter = bp->stats_counter++;
3045                 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
3046                 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3047
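                /* the 8-byte ramrod_data is handed to the FW as two 32-bit
                 * words: the second word as data_hi, the first as data_lo
                 */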
3048                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3049                                    ((u32 *)&ramrod_data)[1],
3050                                    ((u32 *)&ramrod_data)[0], 0);
3051                 if (rc == 0) {
3052                         /* stats ramrod has its own slot on the spq */
3053                         bp->spq_left++;
3054                         bp->stats_pending = 1;
3055                 }
3056         }
3057 }
3058
3059 static void bnx2x_stats_init(struct bnx2x *bp)
3060 {
3061         int port = BP_PORT(bp);
3062
3063         bp->executer_idx = 0;
3064         bp->stats_counter = 0;
3065
3066         /* port stats */
3067         if (!BP_NOMCP(bp))
3068                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3069         else
3070                 bp->port.port_stx = 0;
3071         DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3072
3073         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3074         bp->port.old_nig_stats.brb_discard =
3075                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3076         bp->port.old_nig_stats.brb_truncate =
3077                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3078         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3079                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3080         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3081                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3082
3083         /* function stats */
3084         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3085         memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3086         memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3087         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3088
3089         bp->stats_state = STATS_STATE_DISABLED;
3090         if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3091                 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3092 }
3093
3094 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3095 {
3096         struct dmae_command *dmae = &bp->stats_dmae;
3097         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3098
3099         *stats_comp = DMAE_COMP_VAL;
3100
3101         /* loader */
3102         if (bp->executer_idx) {
3103                 int loader_idx = PMF_DMAE_C(bp);
3104
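                /* build the "loader": a DMAE command that copies the first
                 * command of the pre-built chain from host memory into DMAE
                 * command memory slot loader_idx + 1 and, on completion,
                 * kicks it off through that slot's GO register
                 */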
3105                 memset(dmae, 0, sizeof(struct dmae_command));
3106
3107                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3108                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3109                                 DMAE_CMD_DST_RESET |
3110 #ifdef __BIG_ENDIAN
3111                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3112 #else
3113                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3114 #endif
3115                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3116                                                DMAE_CMD_PORT_0) |
3117                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3118                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3119                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3120                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3121                                      sizeof(struct dmae_command) *
3122                                      (loader_idx + 1)) >> 2;
3123                 dmae->dst_addr_hi = 0;
3124                 dmae->len = sizeof(struct dmae_command) >> 2;
3125                 if (CHIP_IS_E1(bp))
3126                         dmae->len--;
3127                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3128                 dmae->comp_addr_hi = 0;
3129                 dmae->comp_val = 1;
3130
3131                 *stats_comp = 0;
3132                 bnx2x_post_dmae(bp, dmae, loader_idx);
3133
3134         } else if (bp->func_stx) {
3135                 *stats_comp = 0;
3136                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3137         }
3138 }
3139
3140 static int bnx2x_stats_comp(struct bnx2x *bp)
3141 {
3142         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3143         int cnt = 10;
3144
3145         might_sleep();
3146         while (*stats_comp != DMAE_COMP_VAL) {
3147                 if (!cnt) {
3148                         BNX2X_ERR("timeout waiting for stats to finish\n");
3149                         break;
3150                 }
3151                 cnt--;
3152                 msleep(1);
3153         }
3154         return 1;
3155 }
3156
3157 /*
3158  * Statistics service functions
3159  */
3160
3161 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3162 {
3163         struct dmae_command *dmae;
3164         u32 opcode;
3165         int loader_idx = PMF_DMAE_C(bp);
3166         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3167
3168         /* sanity */
3169         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3170                 BNX2X_ERR("BUG!\n");
3171                 return;
3172         }
3173
3174         bp->executer_idx = 0;
3175
3176         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3177                   DMAE_CMD_C_ENABLE |
3178                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3179 #ifdef __BIG_ENDIAN
3180                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3181 #else
3182                   DMAE_CMD_ENDIANITY_DW_SWAP |
3183 #endif
3184                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3185                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3186
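        /* the port stats area exceeds a single DMAE read, so it is pulled in
         * two chunks: the first DMAE_LEN32_RD_MAX dwords and then the rest,
         * with only the second command completing into stats_comp
         */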
3187         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3188         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3189         dmae->src_addr_lo = bp->port.port_stx >> 2;
3190         dmae->src_addr_hi = 0;
3191         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3192         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3193         dmae->len = DMAE_LEN32_RD_MAX;
3194         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3195         dmae->comp_addr_hi = 0;
3196         dmae->comp_val = 1;
3197
3198         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3199         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3200         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3201         dmae->src_addr_hi = 0;
3202         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3203                                    DMAE_LEN32_RD_MAX * 4);
3204         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3205                                    DMAE_LEN32_RD_MAX * 4);
3206         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3207         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3208         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3209         dmae->comp_val = DMAE_COMP_VAL;
3210