[linux-3.10.git] drivers/net/bnx2x_main.c
1 /* bnx2x_main.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2009 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath and fastpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h>  /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
51 #include <linux/io.h>
52
53
54 #include "bnx2x.h"
55 #include "bnx2x_init.h"
56 #include "bnx2x_init_ops.h"
57 #include "bnx2x_dump.h"
58
59 #define DRV_MODULE_VERSION      "1.52.1"
60 #define DRV_MODULE_RELDATE      "2009/08/12"
61 #define BNX2X_BC_VER            0x040200
62
63 #include <linux/firmware.h>
64 #include "bnx2x_fw_file_hdr.h"
65 /* FW files */
66 #define FW_FILE_PREFIX_E1       "bnx2x-e1-"
67 #define FW_FILE_PREFIX_E1H      "bnx2x-e1h-"
68
69 /* Time in jiffies before concluding the transmitter is hung */
70 #define TX_TIMEOUT              (5*HZ)
71
72 static char version[] __devinitdata =
73         "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
74         DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
75
76 MODULE_AUTHOR("Eliezer Tamir");
77 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
78 MODULE_LICENSE("GPL");
79 MODULE_VERSION(DRV_MODULE_VERSION);
80
81 static int multi_mode = 1;
82 module_param(multi_mode, int, 0);
83 MODULE_PARM_DESC(multi_mode, " Multi queue mode "
84                              "(0 Disable; 1 Enable (default))");
85
86 static int num_rx_queues;
87 module_param(num_rx_queues, int, 0);
88 MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
89                                 " (default is half number of CPUs)");
90
91 static int num_tx_queues;
92 module_param(num_tx_queues, int, 0);
93 MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
94                                 " (default is half number of CPUs)");
95
96 static int disable_tpa;
97 module_param(disable_tpa, int, 0);
98 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
99
100 static int int_mode;
101 module_param(int_mode, int, 0);
102 MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
103
104 static int dropless_fc;
105 module_param(dropless_fc, int, 0);
106 MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
107
108 static int poll;
109 module_param(poll, int, 0);
110 MODULE_PARM_DESC(poll, " Use polling (for debug)");
111
112 static int mrrs = -1;
113 module_param(mrrs, int, 0);
114 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
115
116 static int debug;
117 module_param(debug, int, 0);
118 MODULE_PARM_DESC(debug, " Default debug msglevel");
119
120 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
121
122 static struct workqueue_struct *bnx2x_wq;
123
124 enum bnx2x_board_type {
125         BCM57710 = 0,
126         BCM57711 = 1,
127         BCM57711E = 2,
128 };
129
130 /* indexed by board_type, above */
131 static struct {
132         char *name;
133 } board_info[] __devinitdata = {
134         { "Broadcom NetXtreme II BCM57710 XGb" },
135         { "Broadcom NetXtreme II BCM57711 XGb" },
136         { "Broadcom NetXtreme II BCM57711E XGb" }
137 };
138
139
140 static const struct pci_device_id bnx2x_pci_tbl[] = {
141         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
142         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
143         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
144         { 0 }
145 };
146
147 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
148
149 /****************************************************************************
150 * General service functions
151 ****************************************************************************/
152
153 /* used only at init
154  * locking is done by mcp
155  */
156 void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
157 {
158         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
159         pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
160         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
161                                PCICFG_VENDOR_ID_OFFSET);
162 }
163
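/*
 * Indirect register read, the counterpart of bnx2x_reg_wr_ind() above:
 * the target GRC address is latched through PCICFG_GRC_ADDRESS, the value
 * is read back from PCICFG_GRC_DATA, and the address window is then parked
 * at PCICFG_VENDOR_ID_OFFSET again.  Like the write helper, this is used
 * on paths where DMAE is not ready (see bnx2x_read_dmae() below).
 */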
164 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
165 {
166         u32 val;
167
168         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
169         pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
170         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
171                                PCICFG_VENDOR_ID_OFFSET);
172
173         return val;
174 }
175
176 static const u32 dmae_reg_go_c[] = {
177         DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
178         DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
179         DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
180         DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
181 };
182
183 /* copy command into DMAE command memory and set DMAE command go */
184 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
185                             int idx)
186 {
187         u32 cmd_offset;
188         int i;
189
190         cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
191         for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
192                 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
193
194                 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
195                    idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
196         }
197         REG_WR(bp, dmae_reg_go_c[idx], 1);
198 }
199
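/*
 * Copy len32 dwords from host memory at dma_addr into GRC address dst_addr
 * using a single DMAE command.  Completion is detected by polling the
 * slowpath wb_comp word for DMAE_COMP_VAL; the loop gives up after roughly
 * 200 iterations (5us each on real silicon, 100ms each on emulation/FPGA).
 * When DMAE is not ready yet, the data is written with indirect register
 * writes instead.
 */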
200 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
201                       u32 len32)
202 {
203         struct dmae_command dmae;
204         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
205         int cnt = 200;
206
207         if (!bp->dmae_ready) {
208                 u32 *data = bnx2x_sp(bp, wb_data[0]);
209
210                 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
211                    "  using indirect\n", dst_addr, len32);
212                 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
213                 return;
214         }
215
216         memset(&dmae, 0, sizeof(struct dmae_command));
217
218         dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
219                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
220                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
221 #ifdef __BIG_ENDIAN
222                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
223 #else
224                        DMAE_CMD_ENDIANITY_DW_SWAP |
225 #endif
226                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
227                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
228         dmae.src_addr_lo = U64_LO(dma_addr);
229         dmae.src_addr_hi = U64_HI(dma_addr);
230         dmae.dst_addr_lo = dst_addr >> 2;
231         dmae.dst_addr_hi = 0;
232         dmae.len = len32;
233         dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
234         dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
235         dmae.comp_val = DMAE_COMP_VAL;
236
237         DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
238            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
239                     "dst_addr [%x:%08x (%08x)]\n"
240            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
241            dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
242            dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
243            dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
244         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
245            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
246            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
247
248         mutex_lock(&bp->dmae_mutex);
249
250         *wb_comp = 0;
251
252         bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
253
254         udelay(5);
255
256         while (*wb_comp != DMAE_COMP_VAL) {
257                 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
258
259                 if (!cnt) {
260                         BNX2X_ERR("DMAE timeout!\n");
261                         break;
262                 }
263                 cnt--;
264                 /* adjust delay for emulation/FPGA */
265                 if (CHIP_REV_IS_SLOW(bp))
266                         msleep(100);
267                 else
268                         udelay(5);
269         }
270
271         mutex_unlock(&bp->dmae_mutex);
272 }
273
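/*
 * GRC -> host counterpart of bnx2x_write_dmae(): reads len32 dwords from
 * GRC address src_addr into the slowpath wb_data buffer, again polling
 * wb_comp for completion.  Without DMAE it falls back to one indirect
 * register read per dword.
 */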
274 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
275 {
276         struct dmae_command dmae;
277         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
278         int cnt = 200;
279
280         if (!bp->dmae_ready) {
281                 u32 *data = bnx2x_sp(bp, wb_data[0]);
282                 int i;
283
284                 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
285                    "  using indirect\n", src_addr, len32);
286                 for (i = 0; i < len32; i++)
287                         data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
288                 return;
289         }
290
291         memset(&dmae, 0, sizeof(struct dmae_command));
292
293         dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
294                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
295                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
296 #ifdef __BIG_ENDIAN
297                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
298 #else
299                        DMAE_CMD_ENDIANITY_DW_SWAP |
300 #endif
301                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
302                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
303         dmae.src_addr_lo = src_addr >> 2;
304         dmae.src_addr_hi = 0;
305         dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
306         dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
307         dmae.len = len32;
308         dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
309         dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
310         dmae.comp_val = DMAE_COMP_VAL;
311
312         DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
313            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
314                     "dst_addr [%x:%08x (%08x)]\n"
315            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
316            dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
317            dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
318            dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
319
320         mutex_lock(&bp->dmae_mutex);
321
322         memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
323         *wb_comp = 0;
324
325         bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
326
327         udelay(5);
328
329         while (*wb_comp != DMAE_COMP_VAL) {
330
331                 if (!cnt) {
332                         BNX2X_ERR("DMAE timeout!\n");
333                         break;
334                 }
335                 cnt--;
336                 /* adjust delay for emulation/FPGA */
337                 if (CHIP_REV_IS_SLOW(bp))
338                         msleep(100);
339                 else
340                         udelay(5);
341         }
342         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
343            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
344            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
345
346         mutex_unlock(&bp->dmae_mutex);
347 }
348
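/*
 * A single DMAE write command is limited to DMAE_LEN32_WR_MAX dwords, so
 * longer buffers are pushed out in maximum-sized chunks followed by the
 * remainder.
 */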
349 void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
350                                u32 addr, u32 len)
351 {
352         int offset = 0;
353
354         while (len > DMAE_LEN32_WR_MAX) {
355                 bnx2x_write_dmae(bp, phys_addr + offset,
356                                  addr + offset, DMAE_LEN32_WR_MAX);
357                 offset += DMAE_LEN32_WR_MAX * 4;
358                 len -= DMAE_LEN32_WR_MAX;
359         }
360
361         bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
362 }
363
364 /* used only for slowpath so not inlined */
365 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
366 {
367         u32 wb_write[2];
368
369         wb_write[0] = val_hi;
370         wb_write[1] = val_lo;
371         REG_WR_DMAE(bp, reg, wb_write, 2);
372 }
373
374 #ifdef USE_WB_RD
375 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
376 {
377         u32 wb_data[2];
378
379         REG_RD_DMAE(bp, reg, wb_data, 2);
380
381         return HILO_U64(wb_data[0], wb_data[1]);
382 }
383 #endif
384
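/*
 * Scan the assert lists of the four storm processors (XSTORM, TSTORM,
 * CSTORM and USTORM) in internal memory and print every valid entry
 * (scanning stops at COMMON_ASM_INVALID_ASSERT_OPCODE).  Returns the
 * number of asserts found.
 */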
385 static int bnx2x_mc_assert(struct bnx2x *bp)
386 {
387         char last_idx;
388         int i, rc = 0;
389         u32 row0, row1, row2, row3;
390
391         /* XSTORM */
392         last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
393                            XSTORM_ASSERT_LIST_INDEX_OFFSET);
394         if (last_idx)
395                 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
396
397         /* print the asserts */
398         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
399
400                 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
401                               XSTORM_ASSERT_LIST_OFFSET(i));
402                 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
403                               XSTORM_ASSERT_LIST_OFFSET(i) + 4);
404                 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
405                               XSTORM_ASSERT_LIST_OFFSET(i) + 8);
406                 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
407                               XSTORM_ASSERT_LIST_OFFSET(i) + 12);
408
409                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
410                         BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
411                                   " 0x%08x 0x%08x 0x%08x\n",
412                                   i, row3, row2, row1, row0);
413                         rc++;
414                 } else {
415                         break;
416                 }
417         }
418
419         /* TSTORM */
420         last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
421                            TSTORM_ASSERT_LIST_INDEX_OFFSET);
422         if (last_idx)
423                 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
424
425         /* print the asserts */
426         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
427
428                 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
429                               TSTORM_ASSERT_LIST_OFFSET(i));
430                 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
431                               TSTORM_ASSERT_LIST_OFFSET(i) + 4);
432                 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
433                               TSTORM_ASSERT_LIST_OFFSET(i) + 8);
434                 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
435                               TSTORM_ASSERT_LIST_OFFSET(i) + 12);
436
437                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
438                         BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
439                                   " 0x%08x 0x%08x 0x%08x\n",
440                                   i, row3, row2, row1, row0);
441                         rc++;
442                 } else {
443                         break;
444                 }
445         }
446
447         /* CSTORM */
448         last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
449                            CSTORM_ASSERT_LIST_INDEX_OFFSET);
450         if (last_idx)
451                 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
452
453         /* print the asserts */
454         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
455
456                 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
457                               CSTORM_ASSERT_LIST_OFFSET(i));
458                 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
459                               CSTORM_ASSERT_LIST_OFFSET(i) + 4);
460                 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
461                               CSTORM_ASSERT_LIST_OFFSET(i) + 8);
462                 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
463                               CSTORM_ASSERT_LIST_OFFSET(i) + 12);
464
465                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
466                         BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
467                                   " 0x%08x 0x%08x 0x%08x\n",
468                                   i, row3, row2, row1, row0);
469                         rc++;
470                 } else {
471                         break;
472                 }
473         }
474
475         /* USTORM */
476         last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
477                            USTORM_ASSERT_LIST_INDEX_OFFSET);
478         if (last_idx)
479                 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
480
481         /* print the asserts */
482         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
483
484                 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
485                               USTORM_ASSERT_LIST_OFFSET(i));
486                 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
487                               USTORM_ASSERT_LIST_OFFSET(i) + 4);
488                 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
489                               USTORM_ASSERT_LIST_OFFSET(i) + 8);
490                 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
491                               USTORM_ASSERT_LIST_OFFSET(i) + 12);
492
493                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
494                         BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
495                                   " 0x%08x 0x%08x 0x%08x\n",
496                                   i, row3, row2, row1, row0);
497                         rc++;
498                 } else {
499                         break;
500                 }
501         }
502
503         return rc;
504 }
505
506 static void bnx2x_fw_dump(struct bnx2x *bp)
507 {
508         u32 mark, offset;
509         __be32 data[9];
510         int word;
511
512         mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
513         mark = ((mark + 0x3) & ~0x3);
514         printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);
515
516         printk(KERN_ERR PFX);
517         for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
518                 for (word = 0; word < 8; word++)
519                         data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
520                                                   offset + 4*word));
521                 data[8] = 0x0;
522                 printk(KERN_CONT "%s", (char *)data);
523         }
524         for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
525                 for (word = 0; word < 8; word++)
526                         data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
527                                                   offset + 4*word));
528                 data[8] = 0x0;
529                 printk(KERN_CONT "%s", (char *)data);
530         }
531         printk(KERN_ERR PFX "end of fw dump\n");
532 }
533
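/*
 * Crash-dump helper: freezes statistics, prints the slowpath and per-queue
 * indices, dumps the Rx BD/SGE/CQE and Tx rings around the current
 * consumer positions, and finishes with the MCP firmware trace and the
 * storm assert lists.
 */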
534 static void bnx2x_panic_dump(struct bnx2x *bp)
535 {
536         int i;
537         u16 j, start, end;
538
539         bp->stats_state = STATS_STATE_DISABLED;
540         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
541
542         BNX2X_ERR("begin crash dump -----------------\n");
543
544         /* Indices */
545         /* Common */
546         BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
547                   "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
548                   "  spq_prod_idx(%u)\n",
549                   bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
550                   bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
551
552         /* Rx */
553         for_each_rx_queue(bp, i) {
554                 struct bnx2x_fastpath *fp = &bp->fp[i];
555
556                 BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
557                           "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
558                           "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
559                           i, fp->rx_bd_prod, fp->rx_bd_cons,
560                           le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
561                           fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
562                 BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
563                           "  fp_u_idx(%x) *sb_u_idx(%x)\n",
564                           fp->rx_sge_prod, fp->last_max_sge,
565                           le16_to_cpu(fp->fp_u_idx),
566                           fp->status_blk->u_status_block.status_block_index);
567         }
568
569         /* Tx */
570         for_each_tx_queue(bp, i) {
571                 struct bnx2x_fastpath *fp = &bp->fp[i];
572
573                 BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
574                           "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
575                           i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
576                           fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
577                 BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
578                           "  tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
579                           fp->status_blk->c_status_block.status_block_index,
580                           fp->tx_db.data.prod);
581         }
582
583         /* Rings */
584         /* Rx */
585         for_each_rx_queue(bp, i) {
586                 struct bnx2x_fastpath *fp = &bp->fp[i];
587
588                 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
589                 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
590                 for (j = start; j != end; j = RX_BD(j + 1)) {
591                         u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
592                         struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
593
594                         BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
595                                   i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
596                 }
597
598                 start = RX_SGE(fp->rx_sge_prod);
599                 end = RX_SGE(fp->last_max_sge);
600                 for (j = start; j != end; j = RX_SGE(j + 1)) {
601                         u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
602                         struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
603
604                         BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
605                                   i, j, rx_sge[1], rx_sge[0], sw_page->page);
606                 }
607
608                 start = RCQ_BD(fp->rx_comp_cons - 10);
609                 end = RCQ_BD(fp->rx_comp_cons + 503);
610                 for (j = start; j != end; j = RCQ_BD(j + 1)) {
611                         u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
612
613                         BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
614                                   i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
615                 }
616         }
617
618         /* Tx */
619         for_each_tx_queue(bp, i) {
620                 struct bnx2x_fastpath *fp = &bp->fp[i];
621
622                 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
623                 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
624                 for (j = start; j != end; j = TX_BD(j + 1)) {
625                         struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
626
627                         BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
628                                   i, j, sw_bd->skb, sw_bd->first_bd);
629                 }
630
631                 start = TX_BD(fp->tx_bd_cons - 10);
632                 end = TX_BD(fp->tx_bd_cons + 254);
633                 for (j = start; j != end; j = TX_BD(j + 1)) {
634                         u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
635
636                         BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
637                                   i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
638                 }
639         }
640
641         bnx2x_fw_dump(bp);
642         bnx2x_mc_assert(bp);
643         BNX2X_ERR("end crash dump -----------------\n");
644 }
645
646 static void bnx2x_int_enable(struct bnx2x *bp)
647 {
648         int port = BP_PORT(bp);
649         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
650         u32 val = REG_RD(bp, addr);
651         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
652         int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
653
654         if (msix) {
655                 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
656                          HC_CONFIG_0_REG_INT_LINE_EN_0);
657                 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
658                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
659         } else if (msi) {
660                 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
661                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
662                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
663                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
664         } else {
665                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
666                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
667                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
668                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
669
670                 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
671                    val, port, addr);
672
673                 REG_WR(bp, addr, val);
674
675                 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
676         }
677
678         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
679            val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
680
681         REG_WR(bp, addr, val);
682         /*
683          * Ensure that HC_CONFIG is written before leading/trailing edge config
684          */
685         mmiowb();
686         barrier();
687
688         if (CHIP_IS_E1H(bp)) {
689                 /* init leading/trailing edge */
690                 if (IS_E1HMF(bp)) {
691                         val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
692                         if (bp->port.pmf)
693                                 /* enable nig and gpio3 attention */
694                                 val |= 0x1100;
695                 } else
696                         val = 0xffff;
697
698                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
699                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
700         }
701
702         /* Make sure that interrupts are indeed enabled from here on */
703         mmiowb();
704 }
705
706 static void bnx2x_int_disable(struct bnx2x *bp)
707 {
708         int port = BP_PORT(bp);
709         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
710         u32 val = REG_RD(bp, addr);
711
712         val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
713                  HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
714                  HC_CONFIG_0_REG_INT_LINE_EN_0 |
715                  HC_CONFIG_0_REG_ATTN_BIT_EN_0);
716
717         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
718            val, port, addr);
719
720         /* flush all outstanding writes */
721         mmiowb();
722
723         REG_WR(bp, addr, val);
724         if (REG_RD(bp, addr) != val)
725                 BNX2X_ERR("BUG! proper val not read from IGU!\n");
726 }
727
728 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
729 {
730         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
731         int i, offset;
732
733         /* disable interrupt handling */
734         atomic_inc(&bp->intr_sem);
735         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
736
737         if (disable_hw)
738                 /* prevent the HW from sending interrupts */
739                 bnx2x_int_disable(bp);
740
741         /* make sure all ISRs are done */
742         if (msix) {
743                 synchronize_irq(bp->msix_table[0].vector);
744                 offset = 1;
745 #ifdef BCM_CNIC
746                 offset++;
747 #endif
748                 for_each_queue(bp, i)
749                         synchronize_irq(bp->msix_table[i + offset].vector);
750         } else
751                 synchronize_irq(bp->pdev->irq);
752
753         /* make sure sp_task is not running */
754         cancel_delayed_work(&bp->sp_task);
755         flush_workqueue(bnx2x_wq);
756 }
757
758 /* fast path */
759
760 /*
761  * General service functions
762  */
763
764 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
765                                 u8 storm, u16 index, u8 op, u8 update)
766 {
767         u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
768                        COMMAND_REG_INT_ACK);
769         struct igu_ack_register igu_ack;
770
771         igu_ack.status_block_index = index;
772         igu_ack.sb_id_and_flags =
773                         ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
774                          (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
775                          (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
776                          (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
777
778         DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
779            (*(u32 *)&igu_ack), hc_addr);
780         REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
781
782         /* Make sure that ACK is written */
783         mmiowb();
784         barrier();
785 }
786
787 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
788 {
789         struct host_status_block *fpsb = fp->status_blk;
790         u16 rc = 0;
791
792         barrier(); /* status block is written to by the chip */
793         if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
794                 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
795                 rc |= 1;
796         }
797         if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
798                 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
799                 rc |= 2;
800         }
801         return rc;
802 }
803
804 static u16 bnx2x_ack_int(struct bnx2x *bp)
805 {
806         u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
807                        COMMAND_REG_SIMD_MASK);
808         u32 result = REG_RD(bp, hc_addr);
809
810         DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
811            result, hc_addr);
812
813         return result;
814 }
815
816
817 /*
818  * fast path service functions
819  */
820
821 static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
822 {
823         /* Tell compiler that consumer and producer can change */
824         barrier();
825         return (fp->tx_pkt_prod != fp->tx_pkt_cons);
826 }
827
828 /* free skb in the packet ring at pos idx
829  * return idx of last bd freed
830  */
831 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
832                              u16 idx)
833 {
834         struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
835         struct eth_tx_start_bd *tx_start_bd;
836         struct eth_tx_bd *tx_data_bd;
837         struct sk_buff *skb = tx_buf->skb;
838         u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
839         int nbd;
840
841         DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
842            idx, tx_buf, skb);
843
844         /* unmap first bd */
845         DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
846         tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
847         pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
848                          BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
849
850         nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
851 #ifdef BNX2X_STOP_ON_ERROR
852         if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
853                 BNX2X_ERR("BAD nbd!\n");
854                 bnx2x_panic();
855         }
856 #endif
857         new_cons = nbd + tx_buf->first_bd;
858
859         /* Get the next bd */
860         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
861
862         /* Skip a parse bd... */
863         --nbd;
864         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
865
866         /* ...and the TSO split header bd since they have no mapping */
867         if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
868                 --nbd;
869                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
870         }
871
872         /* now free frags */
873         while (nbd > 0) {
874
875                 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
876                 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
877                 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
878                                BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
879                 if (--nbd)
880                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
881         }
882
883         /* release skb */
884         WARN_ON(!skb);
885         dev_kfree_skb_any(skb);
886         tx_buf->first_bd = 0;
887         tx_buf->skb = NULL;
888
889         return new_cons;
890 }
891
892 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
893 {
894         s16 used;
895         u16 prod;
896         u16 cons;
897
898         barrier(); /* Tell compiler that prod and cons can change */
899         prod = fp->tx_bd_prod;
900         cons = fp->tx_bd_cons;
901
902         /* NUM_TX_RINGS = number of "next-page" entries
903            It will be used as a threshold */
904         used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
905
906 #ifdef BNX2X_STOP_ON_ERROR
907         WARN_ON(used < 0);
908         WARN_ON(used > fp->bp->tx_ring_size);
909         WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
910 #endif
911
912         return (s16)(fp->bp->tx_ring_size) - used;
913 }
914
915 static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
916 {
917         struct bnx2x *bp = fp->bp;
918         struct netdev_queue *txq;
919         u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
920         int done = 0;
921
922 #ifdef BNX2X_STOP_ON_ERROR
923         if (unlikely(bp->panic))
924                 return;
925 #endif
926
927         txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
928         hw_cons = le16_to_cpu(*fp->tx_cons_sb);
929         sw_cons = fp->tx_pkt_cons;
930
931         while (sw_cons != hw_cons) {
932                 u16 pkt_cons;
933
934                 pkt_cons = TX_BD(sw_cons);
935
936                 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
937
938                 DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
939                    hw_cons, sw_cons, pkt_cons);
940
941 /*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
942                         rmb();
943                         prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
944                 }
945 */
946                 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
947                 sw_cons++;
948                 done++;
949         }
950
951         fp->tx_pkt_cons = sw_cons;
952         fp->tx_bd_cons = bd_cons;
953
954         /* TBD need a thresh? */
955         if (unlikely(netif_tx_queue_stopped(txq))) {
956
957                 /* Need to make the tx_bd_cons update visible to start_xmit()
958                  * before checking for netif_tx_queue_stopped().  Without the
959                  * memory barrier, there is a small possibility that
960                  * start_xmit() will miss it and cause the queue to be stopped
961                  * forever.
962                  */
963                 smp_mb();
964
965                 if ((netif_tx_queue_stopped(txq)) &&
966                     (bp->state == BNX2X_STATE_OPEN) &&
967                     (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
968                         netif_tx_wake_queue(txq);
969         }
970 }
971
972 #ifdef BCM_CNIC
973 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
974 #endif
975
976 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
977                            union eth_rx_cqe *rr_cqe)
978 {
979         struct bnx2x *bp = fp->bp;
980         int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
981         int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
982
983         DP(BNX2X_MSG_SP,
984            "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
985            fp->index, cid, command, bp->state,
986            rr_cqe->ramrod_cqe.ramrod_type);
987
988         bp->spq_left++;
989
990         if (fp->index) {
991                 switch (command | fp->state) {
992                 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
993                                                 BNX2X_FP_STATE_OPENING):
994                         DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
995                            cid);
996                         fp->state = BNX2X_FP_STATE_OPEN;
997                         break;
998
999                 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
1000                         DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
1001                            cid);
1002                         fp->state = BNX2X_FP_STATE_HALTED;
1003                         break;
1004
1005                 default:
1006                         BNX2X_ERR("unexpected MC reply (%d)  "
1007                                   "fp->state is %x\n", command, fp->state);
1008                         break;
1009                 }
1010                 mb(); /* force bnx2x_wait_ramrod() to see the change */
1011                 return;
1012         }
1013
1014         switch (command | bp->state) {
1015         case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
1016                 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
1017                 bp->state = BNX2X_STATE_OPEN;
1018                 break;
1019
1020         case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
1021                 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
1022                 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
1023                 fp->state = BNX2X_FP_STATE_HALTED;
1024                 break;
1025
1026         case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
1027                 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
1028                 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
1029                 break;
1030
1031 #ifdef BCM_CNIC
1032         case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
1033                 DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
1034                 bnx2x_cnic_cfc_comp(bp, cid);
1035                 break;
1036 #endif
1037
1038         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
1039         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
1040                 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
1041                 bp->set_mac_pending--;
1042                 smp_wmb();
1043                 break;
1044
1045         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
1046         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED):
1047                 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
1048                 bp->set_mac_pending--;
1049                 smp_wmb();
1050                 break;
1051
1052         default:
1053                 BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
1054                           command, bp->state);
1055                 break;
1056         }
1057         mb(); /* force bnx2x_wait_ramrod() to see the change */
1058 }
1059
1060 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1061                                      struct bnx2x_fastpath *fp, u16 index)
1062 {
1063         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1064         struct page *page = sw_buf->page;
1065         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1066
1067         /* Skip "next page" elements */
1068         if (!page)
1069                 return;
1070
1071         pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
1072                        SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1073         __free_pages(page, PAGES_PER_SGE_SHIFT);
1074
1075         sw_buf->page = NULL;
1076         sge->addr_hi = 0;
1077         sge->addr_lo = 0;
1078 }
1079
1080 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1081                                            struct bnx2x_fastpath *fp, int last)
1082 {
1083         int i;
1084
1085         for (i = 0; i < last; i++)
1086                 bnx2x_free_rx_sge(bp, fp, i);
1087 }
1088
1089 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1090                                      struct bnx2x_fastpath *fp, u16 index)
1091 {
1092         struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1093         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1094         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1095         dma_addr_t mapping;
1096
1097         if (unlikely(page == NULL))
1098                 return -ENOMEM;
1099
1100         mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
1101                                PCI_DMA_FROMDEVICE);
1102         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1103                 __free_pages(page, PAGES_PER_SGE_SHIFT);
1104                 return -ENOMEM;
1105         }
1106
1107         sw_buf->page = page;
1108         pci_unmap_addr_set(sw_buf, mapping, mapping);
1109
1110         sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1111         sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1112
1113         return 0;
1114 }
1115
1116 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1117                                      struct bnx2x_fastpath *fp, u16 index)
1118 {
1119         struct sk_buff *skb;
1120         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1121         struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1122         dma_addr_t mapping;
1123
1124         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1125         if (unlikely(skb == NULL))
1126                 return -ENOMEM;
1127
1128         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
1129                                  PCI_DMA_FROMDEVICE);
1130         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1131                 dev_kfree_skb(skb);
1132                 return -ENOMEM;
1133         }
1134
1135         rx_buf->skb = skb;
1136         pci_unmap_addr_set(rx_buf, mapping, mapping);
1137
1138         rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1139         rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1140
1141         return 0;
1142 }
1143
1144 /* note that we are not allocating a new skb,
1145  * we are just moving one from cons to prod
1146  * we are not creating a new mapping,
1147  * so there is no need to check for dma_mapping_error().
1148  */
1149 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1150                                struct sk_buff *skb, u16 cons, u16 prod)
1151 {
1152         struct bnx2x *bp = fp->bp;
1153         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1154         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1155         struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1156         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1157
1158         pci_dma_sync_single_for_device(bp->pdev,
1159                                        pci_unmap_addr(cons_rx_buf, mapping),
1160                                        RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1161
1162         prod_rx_buf->skb = cons_rx_buf->skb;
1163         pci_unmap_addr_set(prod_rx_buf, mapping,
1164                            pci_unmap_addr(cons_rx_buf, mapping));
1165         *prod_bd = *cons_bd;
1166 }
1167
1168 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1169                                              u16 idx)
1170 {
1171         u16 last_max = fp->last_max_sge;
1172
1173         if (SUB_S16(idx, last_max) > 0)
1174                 fp->last_max_sge = idx;
1175 }
1176
1177 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1178 {
1179         int i, j;
1180
1181         for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1182                 int idx = RX_SGE_CNT * i - 1;
1183
1184                 for (j = 0; j < 2; j++) {
1185                         SGE_MASK_CLEAR_BIT(fp, idx);
1186                         idx--;
1187                 }
1188         }
1189 }
1190
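/*
 * After an aggregated packet completes, clear the mask bits of the SGEs it
 * consumed, remember the highest SGE index seen so far, and advance
 * rx_sge_prod over every mask element (one u64 worth of SGE bits) that has
 * been fully consumed, so the firmware can place new SGEs there.
 */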
1191 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1192                                   struct eth_fast_path_rx_cqe *fp_cqe)
1193 {
1194         struct bnx2x *bp = fp->bp;
1195         u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1196                                      le16_to_cpu(fp_cqe->len_on_bd)) >>
1197                       SGE_PAGE_SHIFT;
1198         u16 last_max, last_elem, first_elem;
1199         u16 delta = 0;
1200         u16 i;
1201
1202         if (!sge_len)
1203                 return;
1204
1205         /* First mark all used pages */
1206         for (i = 0; i < sge_len; i++)
1207                 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1208
1209         DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1210            sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1211
1212         /* Here we assume that the last SGE index is the biggest */
1213         prefetch((void *)(fp->sge_mask));
1214         bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1215
1216         last_max = RX_SGE(fp->last_max_sge);
1217         last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1218         first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1219
1220         /* If ring is not full */
1221         if (last_elem + 1 != first_elem)
1222                 last_elem++;
1223
1224         /* Now update the prod */
1225         for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1226                 if (likely(fp->sge_mask[i]))
1227                         break;
1228
1229                 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1230                 delta += RX_SGE_MASK_ELEM_SZ;
1231         }
1232
1233         if (delta > 0) {
1234                 fp->rx_sge_prod += delta;
1235                 /* clear page-end entries */
1236                 bnx2x_clear_sge_mask_next_elems(fp);
1237         }
1238
1239         DP(NETIF_MSG_RX_STATUS,
1240            "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
1241            fp->last_max_sge, fp->rx_sge_prod);
1242 }
1243
1244 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1245 {
1246         /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1247         memset(fp->sge_mask, 0xff,
1248                (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1249
1250         /* Clear the two last indices in the page to 1:
1251            these are the indices that correspond to the "next" element,
1252            hence will never be indicated and should be removed from
1253            the calculations. */
1254         bnx2x_clear_sge_mask_next_elems(fp);
1255 }
1256
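/*
 * Start of a TPA (LRO) aggregation: the spare skb parked in
 * tpa_pool[queue] is mapped and placed at the producer BD, while the
 * just-received buffer at the consumer is moved into the pool (still
 * mapped) to accumulate the aggregation until bnx2x_tpa_stop().
 */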
1257 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1258                             struct sk_buff *skb, u16 cons, u16 prod)
1259 {
1260         struct bnx2x *bp = fp->bp;
1261         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1262         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1263         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1264         dma_addr_t mapping;
1265
1266         /* move empty skb from pool to prod and map it */
1267         prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1268         mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1269                                  bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1270         pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1271
1272         /* move partial skb from cons to pool (don't unmap yet) */
1273         fp->tpa_pool[queue] = *cons_rx_buf;
1274
1275         /* mark bin state as start - print error if current state != stop */
1276         if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1277                 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1278
1279         fp->tpa_state[queue] = BNX2X_TPA_START;
1280
1281         /* point prod_bd to new skb */
1282         prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1283         prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1284
1285 #ifdef BNX2X_STOP_ON_ERROR
1286         fp->tpa_queue_used |= (1 << queue);
1287 #ifdef __powerpc64__
1288         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1289 #else
1290         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1291 #endif
1292            fp->tpa_queue_used);
1293 #endif
1294 }
1295
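/*
 * Attach the SGE pages listed in the fast-path CQE to the aggregated skb
 * as page fragments.  Each ring slot gets a freshly allocated replacement
 * page before its old page is handed to the stack; gso_size is set
 * (capped at SGE_PAGE_SIZE) so the packet can be resegmented if forwarded.
 */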
1296 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1297                                struct sk_buff *skb,
1298                                struct eth_fast_path_rx_cqe *fp_cqe,
1299                                u16 cqe_idx)
1300 {
1301         struct sw_rx_page *rx_pg, old_rx_pg;
1302         u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1303         u32 i, frag_len, frag_size, pages;
1304         int err;
1305         int j;
1306
1307         frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1308         pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1309
1310         /* This is needed in order to enable forwarding support */
1311         if (frag_size)
1312                 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1313                                                max(frag_size, (u32)len_on_bd));
1314
1315 #ifdef BNX2X_STOP_ON_ERROR
1316         if (pages >
1317             min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
1318                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1319                           pages, cqe_idx);
1320                 BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
1321                           fp_cqe->pkt_len, len_on_bd);
1322                 bnx2x_panic();
1323                 return -EINVAL;
1324         }
1325 #endif
1326
1327         /* Run through the SGL and compose the fragmented skb */
1328         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1329                 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1330
1331                 /* FW gives the indices of the SGE as if the ring is an array
1332                    (meaning that "next" element will consume 2 indices) */
1333                 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1334                 rx_pg = &fp->rx_page_ring[sge_idx];
1335                 old_rx_pg = *rx_pg;
1336
1337                 /* If we fail to allocate a substitute page, we simply stop
1338                    where we are and drop the whole packet */
1339                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1340                 if (unlikely(err)) {
1341                         fp->eth_q_stats.rx_skb_alloc_failed++;
1342                         return err;
1343                 }
1344
1345                 /* Unmap the page as we are going to pass it to the stack */
1346                 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1347                               SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1348
1349                 /* Add one frag and update the appropriate fields in the skb */
1350                 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1351
1352                 skb->data_len += frag_len;
1353                 skb->truesize += frag_len;
1354                 skb->len += frag_len;
1355
1356                 frag_size -= frag_len;
1357         }
1358
1359         return 0;
1360 }
1361
1362 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1363                            u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1364                            u16 cqe_idx)
1365 {
1366         struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1367         struct sk_buff *skb = rx_buf->skb;
1368         /* alloc new skb */
1369         struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1370
1371         /* Unmap skb in the pool anyway, as we are going to change
1372            pool entry status to BNX2X_TPA_STOP even if new skb allocation
1373            fails. */
1374         pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1375                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1376
1377         if (likely(new_skb)) {
1378                 /* fix ip xsum and give it to the stack */
1379                 /* (no need to map the new skb) */
1380 #ifdef BCM_VLAN
1381                 int is_vlan_cqe =
1382                         (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1383                          PARSING_FLAGS_VLAN);
1384                 int is_not_hwaccel_vlan_cqe =
1385                         (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1386 #endif
1387
1388                 prefetch(skb);
1389                 prefetch(((char *)(skb)) + 128);
1390
1391 #ifdef BNX2X_STOP_ON_ERROR
1392                 if (pad + len > bp->rx_buf_size) {
1393                         BNX2X_ERR("skb_put is about to fail...  "
1394                                   "pad %d  len %d  rx_buf_size %d\n",
1395                                   pad, len, bp->rx_buf_size);
1396                         bnx2x_panic();
1397                         return;
1398                 }
1399 #endif
1400
1401                 skb_reserve(skb, pad);
1402                 skb_put(skb, len);
1403
1404                 skb->protocol = eth_type_trans(skb, bp->dev);
1405                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1406
1407                 {
1408                         struct iphdr *iph;
1409
1410                         iph = (struct iphdr *)skb->data;
1411 #ifdef BCM_VLAN
1412                         /* If there is no Rx VLAN offloading -
1413                            take VLAN tag into an account */
1414                         if (unlikely(is_not_hwaccel_vlan_cqe))
1415                                 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1416 #endif
1417                         iph->check = 0;
1418                         iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1419                 }
1420
1421                 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1422                                          &cqe->fast_path_cqe, cqe_idx)) {
1423 #ifdef BCM_VLAN
1424                         if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1425                             (!is_not_hwaccel_vlan_cqe))
1426                                 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1427                                                 le16_to_cpu(cqe->fast_path_cqe.
1428                                                             vlan_tag));
1429                         else
1430 #endif
1431                                 netif_receive_skb(skb);
1432                 } else {
1433                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1434                            " - dropping packet!\n");
1435                         dev_kfree_skb(skb);
1436                 }
1437
1438
1439                 /* put new skb in bin */
1440                 fp->tpa_pool[queue].skb = new_skb;
1441
1442         } else {
1443                 /* else drop the packet and keep the buffer in the bin */
1444                 DP(NETIF_MSG_RX_STATUS,
1445                    "Failed to allocate new skb - dropping packet!\n");
1446                 fp->eth_q_stats.rx_skb_alloc_failed++;
1447         }
1448
1449         fp->tpa_state[queue] = BNX2X_TPA_STOP;
1450 }
1451
1452 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1453                                         struct bnx2x_fastpath *fp,
1454                                         u16 bd_prod, u16 rx_comp_prod,
1455                                         u16 rx_sge_prod)
1456 {
1457         struct ustorm_eth_rx_producers rx_prods = {0};
1458         int i;
1459
1460         /* Update producers */
1461         rx_prods.bd_prod = bd_prod;
1462         rx_prods.cqe_prod = rx_comp_prod;
1463         rx_prods.sge_prod = rx_sge_prod;
1464
1465         /*
1466          * Make sure that the BD and SGE data is updated before updating the
1467          * producers since FW might read the BD/SGE right after the producer
1468          * is updated.
1469          * This is only applicable for weak-ordered memory model archs such
1470          * as IA-64. The following barrier is also mandatory since the FW
1471          * assumes BDs must have buffers.
1472          */
1473         wmb();
1474
1475         for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1476                 REG_WR(bp, BAR_USTRORM_INTMEM +
1477                        USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1478                        ((u32 *)&rx_prods)[i]);
1479
1480         mmiowb(); /* keep prod updates ordered */
1481
1482         DP(NETIF_MSG_RX_STATUS,
1483            "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
1484            fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1485 }
1486
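/*
 * bnx2x_rx_int() does the Rx work for one fastpath (typically from the NAPI
 * poll routine): it walks the completion queue for up to 'budget' packets,
 * dispatching slow-path events, TPA start/stop completions and regular
 * packets, and finally publishes the new BD/CQE/SGE producers to the FW.
 */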
1487 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1488 {
1489         struct bnx2x *bp = fp->bp;
1490         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1491         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1492         int rx_pkt = 0;
1493
1494 #ifdef BNX2X_STOP_ON_ERROR
1495         if (unlikely(bp->panic))
1496                 return 0;
1497 #endif
1498
1499         /* The CQ "next element" has the same size as a regular element,
1500            which is why it is safe to account for it here */
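        /* The last slot of each RCQ page holds the "next page" element;
           if the index reported in the status block points at it, step
           over it so it can be compared with the SW consumer below */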
1501         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1502         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1503                 hw_comp_cons++;
1504
1505         bd_cons = fp->rx_bd_cons;
1506         bd_prod = fp->rx_bd_prod;
1507         bd_prod_fw = bd_prod;
1508         sw_comp_cons = fp->rx_comp_cons;
1509         sw_comp_prod = fp->rx_comp_prod;
1510
1511         /* Memory barrier necessary as speculative reads of the rx
1512          * buffer can be ahead of the index in the status block
1513          */
1514         rmb();
1515
1516         DP(NETIF_MSG_RX_STATUS,
1517            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1518            fp->index, hw_comp_cons, sw_comp_cons);
1519
1520         while (sw_comp_cons != hw_comp_cons) {
1521                 struct sw_rx_bd *rx_buf = NULL;
1522                 struct sk_buff *skb;
1523                 union eth_rx_cqe *cqe;
1524                 u8 cqe_fp_flags;
1525                 u16 len, pad;
1526
1527                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1528                 bd_prod = RX_BD(bd_prod);
1529                 bd_cons = RX_BD(bd_cons);
1530
1531                 /* Prefetch the page containing the BD descriptor
1532                    at the producer's index. It will be needed when a new
1533                    skb is allocated */
1534                 prefetch((void *)(PAGE_ALIGN((unsigned long)
1535                                              (&fp->rx_desc_ring[bd_prod])) -
1536                                   PAGE_SIZE + 1));
1537
1538                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1539                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1540
1541                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1542                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1543                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1544                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1545                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1546                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1547
1548                 /* is this a slowpath msg? */
1549                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1550                         bnx2x_sp_event(fp, cqe);
1551                         goto next_cqe;
1552
1553                 /* this is an rx packet */
1554                 } else {
1555                         rx_buf = &fp->rx_buf_ring[bd_cons];
1556                         skb = rx_buf->skb;
1557                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1558                         pad = cqe->fast_path_cqe.placement_offset;
1559
1560                         /* If CQE is marked both TPA_START and TPA_END
1561                            it is a non-TPA CQE */
1562                         if ((!fp->disable_tpa) &&
1563                             (TPA_TYPE(cqe_fp_flags) !=
1564                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1565                                 u16 queue = cqe->fast_path_cqe.queue_index;
1566
1567                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1568                                         DP(NETIF_MSG_RX_STATUS,
1569                                            "calling tpa_start on queue %d\n",
1570                                            queue);
1571
1572                                         bnx2x_tpa_start(fp, queue, skb,
1573                                                         bd_cons, bd_prod);
1574                                         goto next_rx;
1575                                 }
1576
1577                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1578                                         DP(NETIF_MSG_RX_STATUS,
1579                                            "calling tpa_stop on queue %d\n",
1580                                            queue);
1581
1582                                         if (!BNX2X_RX_SUM_FIX(cqe))
1583                                                 BNX2X_ERR("STOP on non-TCP "
1584                                                           "data\n");
1585
1586                                         /* This is the size of the linear
1587                                            data on this skb */
1588                                         len = le16_to_cpu(cqe->fast_path_cqe.
1589                                                                 len_on_bd);
1590                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1591                                                     len, cqe, comp_ring_cons);
1592 #ifdef BNX2X_STOP_ON_ERROR
1593                                         if (bp->panic)
1594                                                 return 0;
1595 #endif
1596
1597                                         bnx2x_update_sge_prod(fp,
1598                                                         &cqe->fast_path_cqe);
1599                                         goto next_cqe;
1600                                 }
1601                         }
1602
1603                         pci_dma_sync_single_for_device(bp->pdev,
1604                                         pci_unmap_addr(rx_buf, mapping),
1605                                                        pad + RX_COPY_THRESH,
1606                                                        PCI_DMA_FROMDEVICE);
1607                         prefetch(skb);
1608                         prefetch(((char *)(skb)) + 128);
1609
1610                         /* is this an error packet? */
1611                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1612                                 DP(NETIF_MSG_RX_ERR,
1613                                    "ERROR  flags %x  rx packet %u\n",
1614                                    cqe_fp_flags, sw_comp_cons);
1615                                 fp->eth_q_stats.rx_err_discard_pkt++;
1616                                 goto reuse_rx;
1617                         }
1618
1619                         /* Since we don't have a jumbo ring,
1620                          * copy small packets if mtu > 1500
1621                          */
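                        /* A fresh small skb is allocated and the payload
                           copied into it, so the original full-size buffer
                           can simply be recycled back to the ring via
                           bnx2x_reuse_rx_skb() below */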
1622                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1623                             (len <= RX_COPY_THRESH)) {
1624                                 struct sk_buff *new_skb;
1625
1626                                 new_skb = netdev_alloc_skb(bp->dev,
1627                                                            len + pad);
1628                                 if (new_skb == NULL) {
1629                                         DP(NETIF_MSG_RX_ERR,
1630                                            "ERROR  packet dropped "
1631                                            "because of alloc failure\n");
1632                                         fp->eth_q_stats.rx_skb_alloc_failed++;
1633                                         goto reuse_rx;
1634                                 }
1635
1636                                 /* aligned copy */
1637                                 skb_copy_from_linear_data_offset(skb, pad,
1638                                                     new_skb->data + pad, len);
1639                                 skb_reserve(new_skb, pad);
1640                                 skb_put(new_skb, len);
1641
1642                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1643
1644                                 skb = new_skb;
1645
1646                         } else
1647                         if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
1648                                 pci_unmap_single(bp->pdev,
1649                                         pci_unmap_addr(rx_buf, mapping),
1650                                                  bp->rx_buf_size,
1651                                                  PCI_DMA_FROMDEVICE);
1652                                 skb_reserve(skb, pad);
1653                                 skb_put(skb, len);
1654
1655                         } else {
1656                                 DP(NETIF_MSG_RX_ERR,
1657                                    "ERROR  packet dropped because "
1658                                    "of alloc failure\n");
1659                                 fp->eth_q_stats.rx_skb_alloc_failed++;
1660 reuse_rx:
1661                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1662                                 goto next_rx;
1663                         }
1664
1665                         skb->protocol = eth_type_trans(skb, bp->dev);
1666
1667                         skb->ip_summed = CHECKSUM_NONE;
1668                         if (bp->rx_csum) {
1669                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1670                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1671                                 else
1672                                         fp->eth_q_stats.hw_csum_err++;
1673                         }
1674                 }
1675
1676                 skb_record_rx_queue(skb, fp->index);
1677
1678 #ifdef BCM_VLAN
1679                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1680                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1681                      PARSING_FLAGS_VLAN))
1682                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1683                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1684                 else
1685 #endif
1686                         netif_receive_skb(skb);
1687
1688
1689 next_rx:
1690                 rx_buf->skb = NULL;
1691
1692                 bd_cons = NEXT_RX_IDX(bd_cons);
1693                 bd_prod = NEXT_RX_IDX(bd_prod);
1694                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1695                 rx_pkt++;
1696 next_cqe:
1697                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1698                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1699
1700                 if (rx_pkt == budget)
1701                         break;
1702         } /* while */
1703
1704         fp->rx_bd_cons = bd_cons;
1705         fp->rx_bd_prod = bd_prod_fw;
1706         fp->rx_comp_cons = sw_comp_cons;
1707         fp->rx_comp_prod = sw_comp_prod;
1708
1709         /* Update producers */
1710         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1711                              fp->rx_sge_prod);
1712
1713         fp->rx_pkt += rx_pkt;
1714         fp->rx_calls++;
1715
1716         return rx_pkt;
1717 }
1718
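/*
 * MSI-X fastpath interrupt handler: each vector serves a single fastpath.
 * Rx queues are handled by scheduling NAPI; Tx queues are cleaned directly
 * here and their status block is then re-armed (IGU_INT_ENABLE).
 */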
1719 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1720 {
1721         struct bnx2x_fastpath *fp = fp_cookie;
1722         struct bnx2x *bp = fp->bp;
1723
1724         /* Return here if interrupt is disabled */
1725         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1726                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1727                 return IRQ_HANDLED;
1728         }
1729
1730         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1731            fp->index, fp->sb_id);
1732         bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1733
1734 #ifdef BNX2X_STOP_ON_ERROR
1735         if (unlikely(bp->panic))
1736                 return IRQ_HANDLED;
1737 #endif
1738         /* Handle Rx or Tx according to MSI-X vector */
1739         if (fp->is_rx_queue) {
1740                 prefetch(fp->rx_cons_sb);
1741                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1742
1743                 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1744
1745         } else {
1746                 prefetch(fp->tx_cons_sb);
1747                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1748
1749                 bnx2x_update_fpsb_idx(fp);
1750                 rmb();
1751                 bnx2x_tx_int(fp);
1752
1753                 /* Re-enable interrupts */
1754                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1755                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
1756                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1757                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
1758         }
1759
1760         return IRQ_HANDLED;
1761 }
1762
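/*
 * Single-vector (INTx/MSI) interrupt handler: one interrupt covers all the
 * fastpath queues and the slow path. bnx2x_ack_int() returns a bit-mask of
 * pending status blocks which is scanned below.
 */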
1763 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1764 {
1765         struct bnx2x *bp = netdev_priv(dev_instance);
1766         u16 status = bnx2x_ack_int(bp);
1767         u16 mask;
1768         int i;
1769
1770         /* Return here if interrupt is shared and it's not for us */
1771         if (unlikely(status == 0)) {
1772                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1773                 return IRQ_NONE;
1774         }
1775         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1776
1777         /* Return here if interrupt is disabled */
1778         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1779                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1780                 return IRQ_HANDLED;
1781         }
1782
1783 #ifdef BNX2X_STOP_ON_ERROR
1784         if (unlikely(bp->panic))
1785                 return IRQ_HANDLED;
1786 #endif
1787
1788         for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1789                 struct bnx2x_fastpath *fp = &bp->fp[i];
1790
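                /* Bit 0 of the status word belongs to the default (slow
                   path) status block; each fastpath SB contributes one bit
                   starting at bit 1, hence the shift below */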
1791                 mask = 0x2 << fp->sb_id;
1792                 if (status & mask) {
1793                         /* Handle Rx or Tx according to SB id */
1794                         if (fp->is_rx_queue) {
1795                                 prefetch(fp->rx_cons_sb);
1796                                 prefetch(&fp->status_blk->u_status_block.
1797                                                         status_block_index);
1798
1799                                 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1800
1801                         } else {
1802                                 prefetch(fp->tx_cons_sb);
1803                                 prefetch(&fp->status_blk->c_status_block.
1804                                                         status_block_index);
1805
1806                                 bnx2x_update_fpsb_idx(fp);
1807                                 rmb();
1808                                 bnx2x_tx_int(fp);
1809
1810                                 /* Re-enable interrupts */
1811                                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1812                                              le16_to_cpu(fp->fp_u_idx),
1813                                              IGU_INT_NOP, 1);
1814                                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1815                                              le16_to_cpu(fp->fp_c_idx),
1816                                              IGU_INT_ENABLE, 1);
1817                         }
1818                         status &= ~mask;
1819                 }
1820         }
1821
1822 #ifdef BCM_CNIC
1823         mask = 0x2 << CNIC_SB_ID(bp);
1824         if (status & (mask | 0x1)) {
1825                 struct cnic_ops *c_ops = NULL;
1826
1827                 rcu_read_lock();
1828                 c_ops = rcu_dereference(bp->cnic_ops);
1829                 if (c_ops)
1830                         c_ops->cnic_handler(bp->cnic_data, NULL);
1831                 rcu_read_unlock();
1832
1833                 status &= ~mask;
1834         }
1835 #endif
1836
1837         if (unlikely(status & 0x1)) {
1838                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1839
1840                 status &= ~0x1;
1841                 if (!status)
1842                         return IRQ_HANDLED;
1843         }
1844
1845         if (status)
1846                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1847                    status);
1848
1849         return IRQ_HANDLED;
1850 }
1851
1852 /* end of fast path */
1853
1854 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1855
1856 /* Link */
1857
1858 /*
1859  * General service functions
1860  */
1861
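/*
 * The HW lock is a per-resource bit in one of the MISC driver-control
 * registers: writing the resource bit to (control_reg + 4) requests the
 * lock, and reading the control register back shows whether it was granted.
 * The loop below retries for roughly 5 seconds.
 */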
1862 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1863 {
1864         u32 lock_status;
1865         u32 resource_bit = (1 << resource);
1866         int func = BP_FUNC(bp);
1867         u32 hw_lock_control_reg;
1868         int cnt;
1869
1870         /* Validating that the resource is within range */
1871         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1872                 DP(NETIF_MSG_HW,
1873                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1874                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1875                 return -EINVAL;
1876         }
1877
1878         if (func <= 5) {
1879                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1880         } else {
1881                 hw_lock_control_reg =
1882                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1883         }
1884
1885         /* Validating that the resource is not already taken */
1886         lock_status = REG_RD(bp, hw_lock_control_reg);
1887         if (lock_status & resource_bit) {
1888                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1889                    lock_status, resource_bit);
1890                 return -EEXIST;
1891         }
1892
1893         /* Try for 5 seconds, polling every 5ms */
1894         for (cnt = 0; cnt < 1000; cnt++) {
1895                 /* Try to acquire the lock */
1896                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1897                 lock_status = REG_RD(bp, hw_lock_control_reg);
1898                 if (lock_status & resource_bit)
1899                         return 0;
1900
1901                 msleep(5);
1902         }
1903         DP(NETIF_MSG_HW, "Timeout\n");
1904         return -EAGAIN;
1905 }
1906
1907 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1908 {
1909         u32 lock_status;
1910         u32 resource_bit = (1 << resource);
1911         int func = BP_FUNC(bp);
1912         u32 hw_lock_control_reg;
1913
1914         /* Validating that the resource is within range */
1915         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1916                 DP(NETIF_MSG_HW,
1917                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1918                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1919                 return -EINVAL;
1920         }
1921
1922         if (func <= 5) {
1923                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1924         } else {
1925                 hw_lock_control_reg =
1926                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1927         }
1928
1929         /* Validating that the resource is currently taken */
1930         lock_status = REG_RD(bp, hw_lock_control_reg);
1931         if (!(lock_status & resource_bit)) {
1932                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1933                    lock_status, resource_bit);
1934                 return -EFAULT;
1935         }
1936
1937         REG_WR(bp, hw_lock_control_reg, resource_bit);
1938         return 0;
1939 }
1940
1941 /* HW Lock for shared dual port PHYs */
1942 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1943 {
1944         mutex_lock(&bp->port.phy_mutex);
1945
1946         if (bp->port.need_hw_lock)
1947                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1948 }
1949
1950 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1951 {
1952         if (bp->port.need_hw_lock)
1953                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1954
1955         mutex_unlock(&bp->port.phy_mutex);
1956 }
1957
1958 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1959 {
1960         /* The GPIO should be swapped if swap register is set and active */
1961         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1962                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1963         int gpio_shift = gpio_num +
1964                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1965         u32 gpio_mask = (1 << gpio_shift);
1966         u32 gpio_reg;
1967         int value;
1968
1969         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1970                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1971                 return -EINVAL;
1972         }
1973
1974         /* read GPIO value */
1975         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1976
1977         /* get the requested pin value */
1978         if ((gpio_reg & gpio_mask) == gpio_mask)
1979                 value = 1;
1980         else
1981                 value = 0;
1982
1983         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
1984
1985         return value;
1986 }
1987
1988 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1989 {
1990         /* The GPIO should be swapped if swap register is set and active */
1991         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1992                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1993         int gpio_shift = gpio_num +
1994                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1995         u32 gpio_mask = (1 << gpio_shift);
1996         u32 gpio_reg;
1997
1998         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1999                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2000                 return -EINVAL;
2001         }
2002
2003         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2004         /* read GPIO and mask except the float bits */
2005         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
2006
2007         switch (mode) {
2008         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2009                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
2010                    gpio_num, gpio_shift);
2011                 /* clear FLOAT and set CLR */
2012                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2013                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2014                 break;
2015
2016         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2017                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
2018                    gpio_num, gpio_shift);
2019                 /* clear FLOAT and set SET */
2020                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2021                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2022                 break;
2023
2024         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2025                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2026                    gpio_num, gpio_shift);
2027                 /* set FLOAT */
2028                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2029                 break;
2030
2031         default:
2032                 break;
2033         }
2034
2035         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2036         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2037
2038         return 0;
2039 }
2040
2041 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2042 {
2043         /* The GPIO should be swapped if swap register is set and active */
2044         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2045                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2046         int gpio_shift = gpio_num +
2047                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2048         u32 gpio_mask = (1 << gpio_shift);
2049         u32 gpio_reg;
2050
2051         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2052                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2053                 return -EINVAL;
2054         }
2055
2056         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2057         /* read GPIO int */
2058         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2059
2060         switch (mode) {
2061         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2062                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2063                                    "output low\n", gpio_num, gpio_shift);
2064                 /* clear SET and set CLR */
2065                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2066                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2067                 break;
2068
2069         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2070                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2071                                    "output high\n", gpio_num, gpio_shift);
2072                 /* clear CLR and set SET */
2073                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2074                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2075                 break;
2076
2077         default:
2078                 break;
2079         }
2080
2081         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2082         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2083
2084         return 0;
2085 }
2086
2087 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2088 {
2089         u32 spio_mask = (1 << spio_num);
2090         u32 spio_reg;
2091
2092         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2093             (spio_num > MISC_REGISTERS_SPIO_7)) {
2094                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2095                 return -EINVAL;
2096         }
2097
2098         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2099         /* read SPIO and mask except the float bits */
2100         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2101
2102         switch (mode) {
2103         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2104                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2105                 /* clear FLOAT and set CLR */
2106                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2107                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2108                 break;
2109
2110         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2111                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2112                 /* clear FLOAT and set SET */
2113                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2114                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2115                 break;
2116
2117         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2118                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2119                 /* set FLOAT */
2120                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2121                 break;
2122
2123         default:
2124                 break;
2125         }
2126
2127         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2128         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2129
2130         return 0;
2131 }
2132
2133 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2134 {
2135         switch (bp->link_vars.ieee_fc &
2136                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2137         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2138                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2139                                           ADVERTISED_Pause);
2140                 break;
2141
2142         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2143                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2144                                          ADVERTISED_Pause);
2145                 break;
2146
2147         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2148                 bp->port.advertising |= ADVERTISED_Asym_Pause;
2149                 break;
2150
2151         default:
2152                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2153                                           ADVERTISED_Pause);
2154                 break;
2155         }
2156 }
2157
2158 static void bnx2x_link_report(struct bnx2x *bp)
2159 {
2160         if (bp->state == BNX2X_STATE_DISABLED) {
2161                 netif_carrier_off(bp->dev);
2162                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2163                 return;
2164         }
2165
2166         if (bp->link_vars.link_up) {
2167                 if (bp->state == BNX2X_STATE_OPEN)
2168                         netif_carrier_on(bp->dev);
2169                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2170
2171                 printk("%d Mbps ", bp->link_vars.line_speed);
2172
2173                 if (bp->link_vars.duplex == DUPLEX_FULL)
2174                         printk("full duplex");
2175                 else
2176                         printk("half duplex");
2177
2178                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2179                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2180                                 printk(", receive ");
2181                                 if (bp->link_vars.flow_ctrl &
2182                                     BNX2X_FLOW_CTRL_TX)
2183                                         printk("& transmit ");
2184                         } else {
2185                                 printk(", transmit ");
2186                         }
2187                         printk("flow control ON");
2188                 }
2189                 printk("\n");
2190
2191         } else { /* link_down */
2192                 netif_carrier_off(bp->dev);
2193                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2194         }
2195 }
2196
2197 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2198 {
2199         if (!BP_NOMCP(bp)) {
2200                 u8 rc;
2201
2202                 /* Initialize link parameters structure variables */
2203                 /* It is recommended to turn off RX FC for jumbo frames
2204                    for better performance */
2205                 if (bp->dev->mtu > 5000)
2206                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2207                 else
2208                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2209
2210                 bnx2x_acquire_phy_lock(bp);
2211
2212                 if (load_mode == LOAD_DIAG)
2213                         bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2214
2215                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2216
2217                 bnx2x_release_phy_lock(bp);
2218
2219                 bnx2x_calc_fc_adv(bp);
2220
2221                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2222                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2223                         bnx2x_link_report(bp);
2224                 }
2225
2226                 return rc;
2227         }
2228         BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2229         return -EINVAL;
2230 }
2231
2232 static void bnx2x_link_set(struct bnx2x *bp)
2233 {
2234         if (!BP_NOMCP(bp)) {
2235                 bnx2x_acquire_phy_lock(bp);
2236                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2237                 bnx2x_release_phy_lock(bp);
2238
2239                 bnx2x_calc_fc_adv(bp);
2240         } else
2241                 BNX2X_ERR("Bootcode is missing - can not set link\n");
2242 }
2243
2244 static void bnx2x__link_reset(struct bnx2x *bp)
2245 {
2246         if (!BP_NOMCP(bp)) {
2247                 bnx2x_acquire_phy_lock(bp);
2248                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2249                 bnx2x_release_phy_lock(bp);
2250         } else
2251                 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2252 }
2253
2254 static u8 bnx2x_link_test(struct bnx2x *bp)
2255 {
2256         u8 rc;
2257
2258         bnx2x_acquire_phy_lock(bp);
2259         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2260         bnx2x_release_phy_lock(bp);
2261
2262         return rc;
2263 }
2264
2265 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2266 {
2267         u32 r_param = bp->link_vars.line_speed / 8;
2268         u32 fair_periodic_timeout_usec;
2269         u32 t_fair;
2270
2271         memset(&(bp->cmng.rs_vars), 0,
2272                sizeof(struct rate_shaping_vars_per_port));
2273         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2274
2275         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2276         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2277
2278         /* this is the threshold below which no timer arming will occur.
2279            The 1.25 coefficient makes the threshold a little bigger than
2280            the real time, to compensate for timer inaccuracy */
2281         bp->cmng.rs_vars.rs_threshold =
2282                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
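        /* RS_PERIODIC_TIMEOUT_USEC * r_param is the number of bytes expected
           in one period at line rate (r_param is bytes per usec, since
           line_speed is in Mbps); the 5/4 factor is the 1.25 margin above */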
2283
2284         /* resolution of fairness timer */
2285         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
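        /* i.e. the time (in usec) needed to transmit QM_ARB_BYTES at line rate */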
2286         /* for 10G it is 1000 usec; for 1G it is 10000 usec */
2287         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2288
2289         /* this is the threshold below which we won't arm the timer anymore */
2290         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2291
2292         /* we multiply by 1e3/8 to get bytes/msec.
2293            We don't want the credits to exceed a credit
2294            of t_fair*FAIR_MEM (the algorithm resolution) */
2295         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2296         /* since each tick is 4 usec */
2297         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2298 }
2299
2300 /* Calculates the sum of vn_min_rates.
2301    It's needed for further normalizing of the min_rates.
2302    Returns:
2303      sum of vn_min_rates.
2304        or
2305      0 - if all the min_rates are 0.
2306      In the latter case the fairness algorithm should be deactivated.
2307      If not all min_rates are zero then those that are zero will be set to 1.
2308  */
2309 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2310 {
2311         int all_zero = 1;
2312         int port = BP_PORT(bp);
2313         int vn;
2314
2315         bp->vn_weight_sum = 0;
2316         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2317                 int func = 2*vn + port;
2318                 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2319                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2320                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2321
2322                 /* Skip hidden vns */
2323                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2324                         continue;
2325
2326                 /* If min rate is zero - set it to 1 */
2327                 if (!vn_min_rate)
2328                         vn_min_rate = DEF_MIN_RATE;
2329                 else
2330                         all_zero = 0;
2331
2332                 bp->vn_weight_sum += vn_min_rate;
2333         }
2334
2335         /* ... only if all min rates are zeros - disable fairness */
2336         if (all_zero)
2337                 bp->vn_weight_sum = 0;
2338 }
2339
2340 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2341 {
2342         struct rate_shaping_vars_per_vn m_rs_vn;
2343         struct fairness_vars_per_vn m_fair_vn;
2344         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2345         u16 vn_min_rate, vn_max_rate;
2346         int i;
2347
2348         /* If function is hidden - set min and max to zeroes */
2349         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2350                 vn_min_rate = 0;
2351                 vn_max_rate = 0;
2352
2353         } else {
2354                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2355                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2356                 /* If fairness is enabled (not all min rates are zeroes) and
2357                    if current min rate is zero - set it to 1.
2358                    This is a requirement of the algorithm. */
2359                 if (bp->vn_weight_sum && (vn_min_rate == 0))
2360                         vn_min_rate = DEF_MIN_RATE;
2361                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2362                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2363         }
2364
2365         DP(NETIF_MSG_IFUP,
2366            "func %d: vn_min_rate=%d  vn_max_rate=%d  vn_weight_sum=%d\n",
2367            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2368
2369         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2370         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2371
2372         /* global vn counter - maximal Mbps for this vn */
2373         m_rs_vn.vn_counter.rate = vn_max_rate;
2374
2375         /* quota - number of bytes transmitted in this period */
2376         m_rs_vn.vn_counter.quota =
2377                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2378
2379         if (bp->vn_weight_sum) {
2380                 /* credit for each period of the fairness algorithm:
2381                    number of bytes in T_FAIR (the VNs share the port rate).
2382                    vn_weight_sum should not be larger than 10000, thus
2383                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2384                    than zero */
2385                 m_fair_vn.vn_credit_delta =
2386                         max((u32)(vn_min_rate * (T_FAIR_COEF /
2387                                                  (8 * bp->vn_weight_sum))),
2388                             (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2389                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2390                    m_fair_vn.vn_credit_delta);
2391         }
2392
2393         /* Store it to internal memory */
2394         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2395                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2396                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2397                        ((u32 *)(&m_rs_vn))[i]);
2398
2399         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2400                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2401                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2402                        ((u32 *)(&m_fair_vn))[i]);
2403 }
2404
2405
2406 /* This function is called upon link interrupt */
2407 static void bnx2x_link_attn(struct bnx2x *bp)
2408 {
2409         /* Make sure that we are synced with the current statistics */
2410         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2411
2412         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2413
2414         if (bp->link_vars.link_up) {
2415
2416                 /* dropless flow control */
2417                 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2418                         int port = BP_PORT(bp);
2419                         u32 pause_enabled = 0;
2420
2421                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2422                                 pause_enabled = 1;
2423
2424                         REG_WR(bp, BAR_USTRORM_INTMEM +
2425                                USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2426                                pause_enabled);
2427                 }
2428
2429                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2430                         struct host_port_stats *pstats;
2431
2432                         pstats = bnx2x_sp(bp, port_stats);
2433                         /* reset old bmac stats */
2434                         memset(&(pstats->mac_stx[0]), 0,
2435                                sizeof(struct mac_stx));
2436                 }
2437                 if ((bp->state == BNX2X_STATE_OPEN) ||
2438                     (bp->state == BNX2X_STATE_DISABLED))
2439                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2440         }
2441
2442         /* indicate link status */
2443         bnx2x_link_report(bp);
2444
2445         if (IS_E1HMF(bp)) {
2446                 int port = BP_PORT(bp);
2447                 int func;
2448                 int vn;
2449
2450                 /* Set the attention towards other drivers on the same port */
2451                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2452                         if (vn == BP_E1HVN(bp))
2453                                 continue;
2454
2455                         func = ((vn << 1) | port);
2456                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2457                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2458                 }
2459
2460                 if (bp->link_vars.link_up) {
2461                         int i;
2462
2463                         /* Init rate shaping and fairness contexts */
2464                         bnx2x_init_port_minmax(bp);
2465
2466                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2467                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2468
2469                         /* Store it to internal memory */
2470                         for (i = 0;
2471                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
2472                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2473                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2474                                        ((u32 *)(&bp->cmng))[i]);
2475                 }
2476         }
2477 }
2478
2479 static void bnx2x__link_status_update(struct bnx2x *bp)
2480 {
2481         int func = BP_FUNC(bp);
2482
2483         if (bp->state != BNX2X_STATE_OPEN)
2484                 return;
2485
2486         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2487
2488         if (bp->link_vars.link_up)
2489                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2490         else
2491                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2492
2493         bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2494         bnx2x_calc_vn_weight_sum(bp);
2495
2496         /* indicate link status */
2497         bnx2x_link_report(bp);
2498 }
2499
2500 static void bnx2x_pmf_update(struct bnx2x *bp)
2501 {
2502         int port = BP_PORT(bp);
2503         u32 val;
2504
2505         bp->port.pmf = 1;
2506         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2507
2508         /* enable nig attention */
2509         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2510         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2511         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2512
2513         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2514 }
2515
2516 /* end of Link */
2517
2518 /* slow path */
2519
2520 /*
2521  * General service functions
2522  */
2523
2524 /* send the MCP a request, block until there is a reply */
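/*
 * The MCP handshake uses a per-function sequence number: the command is
 * written to drv_mb_header together with the new sequence, and fw_mb_header
 * is polled until the firmware echoes that sequence back. On success only
 * the FW_MSG_CODE part of the reply is returned; on timeout the FW state is
 * dumped and 0 is returned.
 */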
2525 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2526 {
2527         int func = BP_FUNC(bp);
2528         u32 seq = ++bp->fw_seq;
2529         u32 rc = 0;
2530         u32 cnt = 1;
2531         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2532
2533         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2534         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2535
2536         do {
2537                 /* let the FW do its magic ... */
2538                 msleep(delay);
2539
2540                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2541
2542                 /* Give the FW up to 2 seconds (200*10ms) */
2543         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
2544
2545         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2546            cnt*delay, rc, seq);
2547
2548         /* is this a reply to our command? */
2549         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2550                 rc &= FW_MSG_CODE_MASK;
2551         else {
2552                 /* FW BUG! */
2553                 BNX2X_ERR("FW failed to respond!\n");
2554                 bnx2x_fw_dump(bp);
2555                 rc = 0;
2556         }
2557
2558         return rc;
2559 }
2560
2561 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2562 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2563 static void bnx2x_set_rx_mode(struct net_device *dev);
2564
2565 static void bnx2x_e1h_disable(struct bnx2x *bp)
2566 {
2567         int port = BP_PORT(bp);
2568         int i;
2569
2570         bp->rx_mode = BNX2X_RX_MODE_NONE;
2571         bnx2x_set_storm_rx_mode(bp);
2572
2573         netif_tx_disable(bp->dev);
2574         bp->dev->trans_start = jiffies; /* prevent tx timeout */
2575
2576         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2577
2578         bnx2x_set_eth_mac_addr_e1h(bp, 0);
2579
2580         for (i = 0; i < MC_HASH_SIZE; i++)
2581                 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
2582
2583         netif_carrier_off(bp->dev);
2584 }
2585
2586 static void bnx2x_e1h_enable(struct bnx2x *bp)
2587 {
2588         int port = BP_PORT(bp);
2589
2590         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2591
2592         bnx2x_set_eth_mac_addr_e1h(bp, 1);
2593
2594         /* Tx queues should only be re-enabled */
2595         netif_tx_wake_all_queues(bp->dev);
2596
2597         /* Initialize the receive filter. */
2598         bnx2x_set_rx_mode(bp->dev);
2599 }
2600
2601 static void bnx2x_update_min_max(struct bnx2x *bp)
2602 {
2603         int port = BP_PORT(bp);
2604         int vn, i;
2605
2606         /* Init rate shaping and fairness contexts */
2607         bnx2x_init_port_minmax(bp);
2608
2609         bnx2x_calc_vn_weight_sum(bp);
2610
2611         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2612                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2613
2614         if (bp->port.pmf) {
2615                 int func;
2616
2617                 /* Set the attention towards other drivers on the same port */
2618                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2619                         if (vn == BP_E1HVN(bp))
2620                                 continue;
2621
2622                         func = ((vn << 1) | port);
2623                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2624                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2625                 }
2626
2627                 /* Store it to internal memory */
2628                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2629                         REG_WR(bp, BAR_XSTRORM_INTMEM +
2630                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2631                                ((u32 *)(&bp->cmng))[i]);
2632         }
2633 }
2634
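/*
 * Handle a DCC event from the MCP: the PF may be disabled/enabled on the
 * fly and the per-VN bandwidth allocation may be updated. Any event bits
 * left unhandled are reported back to the MCP as a DCC failure.
 */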
2635 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2636 {
2637         int func = BP_FUNC(bp);
2638
2639         DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2640         bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2641
2642         if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2643
2644                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2645                         DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2646                         bp->state = BNX2X_STATE_DISABLED;
2647
2648                         bnx2x_e1h_disable(bp);
2649                 } else {
2650                         DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2651                         bp->state = BNX2X_STATE_OPEN;
2652
2653                         bnx2x_e1h_enable(bp);
2654                 }
2655                 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2656         }
2657         if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2658
2659                 bnx2x_update_min_max(bp);
2660                 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2661         }
2662
2663         /* Report results to MCP */
2664         if (dcc_event)
2665                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2666         else
2667                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2668 }
2669
2670 /* must be called under the spq lock */
2671 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2672 {
2673         struct eth_spe *next_spe = bp->spq_prod_bd;
2674
2675         if (bp->spq_prod_bd == bp->spq_last_bd) {
2676                 bp->spq_prod_bd = bp->spq;
2677                 bp->spq_prod_idx = 0;
2678                 DP(NETIF_MSG_TIMER, "end of spq\n");
2679         } else {
2680                 bp->spq_prod_bd++;
2681                 bp->spq_prod_idx++;
2682         }
2683         return next_spe;
2684 }
2685
2686 /* must be called under the spq lock */
2687 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2688 {
2689         int func = BP_FUNC(bp);
2690
2691         /* Make sure that BD data is updated before writing the producer */
2692         wmb();
2693
2694         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2695                bp->spq_prod_idx);
2696         mmiowb();
2697 }
2698
2699 /* the slow path queue is odd since completions arrive on the fastpath ring */
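/*
 * bnx2x_sp_post() queues a single slow path element (ramrod): the entry
 * carries the command, the HW connection id and a 64-bit data address, and
 * the XSTORM producer is updated under spq_lock. If the ring is full the
 * function panics and returns -EBUSY.
 */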
2700 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2701                          u32 data_hi, u32 data_lo, int common)
2702 {
2703         struct eth_spe *spe;
2704
2705         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2706            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2707            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2708            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2709            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2710
2711 #ifdef BNX2X_STOP_ON_ERROR
2712         if (unlikely(bp->panic))
2713                 return -EIO;
2714 #endif
2715
2716         spin_lock_bh(&bp->spq_lock);
2717
2718         if (!bp->spq_left) {
2719                 BNX2X_ERR("BUG! SPQ ring full!\n");
2720                 spin_unlock_bh(&bp->spq_lock);
2721                 bnx2x_panic();
2722                 return -EBUSY;
2723         }
2724
2725         spe = bnx2x_sp_get_next(bp);
2726
2727         /* CID needs the port number to be encoded in it */
2728         spe->hdr.conn_and_cmd_data =
2729                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2730                                      HW_CID(bp, cid)));
2731         spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2732         if (common)
2733                 spe->hdr.type |=
2734                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2735
2736         spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2737         spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2738
2739         bp->spq_left--;
2740
2741         bnx2x_sp_prod_update(bp);
2742         spin_unlock_bh(&bp->spq_lock);
2743         return 0;
2744 }
2745
2746 /* acquire split MCP access lock register */
2747 static int bnx2x_acquire_alr(struct bnx2x *bp)
2748 {
2749         u32 i, j, val;
2750         int rc = 0;
2751
2752         might_sleep();
2753         i = 100;
2754         for (j = 0; j < i*10; j++) {
2755                 val = (1UL << 31);
2756                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2757                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2758                 if (val & (1L << 31))
2759                         break;
2760
2761                 msleep(5);
2762         }
2763         if (!(val & (1L << 31))) {
2764                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2765                 rc = -EBUSY;
2766         }
2767
2768         return rc;
2769 }
2770
2771 /* release split MCP access lock register */
2772 static void bnx2x_release_alr(struct bnx2x *bp)
2773 {
2774         u32 val = 0;
2775
2776         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2777 }
2778
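/* Returns a bit-mask of the default status block indices that changed since
 * the last call: 1 - attention bits, 2 - cstorm, 4 - ustorm, 8 - xstorm,
 * 16 - tstorm.
 */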
2779 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2780 {
2781         struct host_def_status_block *def_sb = bp->def_status_blk;
2782         u16 rc = 0;
2783
2784         barrier(); /* status block is written to by the chip */
2785         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2786                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2787                 rc |= 1;
2788         }
2789         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2790                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2791                 rc |= 2;
2792         }
2793         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2794                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2795                 rc |= 4;
2796         }
2797         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2798                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2799                 rc |= 8;
2800         }
2801         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2802                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2803                 rc |= 16;
2804         }
2805         return rc;
2806 }
2807
2808 /*
2809  * slow path service functions
2810  */
2811
2812 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2813 {
2814         int port = BP_PORT(bp);
2815         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2816                        COMMAND_REG_ATTN_BITS_SET);
2817         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2818                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2819         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2820                                        NIG_REG_MASK_INTERRUPT_PORT0;
2821         u32 aeu_mask;
2822         u32 nig_mask = 0;
2823
2824         if (bp->attn_state & asserted)
2825                 BNX2X_ERR("IGU ERROR\n");
2826
2827         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2828         aeu_mask = REG_RD(bp, aeu_addr);
2829
2830         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2831            aeu_mask, asserted);
2832         aeu_mask &= ~(asserted & 0xff);
2833         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2834
2835         REG_WR(bp, aeu_addr, aeu_mask);
2836         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2837
2838         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2839         bp->attn_state |= asserted;
2840         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2841
2842         if (asserted & ATTN_HARD_WIRED_MASK) {
2843                 if (asserted & ATTN_NIG_FOR_FUNC) {
2844
2845                         bnx2x_acquire_phy_lock(bp);
2846
2847                         /* save nig interrupt mask */
2848                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2849                         REG_WR(bp, nig_int_mask_addr, 0);
2850
2851                         bnx2x_link_attn(bp);
2852
2853                         /* handle unicore attn? */
2854                 }
2855                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2856                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2857
2858                 if (asserted & GPIO_2_FUNC)
2859                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2860
2861                 if (asserted & GPIO_3_FUNC)
2862                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2863
2864                 if (asserted & GPIO_4_FUNC)
2865                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2866
2867                 if (port == 0) {
2868                         if (asserted & ATTN_GENERAL_ATTN_1) {
2869                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2870                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2871                         }
2872                         if (asserted & ATTN_GENERAL_ATTN_2) {
2873                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2874                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2875                         }
2876                         if (asserted & ATTN_GENERAL_ATTN_3) {
2877                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2878                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2879                         }
2880                 } else {
2881                         if (asserted & ATTN_GENERAL_ATTN_4) {
2882                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2883                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2884                         }
2885                         if (asserted & ATTN_GENERAL_ATTN_5) {
2886                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2887                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2888                         }
2889                         if (asserted & ATTN_GENERAL_ATTN_6) {
2890                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2891                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2892                         }
2893                 }
2894
2895         } /* if hardwired */
2896
2897         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2898            asserted, hc_addr);
2899         REG_WR(bp, hc_addr, asserted);
2900
2901         /* now set back the mask */
2902         if (asserted & ATTN_NIG_FOR_FUNC) {
2903                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2904                 bnx2x_release_phy_lock(bp);
2905         }
2906 }
2907
2908 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2909 {
2910         int port = BP_PORT(bp);
2911
2912         /* mark the failure */
2913         bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2914         bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2915         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2916                  bp->link_params.ext_phy_config);
2917
2918         /* log the failure */
2919         printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2920                " the driver to shut down the card to prevent permanent"
2921                " damage.  Please contact Dell Support for assistance\n",
2922                bp->dev->name);
2923 }
2924
2925 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2926 {
2927         int port = BP_PORT(bp);
2928         int reg_offset;
2929         u32 val, swap_val, swap_override;
2930
2931         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2932                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2933
2934         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2935
2936                 val = REG_RD(bp, reg_offset);
2937                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2938                 REG_WR(bp, reg_offset, val);
2939
2940                 BNX2X_ERR("SPIO5 hw attention\n");
2941
2942                 /* Fan failure attention */
2943                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2944                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2945                         /* Low power mode is controlled by GPIO 2 */
2946                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2947                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2948                         /* The PHY reset is controlled by GPIO 1 */
2949                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2950                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2951                         break;
2952
2953                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2954                         /* The PHY reset is controlled by GPIO 1 */
2955                         /* fake the port number to cancel the swap done in
2956                            set_gpio() */
2957                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2958                         swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2959                         port = (swap_val && swap_override) ^ 1;
2960                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2961                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2962                         break;
2963
2964                 default:
2965                         break;
2966                 }
2967                 bnx2x_fan_failure(bp);
2968         }
2969
2970         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2971                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2972                 bnx2x_acquire_phy_lock(bp);
2973                 bnx2x_handle_module_detect_int(&bp->link_params);
2974                 bnx2x_release_phy_lock(bp);
2975         }
2976
2977         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2978
2979                 val = REG_RD(bp, reg_offset);
2980                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2981                 REG_WR(bp, reg_offset, val);
2982
2983                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2984                           (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2985                 bnx2x_panic();
2986         }
2987 }
2988
2989 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2990 {
2991         u32 val;
2992
2993         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2994
2995                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2996                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2997                 /* DORQ discard attention */
2998                 if (val & 0x2)
2999                         BNX2X_ERR("FATAL error from DORQ\n");
3000         }
3001
3002         if (attn & HW_INTERRUT_ASSERT_SET_1) {
3003
3004                 int port = BP_PORT(bp);
3005                 int reg_offset;
3006
3007                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3008                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3009
3010                 val = REG_RD(bp, reg_offset);
3011                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3012                 REG_WR(bp, reg_offset, val);
3013
3014                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
3015                           (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
3016                 bnx2x_panic();
3017         }
3018 }
3019
3020 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3021 {
3022         u32 val;
3023
3024         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3025
3026                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3027                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3028                 /* CFC error attention */
3029                 if (val & 0x2)
3030                         BNX2X_ERR("FATAL error from CFC\n");
3031         }
3032
3033         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3034
3035                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3036                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3037                 /* RQ_USDMDP_FIFO_OVERFLOW */
3038                 if (val & 0x18000)
3039                         BNX2X_ERR("FATAL error from PXP\n");
3040         }
3041
3042         if (attn & HW_INTERRUT_ASSERT_SET_2) {
3043
3044                 int port = BP_PORT(bp);
3045                 int reg_offset;
3046
3047                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3048                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3049
3050                 val = REG_RD(bp, reg_offset);
3051                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3052                 REG_WR(bp, reg_offset, val);
3053
3054                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3055                           (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3056                 bnx2x_panic();
3057         }
3058 }
3059
3060 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3061 {
3062         u32 val;
3063
3064         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3065
3066                 if (attn & BNX2X_PMF_LINK_ASSERT) {
3067                         int func = BP_FUNC(bp);
3068
3069                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3070                         val = SHMEM_RD(bp, func_mb[func].drv_status);
3071                         if (val & DRV_STATUS_DCC_EVENT_MASK)
3072                                 bnx2x_dcc_event(bp,
3073                                             (val & DRV_STATUS_DCC_EVENT_MASK));
3074                         bnx2x__link_status_update(bp);
3075                         if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3076                                 bnx2x_pmf_update(bp);
3077
3078                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3079
3080                         BNX2X_ERR("MC assert!\n");
3081                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3082                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3083                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3084                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3085                         bnx2x_panic();
3086
3087                 } else if (attn & BNX2X_MCP_ASSERT) {
3088
3089                         BNX2X_ERR("MCP assert!\n");
3090                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3091                         bnx2x_fw_dump(bp);
3092
3093                 } else
3094                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3095         }
3096
3097         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3098                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3099                 if (attn & BNX2X_GRC_TIMEOUT) {
3100                         val = CHIP_IS_E1H(bp) ?
3101                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3102                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
3103                 }
3104                 if (attn & BNX2X_GRC_RSV) {
3105                         val = CHIP_IS_E1H(bp) ?
3106                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3107                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
3108                 }
3109                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3110         }
3111 }
3112
3113 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3114 {
3115         struct attn_route attn;
3116         struct attn_route group_mask;
3117         int port = BP_PORT(bp);
3118         int index;
3119         u32 reg_addr;
3120         u32 val;
3121         u32 aeu_mask;
3122
3123         /* need to take HW lock because MCP or other port might also
3124            try to handle this event */
3125         bnx2x_acquire_alr(bp);
3126
3127         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3128         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3129         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3130         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3131         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3132            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3133
3134         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3135                 if (deasserted & (1 << index)) {
3136                         group_mask = bp->attn_group[index];
3137
3138                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3139                            index, group_mask.sig[0], group_mask.sig[1],
3140                            group_mask.sig[2], group_mask.sig[3]);
3141
3142                         bnx2x_attn_int_deasserted3(bp,
3143                                         attn.sig[3] & group_mask.sig[3]);
3144                         bnx2x_attn_int_deasserted1(bp,
3145                                         attn.sig[1] & group_mask.sig[1]);
3146                         bnx2x_attn_int_deasserted2(bp,
3147                                         attn.sig[2] & group_mask.sig[2]);
3148                         bnx2x_attn_int_deasserted0(bp,
3149                                         attn.sig[0] & group_mask.sig[0]);
3150
3151                         if ((attn.sig[0] & group_mask.sig[0] &
3152                                                 HW_PRTY_ASSERT_SET_0) ||
3153                             (attn.sig[1] & group_mask.sig[1] &
3154                                                 HW_PRTY_ASSERT_SET_1) ||
3155                             (attn.sig[2] & group_mask.sig[2] &
3156                                                 HW_PRTY_ASSERT_SET_2))
3157                                 BNX2X_ERR("FATAL HW block parity attention\n");
3158                 }
3159         }
3160
3161         bnx2x_release_alr(bp);
3162
3163         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3164
3165         val = ~deasserted;
3166         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3167            val, reg_addr);
3168         REG_WR(bp, reg_addr, val);
3169
3170         if (~bp->attn_state & deasserted)
3171                 BNX2X_ERR("IGU ERROR\n");
3172
3173         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3174                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
3175
3176         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3177         aeu_mask = REG_RD(bp, reg_addr);
3178
3179         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
3180            aeu_mask, deasserted);
3181         aeu_mask |= (deasserted & 0xff);
3182         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3183
3184         REG_WR(bp, reg_addr, aeu_mask);
3185         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3186
3187         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3188         bp->attn_state &= ~deasserted;
3189         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3190 }
3191
3192 static void bnx2x_attn_int(struct bnx2x *bp)
3193 {
3194         /* read local copy of bits */
3195         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3196                                                                 attn_bits);
3197         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3198                                                                 attn_bits_ack);
3199         u32 attn_state = bp->attn_state;
3200
3201         /* look for changed bits */
3202         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
3203         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
3204
3205         DP(NETIF_MSG_HW,
3206            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
3207            attn_bits, attn_ack, asserted, deasserted);
3208
3209         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3210                 BNX2X_ERR("BAD attention state\n");
3211
3212         /* handle bits that were raised */
3213         if (asserted)
3214                 bnx2x_attn_int_asserted(bp, asserted);
3215
3216         if (deasserted)
3217                 bnx2x_attn_int_deasserted(bp, deasserted);
3218 }
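/*
 * The asserted/deasserted derivation above, illustrated with made-up
 * values: attn_bits = 0x5, attn_ack = 0x4, attn_state = 0x4 gives
 * asserted = 0x5 & ~0x4 & ~0x4 = 0x1 (bit 0 newly raised) and
 * deasserted = ~0x5 & 0x4 & 0x4 = 0x0 (nothing dropped).  A bit is thus
 * "asserted" only when it is set in attn_bits but not yet acknowledged
 * nor recorded in attn_state, and "deasserted" only when it has cleared
 * in attn_bits while still acknowledged and recorded.
 */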
3219
3220 static void bnx2x_sp_task(struct work_struct *work)
3221 {
3222         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3223         u16 status;
3224
3225
3226         /* Return here if interrupt is disabled */
3227         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3228                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3229                 return;
3230         }
3231
3232         status = bnx2x_update_dsb_idx(bp);
3233 /*      if (status == 0)                                     */
3234 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
3235
3236         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
3237
3238         /* HW attentions */
3239         if (status & 0x1)
3240                 bnx2x_attn_int(bp);
3241
3242         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3243                      IGU_INT_NOP, 1);
3244         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3245                      IGU_INT_NOP, 1);
3246         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3247                      IGU_INT_NOP, 1);
3248         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3249                      IGU_INT_NOP, 1);
3250         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3251                      IGU_INT_ENABLE, 1);
3252
3253 }
3254
3255 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3256 {
3257         struct net_device *dev = dev_instance;
3258         struct bnx2x *bp = netdev_priv(dev);
3259
3260         /* Return here if interrupt is disabled */
3261         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3262                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3263                 return IRQ_HANDLED;
3264         }
3265
3266         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3267
3268 #ifdef BNX2X_STOP_ON_ERROR
3269         if (unlikely(bp->panic))
3270                 return IRQ_HANDLED;
3271 #endif
3272
3273 #ifdef BCM_CNIC
3274         {
3275                 struct cnic_ops *c_ops;
3276
3277                 rcu_read_lock();
3278                 c_ops = rcu_dereference(bp->cnic_ops);
3279                 if (c_ops)
3280                         c_ops->cnic_handler(bp->cnic_data, NULL);
3281                 rcu_read_unlock();
3282         }
3283 #endif
3284         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3285
3286         return IRQ_HANDLED;
3287 }
3288
3289 /* end of slow path */
3290
3291 /* Statistics */
3292
3293 /****************************************************************************
3294 * Macros
3295 ****************************************************************************/
3296
3297 /* sum[hi:lo] += add[hi:lo] */
3298 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3299         do { \
3300                 s_lo += a_lo; \
3301                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3302         } while (0)
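/*
 * Worked example of the carry handling in ADD_64 (illustrative values):
 *   sum = 0x00000001_FFFFFFF0, add = 0x00000000_00000020
 *   s_lo += a_lo      ->  s_lo = 0x00000010 (wrapped, so s_lo < a_lo)
 *   s_hi += a_hi + 1  ->  s_hi = 0x00000002
 * i.e. 0x1FFFFFFF0 + 0x20 = 0x200000010, as expected.
 */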
3303
3304 /* difference = minuend - subtrahend */
3305 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3306         do { \
3307                 if (m_lo < s_lo) { \
3308                         /* underflow */ \
3309                         d_hi = m_hi - s_hi; \
3310                         if (d_hi > 0) { \
3311                                 /* we can 'loan' 1 */ \
3312                                 /* we can borrow 1 */ \
3313                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3314                         } else { \
3315                                 /* m_hi <= s_hi */ \
3316                                 d_hi = 0; \
3317                                 d_lo = 0; \
3318                         } \
3319                 } else { \
3320                         /* m_lo >= s_lo */ \
3321                         if (m_hi < s_hi) { \
3322                                 d_hi = 0; \
3323                                 d_lo = 0; \
3324                         } else { \
3325                                 /* m_hi >= s_hi */ \
3326                                 d_hi = m_hi - s_hi; \
3327                                 d_lo = m_lo - s_lo; \
3328                         } \
3329                 } \
3330         } while (0)
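/*
 * Worked example of the borrow in DIFF_64 (illustrative values):
 *   minuend = 0x00000001_00000005, subtrahend = 0x00000000_0000000A
 *   m_lo < s_lo and d_hi = 1 > 0, so one is borrowed from the high word:
 *   d_hi = 0, d_lo = 5 + (0xFFFFFFFF - 0xA) + 1 = 0xFFFFFFFB
 * i.e. 0x100000005 - 0xA = 0xFFFFFFFB.  If the subtrahend is larger than
 * the minuend, the result is clamped to zero rather than wrapping.
 */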
3331
3332 #define UPDATE_STAT64(s, t) \
3333         do { \
3334                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3335                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3336                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3337                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3338                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3339                        pstats->mac_stx[1].t##_lo, diff.lo); \
3340         } while (0)
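/*
 * In UPDATE_STAT64, mac_stx[0] holds the last raw 64-bit counter value
 * read from the MAC and mac_stx[1] accumulates the per-interval deltas;
 * it is mac_stx[1] that bnx2x_hw_stats_update() later copies into
 * bp->eth_stats.
 */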
3341
3342 #define UPDATE_STAT64_NIG(s, t) \
3343         do { \
3344                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3345                         diff.lo, new->s##_lo, old->s##_lo); \
3346                 ADD_64(estats->t##_hi, diff.hi, \
3347                        estats->t##_lo, diff.lo); \
3348         } while (0)
3349
3350 /* sum[hi:lo] += add */
3351 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3352         do { \
3353                 s_lo += a; \
3354                 s_hi += (s_lo < a) ? 1 : 0; \
3355         } while (0)
3356
3357 #define UPDATE_EXTEND_STAT(s) \
3358         do { \
3359                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3360                               pstats->mac_stx[1].s##_lo, \
3361                               new->s); \
3362         } while (0)
3363
3364 #define UPDATE_EXTEND_TSTAT(s, t) \
3365         do { \
3366                 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3367                 old_tclient->s = tclient->s; \
3368                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3369         } while (0)
3370
3371 #define UPDATE_EXTEND_USTAT(s, t) \
3372         do { \
3373                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3374                 old_uclient->s = uclient->s; \
3375                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3376         } while (0)
3377
3378 #define UPDATE_EXTEND_XSTAT(s, t) \
3379         do { \
3380                 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3381                 old_xclient->s = xclient->s; \
3382                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3383         } while (0)
3384
3385 /* minuend -= subtrahend */
3386 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3387         do { \
3388                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3389         } while (0)
3390
3391 /* minuend[hi:lo] -= subtrahend */
3392 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3393         do { \
3394                 SUB_64(m_hi, 0, m_lo, s); \
3395         } while (0)
3396
3397 #define SUB_EXTEND_USTAT(s, t) \
3398         do { \
3399                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3400                 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3401         } while (0)
3402
3403 /*
3404  * General service functions
3405  */
3406
3407 static inline long bnx2x_hilo(u32 *hiref)
3408 {
3409         u32 lo = *(hiref + 1);
3410 #if (BITS_PER_LONG == 64)
3411         u32 hi = *hiref;
3412
3413         return HILO_U64(hi, lo);
3414 #else
3415         return lo;
3416 #endif
3417 }
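/*
 * bnx2x_hilo() takes a pointer to the _hi word of a split 64-bit counter
 * (the _lo word immediately follows it) and returns the full value on
 * 64-bit kernels; on 32-bit kernels a long cannot hold it, so only the
 * low 32 bits are returned.
 */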
3418
3419 /*
3420  * Init service functions
3421  */
3422
3423 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3424 {
3425         if (!bp->stats_pending) {
3426                 struct eth_query_ramrod_data ramrod_data = {0};
3427                 int i, rc;
3428
3429                 ramrod_data.drv_counter = bp->stats_counter++;
3430                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3431                 for_each_queue(bp, i)
3432                         ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3433
3434                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3435                                    ((u32 *)&ramrod_data)[1],
3436                                    ((u32 *)&ramrod_data)[0], 0);
3437                 if (rc == 0) {
3438                         /* stats ramrod has its own slot on the spq */
3439                         bp->spq_left++;
3440                         bp->stats_pending = 1;
3441                 }
3442         }
3443 }
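/*
 * bnx2x_sp_post() decrements bp->spq_left for every entry it places on
 * the slow path queue; since the statistics ramrod has its own slot on
 * the SPQ, the spq_left++ above simply gives that credit back.
 */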
3444
3445 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3446 {
3447         struct dmae_command *dmae = &bp->stats_dmae;
3448         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3449
3450         *stats_comp = DMAE_COMP_VAL;
3451         if (CHIP_REV_IS_SLOW(bp))
3452                 return;
3453
3454         /* loader */
3455         if (bp->executer_idx) {
3456                 int loader_idx = PMF_DMAE_C(bp);
3457
3458                 memset(dmae, 0, sizeof(struct dmae_command));
3459
3460                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3461                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3462                                 DMAE_CMD_DST_RESET |
3463 #ifdef __BIG_ENDIAN
3464                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3465 #else
3466                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3467 #endif
3468                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3469                                                DMAE_CMD_PORT_0) |
3470                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3471                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3472                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3473                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3474                                      sizeof(struct dmae_command) *
3475                                      (loader_idx + 1)) >> 2;
3476                 dmae->dst_addr_hi = 0;
3477                 dmae->len = sizeof(struct dmae_command) >> 2;
3478                 if (CHIP_IS_E1(bp))
3479                         dmae->len--;
3480                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3481                 dmae->comp_addr_hi = 0;
3482                 dmae->comp_val = 1;
3483
3484                 *stats_comp = 0;
3485                 bnx2x_post_dmae(bp, dmae, loader_idx);
3486
3487         } else if (bp->func_stx) {
3488                 *stats_comp = 0;
3489                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3490         }
3491 }
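/*
 * When more than one DMAE command has been queued in the slow path
 * buffer (bp->executer_idx != 0), the command built above acts as a
 * loader: it copies the first queued command into DMAE command memory
 * slot loader_idx + 1 and directs its completion write (value 1) at that
 * slot's GO register, which is presumably what launches the copied
 * command.  With only the function statistics command prepared in
 * bp->stats_dmae (bp->func_stx set), that command is posted directly.
 */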
3492
3493 static int bnx2x_stats_comp(struct bnx2x *bp)
3494 {
3495         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3496         int cnt = 10;
3497
3498         might_sleep();
3499         while (*stats_comp != DMAE_COMP_VAL) {
3500                 if (!cnt) {
3501                         BNX2X_ERR("timeout waiting for stats to finish\n");
3502                         break;
3503                 }
3504                 cnt--;
3505                 msleep(1);
3506         }
3507         return 1;
3508 }
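/*
 * bnx2x_stats_comp() waits for the DMAE completion word to reach
 * DMAE_COMP_VAL, sleeping 1 ms between checks for at most 10 tries
 * (about 10 ms); note that it returns 1 whether or not the completion
 * actually arrived.
 */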
3509
3510 /*
3511  * Statistics service functions
3512  */
3513
3514 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3515 {
3516         struct dmae_command *dmae;
3517         u32 opcode;
3518         int loader_idx = PMF_DMAE_C(bp);
3519         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3520
3521         /* sanity */
3522         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3523                 BNX2X_ERR("BUG!\n");
3524                 return;
3525         }
3526
3527         bp->executer_idx = 0;
3528
3529         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3530                   DMAE_CMD_C_ENABLE |
3531                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3532 #ifdef __BIG_ENDIAN
3533                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3534 #else
3535                   DMAE_CMD_ENDIANITY_DW_SWAP |
3536 #endif
3537                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3538                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3539
3540         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3541         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3542         dmae->src_addr_lo = bp->port.port_stx >> 2;
3543         dmae->src_addr_hi = 0;
3544         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3545         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3546         dmae->len = DMAE_LEN32_RD_MAX;
3547         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3548         dmae->comp_addr_hi = 0;
3549         dmae->comp_val = 1;
3550
3551         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3552         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3553         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3554         dmae->src_addr_hi = 0;
3555         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3556                                    DMAE_LEN32_RD_MAX * 4);
3557         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3558                                    DMAE_LEN32_RD_MAX * 4);
3559         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3560         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3561         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3562         dmae->comp_val = DMAE_COMP_VAL;
3563
3564         *stats_comp = 0;
3565         bnx2x_hw_stats_post(bp);
3566         bnx2x_stats_comp(bp);
3567 }
3568
3569 static void bnx2x_port_stats_init(struct bnx2x *bp)
3570 {
3571         struct dmae_command *dmae;
3572         int port = BP_PORT(bp);
3573         int vn = BP_E1HVN(bp);
3574         u32 opcode;
3575         int loader_idx = PMF_DMAE_C(bp);
3576         u32 mac_addr;
3577         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3578
3579         /* sanity */
3580         if (!bp->link_vars.link_up || !bp->port.pmf) {
3581                 BNX2X_ERR("BUG!\n");
3582                 return;
3583         }
3584
3585         bp->executer_idx = 0;
3586
3587         /* MCP */
3588         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3589                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3590                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3591 #ifdef __BIG_ENDIAN
3592                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3593 #else
3594                   DMAE_CMD_ENDIANITY_DW_SWAP |
3595 #endif
3596                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3597                   (vn << DMAE_CMD_E1HVN_SHIFT));
3598
3599         if (bp->port.port_stx) {
3600
3601                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3602                 dmae->opcode = opcode;
3603                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3604                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3605                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3606                 dmae->dst_addr_hi = 0;
3607                 dmae->len = sizeof(struct host_port_stats) >> 2;
3608                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3609                 dmae->comp_addr_hi = 0;
3610                 dmae->comp_val = 1;
3611         }
3612
3613         if (bp->func_stx) {
3614
3615                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3616                 dmae->opcode = opcode;
3617                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3618                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3619                 dmae->dst_addr_lo = bp->func_stx >> 2;
3620                 dmae->dst_addr_hi = 0;
3621                 dmae->len = sizeof(struct host_func_stats) >> 2;
3622                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3623                 dmae->comp_addr_hi = 0;
3624                 dmae->comp_val = 1;
3625         }
3626
3627         /* MAC */
3628         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3629                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3630                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3631 #ifdef __BIG_ENDIAN
3632                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3633 #else
3634                   DMAE_CMD_ENDIANITY_DW_SWAP |
3635 #endif
3636                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3637                   (vn << DMAE_CMD_E1HVN_SHIFT));
3638
3639         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3640
3641                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3642                                    NIG_REG_INGRESS_BMAC0_MEM);
3643
3644                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3645                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3646                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3647                 dmae->opcode = opcode;
3648                 dmae->src_addr_lo = (mac_addr +
3649                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3650                 dmae->src_addr_hi = 0;
3651                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3652                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3653                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3654                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3655                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3656                 dmae->comp_addr_hi = 0;
3657                 dmae->comp_val = 1;
3658
3659                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3660                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3661                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3662                 dmae->opcode = opcode;
3663                 dmae->src_addr_lo = (mac_addr +
3664                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3665                 dmae->src_addr_hi = 0;
3666                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3667                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3668                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3669                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3670                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3671                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3672                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3673                 dmae->comp_addr_hi = 0;
3674                 dmae->comp_val = 1;
3675
3676         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3677
3678                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3679
3680                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3681                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3682                 dmae->opcode = opcode;
3683                 dmae->src_addr_lo = (mac_addr +
3684                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3685                 dmae->src_addr_hi = 0;
3686                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3687                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3688                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3689                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3690                 dmae->comp_addr_hi = 0;
3691                 dmae->comp_val = 1;
3692
3693                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3694                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3695                 dmae->opcode = opcode;
3696                 dmae->src_addr_lo = (mac_addr +
3697                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3698                 dmae->src_addr_hi = 0;
3699                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3700                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3701                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3702                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3703                 dmae->len = 1;
3704                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3705                 dmae->comp_addr_hi = 0;
3706                 dmae->comp_val = 1;
3707
3708                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3709                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3710                 dmae->opcode = opcode;
3711                 dmae->src_addr_lo = (mac_addr +
3712                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3713                 dmae->src_addr_hi = 0;
3714                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3715                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3716                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3717                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3718                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3719                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3720                 dmae->comp_addr_hi = 0;
3721                 dmae->comp_val = 1;
3722         }
3723
3724         /* NIG */
3725         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3726         dmae->opcode = opcode;
3727         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3728                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3729         dmae->src_addr_hi = 0;
3730         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3731         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3732         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3733         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3734         dmae->comp_addr_hi = 0;
3735         dmae->comp_val = 1;
3736
3737         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3738         dmae->opcode = opcode;
3739         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3740                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3741         dmae->src_addr_hi = 0;
3742         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3743                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3744         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3745                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3746         dmae->len = (2*sizeof(u32)) >> 2;
3747         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3748         dmae->comp_addr_hi = 0;
3749         dmae->comp_val = 1;
3750
3751         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3752         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3753                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3754                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3755 #ifdef __BIG_ENDIAN
3756                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3757 #else
3758                         DMAE_CMD_ENDIANITY_DW_SWAP |
3759 #endif
3760                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3761                         (vn << DMAE_CMD_E1HVN_SHIFT));
3762         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3763                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3764         dmae->src_addr_hi = 0;
3765         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3766                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3767         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3768                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3769         dmae->len = (2*sizeof(u32)) >> 2;
3770         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3771         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3772         dmae->comp_val = DMAE_COMP_VAL;
3773
3774         *stats_comp = 0;
3775 }
3776
3777 static void bnx2x_func_stats_init(struct bnx2x *bp)
3778 {
3779         struct dmae_command *dmae = &bp->stats_dmae;
3780         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3781
3782         /* sanity */
3783         if (!bp->func_stx) {
3784                 BNX2X_ERR("BUG!\n");
3785                 return;
3786         }
3787
3788         bp->executer_idx = 0;
3789         memset(dmae, 0, sizeof(struct dmae_command));
3790
3791         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3792                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3793                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3794 #ifdef __BIG_ENDIAN
3795                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3796 #else
3797                         DMAE_CMD_ENDIANITY_DW_SWAP |
3798 #endif
3799                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3800                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3801         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3802         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3803         dmae->dst_addr_lo = bp->func_stx >> 2;
3804         dmae->dst_addr_hi = 0;
3805         dmae->len = sizeof(struct host_func_stats) >> 2;
3806         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3807         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3808         dmae->comp_val = DMAE_COMP_VAL;
3809
3810         *stats_comp = 0;
3811 }
3812
3813 static void bnx2x_stats_start(struct bnx2x *bp)
3814 {
3815         if (bp->port.pmf)
3816                 bnx2x_port_stats_init(bp);
3817
3818         else if (bp->func_stx)
3819                 bnx2x_func_stats_init(bp);
3820
3821         bnx2x_hw_stats_post(bp);
3822         bnx2x_storm_stats_post(bp);
3823 }
3824
3825 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3826 {
3827         bnx2x_stats_comp(bp);
3828         bnx2x_stats_pmf_update(bp);
3829         bnx2x_stats_start(bp);
3830 }
3831
3832 static void bnx2x_stats_restart(struct bnx2x *bp)
3833 {
3834         bnx2x_stats_comp(bp);
3835         bnx2x_stats_start(bp);
3836 }
3837
3838 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3839 {
3840         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3841         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3842         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3843         struct {
3844                 u32 lo;
3845                 u32 hi;
3846         } diff;
3847
3848         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3849         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3850         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3851         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3852         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3853         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3854         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3855         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3856         UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3857         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3858         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3859         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3860         UPDATE_STAT64(tx_stat_gt127,
3861                                 tx_stat_etherstatspkts65octetsto127octets);
3862         UPDATE_STAT64(tx_stat_gt255,
3863                                 tx_stat_etherstatspkts128octetsto255octets);
3864         UPDATE_STAT64(tx_stat_gt511,
3865                                 tx_stat_etherstatspkts256octetsto511octets);
3866         UPDATE_STAT64(tx_stat_gt1023,
3867                                 tx_stat_etherstatspkts512octetsto1023octets);
3868         UPDATE_STAT64(tx_stat_gt1518,
3869                                 tx_stat_etherstatspkts1024octetsto1522octets);
3870         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3871         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3872         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3873         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3874         UPDATE_STAT64(tx_stat_gterr,
3875                                 tx_stat_dot3statsinternalmactransmiterrors);
3876         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3877
3878         estats->pause_frames_received_hi =
3879                                 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3880         estats->pause_frames_received_lo =
3881                                 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3882
3883         estats->pause_frames_sent_hi =
3884                                 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3885         estats->pause_frames_sent_lo =
3886                                 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3887 }
3888
3889 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3890 {
3891         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3892         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3893         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3894
3895         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3896         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3897         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3898         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3899         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3900         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3901         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3902         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3903         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3904         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3905         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3906         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3907         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3908         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3909         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3910         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3911         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3912         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3913         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3914         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3915         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3916         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3917         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3918         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3919         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3920         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3921         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3922         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3923         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3924         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3925         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3926
3927         estats->pause_frames_received_hi =
3928                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3929         estats->pause_frames_received_lo =
3930                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3931         ADD_64(estats->pause_frames_received_hi,
3932                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3933                estats->pause_frames_received_lo,
3934                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3935
3936         estats->pause_frames_sent_hi =
3937                         pstats->mac_stx[1].tx_stat_outxonsent_hi;
3938         estats->pause_frames_sent_lo =
3939                         pstats->mac_stx[1].tx_stat_outxonsent_lo;
3940         ADD_64(estats->pause_frames_sent_hi,
3941                pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3942                estats->pause_frames_sent_lo,
3943                pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3944 }
3945
3946 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3947 {
3948         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3949         struct nig_stats *old = &(bp->port.old_nig_stats);
3950         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3951         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3952         struct {
3953                 u32 lo;
3954                 u32 hi;
3955         } diff;
3956         u32 nig_timer_max;
3957
3958         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3959                 bnx2x_bmac_stats_update(bp);
3960
3961         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3962                 bnx2x_emac_stats_update(bp);
3963
3964         else { /* unreached */
3965                 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3966                 return -1;
3967         }
3968
3969         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3970                       new->brb_discard - old->brb_discard);
3971         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3972                       new->brb_truncate - old->brb_truncate);
3973
3974         UPDATE_STAT64_NIG(egress_mac_pkt0,
3975                                         etherstatspkts1024octetsto1522octets);
3976         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3977
3978         memcpy(old, new, sizeof(struct nig_stats));
3979
3980         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3981                sizeof(struct mac_stx));
3982         estats->brb_drop_hi = pstats->brb_drop_hi;
3983         estats->brb_drop_lo = pstats->brb_drop_lo;
3984
3985         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3986
3987         nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3988         if (nig_timer_max != estats->nig_timer_max) {
3989                 estats->nig_timer_max = nig_timer_max;
3990                 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3991         }
3992
3993         return 0;
3994 }
3995
3996 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3997 {
3998         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3999         struct tstorm_per_port_stats *tport =
4000                                         &stats->tstorm_common.port_statistics;
4001         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
4002         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4003         int i;
4004
4005         memcpy(&(fstats->total_bytes_received_hi),
4006                &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
4007                sizeof(struct host_func_stats) - 2*sizeof(u32));
4008         estats->error_bytes_received_hi = 0;
4009         estats->error_bytes_received_lo = 0;
4010         estats->etherstatsoverrsizepkts_hi = 0;
4011         estats->etherstatsoverrsizepkts_lo = 0;
4012         estats->no_buff_discard_hi = 0;
4013         estats->no_buff_discard_lo = 0;
4014
4015         for_each_rx_queue(bp, i) {
4016                 struct bnx2x_fastpath *fp = &bp->fp[i];
4017                 int cl_id = fp->cl_id;
4018                 struct tstorm_per_client_stats *tclient =
4019                                 &stats->tstorm_common.client_statistics[cl_id];
4020                 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4021                 struct ustorm_per_client_stats *uclient =
4022                                 &stats->ustorm_common.client_statistics[cl_id];
4023                 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4024                 struct xstorm_per_client_stats *xclient =
4025                                 &stats->xstorm_common.client_statistics[cl_id];
4026                 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4027                 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4028                 u32 diff;
4029
4030                 /* are storm stats valid? */
4031                 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
4032                                                         bp->stats_counter) {
4033                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4034                            "  xstorm counter (%d) != stats_counter (%d)\n",
4035                            i, xclient->stats_counter, bp->stats_counter);
4036                         return -1;
4037                 }
4038                 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
4039                                                         bp->stats_counter) {
4040                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4041                            "  tstorm counter (%d) != stats_counter (%d)\n",
4042                            i, tclient->stats_counter, bp->stats_counter);
4043                         return -2;
4044                 }
4045                 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4046                                                         bp->stats_counter) {
4047                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4048                            "  ustorm counter (%d) != stats_counter (%d)\n",
4049                            i, uclient->stats_counter, bp->stats_counter);
4050                         return -4;
4051                 }
4052
4053                 qstats->total_bytes_received_hi =
4054                         le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4055                 qstats->total_bytes_received_lo =
4056                         le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4057
4058                 ADD_64(qstats->total_bytes_received_hi,
4059                        le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4060                        qstats->total_bytes_received_lo,
4061                        le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4062
4063                 ADD_64(qstats->total_bytes_received_hi,
4064                        le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4065                        qstats->total_bytes_received_lo,
4066                        le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4067
4068                 qstats->valid_bytes_received_hi =
4069                                         qstats->total_bytes_received_hi;
4070                 qstats->valid_bytes_received_lo =
4071                                         qstats->total_bytes_received_lo;
4072
4073                 qstats->error_bytes_received_hi =
4074                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
4075                 qstats->error_bytes_received_lo =
4076                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
4077
4078                 ADD_64(qstats->total_bytes_received_hi,
4079                        qstats->error_bytes_received_hi,
4080                        qstats->total_bytes_received_lo,
4081                        qstats->error_bytes_received_lo);
4082
4083                 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4084                                         total_unicast_packets_received);
4085                 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4086                                         total_multicast_packets_received);
4087                 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4088                                         total_broadcast_packets_received);
4089                 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4090                                         etherstatsoverrsizepkts);
4091                 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4092
4093                 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4094                                         total_unicast_packets_received);
4095                 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4096                                         total_multicast_packets_received);
4097                 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4098                                         total_broadcast_packets_received);
4099                 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4100                 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4101                 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4102
4103                 qstats->total_bytes_transmitted_hi =
4104                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4105                 qstats->total_bytes_transmitted_lo =
4106                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4107
4108                 ADD_64(qstats->total_bytes_transmitted_hi,
4109                        le32_to_cpu(xclient->multicast_bytes_sent.hi),
4110                        qstats->total_bytes_transmitted_lo,
4111                        le32_to_cpu(xclient->multicast_bytes_sent.lo));
4112
4113                 ADD_64(qstats->total_bytes_transmitted_hi,
4114                        le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4115                        qstats->total_bytes_transmitted_lo,
4116                        le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4117
4118                 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4119                                         total_unicast_packets_transmitted);
4120                 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4121                                         total_multicast_packets_transmitted);
4122                 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4123                                         total_broadcast_packets_transmitted);
4124
4125                 old_tclient->checksum_discard = tclient->checksum_discard;
4126                 old_tclient->ttl0_discard = tclient->ttl0_discard;
4127
4128                 ADD_64(fstats->total_bytes_received_hi,
4129                        qstats->total_bytes_received_hi,
4130                        fstats->total_bytes_received_lo,
4131                        qstats->total_bytes_received_lo);
4132                 ADD_64(fstats->total_bytes_transmitted_hi,
4133                        qstats->total_bytes_transmitted_hi,
4134                        fstats->total_bytes_transmitted_lo,
4135                        qstats->total_bytes_transmitted_lo);
4136                 ADD_64(fstats->total_unicast_packets_received_hi,
4137                        qstats->total_unicast_packets_received_hi,
4138                        fstats->total_unicast_packets_received_lo,
4139                        qstats->total_unicast_packets_received_lo);
4140                 ADD_64(fstats->total_multicast_packets_received_hi,
4141                        qstats->total_multicast_packets_received_hi,
4142                        fstats->total_multicast_packets_received_lo,
4143                        qstats->total_multicast_packets_received_lo);
4144                 ADD_64(fstats->total_broadcast_packets_received_hi,
4145                        qstats->total_broadcast_packets_received_hi,
4146                        fstats->total_broadcast_packets_received_lo,
4147                        qstats->total_broadcast_packets_received_lo);
4148                 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4149                        qstats->total_unicast_packets_transmitted_hi,
4150                        fstats->total_unicast_packets_transmitted_lo,
4151                        qstats->total_unicast_packets_transmitted_lo);
4152                 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4153                        qstats->total_multicast_packets_transmitted_hi,
4154                        fstats->total_multicast_packets_transmitted_lo,
4155                        qstats->total_multicast_packets_transmitted_lo);
4156                 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4157                        qstats->total_broadcast_packets_transmitted_hi,
4158                        fstats->total_broadcast_packets_transmitted_lo,
4159                        qstats->total_broadcast_packets_transmitted_lo);
4160                 ADD_64(fstats->valid_bytes_received_hi,
4161                        qstats->valid_bytes_received_hi,
4162                        fstats->valid_bytes_received_lo,
4163                        qstats->valid_bytes_received_lo);
4164
4165                 ADD_64(estats->error_bytes_received_hi,
4166                        qstats->error_bytes_received_hi,
4167                        estats->error_bytes_received_lo,
4168                        qstats->error_bytes_received_lo);
4169                 ADD_64(estats->etherstatsoverrsizepkts_hi,
4170                        qstats->etherstatsoverrsizepkts_hi,
4171                        estats->etherstatsoverrsizepkts_lo,
4172                        qstats->etherstatsoverrsizepkts_lo);
4173                 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4174                        estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4175         }
4176
4177         ADD_64(fstats->total_bytes_received_hi,
4178                estats->rx_stat_ifhcinbadoctets_hi,
4179                fstats->total_bytes_received_lo,
4180                estats->rx_stat_ifhcinbadoctets_lo);
4181
4182         memcpy(estats, &(fstats->total_bytes_received_hi),
4183                sizeof(struct host_func_stats) - 2*sizeof(u32));
4184
4185         ADD_64(estats->etherstatsoverrsizepkts_hi,
4186                estats->rx_stat_dot3statsframestoolong_hi,
4187                estats->etherstatsoverrsizepkts_lo,
4188                estats->rx_stat_dot3statsframestoolong_lo);
4189         ADD_64(estats->error_bytes_received_hi,
4190                estats->rx_stat_ifhcinbadoctets_hi,
4191                estats->error_bytes_received_lo,
4192                estats->rx_stat_ifhcinbadoctets_lo);
4193
4194         if (bp->port.pmf) {
4195                 estats->mac_filter_discard =
4196                                 le32_to_cpu(tport->mac_filter_discard);
4197                 estats->xxoverflow_discard =
4198                                 le32_to_cpu(tport->xxoverflow_discard);
4199                 estats->brb_truncate_discard =
4200                                 le32_to_cpu(tport->brb_truncate_discard);
4201                 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4202         }
4203
4204         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4205
4206         bp->stats_pending = 0;
4207
4208         return 0;
4209 }
4210
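/*
 * Translate the accumulated driver statistics in bp->eth_stats into the
 * generic struct net_device_stats.  Each 64-bit hi/lo counter pair is
 * folded into a single value with bnx2x_hilo(); rx_errors and tx_errors
 * are then built up as sums of the individual error categories below.
 */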
4211 static void bnx2x_net_stats_update(struct bnx2x *bp)
4212 {
4213         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4214         struct net_device_stats *nstats = &bp->dev->stats;
4215         int i;
4216
4217         nstats->rx_packets =
4218                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4219                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4220                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4221
4222         nstats->tx_packets =
4223                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4224                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4225                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4226
4227         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4228
4229         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4230
4231         nstats->rx_dropped = estats->mac_discard;
4232         for_each_rx_queue(bp, i)
4233                 nstats->rx_dropped +=
4234                         le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4235
4236         nstats->tx_dropped = 0;
4237
4238         nstats->multicast =
4239                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4240
4241         nstats->collisions =
4242                 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4243
4244         nstats->rx_length_errors =
4245                 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4246                 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4247         nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4248                                  bnx2x_hilo(&estats->brb_truncate_hi);
4249         nstats->rx_crc_errors =
4250                 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4251         nstats->rx_frame_errors =
4252                 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4253         nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4254         nstats->rx_missed_errors = estats->xxoverflow_discard;
4255
4256         nstats->rx_errors = nstats->rx_length_errors +
4257                             nstats->rx_over_errors +
4258                             nstats->rx_crc_errors +
4259                             nstats->rx_frame_errors +
4260                             nstats->rx_fifo_errors +
4261                             nstats->rx_missed_errors;
4262
4263         nstats->tx_aborted_errors =
4264                 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4265                 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4266         nstats->tx_carrier_errors =
4267                 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4268         nstats->tx_fifo_errors = 0;
4269         nstats->tx_heartbeat_errors = 0;
4270         nstats->tx_window_errors = 0;
4271
4272         nstats->tx_errors = nstats->tx_aborted_errors +
4273                             nstats->tx_carrier_errors +
4274             bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4275 }
4276
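/*
 * Aggregate the purely driver-maintained counters (Xoff events, Rx error
 * discards, skb allocation failures, HW checksum errors) from every Rx
 * queue's eth_q_stats into the device-wide eth_stats.
 */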
4277 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4278 {
4279         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4280         int i;
4281
4282         estats->driver_xoff = 0;
4283         estats->rx_err_discard_pkt = 0;
4284         estats->rx_skb_alloc_failed = 0;
4285         estats->hw_csum_err = 0;
4286         for_each_rx_queue(bp, i) {
4287                 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4288
4289                 estats->driver_xoff += qstats->driver_xoff;
4290                 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4291                 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4292                 estats->hw_csum_err += qstats->hw_csum_err;
4293         }
4294 }
4295
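/*
 * Top-level periodic statistics refresh.  It runs only after the previous
 * DMAE pass has completed (*stats_comp == DMAE_COMP_VAL): the PMF also
 * refreshes the MAC/NIG hardware counters, the per-client storm counters
 * are folded in (panicking if they repeatedly fail to advance), the netdev
 * and driver counters are recomputed, and finally new hardware and storm
 * statistics requests are posted.
 */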
4296 static void bnx2x_stats_update(struct bnx2x *bp)
4297 {
4298         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4299
4300         if (*stats_comp != DMAE_COMP_VAL)
4301                 return;
4302
4303         if (bp->port.pmf)
4304                 bnx2x_hw_stats_update(bp);
4305
4306         if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4307                 BNX2X_ERR("storm stats were not updated 3 times in a row\n");
4308                 bnx2x_panic();
4309                 return;
4310         }
4311
4312         bnx2x_net_stats_update(bp);
4313         bnx2x_drv_stats_update(bp);
4314
4315         if (bp->msglevel & NETIF_MSG_TIMER) {
4316                 struct bnx2x_fastpath *fp0_rx = bp->fp;
4317                 struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
4318                 struct tstorm_per_client_stats *old_tclient =
4319                                                         &bp->fp->old_tclient;
4320                 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4321                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4322                 struct net_device_stats *nstats = &bp->dev->stats;
4323                 int i;
4324
4325                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4326                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
4327                                   "  tx pkt (%lx)\n",
4328                        bnx2x_tx_avail(fp0_tx),
4329                        le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4330                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
4331                                   "  rx pkt (%lx)\n",
4332                        (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4333                              fp0_rx->rx_comp_cons),
4334                        le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4335                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
4336                                   "brb truncate %u\n",
4337                        (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4338                        qstats->driver_xoff,
4339                        estats->brb_drop_lo, estats->brb_truncate_lo);
4340                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
4341                         "packets_too_big_discard %lu  no_buff_discard %lu  "
4342                         "mac_discard %u  mac_filter_discard %u  "
4343                         "xxoverflow_discard %u  brb_truncate_discard %u  "
4344                         "ttl0_discard %u\n",
4345                        le32_to_cpu(old_tclient->checksum_discard),
4346                        bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4347                        bnx2x_hilo(&qstats->no_buff_discard_hi),
4348                        estats->mac_discard, estats->mac_filter_discard,
4349                        estats->xxoverflow_discard, estats->brb_truncate_discard,
4350                        le32_to_cpu(old_tclient->ttl0_discard));
4351
4352                 for_each_queue(bp, i) {
4353                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4354                                bnx2x_fp(bp, i, tx_pkt),
4355                                bnx2x_fp(bp, i, rx_pkt),
4356                                bnx2x_fp(bp, i, rx_calls));
4357                 }
4358         }
4359
4360         bnx2x_hw_stats_post(bp);
4361         bnx2x_storm_stats_post(bp);
4362 }
4363
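/*
 * Build the final DMAE commands that copy the host port and function
 * statistics back to the shmem locations (port_stx/func_stx) obtained from
 * the MCP, so management firmware keeps up-to-date counters after the
 * driver stops collecting.  The commands are executed later by
 * bnx2x_hw_stats_post() from bnx2x_stats_stop().
 */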
4364 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4365 {
4366         struct dmae_command *dmae;
4367         u32 opcode;
4368         int loader_idx = PMF_DMAE_C(bp);
4369         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4370
4371         bp->executer_idx = 0;
4372
4373         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4374                   DMAE_CMD_C_ENABLE |
4375                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4376 #ifdef __BIG_ENDIAN
4377                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
4378 #else
4379                   DMAE_CMD_ENDIANITY_DW_SWAP |
4380 #endif
4381                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4382                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4383
4384         if (bp->port.port_stx) {
4385
4386                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4387                 if (bp->func_stx)
4388                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4389                 else
4390                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4391                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4392                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4393                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4394                 dmae->dst_addr_hi = 0;
4395                 dmae->len = sizeof(struct host_port_stats) >> 2;
4396                 if (bp->func_stx) {
4397                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4398                         dmae->comp_addr_hi = 0;
4399                         dmae->comp_val = 1;
4400                 } else {
4401                         dmae->comp_addr_lo =
4402                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4403                         dmae->comp_addr_hi =
4404                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4405                         dmae->comp_val = DMAE_COMP_VAL;
4406
4407                         *stats_comp = 0;
4408                 }
4409         }
4410
4411         if (bp->func_stx) {
4412
4413                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4414                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4415                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4416                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4417                 dmae->dst_addr_lo = bp->func_stx >> 2;
4418                 dmae->dst_addr_hi = 0;
4419                 dmae->len = sizeof(struct host_func_stats) >> 2;
4420                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4421                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4422                 dmae->comp_val = DMAE_COMP_VAL;
4423
4424                 *stats_comp = 0;
4425         }
4426 }
4427
4428 static void bnx2x_stats_stop(struct bnx2x *bp)
4429 {
4430         int update = 0;
4431
4432         bnx2x_stats_comp(bp);
4433
4434         if (bp->port.pmf)
4435                 update = (bnx2x_hw_stats_update(bp) == 0);
4436
4437         update |= (bnx2x_storm_stats_update(bp) == 0);
4438
4439         if (update) {
4440                 bnx2x_net_stats_update(bp);
4441
4442                 if (bp->port.pmf)
4443                         bnx2x_port_stats_stop(bp);
4444
4445                 bnx2x_hw_stats_post(bp);
4446                 bnx2x_stats_comp(bp);
4447         }
4448 }
4449
4450 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4451 {
4452 }
4453
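/*
 * Statistics state machine: two states (DISABLED, ENABLED) by four events
 * (PMF change, LINK_UP, UPDATE, STOP).  Each entry names the action to run
 * and the next state; bnx2x_stats_handle() below indexes this table.
 */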
4454 static const struct {
4455         void (*action)(struct bnx2x *bp);
4456         enum bnx2x_stats_state next_state;
4457 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4458 /* state        event   */
4459 {
4460 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4461 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
4462 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4463 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4464 },
4465 {
4466 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
4467 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
4468 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
4469 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
4470 }
4471 };
4472
4473 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4474 {
4475         enum bnx2x_stats_state state = bp->stats_state;
4476
4477         bnx2x_stats_stm[state][event].action(bp);
4478         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4479
4480         /* Make sure the state has been "changed" */
4481         smp_wmb();
4482
4483         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4484                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4485                    state, event, bp->stats_state);
4486 }
4487
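/*
 * PMF only: DMAE the current host_port_stats block once to the shmem
 * port_stx location, as a baseline for the management firmware to read.
 */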
4488 static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4489 {
4490         struct dmae_command *dmae;
4491         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4492
4493         /* sanity */
4494         if (!bp->port.pmf || !bp->port.port_stx) {
4495                 BNX2X_ERR("BUG!\n");
4496                 return;
4497         }
4498
4499         bp->executer_idx = 0;
4500
4501         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4502         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4503                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4504                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4505 #ifdef __BIG_ENDIAN
4506                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4507 #else
4508                         DMAE_CMD_ENDIANITY_DW_SWAP |
4509 #endif
4510                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4511                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4512         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4513         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4514         dmae->dst_addr_lo = bp->port.port_stx >> 2;
4515         dmae->dst_addr_hi = 0;
4516         dmae->len = sizeof(struct host_port_stats) >> 2;
4517         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4518         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4519         dmae->comp_val = DMAE_COMP_VAL;
4520
4521         *stats_comp = 0;
4522         bnx2x_hw_stats_post(bp);
4523         bnx2x_stats_comp(bp);
4524 }
4525
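/*
 * PMF only: initialize the function statistics base for every vnic on this
 * port by temporarily substituting each function's func_stx address (read
 * from its mailbox) for our own, posting the init DMAE, and then restoring
 * our func_stx.
 */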
4526 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4527 {
4528         int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4529         int port = BP_PORT(bp);
4530         int func;
4531         u32 func_stx;
4532
4533         /* sanity */
4534         if (!bp->port.pmf || !bp->func_stx) {
4535                 BNX2X_ERR("BUG!\n");
4536                 return;
4537         }
4538
4539         /* save our func_stx */
4540         func_stx = bp->func_stx;
4541
4542         for (vn = VN_0; vn < vn_max; vn++) {
4543                 func = 2*vn + port;
4544
4545                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4546                 bnx2x_func_stats_init(bp);
4547                 bnx2x_hw_stats_post(bp);
4548                 bnx2x_stats_comp(bp);
4549         }
4550
4551         /* restore our func_stx */
4552         bp->func_stx = func_stx;
4553 }
4554
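/*
 * Non-PMF path: read this function's statistics base back from the shmem
 * func_stx location into func_stats_base with a single DMAE transfer, so
 * subsequent updates continue from the previously accumulated values.
 */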
4555 static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4556 {
4557         struct dmae_command *dmae = &bp->stats_dmae;
4558         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4559
4560         /* sanity */
4561         if (!bp->func_stx) {
4562                 BNX2X_ERR("BUG!\n");
4563                 return;
4564         }
4565
4566         bp->executer_idx = 0;
4567         memset(dmae, 0, sizeof(struct dmae_command));
4568
4569         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4570                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4571                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4572 #ifdef __BIG_ENDIAN
4573                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4574 #else
4575                         DMAE_CMD_ENDIANITY_DW_SWAP |
4576 #endif
4577                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4578                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4579         dmae->src_addr_lo = bp->func_stx >> 2;
4580         dmae->src_addr_hi = 0;
4581         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4582         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4583         dmae->len = sizeof(struct host_func_stats) >> 2;
4584         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4585         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4586         dmae->comp_val = DMAE_COMP_VAL;
4587
4588         *stats_comp = 0;
4589         bnx2x_hw_stats_post(bp);
4590         bnx2x_stats_comp(bp);
4591 }
4592
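/*
 * One-time statistics initialization: pick up the port/function stats
 * addresses from shmem (unless running without an MCP), snapshot the NIG
 * discard/truncate and egress packet counters as the "old" values, zero
 * all per-queue and per-device software counters, and leave the state
 * machine in STATS_STATE_DISABLED.
 */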
4593 static void bnx2x_stats_init(struct bnx2x *bp)
4594 {
4595         int port = BP_PORT(bp);
4596         int func = BP_FUNC(bp);
4597         int i;
4598
4599         bp->stats_pending = 0;
4600         bp->executer_idx = 0;
4601         bp->stats_counter = 0;
4602
4603         /* port and func stats for management */
4604         if (!BP_NOMCP(bp)) {
4605                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4606                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4607
4608         } else {
4609                 bp->port.port_stx = 0;
4610                 bp->func_stx = 0;
4611         }
4612         DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
4613            bp->port.port_stx, bp->func_stx);
4614
4615         /* port stats */
4616         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4617         bp->port.old_nig_stats.brb_discard =
4618                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4619         bp->port.old_nig_stats.brb_truncate =
4620                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4621         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4622                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4623         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4624                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4625
4626         /* function stats */
4627         for_each_queue(bp, i) {
4628                 struct bnx2x_fastpath *fp = &bp->fp[i];
4629
4630                 memset(&fp->old_tclient, 0,
4631                        sizeof(struct tstorm_per_client_stats));
4632                 memset(&fp->old_uclient, 0,
4633                        sizeof(struct ustorm_per_client_stats));
4634                 memset(&fp->old_xclient, 0,
4635                        sizeof(struct xstorm_per_client_stats));
4636                 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
4637         }
4638
4639         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4640         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
4641
4642         bp->stats_state = STATS_STATE_DISABLED;
4643
4644         if (bp->port.pmf) {
4645                 if (bp->port.port_stx)
4646                         bnx2x_port_stats_base_init(bp);
4647
4648                 if (bp->func_stx)
4649                         bnx2x_func_stats_base_init(bp);
4650
4651         } else if (bp->func_stx)
4652                 bnx2x_func_stats_base_update(bp);
4653 }
4654
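/*
 * Periodic driver timer: in poll mode it services Tx/Rx of the first
 * fastpath directly; it maintains the driver-pulse heartbeat with the MCP
 * (the two sequence numbers may differ by at most 1) and, while the device
 * is up, triggers a STATS_EVENT_UPDATE.  The timer re-arms itself every
 * bp->current_interval jiffies.
 */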
4655 static void bnx2x_timer(unsigned long data)
4656 {
4657         struct bnx2x *bp = (struct bnx2x *) data;
4658
4659         if (!netif_running(bp->dev))
4660                 return;
4661
4662         if (atomic_read(&bp->intr_sem) != 0)
4663                 goto timer_restart;
4664
4665         if (poll) {
4666                 struct bnx2x_fastpath *fp = &bp->fp[0];
4667                 int rc;
4668
4669                 bnx2x_tx_int(fp);
4670                 rc = bnx2x_rx_int(fp, 1000);
4671         }
4672
4673         if (!BP_NOMCP(bp)) {
4674                 int func = BP_FUNC(bp);
4675                 u32 drv_pulse;
4676                 u32 mcp_pulse;
4677
4678                 ++bp->fw_drv_pulse_wr_seq;
4679                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4680                 /* TBD - add SYSTEM_TIME */
4681                 drv_pulse = bp->fw_drv_pulse_wr_seq;
4682                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4683
4684                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4685                              MCP_PULSE_SEQ_MASK);
4686                 /* The delta between driver pulse and mcp response
4687                  * should be 1 (before mcp response) or 0 (after mcp response)
4688                  */
4689                 if ((drv_pulse != mcp_pulse) &&
4690                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4691                         /* someone lost a heartbeat... */
4692                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4693                                   drv_pulse, mcp_pulse);
4694                 }
4695         }
4696
4697         if ((bp->state == BNX2X_STATE_OPEN) ||
4698             (bp->state == BNX2X_STATE_DISABLED))
4699                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4700
4701 timer_restart:
4702         mod_timer(&bp->timer, jiffies + bp->current_interval);
4703 }
4704
4705 /* end of Statistics */
4706
4707 /* nic init */
4708
4709 /*
4710  * nic init service functions
4711  */
4712
4713 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4714 {
4715         int port = BP_PORT(bp);
4716
4717         /* "CSTORM" */
4718         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4719                         CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4720                         CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4721         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4722                         CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4723                         CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
4724 }
4725
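/*
 * Program a per-queue (non-default) status block: write the host DMA
 * addresses of its USTORM and CSTORM sections into CSTORM internal memory,
 * bind the block to this function, and start with host coalescing disabled
 * on every index (the Rx/Tx CQ indices are re-enabled later by
 * bnx2x_update_coalesce()).
 */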
4726 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4727                           dma_addr_t mapping, int sb_id)
4728 {
4729         int port = BP_PORT(bp);
4730         int func = BP_FUNC(bp);
4731         int index;
4732         u64 section;
4733
4734         /* USTORM */
4735         section = ((u64)mapping) + offsetof(struct host_status_block,
4736                                             u_status_block);
4737         sb->u_status_block.status_block_id = sb_id;
4738
4739         REG_WR(bp, BAR_CSTRORM_INTMEM +
4740                CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4741         REG_WR(bp, BAR_CSTRORM_INTMEM +
4742                ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
4743                U64_HI(section));
4744         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4745                 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
4746
4747         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4748                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4749                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
4750
4751         /* CSTORM */
4752         section = ((u64)mapping) + offsetof(struct host_status_block,
4753                                             c_status_block);
4754         sb->c_status_block.status_block_id = sb_id;
4755
4756         REG_WR(bp, BAR_CSTRORM_INTMEM +
4757                CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
4758         REG_WR(bp, BAR_CSTRORM_INTMEM +
4759                ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
4760                U64_HI(section));
4761         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4762                 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
4763
4764         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4765                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4766                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
4767
4768         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4769 }
4770
4771 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4772 {
4773         int func = BP_FUNC(bp);
4774
4775         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
4776                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4777                         sizeof(struct tstorm_def_status_block)/4);
4778         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4779                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4780                         sizeof(struct cstorm_def_status_block_u)/4);
4781         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4782                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4783                         sizeof(struct cstorm_def_status_block_c)/4);
4784         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
4785                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4786                         sizeof(struct xstorm_def_status_block)/4);
4787 }
4788
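/*
 * Program the default status block: set up the attention section (the AEU
 * enable masks are cached per dynamic attention group and the attention SB
 * address is registered with the HC), then give the USTORM, CSTORM, TSTORM
 * and XSTORM default sections their host addresses with host coalescing
 * disabled on all indices.
 */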
4789 static void bnx2x_init_def_sb(struct bnx2x *bp,
4790                               struct host_def_status_block *def_sb,
4791                               dma_addr_t mapping, int sb_id)
4792 {
4793         int port = BP_PORT(bp);
4794         int func = BP_FUNC(bp);
4795         int index, val, reg_offset;
4796         u64 section;
4797
4798         /* ATTN */
4799         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4800                                             atten_status_block);
4801         def_sb->atten_status_block.status_block_id = sb_id;
4802
4803         bp->attn_state = 0;
4804
4805         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4806                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4807
4808         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4809                 bp->attn_group[index].sig[0] = REG_RD(bp,
4810                                                      reg_offset + 0x10*index);
4811                 bp->attn_group[index].sig[1] = REG_RD(bp,
4812                                                reg_offset + 0x4 + 0x10*index);
4813                 bp->attn_group[index].sig[2] = REG_RD(bp,
4814                                                reg_offset + 0x8 + 0x10*index);
4815                 bp->attn_group[index].sig[3] = REG_RD(bp,
4816                                                reg_offset + 0xc + 0x10*index);
4817         }
4818
4819         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4820                              HC_REG_ATTN_MSG0_ADDR_L);
4821
4822         REG_WR(bp, reg_offset, U64_LO(section));
4823         REG_WR(bp, reg_offset + 4, U64_HI(section));
4824
4825         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4826
4827         val = REG_RD(bp, reg_offset);
4828         val |= sb_id;
4829         REG_WR(bp, reg_offset, val);
4830
4831         /* USTORM */
4832         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4833                                             u_def_status_block);
4834         def_sb->u_def_status_block.status_block_id = sb_id;
4835
4836         REG_WR(bp, BAR_CSTRORM_INTMEM +
4837                CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4838         REG_WR(bp, BAR_CSTRORM_INTMEM +
4839                ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
4840                U64_HI(section));
4841         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4842                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
4843
4844         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4845                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4846                          CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
4847
4848         /* CSTORM */
4849         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4850                                             c_def_status_block);
4851         def_sb->c_def_status_block.status_block_id = sb_id;
4852
4853         REG_WR(bp, BAR_CSTRORM_INTMEM +
4854                CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
4855         REG_WR(bp, BAR_CSTRORM_INTMEM +
4856                ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
4857                U64_HI(section));
4858         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4859                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
4860
4861         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4862                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4863                          CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
4864
4865         /* TSTORM */
4866         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4867                                             t_def_status_block);
4868         def_sb->t_def_status_block.status_block_id = sb_id;
4869
4870         REG_WR(bp, BAR_TSTRORM_INTMEM +
4871                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4872         REG_WR(bp, BAR_TSTRORM_INTMEM +
4873                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4874                U64_HI(section));
4875         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4876                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4877
4878         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4879                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4880                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4881
4882         /* XSTORM */
4883         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4884                                             x_def_status_block);
4885         def_sb->x_def_status_block.status_block_id = sb_id;
4886
4887         REG_WR(bp, BAR_XSTRORM_INTMEM +
4888                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4889         REG_WR(bp, BAR_XSTRORM_INTMEM +
4890                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4891                U64_HI(section));
4892         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4893                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4894
4895         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4896                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4897                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4898
4899         bp->stats_pending = 0;
4900         bp->set_mac_pending = 0;
4901
4902         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4903 }
4904
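/*
 * Apply the Rx/Tx interrupt coalescing settings to every queue's status
 * block.  The timeout registers appear to take units of roughly 12us,
 * hence rx_ticks/12 and tx_ticks/12; when the resulting value is 0, host
 * coalescing on that index is disabled via the HC_DISABLE register.
 */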
4905 static void bnx2x_update_coalesce(struct bnx2x *bp)
4906 {
4907         int port = BP_PORT(bp);
4908         int i;
4909
4910         for_each_queue(bp, i) {
4911                 int sb_id = bp->fp[i].sb_id;
4912
4913                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4914                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4915                         CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4916                                                       U_SB_ETH_RX_CQ_INDEX),
4917                         bp->rx_ticks/12);
4918                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4919                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4920                                                        U_SB_ETH_RX_CQ_INDEX),
4921                          (bp->rx_ticks/12) ? 0 : 1);
4922
4923                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4924                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4925                         CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4926                                                       C_SB_ETH_TX_CQ_INDEX),
4927                         bp->tx_ticks/12);
4928                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4929                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4930                                                        C_SB_ETH_TX_CQ_INDEX),
4931                          (bp->tx_ticks/12) ? 0 : 1);
4932         }
4933 }
4934
4935 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4936                                        struct bnx2x_fastpath *fp, int last)
4937 {
4938         int i;
4939
4940         for (i = 0; i < last; i++) {
4941                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4942                 struct sk_buff *skb = rx_buf->skb;
4943
4944                 if (skb == NULL) {
4945                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4946                         continue;
4947                 }
4948
4949                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4950                         pci_unmap_single(bp->pdev,
4951                                          pci_unmap_addr(rx_buf, mapping),
4952                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4953
4954                 dev_kfree_skb(skb);
4955                 rx_buf->skb = NULL;
4956         }
4957 }
4958
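/*
 * Build the Rx rings for every Rx queue: compute rx_buf_size from the MTU,
 * pre-allocate the TPA (LRO) skb pool when TPA is enabled (falling back to
 * non-TPA on allocation failure), chain the "next page" elements of the
 * SGE, BD and CQE rings, fill the rings with skbs and SGEs, and publish
 * the initial producers to the chip.
 */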
4959 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4960 {
4961         int func = BP_FUNC(bp);
4962         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4963                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4964         u16 ring_prod, cqe_ring_prod;
4965         int i, j;
4966
4967         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4968         DP(NETIF_MSG_IFUP,
4969            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4970
4971         if (bp->flags & TPA_ENABLE_FLAG) {
4972
4973                 for_each_rx_queue(bp, j) {
4974                         struct bnx2x_fastpath *fp = &bp->fp[j];
4975
4976                         for (i = 0; i < max_agg_queues; i++) {
4977                                 fp->tpa_pool[i].skb =
4978                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4979                                 if (!fp->tpa_pool[i].skb) {
4980                                         BNX2X_ERR("Failed to allocate TPA "
4981                                                   "skb pool for queue[%d] - "
4982                                                   "disabling TPA on this "
4983                                                   "queue!\n", j);
4984                                         bnx2x_free_tpa_pool(bp, fp, i);
4985                                         fp->disable_tpa = 1;
4986                                         break;
4987                                 }
4988                                 /* no DMA mapping yet for this pool entry */
4989                                 pci_unmap_addr_set(&fp->tpa_pool[i],
4990                                                    mapping, 0);
4991                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4992                         }
4993                 }
4994         }
4995
4996         for_each_rx_queue(bp, j) {
4997                 struct bnx2x_fastpath *fp = &bp->fp[j];
4998
4999                 fp->rx_bd_cons = 0;
5000                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
5001                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
5002
5003                 /* Mark queue as Rx */
5004                 fp->is_rx_queue = 1;
5005
5006                 /* "next page" elements initialization */
5007                 /* SGE ring */
5008                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
5009                         struct eth_rx_sge *sge;
5010
5011                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
5012                         sge->addr_hi =
5013                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
5014                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5015                         sge->addr_lo =
5016                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
5017                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5018                 }
5019
5020                 bnx2x_init_sge_ring_bit_mask(fp);
5021
5022                 /* RX BD ring */
5023                 for (i = 1; i <= NUM_RX_RINGS; i++) {
5024                         struct eth_rx_bd *rx_bd;
5025
5026                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5027                         rx_bd->addr_hi =
5028                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
5029                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5030                         rx_bd->addr_lo =
5031                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
5032                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5033                 }
5034
5035                 /* CQ ring */
5036                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5037                         struct eth_rx_cqe_next_page *nextpg;
5038
5039                         nextpg = (struct eth_rx_cqe_next_page *)
5040                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5041                         nextpg->addr_hi =
5042                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5043                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5044                         nextpg->addr_lo =
5045                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5046                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5047                 }
5048
5049                 /* Allocate SGEs and initialize the ring elements */
5050                 for (i = 0, ring_prod = 0;
5051                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
5052
5053                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5054                                 BNX2X_ERR("was only able to allocate "
5055                                           "%d rx sges\n", i);
5056                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5057                                 /* Cleanup already allocated elements */
5058                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
5059                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
5060                                 fp->disable_tpa = 1;
5061                                 ring_prod = 0;
5062                                 break;
5063                         }
5064                         ring_prod = NEXT_SGE_IDX(ring_prod);
5065                 }
5066                 fp->rx_sge_prod = ring_prod;
5067
5068                 /* Allocate BDs and initialize BD ring */
5069                 fp->rx_comp_cons = 0;
5070                 cqe_ring_prod = ring_prod = 0;
5071                 for (i = 0; i < bp->rx_ring_size; i++) {
5072                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5073                                 BNX2X_ERR("was only able to allocate "
5074                                           "%d rx skbs on queue[%d]\n", i, j);
5075                                 fp->eth_q_stats.rx_skb_alloc_failed++;
5076                                 break;
5077                         }
5078                         ring_prod = NEXT_RX_IDX(ring_prod);
5079                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5080                         WARN_ON(ring_prod <= i);
5081                 }
5082
5083                 fp->rx_bd_prod = ring_prod;
5084                 /* must not have more available CQEs than BDs */
5085                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5086                                        cqe_ring_prod);
5087                 fp->rx_pkt = fp->rx_calls = 0;
5088
5089                 /* Warning!
5090                  * this will generate an interrupt (to the TSTORM)
5091                  * must only be done after chip is initialized
5092                  */
5093                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5094                                      fp->rx_sge_prod);
5095                 if (j != 0)
5096                         continue;
5097
5098                 REG_WR(bp, BAR_USTRORM_INTMEM +
5099                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5100                        U64_LO(fp->rx_comp_mapping));
5101                 REG_WR(bp, BAR_USTRORM_INTMEM +
5102                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5103                        U64_HI(fp->rx_comp_mapping));
5104         }
5105 }
5106
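/*
 * Initialize every Tx ring: link the last BD of each page to the first BD
 * of the next page, reset the doorbell data and the producer/consumer
 * indices, and clear the per-queue Tx packet counters.
 */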
5107 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5108 {
5109         int i, j;
5110
5111         for_each_tx_queue(bp, j) {
5112                 struct bnx2x_fastpath *fp = &bp->fp[j];
5113
5114                 for (i = 1; i <= NUM_TX_RINGS; i++) {
5115                         struct eth_tx_next_bd *tx_next_bd =
5116                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5117
5118                         tx_next_bd->addr_hi =
5119                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5120                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5121                         tx_next_bd->addr_lo =
5122                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5123                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5124                 }
5125
5126                 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5127                 fp->tx_db.data.zero_fill1 = 0;
5128                 fp->tx_db.data.prod = 0;
5129
5130                 fp->tx_pkt_prod = 0;
5131                 fp->tx_pkt_cons = 0;
5132                 fp->tx_bd_prod = 0;
5133                 fp->tx_bd_cons = 0;
5134                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5135                 fp->tx_pkt = 0;
5136         }
5137
5138         /* clean tx statistics */
5139         for_each_rx_queue(bp, i)
5140                 bnx2x_fp(bp, i, tx_pkt) = 0;
5141 }
5142
5143 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5144 {
5145         int func = BP_FUNC(bp);
5146
5147         spin_lock_init(&bp->spq_lock);
5148
5149         bp->spq_left = MAX_SPQ_PENDING;
5150         bp->spq_prod_idx = 0;
5151         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5152         bp->spq_prod_bd = bp->spq;
5153         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5154
5155         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5156                U64_LO(bp->spq_mapping));
5157         REG_WR(bp,
5158                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5159                U64_HI(bp->spq_mapping));
5160
5161         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5162                bp->spq_prod_idx);
5163 }
5164
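/*
 * Fill the per-connection ETH context: the USTORM section of each Rx
 * context describes the Rx side (client id, status block, BD/SGE page
 * bases, TPA parameters), while the CSTORM/XSTORM sections of the Tx
 * contexts carry the Tx CQ index, the Tx BD page base and the statistics
 * counter id.
 */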
5165 static void bnx2x_init_context(struct bnx2x *bp)
5166 {
5167         int i;
5168
5169         for_each_rx_queue(bp, i) {
5170                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5171                 struct bnx2x_fastpath *fp = &bp->fp[i];
5172                 u8 cl_id = fp->cl_id;
5173
5174                 context->ustorm_st_context.common.sb_index_numbers =
5175                                                 BNX2X_RX_SB_INDEX_NUM;
5176                 context->ustorm_st_context.common.clientId = cl_id;
5177                 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5178                 context->ustorm_st_context.common.flags =
5179                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5180                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5181                 context->ustorm_st_context.common.statistics_counter_id =
5182                                                 cl_id;
5183                 context->ustorm_st_context.common.mc_alignment_log_size =
5184                                                 BNX2X_RX_ALIGN_SHIFT;
5185                 context->ustorm_st_context.common.bd_buff_size =
5186                                                 bp->rx_buf_size;
5187                 context->ustorm_st_context.common.bd_page_base_hi =
5188                                                 U64_HI(fp->rx_desc_mapping);
5189                 context->ustorm_st_context.common.bd_page_base_lo =
5190                                                 U64_LO(fp->rx_desc_mapping);
5191                 if (!fp->disable_tpa) {
5192                         context->ustorm_st_context.common.flags |=
5193                                 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5194                         context->ustorm_st_context.common.sge_buff_size =
5195                                 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5196                                          (u32)0xffff);
5197                         context->ustorm_st_context.common.sge_page_base_hi =
5198                                                 U64_HI(fp->rx_sge_mapping);
5199                         context->ustorm_st_context.common.sge_page_base_lo =
5200                                                 U64_LO(fp->rx_sge_mapping);
5201
5202                         context->ustorm_st_context.common.max_sges_for_packet =
5203                                 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5204                         context->ustorm_st_context.common.max_sges_for_packet =
5205                                 ((context->ustorm_st_context.common.
5206                                   max_sges_for_packet + PAGES_PER_SGE - 1) &
5207                                  (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5208                 }
5209
5210                 context->ustorm_ag_context.cdu_usage =
5211                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5212                                                CDU_REGION_NUMBER_UCM_AG,
5213                                                ETH_CONNECTION_TYPE);
5214
5215                 context->xstorm_ag_context.cdu_reserved =
5216                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5217                                                CDU_REGION_NUMBER_XCM_AG,
5218                                                ETH_CONNECTION_TYPE);
5219         }
5220
5221         for_each_tx_queue(bp, i) {
5222                 struct bnx2x_fastpath *fp = &bp->fp[i];
5223                 struct eth_context *context =
5224                         bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);
5225
5226                 context->cstorm_st_context.sb_index_number =
5227                                                 C_SB_ETH_TX_CQ_INDEX;
5228                 context->cstorm_st_context.status_block_id = fp->sb_id;
5229
5230                 context->xstorm_st_context.tx_bd_page_base_hi =
5231                                                 U64_HI(fp->tx_desc_mapping);
5232                 context->xstorm_st_context.tx_bd_page_base_lo =
5233                                                 U64_LO(fp->tx_desc_mapping);
5234                 context->xstorm_st_context.statistics_data = (fp->cl_id |
5235                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5236         }
5237 }
5238
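/*
 * Program the RSS indirection table: when multi-queue is enabled, spread
 * the TSTORM_INDIRECTION_TABLE_SIZE entries round-robin over the Rx
 * queues' client ids.
 */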
5239 static void bnx2x_init_ind_table(struct bnx2x *bp)
5240 {
5241         int func = BP_FUNC(bp);
5242         int i;
5243
5244         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5245                 return;
5246
5247         DP(NETIF_MSG_IFUP,
5248            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
5249         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5250                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5251                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5252                         bp->fp->cl_id + (i % bp->num_rx_queues));
5253 }
5254
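/*
 * Push the per-client TSTORM configuration (MTU, statistics id, E1H
 * outer-VLAN removal and, when HW VLAN stripping is active, VLAN removal)
 * for every queue of this port.
 */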
5255 static void bnx2x_set_client_config(struct bnx2x *bp)
5256 {
5257         struct tstorm_eth_client_config tstorm_client = {0};
5258         int port = BP_PORT(bp);
5259         int i;
5260
5261         tstorm_client.mtu = bp->dev->mtu;
5262         tstorm_client.config_flags =
5263                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5264                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5265 #ifdef BCM_VLAN
5266         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5267                 tstorm_client.config_flags |=
5268                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5269                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5270         }
5271 #endif
5272
5273         for_each_queue(bp, i) {
5274                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5275
5276                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5277                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5278                        ((u32 *)&tstorm_client)[0]);
5279                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5280                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5281                        ((u32 *)&tstorm_client)[1]);
5282         }
5283
5284         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5285            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5286 }
5287
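/*
 * Translate the requested Rx mode (none/normal/allmulti/promisc) into the
 * TSTORM per-function MAC filtering masks and the NIG LLH0/LLH1 BRB1
 * driver mask, then refresh the per-client configuration unless Rx is
 * completely disabled.
 */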
5288 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5289 {
5290         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5291         int mode = bp->rx_mode;
5292         int mask = bp->rx_mode_cl_mask;
5293         int func = BP_FUNC(bp);
5294         int port = BP_PORT(bp);
5295         int i;
5296         /* All but management unicast packets should pass to the host as well */
5297         u32 llh_mask =
5298                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5299                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5300                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5301                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5302
5303         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
5304
5305         switch (mode) {
5306         case BNX2X_RX_MODE_NONE: /* no Rx */
5307                 tstorm_mac_filter.ucast_drop_all = mask;
5308                 tstorm_mac_filter.mcast_drop_all = mask;
5309                 tstorm_mac_filter.bcast_drop_all = mask;
5310                 break;
5311
5312         case BNX2X_RX_MODE_NORMAL:
5313                 tstorm_mac_filter.bcast_accept_all = mask;
5314                 break;
5315
5316         case BNX2X_RX_MODE_ALLMULTI:
5317                 tstorm_mac_filter.mcast_accept_all = mask;
5318                 tstorm_mac_filter.bcast_accept_all = mask;
5319                 break;
5320
5321         case BNX2X_RX_MODE_PROMISC:
5322                 tstorm_mac_filter.ucast_accept_all = mask;
5323                 tstorm_mac_filter.mcast_accept_all = mask;
5324                 tstorm_mac_filter.bcast_accept_all = mask;
5325                 /* pass management unicast packets as well */
5326                 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5327                 break;
5328
5329         default:
5330                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5331                 break;
5332         }
5333
5334         REG_WR(bp,
5335                (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5336                llh_mask);
5337
5338         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5339                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5340                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5341                        ((u32 *)&tstorm_mac_filter)[i]);
5342
5343 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5344                    ((u32 *)&tstorm_mac_filter)[i]); */
5345         }
5346
5347         if (mode != BNX2X_RX_MODE_NONE)
5348                 bnx2x_set_client_config(bp);
5349 }
5350
5351 static void bnx2x_init_internal_common(struct bnx2x *bp)
5352 {
5353         int i;
5354
5355         /* Zero this manually as its initialization is
5356            currently missing in the initTool */
5357         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5358                 REG_WR(bp, BAR_USTRORM_INTMEM +
5359                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
5360 }
5361
5362 static void bnx2x_init_internal_port(struct bnx2x *bp)
5363 {
5364         int port = BP_PORT(bp);
5365
5366         REG_WR(bp,
5367                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5368         REG_WR(bp,
5369                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5370         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5371         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5372 }
5373
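/* Per-function internal memory initialization: RSS/TPA configuration flags,
 * the initial storm rx mode, per-client statistics counters, statistics
 * flags and DMA addresses, CQE page bases and the aggregation size for each
 * Rx queue, dropless flow-control thresholds (E1H only) and the rate-shaping
 * and fairness (min-max) setup for multi-function mode.
 */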
5374 static void bnx2x_init_internal_func(struct bnx2x *bp)
5375 {
5376         struct tstorm_eth_function_common_config tstorm_config = {0};
5377         struct stats_indication_flags stats_flags = {0};
5378         int port = BP_PORT(bp);
5379         int func = BP_FUNC(bp);
5380         int i, j;
5381         u32 offset;
5382         u16 max_agg_size;
5383
5384         if (is_multi(bp)) {
5385                 tstorm_config.config_flags = MULTI_FLAGS(bp);
5386                 tstorm_config.rss_result_mask = MULTI_MASK;
5387         }
5388
5389         /* Enable TPA if needed */
5390         if (bp->flags & TPA_ENABLE_FLAG)
5391                 tstorm_config.config_flags |=
5392                         TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5393
5394         if (IS_E1HMF(bp))
5395                 tstorm_config.config_flags |=
5396                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5397
5398         tstorm_config.leading_client_id = BP_L_ID(bp);
5399
5400         REG_WR(bp, BAR_TSTRORM_INTMEM +
5401                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5402                (*(u32 *)&tstorm_config));
5403
5404         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5405         bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
5406         bnx2x_set_storm_rx_mode(bp);
5407
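        /* Clear the per-client statistics counters kept by the X, T and U
         * storms so that a fresh load starts counting from zero.
         */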
5408         for_each_queue(bp, i) {
5409                 u8 cl_id = bp->fp[i].cl_id;
5410
5411                 /* reset xstorm per client statistics */
5412                 offset = BAR_XSTRORM_INTMEM +
5413                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5414                 for (j = 0;
5415                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5416                         REG_WR(bp, offset + j*4, 0);
5417
5418                 /* reset tstorm per client statistics */
5419                 offset = BAR_TSTRORM_INTMEM +
5420                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5421                 for (j = 0;
5422                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5423                         REG_WR(bp, offset + j*4, 0);
5424
5425                 /* reset ustorm per client statistics */
5426                 offset = BAR_USTRORM_INTMEM +
5427                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5428                 for (j = 0;
5429                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5430                         REG_WR(bp, offset + j*4, 0);
5431         }
5432
5433         /* Init statistics related context */
5434         stats_flags.collect_eth = 1;
5435
5436         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5437                ((u32 *)&stats_flags)[0]);
5438         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5439                ((u32 *)&stats_flags)[1]);
5440
5441         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5442                ((u32 *)&stats_flags)[0]);
5443         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5444                ((u32 *)&stats_flags)[1]);
5445
5446         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5447                ((u32 *)&stats_flags)[0]);
5448         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5449                ((u32 *)&stats_flags)[1]);
5450
5451         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5452                ((u32 *)&stats_flags)[0]);
5453         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5454                ((u32 *)&stats_flags)[1]);
5455
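        /* Give each storm the DMA address of the host fw_stats buffer that
         * firmware statistics queries are collected into.
         */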
5456         REG_WR(bp, BAR_XSTRORM_INTMEM +
5457                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5458                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5459         REG_WR(bp, BAR_XSTRORM_INTMEM +
5460                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5461                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5462
5463         REG_WR(bp, BAR_TSTRORM_INTMEM +
5464                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5465                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5466         REG_WR(bp, BAR_TSTRORM_INTMEM +
5467                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5468                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5469
5470         REG_WR(bp, BAR_USTRORM_INTMEM +
5471                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5472                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5473         REG_WR(bp, BAR_USTRORM_INTMEM +
5474                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5475                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5476
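        /* On E1H devices, tell each storm whether the chip runs in
         * multi-function mode and program this function's outer VLAN
         * (e1hov) in Xstorm.
         */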
5477         if (CHIP_IS_E1H(bp)) {
5478                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5479                         IS_E1HMF(bp));
5480                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5481                         IS_E1HMF(bp));
5482                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5483                         IS_E1HMF(bp));
5484                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5485                         IS_E1HMF(bp));
5486
5487                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5488                          bp->e1hov);
5489         }
5490
5491         /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
5492         max_agg_size =
5493                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5494                           SGE_PAGE_SIZE * PAGES_PER_SGE),
5495                     (u32)0xffff);
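        /* max_agg_size is programmed with a 16-bit register write below,
         * hence the additional cap at 0xffff.
         */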
5496         for_each_rx_queue(bp, i) {
5497                 struct bnx2x_fastpath *fp = &bp->fp[i];
5498
5499                 REG_WR(bp, BAR_USTRORM_INTMEM +
5500                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5501                        U64_LO(fp->rx_comp_mapping));
5502                 REG_WR(bp, BAR_USTRORM_INTMEM +
5503                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5504                        U64_HI(fp->rx_comp_mapping));
5505
5506                 /* Next page */
5507                 REG_WR(bp, BAR_USTRORM_INTMEM +
5508                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5509                        U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5510                 REG_WR(bp, BAR_USTRORM_INTMEM +
5511                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5512                        U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5513
5514                 REG_WR16(bp, BAR_USTRORM_INTMEM +
5515                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5516                          max_agg_size);
5517         }
5518
5519         /* dropless flow control */
5520         if (CHIP_IS_E1H(bp)) {
5521                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5522
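                /* Low/high watermarks for the BD and CQE rings (presumably
                 * the XOFF/XON points for dropless flow control); SGE
                 * thresholds are only raised in the loop below for queues
                 * that use TPA, since only those have an active SGE ring.
                 */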
5523                 rx_pause.bd_thr_low = 250;
5524                 rx_pause.cqe_thr_low = 250;
5525                 rx_pause.cos = 1;
5526                 rx_pause.sge_thr_low = 0;
5527                 rx_pause.bd_thr_high = 350;
5528                 rx_pause.cqe_thr_high = 350;
5529                 rx_pause.sge_thr_high = 0;
5530
5531                 for_each_rx_queue(bp, i) {
5532                         struct bnx2x_fastpath *fp = &bp->fp[i];
5533
5534                         if (!fp->disable_tpa) {
5535                                 rx_pause.sge_thr_low = 150;
5536                                 rx_pause.sge_thr_high = 250;
5537                         }
5538
5539
5540                         offset = BAR_USTRORM_INTMEM +
5541                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5542                                                                    fp->cl_id);
5543                         for (j = 0;
5544                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5545                              j++)
5546                                 REG_WR(bp, offset + j*4,
5547                                        ((u32 *)&rx_pause)[j]);
5548                 }
5549         }
5550
5551         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5552
5553         /* Init rate shaping and fairness contexts */
5554         if (IS_E1HMF(bp)) {
5555                 int vn;
5556
5557                 /* During init there is no active link;
5558                    until link is up, set the link rate to 10Gbps */
5559                 bp->link_vars.line_speed = SPEED_10000;
5560                 bnx2x_init_port_minmax(bp);
5561
5562                 bnx2x_calc_vn_weight_sum(bp);
5563
5564                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5565                         bnx2x_init_vn_minmax(bp, 2*vn + port);
5566
5567                 /* Enable rate shaping and fairness */
5568                 bp->cmng.flags.cmng_enables =
5569                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5570                 if (bp->vn_weight_sum)
5571                         bp->cmng.flags.cmng_enables |=
5572                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5573                 else
5574                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5575                            "  fairness will be disabled\n");
5576         } else {
5577                 /* rate shaping and fairness are disabled */
5578                 DP(NETIF_MSG_IFUP,
5579                    "single function mode  minmax will be disabled\n");
5580         }
5581
5582
5583         /* Store it to internal memory */
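        /* (only the PMF, the port management function, is expected to
         * program the per-port CMNG area, hence the bp->port.pmf check) */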
5584         if (bp->port.pmf)
5585                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5586                         REG_WR(bp, BAR_XSTRORM_INTMEM +
5587                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5588                                ((u32 *)(&bp->cmng))[i]);
5589 }
5590
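/* The load_code cases below intentionally fall through: a COMMON load does
 * common, port and function initialization, a PORT load does port and
 * function initialization, and a FUNCTION load only the function part.
 */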
5591 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5592 {
5593         switch (load_code) {
5594         case FW_MSG_CODE_DRV_LOAD_COMMON:
5595                 bnx2x_init_internal_common(bp);
5596                 /* no break */
5597
5598         case FW_MSG_CODE_DRV_LOAD_PORT:
5599                 bnx2x_init_internal_port(bp);
5600                 /* no break */
5601
5602         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5603                 bnx2x_init_internal_func(bp);
5604                 break;
5605
5606         default:
5607                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5608                 break;
5609         }
5610 }
5611
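/* Top-level NIC initialization, called with the load_code obtained from the
 * MCP: set up the fastpath structures and their status blocks, the default
 * status block, the Rx/Tx/slowpath rings, contexts and internal memory,
 * the RSS indirection table and statistics, then enable interrupts and
 * check for a pending SPIO5 attention.
 */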
5612 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5613 {
5614         int i;
5615
5616         for_each_queue(bp, i) {
5617                 struct bnx2x_fastpath *fp = &bp->fp[i];
5618
5619                 fp->bp = bp;
5620                 fp->state = BNX2X_FP_STATE_CLOSED;
5621                 fp->index = i;
5622                 fp->cl_id = BP_L_ID(bp) + i;
5623 #ifdef BCM_CNIC
5624                 fp->sb_id = fp->cl_id + 1;
5625 #else
5626                 fp->sb_id = fp->cl_id;
5627 #endif
5628                 /* Suitable Rx and Tx SBs are served by the same client */
5629                 if (i >= bp->num_rx_queues)
5630                         fp->cl_id -= bp->num_rx_queues;
5631                 DP(NETIF_MSG_IFUP,
5632                    "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
5633                    i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5634                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5635                               fp->sb_id);
5636                 bnx2x_update_fpsb_idx(fp);
5637         }
5638
5639         /* ensure status block indices were read */
5640         rmb();
5641
5642
5643         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5644                           DEF_SB_ID);
5645         bnx2x_update_dsb_idx(bp);
5646         bnx2x_update_coalesce(bp);
5647         bnx2x_init_rx_rings(bp);
5648         bnx2x_init_tx_ring(bp);
5649         bnx2x_init_sp_ring(bp);
5650         bnx2x_init_context(bp);
5651         bnx2x_init_internal(bp, load_code);
5652         bnx2x_init_ind_table(bp);
5653         bnx2x_stats_init(bp);
5654
5655         /* At this point, we are ready for interrupts */
5656         atomic_set(&bp->intr_sem, 0);
5657
5658         /* flush all before enabling interrupts */
5659         mb();
5660         mmiowb();
5661
5662         bnx2x_int_enable(bp);
5663
5664         /* Check for SPIO5 */
5665         bnx2x_attn_int_deasserted0(bp,
5666                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5667                                    AEU_INPUTS_ATTN_BITS_SPIO5);
5668 }
5669
5670 /* end of nic init */
5671
5672 /*
5673  * gzip service functions
5674  */
5675
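/* Allocate the DMA-coherent buffer that firmware sections are inflated
 * into, plus the zlib stream and its workspace.  Each failure path unwinds
 * the allocations made before it.
 */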
5676 static int bnx2x_gunzip_init(struct bnx2x *bp)
5677 {
5678         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5679                                               &bp->gunzip_mapping);
5680         if (bp->gunzip_buf == NULL)
5681                 goto gunzip_nomem1;
5682
5683         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5684         if (bp->strm == NULL)
5685                 goto gunzip_nomem2;
5686
5687         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5688                                       GFP_KERNEL);
5689         if (bp->strm->workspace == NULL)
5690                 goto gunzip_nomem3;
5691
5692         return 0;
5693
5694 gunzip_nomem3:
5695         kfree(bp->strm);
5696         bp->strm = NULL;
5697
5698 gunzip_nomem2:
5699         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5700                             bp->gunzip_mapping);
5701         bp->gunzip_buf = NULL;
5702
5703 gunzip_nomem1:
5704         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5705                " decompression\n", bp->dev->name);
5706         return -ENOMEM;
5707 }
5708
5709 static void bnx2x_gunzip_end(struct bnx2x *bp)
5710 {
5711         kfree(bp->strm->workspace);
5712
5713         kfree(bp->strm);
5714         bp->strm = NULL;
5715
5716         if (bp->gunzip_buf) {
5717                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5718                                     bp->gunzip_mapping);
5719                 bp->gunzip_buf = NULL;
5720         }
5721 }
5722
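/* Inflate one gzip-compressed firmware blob into bp->gunzip_buf.  The gzip
 * header is validated and skipped by hand (including the optional original
 * file name field flagged by FNAME) because inflate is initialized in raw
 * mode (-MAX_WBITS) and therefore expects no gzip framing.
 */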
5723 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5724 {
5725         int n, rc;
5726
5727         /* check gzip header */
5728         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5729                 BNX2X_ERR("Bad gzip header\n");
5730                 return -EINVAL;
5731         }
5732
5733         n = 10;
5734
5735 #define FNAME                           0x8
5736
5737         if (zbuf[3] & FNAME)
5738                 while ((zbuf[n++] != 0) && (n < len));
5739
5740         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5741         bp->strm->avail_in = len - n;
5742         bp->strm->next_out = bp->gunzip_buf;
5743         bp->strm->avail_out = FW_BUF_SIZE;
5744
5745         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5746         if (rc != Z_OK)
5747                 return rc;
5748
5749         rc = zlib_inflate(bp->strm, Z_FINISH);
5750         if ((rc != Z_OK) && (rc != Z_STREAM_END))
5751                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5752                        bp->dev->name, bp->strm->msg);
5753
5754         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5755