bnx2x: Allowing 0 as initial fairness value
1 /* bnx2x_main.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2009 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath and fastpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h>  /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
51 #include <linux/io.h>
52
53
54 #include "bnx2x.h"
55 #include "bnx2x_init.h"
56 #include "bnx2x_init_ops.h"
57 #include "bnx2x_dump.h"
58
59 #define DRV_MODULE_VERSION      "1.52.1"
60 #define DRV_MODULE_RELDATE      "2009/08/12"
61 #define BNX2X_BC_VER            0x040200
62
63 #include <linux/firmware.h>
64 #include "bnx2x_fw_file_hdr.h"
65 /* FW files */
66 #define FW_FILE_PREFIX_E1       "bnx2x-e1-"
67 #define FW_FILE_PREFIX_E1H      "bnx2x-e1h-"
68
69 /* Time in jiffies before concluding the transmitter is hung */
70 #define TX_TIMEOUT              (5*HZ)
71
72 static char version[] __devinitdata =
73         "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
74         DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
75
76 MODULE_AUTHOR("Eliezer Tamir");
77 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
78 MODULE_LICENSE("GPL");
79 MODULE_VERSION(DRV_MODULE_VERSION);
80
81 static int multi_mode = 1;
82 module_param(multi_mode, int, 0);
83 MODULE_PARM_DESC(multi_mode, " Multi queue mode "
84                              "(0 Disable; 1 Enable (default))");
85
86 static int num_rx_queues;
87 module_param(num_rx_queues, int, 0);
88 MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
89                                 " (default is half number of CPUs)");
90
91 static int num_tx_queues;
92 module_param(num_tx_queues, int, 0);
93 MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
94                                 " (default is half number of CPUs)");
95
96 static int disable_tpa;
97 module_param(disable_tpa, int, 0);
98 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
99
100 static int int_mode;
101 module_param(int_mode, int, 0);
102 MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
103
104 static int dropless_fc;
105 module_param(dropless_fc, int, 0);
106 MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
107
108 static int poll;
109 module_param(poll, int, 0);
110 MODULE_PARM_DESC(poll, " Use polling (for debug)");
111
112 static int mrrs = -1;
113 module_param(mrrs, int, 0);
114 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
115
116 static int debug;
117 module_param(debug, int, 0);
118 MODULE_PARM_DESC(debug, " Default debug msglevel");
119
120 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
121
122 static struct workqueue_struct *bnx2x_wq;
123
124 enum bnx2x_board_type {
125         BCM57710 = 0,
126         BCM57711 = 1,
127         BCM57711E = 2,
128 };
129
130 /* indexed by board_type, above */
131 static struct {
132         char *name;
133 } board_info[] __devinitdata = {
134         { "Broadcom NetXtreme II BCM57710 XGb" },
135         { "Broadcom NetXtreme II BCM57711 XGb" },
136         { "Broadcom NetXtreme II BCM57711E XGb" }
137 };
138
139
140 static const struct pci_device_id bnx2x_pci_tbl[] = {
141         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
142         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
143         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
144         { 0 }
145 };
146
147 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
148
149 /****************************************************************************
150 * General service functions
151 ****************************************************************************/
152
153 /* used only at init
154  * locking is done by mcp
155  */
156 void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
157 {
158         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
159         pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
160         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
161                                PCICFG_VENDOR_ID_OFFSET);
162 }
163
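/* Companion to bnx2x_reg_wr_ind(): indirect GRC register read through the
 * PCI config window (the GRC address register is restored to the vendor ID
 * offset when done).
 */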
164 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
165 {
166         u32 val;
167
168         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
169         pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
170         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
171                                PCICFG_VENDOR_ID_OFFSET);
172
173         return val;
174 }
175
176 static const u32 dmae_reg_go_c[] = {
177         DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
178         DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
179         DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
180         DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
181 };
182
183 /* copy command into DMAE command memory and set DMAE command go */
184 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
185                             int idx)
186 {
187         u32 cmd_offset;
188         int i;
189
190         cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
191         for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
192                 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
193
194                 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
195                    idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
196         }
197         REG_WR(bp, dmae_reg_go_c[idx], 1);
198 }
199
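/* DMA len32 dwords from the host buffer at dma_addr into chip (GRC) address
 * space at dst_addr.  Falls back to indirect writes while the DMAE block is
 * not yet ready, and busy-waits on the slowpath wb_comp word (under
 * dmae_mutex) for completion.
 *
 * Illustrative call, a sketch only (names here are made up; compare with
 * bnx2x_wb_wr() below, which does the same thing through REG_WR_DMAE()):
 *
 *	u32 *wb = bnx2x_sp(bp, wb_data[0]);
 *	wb[0] = val_hi;
 *	wb[1] = val_lo;
 *	bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), some_wb_reg, 2);
 */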
200 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
201                       u32 len32)
202 {
203         struct dmae_command dmae;
204         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
205         int cnt = 200;
206
207         if (!bp->dmae_ready) {
208                 u32 *data = bnx2x_sp(bp, wb_data[0]);
209
210                 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
211                    "  using indirect\n", dst_addr, len32);
212                 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
213                 return;
214         }
215
216         memset(&dmae, 0, sizeof(struct dmae_command));
217
218         dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
219                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
220                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
221 #ifdef __BIG_ENDIAN
222                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
223 #else
224                        DMAE_CMD_ENDIANITY_DW_SWAP |
225 #endif
226                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
227                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
228         dmae.src_addr_lo = U64_LO(dma_addr);
229         dmae.src_addr_hi = U64_HI(dma_addr);
230         dmae.dst_addr_lo = dst_addr >> 2;
231         dmae.dst_addr_hi = 0;
232         dmae.len = len32;
233         dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
234         dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
235         dmae.comp_val = DMAE_COMP_VAL;
236
237         DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
238            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
239                     "dst_addr [%x:%08x (%08x)]\n"
240            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
241            dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
242            dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
243            dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
244         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
245            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
246            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
247
248         mutex_lock(&bp->dmae_mutex);
249
250         *wb_comp = 0;
251
252         bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
253
254         udelay(5);
255
256         while (*wb_comp != DMAE_COMP_VAL) {
257                 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
258
259                 if (!cnt) {
260                         BNX2X_ERR("DMAE timeout!\n");
261                         break;
262                 }
263                 cnt--;
264                 /* adjust delay for emulation/FPGA */
265                 if (CHIP_REV_IS_SLOW(bp))
266                         msleep(100);
267                 else
268                         udelay(5);
269         }
270
271         mutex_unlock(&bp->dmae_mutex);
272 }
273
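/* DMA len32 dwords from chip (GRC) address src_addr into the slowpath
 * wb_data[] buffer.  Like bnx2x_write_dmae(), it falls back to indirect
 * register reads while the DMAE block is not ready and polls wb_comp for
 * completion.
 */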
274 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
275 {
276         struct dmae_command dmae;
277         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
278         int cnt = 200;
279
280         if (!bp->dmae_ready) {
281                 u32 *data = bnx2x_sp(bp, wb_data[0]);
282                 int i;
283
284                 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
285                    "  using indirect\n", src_addr, len32);
286                 for (i = 0; i < len32; i++)
287                         data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
288                 return;
289         }
290
291         memset(&dmae, 0, sizeof(struct dmae_command));
292
293         dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
294                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
295                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
296 #ifdef __BIG_ENDIAN
297                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
298 #else
299                        DMAE_CMD_ENDIANITY_DW_SWAP |
300 #endif
301                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
302                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
303         dmae.src_addr_lo = src_addr >> 2;
304         dmae.src_addr_hi = 0;
305         dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
306         dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
307         dmae.len = len32;
308         dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
309         dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
310         dmae.comp_val = DMAE_COMP_VAL;
311
312         DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
313            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
314                     "dst_addr [%x:%08x (%08x)]\n"
315            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
316            dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
317            dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
318            dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
319
320         mutex_lock(&bp->dmae_mutex);
321
322         memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
323         *wb_comp = 0;
324
325         bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
326
327         udelay(5);
328
329         while (*wb_comp != DMAE_COMP_VAL) {
330
331                 if (!cnt) {
332                         BNX2X_ERR("DMAE timeout!\n");
333                         break;
334                 }
335                 cnt--;
336                 /* adjust delay for emulation/FPGA */
337                 if (CHIP_REV_IS_SLOW(bp))
338                         msleep(100);
339                 else
340                         udelay(5);
341         }
342         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
343            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
344            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
345
346         mutex_unlock(&bp->dmae_mutex);
347 }
348
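/* Write an arbitrarily long buffer by splitting it into chunks of at most
 * DMAE_LEN32_WR_MAX dwords; phys_addr and addr advance together (the offset
 * is kept in bytes, hence the * 4).
 */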
349 void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
350                                u32 addr, u32 len)
351 {
352         int offset = 0;
353
354         while (len > DMAE_LEN32_WR_MAX) {
355                 bnx2x_write_dmae(bp, phys_addr + offset,
356                                  addr + offset, DMAE_LEN32_WR_MAX);
357                 offset += DMAE_LEN32_WR_MAX * 4;
358                 len -= DMAE_LEN32_WR_MAX;
359         }
360
361         bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
362 }
363
364 /* used only for slowpath so not inlined */
365 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
366 {
367         u32 wb_write[2];
368
369         wb_write[0] = val_hi;
370         wb_write[1] = val_lo;
371         REG_WR_DMAE(bp, reg, wb_write, 2);
372 }
373
374 #ifdef USE_WB_RD
375 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
376 {
377         u32 wb_data[2];
378
379         REG_RD_DMAE(bp, reg, wb_data, 2);
380
381         return HILO_U64(wb_data[0], wb_data[1]);
382 }
383 #endif
384
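/* Scan the assert lists of the X/T/C/U storm processors in internal RAM and
 * print every valid entry; returns the number of asserts found.
 */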
385 static int bnx2x_mc_assert(struct bnx2x *bp)
386 {
387         char last_idx;
388         int i, rc = 0;
389         u32 row0, row1, row2, row3;
390
391         /* XSTORM */
392         last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
393                            XSTORM_ASSERT_LIST_INDEX_OFFSET);
394         if (last_idx)
395                 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
396
397         /* print the asserts */
398         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
399
400                 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
401                               XSTORM_ASSERT_LIST_OFFSET(i));
402                 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
403                               XSTORM_ASSERT_LIST_OFFSET(i) + 4);
404                 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
405                               XSTORM_ASSERT_LIST_OFFSET(i) + 8);
406                 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
407                               XSTORM_ASSERT_LIST_OFFSET(i) + 12);
408
409                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
410                         BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
411                                   " 0x%08x 0x%08x 0x%08x\n",
412                                   i, row3, row2, row1, row0);
413                         rc++;
414                 } else {
415                         break;
416                 }
417         }
418
419         /* TSTORM */
420         last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
421                            TSTORM_ASSERT_LIST_INDEX_OFFSET);
422         if (last_idx)
423                 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
424
425         /* print the asserts */
426         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
427
428                 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
429                               TSTORM_ASSERT_LIST_OFFSET(i));
430                 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
431                               TSTORM_ASSERT_LIST_OFFSET(i) + 4);
432                 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
433                               TSTORM_ASSERT_LIST_OFFSET(i) + 8);
434                 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
435                               TSTORM_ASSERT_LIST_OFFSET(i) + 12);
436
437                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
438                         BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
439                                   " 0x%08x 0x%08x 0x%08x\n",
440                                   i, row3, row2, row1, row0);
441                         rc++;
442                 } else {
443                         break;
444                 }
445         }
446
447         /* CSTORM */
448         last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
449                            CSTORM_ASSERT_LIST_INDEX_OFFSET);
450         if (last_idx)
451                 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
452
453         /* print the asserts */
454         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
455
456                 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
457                               CSTORM_ASSERT_LIST_OFFSET(i));
458                 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
459                               CSTORM_ASSERT_LIST_OFFSET(i) + 4);
460                 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
461                               CSTORM_ASSERT_LIST_OFFSET(i) + 8);
462                 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
463                               CSTORM_ASSERT_LIST_OFFSET(i) + 12);
464
465                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
466                         BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
467                                   " 0x%08x 0x%08x 0x%08x\n",
468                                   i, row3, row2, row1, row0);
469                         rc++;
470                 } else {
471                         break;
472                 }
473         }
474
475         /* USTORM */
476         last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
477                            USTORM_ASSERT_LIST_INDEX_OFFSET);
478         if (last_idx)
479                 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
480
481         /* print the asserts */
482         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
483
484                 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
485                               USTORM_ASSERT_LIST_OFFSET(i));
486                 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
487                               USTORM_ASSERT_LIST_OFFSET(i) + 4);
488                 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
489                               USTORM_ASSERT_LIST_OFFSET(i) + 8);
490                 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
491                               USTORM_ASSERT_LIST_OFFSET(i) + 12);
492
493                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
494                         BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
495                                   " 0x%08x 0x%08x 0x%08x\n",
496                                   i, row3, row2, row1, row0);
497                         rc++;
498                 } else {
499                         break;
500                 }
501         }
502
503         return rc;
504 }
505
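/* Dump the MCP firmware trace from the scratchpad RAM: read the current
 * mark and print the buffer in two chunks, wrapping around the mark.
 */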
506 static void bnx2x_fw_dump(struct bnx2x *bp)
507 {
508         u32 mark, offset;
509         __be32 data[9];
510         int word;
511
512         mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
513         mark = ((mark + 0x3) & ~0x3);
514         printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);
515
516         printk(KERN_ERR PFX);
517         for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
518                 for (word = 0; word < 8; word++)
519                         data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
520                                                   offset + 4*word));
521                 data[8] = 0x0;
522                 printk(KERN_CONT "%s", (char *)data);
523         }
524         for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
525                 for (word = 0; word < 8; word++)
526                         data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
527                                                   offset + 4*word));
528                 data[8] = 0x0;
529                 printk(KERN_CONT "%s", (char *)data);
530         }
531         printk(KERN_ERR PFX "end of fw dump\n");
532 }
533
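/* Crash-time diagnostics: freeze statistics and dump the driver view of the
 * status block indices, the Rx/Tx ring pointers and the descriptors around
 * the current consumers, followed by the FW trace and the storm asserts.
 */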
534 static void bnx2x_panic_dump(struct bnx2x *bp)
535 {
536         int i;
537         u16 j, start, end;
538
539         bp->stats_state = STATS_STATE_DISABLED;
540         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
541
542         BNX2X_ERR("begin crash dump -----------------\n");
543
544         /* Indices */
545         /* Common */
546         BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
547                   "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
548                   "  spq_prod_idx(%u)\n",
549                   bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
550                   bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
551
552         /* Rx */
553         for_each_rx_queue(bp, i) {
554                 struct bnx2x_fastpath *fp = &bp->fp[i];
555
556                 BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
557                           "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
558                           "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
559                           i, fp->rx_bd_prod, fp->rx_bd_cons,
560                           le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
561                           fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
562                 BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
563                           "  fp_u_idx(%x) *sb_u_idx(%x)\n",
564                           fp->rx_sge_prod, fp->last_max_sge,
565                           le16_to_cpu(fp->fp_u_idx),
566                           fp->status_blk->u_status_block.status_block_index);
567         }
568
569         /* Tx */
570         for_each_tx_queue(bp, i) {
571                 struct bnx2x_fastpath *fp = &bp->fp[i];
572
573                 BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
574                           "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
575                           i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
576                           fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
577                 BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
578                           "  tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
579                           fp->status_blk->c_status_block.status_block_index,
580                           fp->tx_db.data.prod);
581         }
582
583         /* Rings */
584         /* Rx */
585         for_each_rx_queue(bp, i) {
586                 struct bnx2x_fastpath *fp = &bp->fp[i];
587
588                 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
589                 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
590                 for (j = start; j != end; j = RX_BD(j + 1)) {
591                         u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
592                         struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
593
594                         BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
595                                   i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
596                 }
597
598                 start = RX_SGE(fp->rx_sge_prod);
599                 end = RX_SGE(fp->last_max_sge);
600                 for (j = start; j != end; j = RX_SGE(j + 1)) {
601                         u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
602                         struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
603
604                         BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
605                                   i, j, rx_sge[1], rx_sge[0], sw_page->page);
606                 }
607
608                 start = RCQ_BD(fp->rx_comp_cons - 10);
609                 end = RCQ_BD(fp->rx_comp_cons + 503);
610                 for (j = start; j != end; j = RCQ_BD(j + 1)) {
611                         u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
612
613                         BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
614                                   i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
615                 }
616         }
617
618         /* Tx */
619         for_each_tx_queue(bp, i) {
620                 struct bnx2x_fastpath *fp = &bp->fp[i];
621
622                 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
623                 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
624                 for (j = start; j != end; j = TX_BD(j + 1)) {
625                         struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
626
627                         BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
628                                   i, j, sw_bd->skb, sw_bd->first_bd);
629                 }
630
631                 start = TX_BD(fp->tx_bd_cons - 10);
632                 end = TX_BD(fp->tx_bd_cons + 254);
633                 for (j = start; j != end; j = TX_BD(j + 1)) {
634                         u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
635
636                         BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
637                                   i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
638                 }
639         }
640
641         bnx2x_fw_dump(bp);
642         bnx2x_mc_assert(bp);
643         BNX2X_ERR("end crash dump -----------------\n");
644 }
645
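/* Program the HC configuration register of this port for the active
 * interrupt mode (MSI-X, MSI or INTx) and, on E1H, set up the
 * leading/trailing edge attention masks.
 */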
646 static void bnx2x_int_enable(struct bnx2x *bp)
647 {
648         int port = BP_PORT(bp);
649         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
650         u32 val = REG_RD(bp, addr);
651         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
652         int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
653
654         if (msix) {
655                 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
656                          HC_CONFIG_0_REG_INT_LINE_EN_0);
657                 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
658                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
659         } else if (msi) {
660                 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
661                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
662                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
663                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
664         } else {
665                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
666                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
667                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
668                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
669
670                 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
671                    val, port, addr);
672
673                 REG_WR(bp, addr, val);
674
675                 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
676         }
677
678         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
679            val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
680
681         REG_WR(bp, addr, val);
682         /*
683          * Ensure that HC_CONFIG is written before leading/trailing edge config
684          */
685         mmiowb();
686         barrier();
687
688         if (CHIP_IS_E1H(bp)) {
689                 /* init leading/trailing edge */
690                 if (IS_E1HMF(bp)) {
691                         val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
692                         if (bp->port.pmf)
693                                 /* enable nig and gpio3 attention */
694                                 val |= 0x1100;
695                 } else
696                         val = 0xffff;
697
698                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
699                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
700         }
701
702         /* Make sure that interrupts are indeed enabled from here on */
703         mmiowb();
704 }
705
706 static void bnx2x_int_disable(struct bnx2x *bp)
707 {
708         int port = BP_PORT(bp);
709         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
710         u32 val = REG_RD(bp, addr);
711
712         val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
713                  HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
714                  HC_CONFIG_0_REG_INT_LINE_EN_0 |
715                  HC_CONFIG_0_REG_ATTN_BIT_EN_0);
716
717         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
718            val, port, addr);
719
720         /* flush all outstanding writes */
721         mmiowb();
722
723         REG_WR(bp, addr, val);
724         if (REG_RD(bp, addr) != val)
725                 BNX2X_ERR("BUG! proper val not read from IGU!\n");
726 }
727
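/* Stop interrupt handling: raise intr_sem so the ISRs become no-ops,
 * optionally mask the HC, wait for in-flight ISRs to finish and make sure
 * the slowpath task is not running.
 */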
728 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
729 {
730         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
731         int i, offset;
732
733         /* disable interrupt handling */
734         atomic_inc(&bp->intr_sem);
735         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
736
737         if (disable_hw)
738                 /* prevent the HW from sending interrupts */
739                 bnx2x_int_disable(bp);
740
741         /* make sure all ISRs are done */
742         if (msix) {
743                 synchronize_irq(bp->msix_table[0].vector);
744                 offset = 1;
745 #ifdef BCM_CNIC
746                 offset++;
747 #endif
748                 for_each_queue(bp, i)
749                         synchronize_irq(bp->msix_table[i + offset].vector);
750         } else
751                 synchronize_irq(bp->pdev->irq);
752
753         /* make sure sp_task is not running */
754         cancel_delayed_work(&bp->sp_task);
755         flush_workqueue(bnx2x_wq);
756 }
757
758 /* fast path */
759
760 /*
761  * General service functions
762  */
763
764 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
765                                 u8 storm, u16 index, u8 op, u8 update)
766 {
767         u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
768                        COMMAND_REG_INT_ACK);
769         struct igu_ack_register igu_ack;
770
771         igu_ack.status_block_index = index;
772         igu_ack.sb_id_and_flags =
773                         ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
774                          (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
775                          (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
776                          (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
777
778         DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
779            (*(u32 *)&igu_ack), hc_addr);
780         REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
781
782         /* Make sure that ACK is written */
783         mmiowb();
784         barrier();
785 }
786
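/* Snapshot the CSTORM/USTORM indices from the fastpath status block;
 * the return value tells which of the two (if any) have changed.
 */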
787 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
788 {
789         struct host_status_block *fpsb = fp->status_blk;
790         u16 rc = 0;
791
792         barrier(); /* status block is written to by the chip */
793         if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
794                 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
795                 rc |= 1;
796         }
797         if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
798                 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
799                 rc |= 2;
800         }
801         return rc;
802 }
803
804 static u16 bnx2x_ack_int(struct bnx2x *bp)
805 {
806         u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
807                        COMMAND_REG_SIMD_MASK);
808         u32 result = REG_RD(bp, hc_addr);
809
810         DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
811            result, hc_addr);
812
813         return result;
814 }
815
816
817 /*
818  * fast path service functions
819  */
820
821 static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
822 {
823         /* Tell compiler that consumer and producer can change */
824         barrier();
825         return (fp->tx_pkt_prod != fp->tx_pkt_cons);
826 }
827
828 /* free skb in the packet ring at pos idx
829  * return idx of last bd freed
830  */
831 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
832                              u16 idx)
833 {
834         struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
835         struct eth_tx_start_bd *tx_start_bd;
836         struct eth_tx_bd *tx_data_bd;
837         struct sk_buff *skb = tx_buf->skb;
838         u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
839         int nbd;
840
841         DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
842            idx, tx_buf, skb);
843
844         /* unmap first bd */
845         DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
846         tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
847         pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
848                          BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
849
850         nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
851 #ifdef BNX2X_STOP_ON_ERROR
852         if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
853                 BNX2X_ERR("BAD nbd!\n");
854                 bnx2x_panic();
855         }
856 #endif
857         new_cons = nbd + tx_buf->first_bd;
858
859         /* Get the next bd */
860         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
861
862         /* Skip a parse bd... */
863         --nbd;
864         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
865
866         /* ...and the TSO split header bd since they have no mapping */
867         if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
868                 --nbd;
869                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
870         }
871
872         /* now free frags */
873         while (nbd > 0) {
874
875                 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
876                 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
877                 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
878                                BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
879                 if (--nbd)
880                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
881         }
882
883         /* release skb */
884         WARN_ON(!skb);
885         dev_kfree_skb_any(skb);
886         tx_buf->first_bd = 0;
887         tx_buf->skb = NULL;
888
889         return new_cons;
890 }
891
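/* Number of Tx BDs available to start_xmit().  The NUM_TX_RINGS "next-page"
 * BDs are counted as used so they are never handed out; e.g. on an empty
 * ring (prod == cons) this reports tx_ring_size - NUM_TX_RINGS free BDs.
 */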
892 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
893 {
894         s16 used;
895         u16 prod;
896         u16 cons;
897
898         barrier(); /* Tell compiler that prod and cons can change */
899         prod = fp->tx_bd_prod;
900         cons = fp->tx_bd_cons;
901
902         /* NUM_TX_RINGS = number of "next-page" entries;
903            it will be used as a threshold */
904         used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
905
906 #ifdef BNX2X_STOP_ON_ERROR
907         WARN_ON(used < 0);
908         WARN_ON(used > fp->bp->tx_ring_size);
909         WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
910 #endif
911
912         return (s16)(fp->bp->tx_ring_size) - used;
913 }
914
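/* Reclaim completed Tx packets up to the consumer reported in the status
 * block, then wake the netdev queue if it was stopped and enough BDs have
 * been freed.
 */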
915 static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
916 {
917         struct bnx2x *bp = fp->bp;
918         struct netdev_queue *txq;
919         u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
920         int done = 0;
921
922 #ifdef BNX2X_STOP_ON_ERROR
923         if (unlikely(bp->panic))
924                 return;
925 #endif
926
927         txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
928         hw_cons = le16_to_cpu(*fp->tx_cons_sb);
929         sw_cons = fp->tx_pkt_cons;
930
931         while (sw_cons != hw_cons) {
932                 u16 pkt_cons;
933
934                 pkt_cons = TX_BD(sw_cons);
935
936                 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
937
938                 DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
939                    hw_cons, sw_cons, pkt_cons);
940
941 /*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
942                         rmb();
943                         prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
944                 }
945 */
946                 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
947                 sw_cons++;
948                 done++;
949         }
950
951         fp->tx_pkt_cons = sw_cons;
952         fp->tx_bd_cons = bd_cons;
953
954         /* TBD need a thresh? */
955         if (unlikely(netif_tx_queue_stopped(txq))) {
956
957                 /* Need to make the tx_bd_cons update visible to start_xmit()
958                  * before checking for netif_tx_queue_stopped().  Without the
959                  * memory barrier, there is a small possibility that
960                  * start_xmit() will miss it and cause the queue to be stopped
961                  * forever.
962                  */
963                 smp_mb();
964
965                 if ((netif_tx_queue_stopped(txq)) &&
966                     (bp->state == BNX2X_STATE_OPEN) &&
967                     (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
968                         netif_tx_wake_queue(txq);
969         }
970 }
971
972 #ifdef BCM_CNIC
973 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
974 #endif
975
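/* Handle a ramrod completion CQE received on the slow path: advance the
 * matching fastpath or global state machine and release the slow-path
 * queue credit (spq_left).
 */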
976 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
977                            union eth_rx_cqe *rr_cqe)
978 {
979         struct bnx2x *bp = fp->bp;
980         int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
981         int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
982
983         DP(BNX2X_MSG_SP,
984            "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
985            fp->index, cid, command, bp->state,
986            rr_cqe->ramrod_cqe.ramrod_type);
987
988         bp->spq_left++;
989
990         if (fp->index) {
991                 switch (command | fp->state) {
992                 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
993                                                 BNX2X_FP_STATE_OPENING):
994                         DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
995                            cid);
996                         fp->state = BNX2X_FP_STATE_OPEN;
997                         break;
998
999                 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
1000                         DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
1001                            cid);
1002                         fp->state = BNX2X_FP_STATE_HALTED;
1003                         break;
1004
1005                 default:
1006                         BNX2X_ERR("unexpected MC reply (%d)  "
1007                                   "fp->state is %x\n", command, fp->state);
1008                         break;
1009                 }
1010                 mb(); /* force bnx2x_wait_ramrod() to see the change */
1011                 return;
1012         }
1013
1014         switch (command | bp->state) {
1015         case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
1016                 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
1017                 bp->state = BNX2X_STATE_OPEN;
1018                 break;
1019
1020         case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
1021                 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
1022                 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
1023                 fp->state = BNX2X_FP_STATE_HALTED;
1024                 break;
1025
1026         case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
1027                 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
1028                 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
1029                 break;
1030
1031 #ifdef BCM_CNIC
1032         case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
1033                 DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
1034                 bnx2x_cnic_cfc_comp(bp, cid);
1035                 break;
1036 #endif
1037
1038         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
1039         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
1040                 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
1041                 bp->set_mac_pending--;
1042                 smp_wmb();
1043                 break;
1044
1045         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
1046         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED):
1047                 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
1048                 bp->set_mac_pending--;
1049                 smp_wmb();
1050                 break;
1051
1052         default:
1053                 BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
1054                           command, bp->state);
1055                 break;
1056         }
1057         mb(); /* force bnx2x_wait_ramrod() to see the change */
1058 }
1059
1060 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1061                                      struct bnx2x_fastpath *fp, u16 index)
1062 {
1063         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1064         struct page *page = sw_buf->page;
1065         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1066
1067         /* Skip "next page" elements */
1068         if (!page)
1069                 return;
1070
1071         pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
1072                        SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1073         __free_pages(page, PAGES_PER_SGE_SHIFT);
1074
1075         sw_buf->page = NULL;
1076         sge->addr_hi = 0;
1077         sge->addr_lo = 0;
1078 }
1079
1080 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1081                                            struct bnx2x_fastpath *fp, int last)
1082 {
1083         int i;
1084
1085         for (i = 0; i < last; i++)
1086                 bnx2x_free_rx_sge(bp, fp, i);
1087 }
1088
1089 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1090                                      struct bnx2x_fastpath *fp, u16 index)
1091 {
1092         struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1093         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1094         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1095         dma_addr_t mapping;
1096
1097         if (unlikely(page == NULL))
1098                 return -ENOMEM;
1099
1100         mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
1101                                PCI_DMA_FROMDEVICE);
1102         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1103                 __free_pages(page, PAGES_PER_SGE_SHIFT);
1104                 return -ENOMEM;
1105         }
1106
1107         sw_buf->page = page;
1108         pci_unmap_addr_set(sw_buf, mapping, mapping);
1109
1110         sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1111         sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1112
1113         return 0;
1114 }
1115
1116 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1117                                      struct bnx2x_fastpath *fp, u16 index)
1118 {
1119         struct sk_buff *skb;
1120         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1121         struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1122         dma_addr_t mapping;
1123
1124         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1125         if (unlikely(skb == NULL))
1126                 return -ENOMEM;
1127
1128         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
1129                                  PCI_DMA_FROMDEVICE);
1130         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1131                 dev_kfree_skb(skb);
1132                 return -ENOMEM;
1133         }
1134
1135         rx_buf->skb = skb;
1136         pci_unmap_addr_set(rx_buf, mapping, mapping);
1137
1138         rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1139         rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1140
1141         return 0;
1142 }
1143
1144 /* Note that we are not allocating a new skb;
1145  * we are just moving one from cons to prod.
1146  * We are not creating a new mapping,
1147  * so there is no need to check for dma_mapping_error().
1148  */
1149 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1150                                struct sk_buff *skb, u16 cons, u16 prod)
1151 {
1152         struct bnx2x *bp = fp->bp;
1153         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1154         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1155         struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1156         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1157
1158         pci_dma_sync_single_for_device(bp->pdev,
1159                                        pci_unmap_addr(cons_rx_buf, mapping),
1160                                        RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1161
1162         prod_rx_buf->skb = cons_rx_buf->skb;
1163         pci_unmap_addr_set(prod_rx_buf, mapping,
1164                            pci_unmap_addr(cons_rx_buf, mapping));
1165         *prod_bd = *cons_bd;
1166 }
1167
1168 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1169                                              u16 idx)
1170 {
1171         u16 last_max = fp->last_max_sge;
1172
1173         if (SUB_S16(idx, last_max) > 0)
1174                 fp->last_max_sge = idx;
1175 }
1176
1177 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1178 {
1179         int i, j;
1180
1181         for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1182                 int idx = RX_SGE_CNT * i - 1;
1183
1184                 for (j = 0; j < 2; j++) {
1185                         SGE_MASK_CLEAR_BIT(fp, idx);
1186                         idx--;
1187                 }
1188         }
1189 }
1190
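/* Called on a TPA completion: clear the SGE mask bits consumed by this CQE
 * and push rx_sge_prod forward over every mask element that has been fully
 * cleared.
 */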
1191 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1192                                   struct eth_fast_path_rx_cqe *fp_cqe)
1193 {
1194         struct bnx2x *bp = fp->bp;
1195         u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1196                                      le16_to_cpu(fp_cqe->len_on_bd)) >>
1197                       SGE_PAGE_SHIFT;
1198         u16 last_max, last_elem, first_elem;
1199         u16 delta = 0;
1200         u16 i;
1201
1202         if (!sge_len)
1203                 return;
1204
1205         /* First mark all used pages */
1206         for (i = 0; i < sge_len; i++)
1207                 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1208
1209         DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1210            sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1211
1212         /* Here we assume that the last SGE index is the biggest */
1213         prefetch((void *)(fp->sge_mask));
1214         bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1215
1216         last_max = RX_SGE(fp->last_max_sge);
1217         last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1218         first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1219
1220         /* If ring is not full */
1221         if (last_elem + 1 != first_elem)
1222                 last_elem++;
1223
1224         /* Now update the prod */
1225         for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1226                 if (likely(fp->sge_mask[i]))
1227                         break;
1228
1229                 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1230                 delta += RX_SGE_MASK_ELEM_SZ;
1231         }
1232
1233         if (delta > 0) {
1234                 fp->rx_sge_prod += delta;
1235                 /* clear page-end entries */
1236                 bnx2x_clear_sge_mask_next_elems(fp);
1237         }
1238
1239         DP(NETIF_MSG_RX_STATUS,
1240            "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
1241            fp->last_max_sge, fp->rx_sge_prod);
1242 }
1243
1244 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1245 {
1246         /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1247         memset(fp->sge_mask, 0xff,
1248                (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1249
1250         /* Clear the two last indices in the page to 1:
1251            these are the indices that correspond to the "next" element,
1252            hence will never be indicated and should be removed from
1253            the calculations. */
1254         bnx2x_clear_sge_mask_next_elems(fp);
1255 }
1256
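/* Open a TPA aggregation "bin": the spare skb kept in tpa_pool[queue] takes
 * the producer slot of the Rx BD ring, while the skb that just arrived at
 * 'cons' is parked in the pool until the aggregation is stopped.
 */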
1257 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1258                             struct sk_buff *skb, u16 cons, u16 prod)
1259 {
1260         struct bnx2x *bp = fp->bp;
1261         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1262         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1263         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1264         dma_addr_t mapping;
1265
1266         /* move empty skb from pool to prod and map it */
1267         prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1268         mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1269                                  bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1270         pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1271
1272         /* move partial skb from cons to pool (don't unmap yet) */
1273         fp->tpa_pool[queue] = *cons_rx_buf;
1274
1275         /* mark bin state as start - print error if current state != stop */
1276         if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1277                 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1278
1279         fp->tpa_state[queue] = BNX2X_TPA_START;
1280
1281         /* point prod_bd to new skb */
1282         prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1283         prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1284
1285 #ifdef BNX2X_STOP_ON_ERROR
1286         fp->tpa_queue_used |= (1 << queue);
1287 #ifdef __powerpc64__
1288         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1289 #else
1290         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1291 #endif
1292            fp->tpa_queue_used);
1293 #endif
1294 }
1295
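/* Attach the SGE pages listed in the CQE to the aggregated skb as page
 * frags, replenishing each SGE ring slot as it is consumed.  Returns 0 on
 * success or a negative errno if a replacement page cannot be allocated.
 */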
1296 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1297                                struct sk_buff *skb,
1298                                struct eth_fast_path_rx_cqe *fp_cqe,
1299                                u16 cqe_idx)
1300 {
1301         struct sw_rx_page *rx_pg, old_rx_pg;
1302         u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1303         u32 i, frag_len, frag_size, pages;
1304         int err;
1305         int j;
1306
1307         frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1308         pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1309
1310         /* This is needed in order to enable forwarding support */
1311         if (frag_size)
1312                 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1313                                                max(frag_size, (u32)len_on_bd));
1314
1315 #ifdef BNX2X_STOP_ON_ERROR
1316         if (pages >
1317             min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
1318                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1319                           pages, cqe_idx);
1320                 BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
1321                           fp_cqe->pkt_len, len_on_bd);
1322                 bnx2x_panic();
1323                 return -EINVAL;
1324         }
1325 #endif
1326
1327         /* Run through the SGL and compose the fragmented skb */
1328         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1329                 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1330
1331                 /* FW gives the indices of the SGE as if the ring is an array
1332                    (meaning that "next" element will consume 2 indices) */
1333                 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1334                 rx_pg = &fp->rx_page_ring[sge_idx];
1335                 old_rx_pg = *rx_pg;
1336
1337                 /* If we fail to allocate a substitute page, we simply stop
1338                    where we are and drop the whole packet */
1339                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1340                 if (unlikely(err)) {
1341                         fp->eth_q_stats.rx_skb_alloc_failed++;
1342                         return err;
1343                 }
1344
1345                 /* Unmap the page as we are going to pass it to the stack */
1346                 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1347                               SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1348
1349                 /* Add one frag and update the appropriate fields in the skb */
1350                 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1351
1352                 skb->data_len += frag_len;
1353                 skb->truesize += frag_len;
1354                 skb->len += frag_len;
1355
1356                 frag_size -= frag_len;
1357         }
1358
1359         return 0;
1360 }
1361
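/* Close a TPA aggregation "bin": unmap the skb parked in tpa_pool[queue],
 * fix up its IP checksum, attach the SGE frags and hand it to the stack;
 * a freshly allocated skb takes its place in the pool.  If any allocation
 * fails, the aggregated packet is dropped instead.
 */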
1362 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1363                            u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1364                            u16 cqe_idx)
1365 {
1366         struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1367         struct sk_buff *skb = rx_buf->skb;
1368         /* alloc new skb */
1369         struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1370
1371         /* Unmap skb in the pool anyway, as we are going to change
1372            pool entry status to BNX2X_TPA_STOP even if new skb allocation
1373            fails. */
1374         pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1375                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1376
1377         if (likely(new_skb)) {
1378                 /* fix ip xsum and give it to the stack */
1379                 /* (no need to map the new skb) */
1380 #ifdef BCM_VLAN
1381                 int is_vlan_cqe =
1382                         (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1383                          PARSING_FLAGS_VLAN);
1384                 int is_not_hwaccel_vlan_cqe =
1385                         (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1386 #endif
1387
1388                 prefetch(skb);
1389                 prefetch(((char *)(skb)) + 128);
1390
1391 #ifdef BNX2X_STOP_ON_ERROR
1392                 if (pad + len > bp->rx_buf_size) {
1393                         BNX2X_ERR("skb_put is about to fail...  "
1394                                   "pad %d  len %d  rx_buf_size %d\n",
1395                                   pad, len, bp->rx_buf_size);
1396                         bnx2x_panic();
1397                         return;
1398                 }
1399 #endif
1400
1401                 skb_reserve(skb, pad);
1402                 skb_put(skb, len);
1403
1404                 skb->protocol = eth_type_trans(skb, bp->dev);
1405                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1406
1407                 {
1408                         struct iphdr *iph;
1409
1410                         iph = (struct iphdr *)skb->data;
1411 #ifdef BCM_VLAN
1412                         /* If there is no Rx VLAN offloading -
1413                            take VLAN tag into an account */
1414                         if (unlikely(is_not_hwaccel_vlan_cqe))
1415                                 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1416 #endif
1417                         iph->check = 0;
1418                         iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1419                 }
1420
1421                 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1422                                          &cqe->fast_path_cqe, cqe_idx)) {
1423 #ifdef BCM_VLAN
1424                         if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1425                             (!is_not_hwaccel_vlan_cqe))
1426                                 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1427                                                 le16_to_cpu(cqe->fast_path_cqe.
1428                                                             vlan_tag));
1429                         else
1430 #endif
1431                                 netif_receive_skb(skb);
1432                 } else {
1433                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1434                            " - dropping packet!\n");
1435                         dev_kfree_skb(skb);
1436                 }
1437
1438
1439                 /* put new skb in bin */
1440                 fp->tpa_pool[queue].skb = new_skb;
1441
1442         } else {
1443                 /* else drop the packet and keep the buffer in the bin */
1444                 DP(NETIF_MSG_RX_STATUS,
1445                    "Failed to allocate new skb - dropping packet!\n");
1446                 fp->eth_q_stats.rx_skb_alloc_failed++;
1447         }
1448
1449         fp->tpa_state[queue] = BNX2X_TPA_STOP;
1450 }
1451
1452 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1453                                         struct bnx2x_fastpath *fp,
1454                                         u16 bd_prod, u16 rx_comp_prod,
1455                                         u16 rx_sge_prod)
1456 {
1457         struct ustorm_eth_rx_producers rx_prods = {0};
1458         int i;
1459
1460         /* Update producers */
1461         rx_prods.bd_prod = bd_prod;
1462         rx_prods.cqe_prod = rx_comp_prod;
1463         rx_prods.sge_prod = rx_sge_prod;
1464
1465         /*
1466          * Make sure that the BD and SGE data is updated before updating the
1467          * producers since FW might read the BD/SGE right after the producer
1468          * is updated.
1469          * This is only applicable for weak-ordered memory model archs such
1470          * as IA-64. The following barrier is also mandatory since the FW
1471          * assumes that BDs must have buffers.
1472          */
1473         wmb();
1474
1475         for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1476                 REG_WR(bp, BAR_USTRORM_INTMEM +
1477                        USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1478                        ((u32 *)&rx_prods)[i]);
1479
1480         mmiowb(); /* keep prod updates ordered */
1481
1482         DP(NETIF_MSG_RX_STATUS,
1483            "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
1484            fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1485 }
1486
1487 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1488 {
1489         struct bnx2x *bp = fp->bp;
1490         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1491         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1492         int rx_pkt = 0;
1493
1494 #ifdef BNX2X_STOP_ON_ERROR
1495         if (unlikely(bp->panic))
1496                 return 0;
1497 #endif
1498
1499         /* The CQ "next element" is the same size as a regular element,
1500            which is why this is OK here */
1501         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1502         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1503                 hw_comp_cons++;
1504
1505         bd_cons = fp->rx_bd_cons;
1506         bd_prod = fp->rx_bd_prod;
1507         bd_prod_fw = bd_prod;
1508         sw_comp_cons = fp->rx_comp_cons;
1509         sw_comp_prod = fp->rx_comp_prod;
1510
1511         /* Memory barrier necessary as speculative reads of the rx
1512          * buffer can be ahead of the index in the status block
1513          */
1514         rmb();
1515
1516         DP(NETIF_MSG_RX_STATUS,
1517            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1518            fp->index, hw_comp_cons, sw_comp_cons);
1519
1520         while (sw_comp_cons != hw_comp_cons) {
1521                 struct sw_rx_bd *rx_buf = NULL;
1522                 struct sk_buff *skb;
1523                 union eth_rx_cqe *cqe;
1524                 u8 cqe_fp_flags;
1525                 u16 len, pad;
1526
1527                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1528                 bd_prod = RX_BD(bd_prod);
1529                 bd_cons = RX_BD(bd_cons);
1530
1531                 /* Prefetch the page containing the BD descriptor
1532                    at the producer's index; it will be needed when a new
1533                    skb is allocated */
1534                 prefetch((void *)(PAGE_ALIGN((unsigned long)
1535                                              (&fp->rx_desc_ring[bd_prod])) -
1536                                   PAGE_SIZE + 1));
1537
1538                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1539                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1540
1541                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1542                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1543                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1544                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1545                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1546                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1547
1548                 /* is this a slowpath msg? */
1549                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1550                         bnx2x_sp_event(fp, cqe);
1551                         goto next_cqe;
1552
1553                 /* this is an rx packet */
1554                 } else {
1555                         rx_buf = &fp->rx_buf_ring[bd_cons];
1556                         skb = rx_buf->skb;
1557                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1558                         pad = cqe->fast_path_cqe.placement_offset;
1559
1560                         /* If CQE is marked both TPA_START and TPA_END
1561                            it is a non-TPA CQE */
1562                         if ((!fp->disable_tpa) &&
1563                             (TPA_TYPE(cqe_fp_flags) !=
1564                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1565                                 u16 queue = cqe->fast_path_cqe.queue_index;
1566
1567                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1568                                         DP(NETIF_MSG_RX_STATUS,
1569                                            "calling tpa_start on queue %d\n",
1570                                            queue);
1571
1572                                         bnx2x_tpa_start(fp, queue, skb,
1573                                                         bd_cons, bd_prod);
1574                                         goto next_rx;
1575                                 }
1576
1577                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1578                                         DP(NETIF_MSG_RX_STATUS,
1579                                            "calling tpa_stop on queue %d\n",
1580                                            queue);
1581
1582                                         if (!BNX2X_RX_SUM_FIX(cqe))
1583                                                 BNX2X_ERR("STOP on non-TCP "
1584                                                           "data\n");
1585
1586                                         /* This is the size of the linear
1587                                            data on this skb */
1588                                         len = le16_to_cpu(cqe->fast_path_cqe.
1589                                                                 len_on_bd);
1590                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1591                                                     len, cqe, comp_ring_cons);
1592 #ifdef BNX2X_STOP_ON_ERROR
1593                                         if (bp->panic)
1594                                                 return 0;
1595 #endif
1596
1597                                         bnx2x_update_sge_prod(fp,
1598                                                         &cqe->fast_path_cqe);
1599                                         goto next_cqe;
1600                                 }
1601                         }
1602
1603                         pci_dma_sync_single_for_device(bp->pdev,
1604                                         pci_unmap_addr(rx_buf, mapping),
1605                                                        pad + RX_COPY_THRESH,
1606                                                        PCI_DMA_FROMDEVICE);
1607                         prefetch(skb);
1608                         prefetch(((char *)(skb)) + 128);
1609
1610                         /* is this an error packet? */
1611                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1612                                 DP(NETIF_MSG_RX_ERR,
1613                                    "ERROR  flags %x  rx packet %u\n",
1614                                    cqe_fp_flags, sw_comp_cons);
1615                                 fp->eth_q_stats.rx_err_discard_pkt++;
1616                                 goto reuse_rx;
1617                         }
1618
1619                         /* Since we don't have a jumbo ring,
1620                          * copy small packets if the MTU > 1500
1621                          */
1622                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1623                             (len <= RX_COPY_THRESH)) {
1624                                 struct sk_buff *new_skb;
1625
1626                                 new_skb = netdev_alloc_skb(bp->dev,
1627                                                            len + pad);
1628                                 if (new_skb == NULL) {
1629                                         DP(NETIF_MSG_RX_ERR,
1630                                            "ERROR  packet dropped "
1631                                            "because of alloc failure\n");
1632                                         fp->eth_q_stats.rx_skb_alloc_failed++;
1633                                         goto reuse_rx;
1634                                 }
1635
1636                                 /* aligned copy */
1637                                 skb_copy_from_linear_data_offset(skb, pad,
1638                                                     new_skb->data + pad, len);
1639                                 skb_reserve(new_skb, pad);
1640                                 skb_put(new_skb, len);
1641
1642                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1643
1644                                 skb = new_skb;
1645
1646                         } else
1647                         if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
1648                                 pci_unmap_single(bp->pdev,
1649                                         pci_unmap_addr(rx_buf, mapping),
1650                                                  bp->rx_buf_size,
1651                                                  PCI_DMA_FROMDEVICE);
1652                                 skb_reserve(skb, pad);
1653                                 skb_put(skb, len);
1654
1655                         } else {
1656                                 DP(NETIF_MSG_RX_ERR,
1657                                    "ERROR  packet dropped because "
1658                                    "of alloc failure\n");
1659                                 fp->eth_q_stats.rx_skb_alloc_failed++;
1660 reuse_rx:
1661                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1662                                 goto next_rx;
1663                         }
1664
1665                         skb->protocol = eth_type_trans(skb, bp->dev);
1666
1667                         skb->ip_summed = CHECKSUM_NONE;
1668                         if (bp->rx_csum) {
1669                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1670                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1671                                 else
1672                                         fp->eth_q_stats.hw_csum_err++;
1673                         }
1674                 }
1675
1676                 skb_record_rx_queue(skb, fp->index);
1677
1678 #ifdef BCM_VLAN
1679                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1680                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1681                      PARSING_FLAGS_VLAN))
1682                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1683                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1684                 else
1685 #endif
1686                         netif_receive_skb(skb);
1687
1688
1689 next_rx:
1690                 rx_buf->skb = NULL;
1691
1692                 bd_cons = NEXT_RX_IDX(bd_cons);
1693                 bd_prod = NEXT_RX_IDX(bd_prod);
1694                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1695                 rx_pkt++;
1696 next_cqe:
1697                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1698                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1699
1700                 if (rx_pkt == budget)
1701                         break;
1702         } /* while */
1703
1704         fp->rx_bd_cons = bd_cons;
1705         fp->rx_bd_prod = bd_prod_fw;
1706         fp->rx_comp_cons = sw_comp_cons;
1707         fp->rx_comp_prod = sw_comp_prod;
1708
1709         /* Update producers */
1710         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1711                              fp->rx_sge_prod);
1712
1713         fp->rx_pkt += rx_pkt;
1714         fp->rx_calls++;
1715
1716         return rx_pkt;
1717 }
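
/* Usage sketch (illustrative, not part of the original driver): bnx2x_rx_int()
 * follows the NAPI model - it handles at most 'budget' completions and returns
 * the number of packets processed, so a poll handler can decide whether to
 * keep polling or complete and re-enable interrupts, e.g.:
 *
 *	work_done = bnx2x_rx_int(fp, budget);
 *	if (work_done < budget)
 *		napi_complete(napi);	- then re-enable device interrupts
 */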
1718
1719 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1720 {
1721         struct bnx2x_fastpath *fp = fp_cookie;
1722         struct bnx2x *bp = fp->bp;
1723
1724         /* Return here if interrupt is disabled */
1725         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1726                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1727                 return IRQ_HANDLED;
1728         }
1729
1730         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1731            fp->index, fp->sb_id);
1732         bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1733
1734 #ifdef BNX2X_STOP_ON_ERROR
1735         if (unlikely(bp->panic))
1736                 return IRQ_HANDLED;
1737 #endif
1738         /* Handle Rx or Tx according to MSI-X vector */
1739         if (fp->is_rx_queue) {
1740                 prefetch(fp->rx_cons_sb);
1741                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1742
1743                 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1744
1745         } else {
1746                 prefetch(fp->tx_cons_sb);
1747                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1748
1749                 bnx2x_update_fpsb_idx(fp);
1750                 rmb();
1751                 bnx2x_tx_int(fp);
1752
1753                 /* Re-enable interrupts */
1754                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1755                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
1756                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1757                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
1758         }
1759
1760         return IRQ_HANDLED;
1761 }
1762
1763 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1764 {
1765         struct bnx2x *bp = netdev_priv(dev_instance);
1766         u16 status = bnx2x_ack_int(bp);
1767         u16 mask;
1768         int i;
1769
1770         /* Return here if interrupt is shared and it's not for us */
1771         if (unlikely(status == 0)) {
1772                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1773                 return IRQ_NONE;
1774         }
1775         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1776
1777         /* Return here if interrupt is disabled */
1778         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1779                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1780                 return IRQ_HANDLED;
1781         }
1782
1783 #ifdef BNX2X_STOP_ON_ERROR
1784         if (unlikely(bp->panic))
1785                 return IRQ_HANDLED;
1786 #endif
1787
1788         for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1789                 struct bnx2x_fastpath *fp = &bp->fp[i];
1790
1791                 mask = 0x2 << fp->sb_id;
1792                 if (status & mask) {
1793                         /* Handle Rx or Tx according to SB id */
1794                         if (fp->is_rx_queue) {
1795                                 prefetch(fp->rx_cons_sb);
1796                                 prefetch(&fp->status_blk->u_status_block.
1797                                                         status_block_index);
1798
1799                                 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1800
1801                         } else {
1802                                 prefetch(fp->tx_cons_sb);
1803                                 prefetch(&fp->status_blk->c_status_block.
1804                                                         status_block_index);
1805
1806                                 bnx2x_update_fpsb_idx(fp);
1807                                 rmb();
1808                                 bnx2x_tx_int(fp);
1809
1810                                 /* Re-enable interrupts */
1811                                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1812                                              le16_to_cpu(fp->fp_u_idx),
1813                                              IGU_INT_NOP, 1);
1814                                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1815                                              le16_to_cpu(fp->fp_c_idx),
1816                                              IGU_INT_ENABLE, 1);
1817                         }
1818                         status &= ~mask;
1819                 }
1820         }
1821
1822 #ifdef BCM_CNIC
1823         mask = 0x2 << CNIC_SB_ID(bp);
1824         if (status & (mask | 0x1)) {
1825                 struct cnic_ops *c_ops = NULL;
1826
1827                 rcu_read_lock();
1828                 c_ops = rcu_dereference(bp->cnic_ops);
1829                 if (c_ops)
1830                         c_ops->cnic_handler(bp->cnic_data, NULL);
1831                 rcu_read_unlock();
1832
1833                 status &= ~mask;
1834         }
1835 #endif
1836
1837         if (unlikely(status & 0x1)) {
1838                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1839
1840                 status &= ~0x1;
1841                 if (!status)
1842                         return IRQ_HANDLED;
1843         }
1844
1845         if (status)
1846                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1847                    status);
1848
1849         return IRQ_HANDLED;
1850 }
1851
1852 /* end of fast path */
1853
1854 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1855
1856 /* Link */
1857
1858 /*
1859  * General service functions
1860  */
1861
1862 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1863 {
1864         u32 lock_status;
1865         u32 resource_bit = (1 << resource);
1866         int func = BP_FUNC(bp);
1867         u32 hw_lock_control_reg;
1868         int cnt;
1869
1870         /* Validating that the resource is within range */
1871         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1872                 DP(NETIF_MSG_HW,
1873                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1874                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1875                 return -EINVAL;
1876         }
1877
1878         if (func <= 5) {
1879                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1880         } else {
1881                 hw_lock_control_reg =
1882                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1883         }
1884
1885         /* Validating that the resource is not already taken */
1886         lock_status = REG_RD(bp, hw_lock_control_reg);
1887         if (lock_status & resource_bit) {
1888                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1889                    lock_status, resource_bit);
1890                 return -EEXIST;
1891         }
1892
1893         /* Try for 5 seconds, every 5 ms */
1894         for (cnt = 0; cnt < 1000; cnt++) {
1895                 /* Try to acquire the lock */
1896                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1897                 lock_status = REG_RD(bp, hw_lock_control_reg);
1898                 if (lock_status & resource_bit)
1899                         return 0;
1900
1901                 msleep(5);
1902         }
1903         DP(NETIF_MSG_HW, "Timeout\n");
1904         return -EAGAIN;
1905 }
1906
1907 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1908 {
1909         u32 lock_status;
1910         u32 resource_bit = (1 << resource);
1911         int func = BP_FUNC(bp);
1912         u32 hw_lock_control_reg;
1913
1914         /* Validating that the resource is within range */
1915         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1916                 DP(NETIF_MSG_HW,
1917                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1918                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1919                 return -EINVAL;
1920         }
1921
1922         if (func <= 5) {
1923                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1924         } else {
1925                 hw_lock_control_reg =
1926                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1927         }
1928
1929         /* Validating that the resource is currently taken */
1930         lock_status = REG_RD(bp, hw_lock_control_reg);
1931         if (!(lock_status & resource_bit)) {
1932                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1933                    lock_status, resource_bit);
1934                 return -EFAULT;
1935         }
1936
1937         REG_WR(bp, hw_lock_control_reg, resource_bit);
1938         return 0;
1939 }
1940
1941 /* HW Lock for shared dual port PHYs */
1942 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1943 {
1944         mutex_lock(&bp->port.phy_mutex);
1945
1946         if (bp->port.need_hw_lock)
1947                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1948 }
1949
1950 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1951 {
1952         if (bp->port.need_hw_lock)
1953                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1954
1955         mutex_unlock(&bp->port.phy_mutex);
1956 }
1957
1958 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1959 {
1960         /* The GPIO should be swapped if swap register is set and active */
1961         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1962                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1963         int gpio_shift = gpio_num +
1964                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1965         u32 gpio_mask = (1 << gpio_shift);
1966         u32 gpio_reg;
1967         int value;
1968
1969         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1970                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1971                 return -EINVAL;
1972         }
1973
1974         /* read GPIO value */
1975         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1976
1977         /* get the requested pin value */
1978         if ((gpio_reg & gpio_mask) == gpio_mask)
1979                 value = 1;
1980         else
1981                 value = 0;
1982
1983         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
1984
1985         return value;
1986 }
1987
1988 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1989 {
1990         /* The GPIO should be swapped if swap register is set and active */
1991         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1992                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1993         int gpio_shift = gpio_num +
1994                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1995         u32 gpio_mask = (1 << gpio_shift);
1996         u32 gpio_reg;
1997
1998         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1999                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2000                 return -EINVAL;
2001         }
2002
2003         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2004         /* read GPIO and mask except the float bits */
2005         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
2006
2007         switch (mode) {
2008         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2009                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
2010                    gpio_num, gpio_shift);
2011                 /* clear FLOAT and set CLR */
2012                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2013                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2014                 break;
2015
2016         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2017                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
2018                    gpio_num, gpio_shift);
2019                 /* clear FLOAT and set SET */
2020                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2021                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2022                 break;
2023
2024         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2025                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2026                    gpio_num, gpio_shift);
2027                 /* set FLOAT */
2028                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2029                 break;
2030
2031         default:
2032                 break;
2033         }
2034
2035         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2036         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2037
2038         return 0;
2039 }
2040
2041 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2042 {
2043         /* The GPIO should be swapped if swap register is set and active */
2044         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2045                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2046         int gpio_shift = gpio_num +
2047                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2048         u32 gpio_mask = (1 << gpio_shift);
2049         u32 gpio_reg;
2050
2051         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2052                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2053                 return -EINVAL;
2054         }
2055
2056         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2057         /* read GPIO int */
2058         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2059
2060         switch (mode) {
2061         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2062                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2063                                    "output low\n", gpio_num, gpio_shift);
2064                 /* clear SET and set CLR */
2065                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2066                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2067                 break;
2068
2069         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2070                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2071                                    "output high\n", gpio_num, gpio_shift);
2072                 /* clear CLR and set SET */
2073                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2074                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2075                 break;
2076
2077         default:
2078                 break;
2079         }
2080
2081         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2082         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2083
2084         return 0;
2085 }
2086
2087 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2088 {
2089         u32 spio_mask = (1 << spio_num);
2090         u32 spio_reg;
2091
2092         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2093             (spio_num > MISC_REGISTERS_SPIO_7)) {
2094                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2095                 return -EINVAL;
2096         }
2097
2098         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2099         /* read SPIO and mask except the float bits */
2100         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2101
2102         switch (mode) {
2103         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2104                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2105                 /* clear FLOAT and set CLR */
2106                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2107                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2108                 break;
2109
2110         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2111                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2112                 /* clear FLOAT and set SET */
2113                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2114                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2115                 break;
2116
2117         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2118                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2119                 /* set FLOAT */
2120                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2121                 break;
2122
2123         default:
2124                 break;
2125         }
2126
2127         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2128         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2129
2130         return 0;
2131 }
2132
2133 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2134 {
2135         switch (bp->link_vars.ieee_fc &
2136                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2137         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2138                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2139                                           ADVERTISED_Pause);
2140                 break;
2141
2142         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2143                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2144                                          ADVERTISED_Pause);
2145                 break;
2146
2147         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2148                 bp->port.advertising |= ADVERTISED_Asym_Pause;
2149                 break;
2150
2151         default:
2152                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2153                                           ADVERTISED_Pause);
2154                 break;
2155         }
2156 }
2157
2158 static void bnx2x_link_report(struct bnx2x *bp)
2159 {
2160         if (bp->state == BNX2X_STATE_DISABLED) {
2161                 netif_carrier_off(bp->dev);
2162                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2163                 return;
2164         }
2165
2166         if (bp->link_vars.link_up) {
2167                 if (bp->state == BNX2X_STATE_OPEN)
2168                         netif_carrier_on(bp->dev);
2169                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2170
2171                 printk("%d Mbps ", bp->link_vars.line_speed);
2172
2173                 if (bp->link_vars.duplex == DUPLEX_FULL)
2174                         printk("full duplex");
2175                 else
2176                         printk("half duplex");
2177
2178                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2179                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2180                                 printk(", receive ");
2181                                 if (bp->link_vars.flow_ctrl &
2182                                     BNX2X_FLOW_CTRL_TX)
2183                                         printk("& transmit ");
2184                         } else {
2185                                 printk(", transmit ");
2186                         }
2187                         printk("flow control ON");
2188                 }
2189                 printk("\n");
2190
2191         } else { /* link_down */
2192                 netif_carrier_off(bp->dev);
2193                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2194         }
2195 }
2196
2197 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2198 {
2199         if (!BP_NOMCP(bp)) {
2200                 u8 rc;
2201
2202                 /* Initialize link parameters structure variables */
2203                 /* It is recommended to turn off RX FC for jumbo frames
2204                    for better performance */
2205                 if (bp->dev->mtu > 5000)
2206                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2207                 else
2208                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2209
2210                 bnx2x_acquire_phy_lock(bp);
2211
2212                 if (load_mode == LOAD_DIAG)
2213                         bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2214
2215                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2216
2217                 bnx2x_release_phy_lock(bp);
2218
2219                 bnx2x_calc_fc_adv(bp);
2220
2221                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2222                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2223                         bnx2x_link_report(bp);
2224                 }
2225
2226                 return rc;
2227         }
2228         BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2229         return -EINVAL;
2230 }
2231
2232 static void bnx2x_link_set(struct bnx2x *bp)
2233 {
2234         if (!BP_NOMCP(bp)) {
2235                 bnx2x_acquire_phy_lock(bp);
2236                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2237                 bnx2x_release_phy_lock(bp);
2238
2239                 bnx2x_calc_fc_adv(bp);
2240         } else
2241                 BNX2X_ERR("Bootcode is missing - can not set link\n");
2242 }
2243
2244 static void bnx2x__link_reset(struct bnx2x *bp)
2245 {
2246         if (!BP_NOMCP(bp)) {
2247                 bnx2x_acquire_phy_lock(bp);
2248                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2249                 bnx2x_release_phy_lock(bp);
2250         } else
2251                 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2252 }
2253
2254 static u8 bnx2x_link_test(struct bnx2x *bp)
2255 {
2256         u8 rc;
2257
2258         bnx2x_acquire_phy_lock(bp);
2259         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2260         bnx2x_release_phy_lock(bp);
2261
2262         return rc;
2263 }
2264
2265 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2266 {
2267         u32 r_param = bp->link_vars.line_speed / 8;
2268         u32 fair_periodic_timeout_usec;
2269         u32 t_fair;
2270
2271         memset(&(bp->cmng.rs_vars), 0,
2272                sizeof(struct rate_shaping_vars_per_port));
2273         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2274
2275         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2276         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2277
2278         /* this is the threshold below which no timer arming will occur;
2279            the 1.25 coefficient makes the threshold a little bigger than
2280            the real time, to compensate for timer inaccuracy */
2281         bp->cmng.rs_vars.rs_threshold =
2282                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2283
2284         /* resolution of fairness timer */
2285         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2286         /* for 10G it is 1000 usec; for 1G it is 10000 usec */
2287         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2288
2289         /* this is the threshold below which we won't arm the timer anymore */
2290         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2291
2292         /* we multiply by 1e3/8 to get bytes/msec.
2293            We don't want the credits to exceed
2294            t_fair*FAIR_MEM (the algorithm resolution) */
2295         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2296         /* since each tick is 4 usec */
2297         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2298 }
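
/* Worked example (illustrative only, assuming a 10G link and that
 * RS_PERIODIC_TIMEOUT_USEC is 100 usec, as the "100 usec" comment above
 * implies):
 *
 *	r_param             = 10000 / 8           = 1250 bytes per usec
 *	rs_periodic_timeout = 100 / 4              = 25 SDM ticks
 *	rs_threshold        = (100 * 1250 * 5) / 4 = 156250 bytes
 *	t_fair              = T_FAIR_COEF / 10000  = 1000 usec
 *	                      (10000 usec on a 1G link, per the comment above)
 */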
2299
2300 /* Calculates the sum of vn_min_rates.
2301    It's needed for further normalizing of the min_rates.
2302    Stores in bp->vn_weight_sum:
2303      the sum of vn_min_rates,
2304        or
2305      0 - if all the min_rates are 0.
2306      In the latter case the fairness algorithm should be deactivated.
2307      If not all min_rates are zero then those that are zero will be set to 1.
2308  */
2309 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2310 {
2311         int all_zero = 1;
2312         int port = BP_PORT(bp);
2313         int vn;
2314
2315         bp->vn_weight_sum = 0;
2316         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2317                 int func = 2*vn + port;
2318                 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2319                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2320                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2321
2322                 /* Skip hidden vns */
2323                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2324                         continue;
2325
2326                 /* If min rate is zero - set it to 1 */
2327                 if (!vn_min_rate)
2328                         vn_min_rate = DEF_MIN_RATE;
2329                 else
2330                         all_zero = 0;
2331
2332                 bp->vn_weight_sum += vn_min_rate;
2333         }
2334
2335         /* ... only if all min rates are zeros - disable fairness */
2336         if (all_zero) {
2337                 bp->cmng.flags.cmng_enables &=
2338                                         ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2339                 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2340                    "  fairness will be disabled\n");
2341         } else
2342                 bp->cmng.flags.cmng_enables |=
2343                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2344 }
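
/* Illustrative example (hypothetical configuration, not taken from real
 * shmem contents): with min_bw values of 0, 10, 30 and 60 for the four VNs
 * of a port, the per-VN rates become DEF_MIN_RATE, 1000, 3000 and 6000
 * (min_bw * 100), so vn_weight_sum = DEF_MIN_RATE + 10000 and fairness
 * stays enabled.  Only when every min_bw is zero is
 * CMNG_FLAGS_PER_PORT_FAIRNESS_VN cleared and fairness disabled.
 */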
2345
2346 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2347 {
2348         struct rate_shaping_vars_per_vn m_rs_vn;
2349         struct fairness_vars_per_vn m_fair_vn;
2350         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2351         u16 vn_min_rate, vn_max_rate;
2352         int i;
2353
2354         /* If function is hidden - set min and max to zeroes */
2355         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2356                 vn_min_rate = 0;
2357                 vn_max_rate = 0;
2358
2359         } else {
2360                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2361                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2362                 /* If min rate is zero - set it to 1 */
2363                 if (!vn_min_rate)
2364                         vn_min_rate = DEF_MIN_RATE;
2365                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2366                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2367         }
2368         DP(NETIF_MSG_IFUP,
2369            "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
2370            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2371
2372         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2373         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2374
2375         /* global vn counter - maximal Mbps for this vn */
2376         m_rs_vn.vn_counter.rate = vn_max_rate;
2377
2378         /* quota - number of bytes transmitted in this period */
2379         m_rs_vn.vn_counter.quota =
2380                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2381
2382         if (bp->vn_weight_sum) {
2383                 /* credit for each period of the fairness algorithm:
2384                    number of bytes in T_FAIR (the VNs share the port rate).
2385                    vn_weight_sum should not be larger than 10000, thus
2386                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2387                    than zero */
2388                 m_fair_vn.vn_credit_delta =
2389                         max((u32)(vn_min_rate * (T_FAIR_COEF /
2390                                                  (8 * bp->vn_weight_sum))),
2391                             (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2392                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2393                    m_fair_vn.vn_credit_delta);
2394         }
2395
2396         /* Store it to internal memory */
2397         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2398                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2399                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2400                        ((u32 *)(&m_rs_vn))[i]);
2401
2402         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2403                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2404                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2405                        ((u32 *)(&m_fair_vn))[i]);
2406 }
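
/* Worked example (illustrative only, reusing the hypothetical numbers above
 * and assuming RS_PERIODIC_TIMEOUT_USEC is 100 usec): a VN with max_bw of 25
 * gets vn_max_rate = 2500 Mbps, so its rate-shaping quota is
 * (2500 * 100) / 8 = 31250 bytes per period.  With vn_min_rate = 1000 and
 * vn_weight_sum = 10000, the fairness credit is
 * max(1000 * (T_FAIR_COEF / (8 * 10000)), 2 * fair_threshold).
 */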
2407
2408
2409 /* This function is called upon link interrupt */
2410 static void bnx2x_link_attn(struct bnx2x *bp)
2411 {
2412         /* Make sure that we are synced with the current statistics */
2413         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2414
2415         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2416
2417         if (bp->link_vars.link_up) {
2418
2419                 /* dropless flow control */
2420                 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2421                         int port = BP_PORT(bp);
2422                         u32 pause_enabled = 0;
2423
2424                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2425                                 pause_enabled = 1;
2426
2427                         REG_WR(bp, BAR_USTRORM_INTMEM +
2428                                USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2429                                pause_enabled);
2430                 }
2431
2432                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2433                         struct host_port_stats *pstats;
2434
2435                         pstats = bnx2x_sp(bp, port_stats);
2436                         /* reset old bmac stats */
2437                         memset(&(pstats->mac_stx[0]), 0,
2438                                sizeof(struct mac_stx));
2439                 }
2440                 if ((bp->state == BNX2X_STATE_OPEN) ||
2441                     (bp->state == BNX2X_STATE_DISABLED))
2442                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2443         }
2444
2445         /* indicate link status */
2446         bnx2x_link_report(bp);
2447
2448         if (IS_E1HMF(bp)) {
2449                 int port = BP_PORT(bp);
2450                 int func;
2451                 int vn;
2452
2453                 /* Set the attention towards other drivers on the same port */
2454                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2455                         if (vn == BP_E1HVN(bp))
2456                                 continue;
2457
2458                         func = ((vn << 1) | port);
2459                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2460                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2461                 }
2462
2463                 if (bp->link_vars.link_up) {
2464                         int i;
2465
2466                         /* Init rate shaping and fairness contexts */
2467                         bnx2x_init_port_minmax(bp);
2468
2469                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2470                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2471
2472                         /* Store it to internal memory */
2473                         for (i = 0;
2474                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
2475                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2476                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2477                                        ((u32 *)(&bp->cmng))[i]);
2478                 }
2479         }
2480 }
2481
2482 static void bnx2x__link_status_update(struct bnx2x *bp)
2483 {
2484         int func = BP_FUNC(bp);
2485
2486         if (bp->state != BNX2X_STATE_OPEN)
2487                 return;
2488
2489         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2490
2491         if (bp->link_vars.link_up)
2492                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2493         else
2494                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2495
2496         bnx2x_calc_vn_weight_sum(bp);
2497
2498         /* indicate link status */
2499         bnx2x_link_report(bp);
2500 }
2501
2502 static void bnx2x_pmf_update(struct bnx2x *bp)
2503 {
2504         int port = BP_PORT(bp);
2505         u32 val;
2506
2507         bp->port.pmf = 1;
2508         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2509
2510         /* enable nig attention */
2511         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2512         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2513         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2514
2515         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2516 }
2517
2518 /* end of Link */
2519
2520 /* slow path */
2521
2522 /*
2523  * General service functions
2524  */
2525
2526 /* send the MCP a request, block until there is a reply */
2527 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2528 {
2529         int func = BP_FUNC(bp);
2530         u32 seq = ++bp->fw_seq;
2531         u32 rc = 0;
2532         u32 cnt = 1;
2533         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2534
2535         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2536         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2537
2538         do {
2539                 /* let the FW do its magic ... */
2540                 msleep(delay);
2541
2542                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2543
2544                 /* Give the FW up to 2 seconds (200*10ms) */
2545         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
2546
2547         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2548            cnt*delay, rc, seq);
2549
2550         /* is this a reply to our command? */
2551         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2552                 rc &= FW_MSG_CODE_MASK;
2553         else {
2554                 /* FW BUG! */
2555                 BNX2X_ERR("FW failed to respond!\n");
2556                 bnx2x_fw_dump(bp);
2557                 rc = 0;
2558         }
2559
2560         return rc;
2561 }
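
/* Timing sketch (derived from the loop above, not new behaviour): the poll
 * interval is 10 ms on real silicon and 100 ms on slow (emulation/FPGA)
 * chips, and at most 200 polls are made, so a caller blocks for up to ~2 s
 * (~20 s on slow chips) before the "FW failed to respond" path returns 0.
 * Typical use, as in bnx2x_dcc_event() below:
 *
 *	bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
 */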
2562
2563 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2564 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2565 static void bnx2x_set_rx_mode(struct net_device *dev);
2566
2567 static void bnx2x_e1h_disable(struct bnx2x *bp)
2568 {
2569         int port = BP_PORT(bp);
2570         int i;
2571
2572         bp->rx_mode = BNX2X_RX_MODE_NONE;
2573         bnx2x_set_storm_rx_mode(bp);
2574
2575         netif_tx_disable(bp->dev);
2576         bp->dev->trans_start = jiffies; /* prevent tx timeout */
2577
2578         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2579
2580         bnx2x_set_eth_mac_addr_e1h(bp, 0);
2581
2582         for (i = 0; i < MC_HASH_SIZE; i++)
2583                 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
2584
2585         netif_carrier_off(bp->dev);
2586 }
2587
2588 static void bnx2x_e1h_enable(struct bnx2x *bp)
2589 {
2590         int port = BP_PORT(bp);
2591
2592         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2593
2594         bnx2x_set_eth_mac_addr_e1h(bp, 1);
2595
2596         /* Tx queues only need to be re-enabled */
2597         netif_tx_wake_all_queues(bp->dev);
2598
2599         /* Initialize the receive filter. */
2600         bnx2x_set_rx_mode(bp->dev);
2601 }
2602
2603 static void bnx2x_update_min_max(struct bnx2x *bp)
2604 {
2605         int port = BP_PORT(bp);
2606         int vn, i;
2607
2608         /* Init rate shaping and fairness contexts */
2609         bnx2x_init_port_minmax(bp);
2610
2611         bnx2x_calc_vn_weight_sum(bp);
2612
2613         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2614                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2615
2616         if (bp->port.pmf) {
2617                 int func;
2618
2619                 /* Set the attention towards other drivers on the same port */
2620                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2621                         if (vn == BP_E1HVN(bp))
2622                                 continue;
2623
2624                         func = ((vn << 1) | port);
2625                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2626                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2627                 }
2628
2629                 /* Store it to internal memory */
2630                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2631                         REG_WR(bp, BAR_XSTRORM_INTMEM +
2632                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2633                                ((u32 *)(&bp->cmng))[i]);
2634         }
2635 }
2636
2637 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2638 {
2639         DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2640
2641         if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2642
2643                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2644                         DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2645                         bp->state = BNX2X_STATE_DISABLED;
2646
2647                         bnx2x_e1h_disable(bp);
2648                 } else {
2649                         DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2650                         bp->state = BNX2X_STATE_OPEN;
2651
2652                         bnx2x_e1h_enable(bp);
2653                 }
2654                 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2655         }
2656         if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2657
2658                 bnx2x_update_min_max(bp);
2659                 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2660         }
2661
2662         /* Report results to MCP */
2663         if (dcc_event)
2664                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2665         else
2666                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2667 }
2668
2669 /* must be called under the spq lock */
2670 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2671 {
2672         struct eth_spe *next_spe = bp->spq_prod_bd;
2673
2674         if (bp->spq_prod_bd == bp->spq_last_bd) {
2675                 bp->spq_prod_bd = bp->spq;
2676                 bp->spq_prod_idx = 0;
2677                 DP(NETIF_MSG_TIMER, "end of spq\n");
2678         } else {
2679                 bp->spq_prod_bd++;
2680                 bp->spq_prod_idx++;
2681         }
2682         return next_spe;
2683 }
2684
2685 /* must be called under the spq lock */
2686 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2687 {
2688         int func = BP_FUNC(bp);
2689
2690         /* Make sure that BD data is updated before writing the producer */
2691         wmb();
2692
2693         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2694                bp->spq_prod_idx);
2695         mmiowb();
2696 }
2697
2698 /* the slow path queue is odd since completions arrive on the fastpath ring */
2699 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2700                          u32 data_hi, u32 data_lo, int common)
2701 {
2702         struct eth_spe *spe;
2703
2704         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2705            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2706            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2707            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2708            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2709
2710 #ifdef BNX2X_STOP_ON_ERROR
2711         if (unlikely(bp->panic))
2712                 return -EIO;
2713 #endif
2714
2715         spin_lock_bh(&bp->spq_lock);
2716
2717         if (!bp->spq_left) {
2718                 BNX2X_ERR("BUG! SPQ ring full!\n");
2719                 spin_unlock_bh(&bp->spq_lock);
2720                 bnx2x_panic();
2721                 return -EBUSY;
2722         }
2723
2724         spe = bnx2x_sp_get_next(bp);
2725
2726         /* The CID needs the port number encoded in it */
2727         spe->hdr.conn_and_cmd_data =
2728                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2729                                      HW_CID(bp, cid)));
2730         spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2731         if (common)
2732                 spe->hdr.type |=
2733                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2734
2735         spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2736         spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2737
2738         bp->spq_left--;
2739
2740         bnx2x_sp_prod_update(bp);
2741         spin_unlock_bh(&bp->spq_lock);
2742         return 0;
2743 }
2744
2745 /* acquire split MCP access lock register */
2746 static int bnx2x_acquire_alr(struct bnx2x *bp)
2747 {
2748         u32 i, j, val;
2749         int rc = 0;
2750
2751         might_sleep();
2752         i = 100;
2753         for (j = 0; j < i*10; j++) {
2754                 val = (1UL << 31);
2755                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2756                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2757                 if (val & (1L << 31))
2758                         break;
2759
2760                 msleep(5);
2761         }
2762         if (!(val & (1L << 31))) {
2763                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2764                 rc = -EBUSY;
2765         }
2766
2767         return rc;
2768 }
2769
2770 /* release split MCP access lock register */
2771 static void bnx2x_release_alr(struct bnx2x *bp)
2772 {
2773         u32 val = 0;
2774
2775         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2776 }
2777
2778 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2779 {
2780         struct host_def_status_block *def_sb = bp->def_status_blk;
2781         u16 rc = 0;
2782
2783         barrier(); /* status block is written to by the chip */
2784         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2785                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2786                 rc |= 1;
2787         }
2788         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2789                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2790                 rc |= 2;
2791         }
2792         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2793                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2794                 rc |= 4;
2795         }
2796         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2797                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2798                 rc |= 8;
2799         }
2800         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2801                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2802                 rc |= 16;
2803         }
2804         return rc;
2805 }
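
/* Summary of the return value above (descriptive only): the result is a
 * bitmask of which default-status-block indices changed since the last
 * call - bit 0: attention bits, bit 1: CSTORM, bit 2: USTORM, bit 3: XSTORM,
 * bit 4: TSTORM.  A zero return means the chip wrote nothing new.
 */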
2806
2807 /*
2808  * slow path service functions
2809  */
2810
2811 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2812 {
2813         int port = BP_PORT(bp);
2814         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2815                        COMMAND_REG_ATTN_BITS_SET);
2816         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2817                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2818         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2819                                        NIG_REG_MASK_INTERRUPT_PORT0;
2820         u32 aeu_mask;
2821         u32 nig_mask = 0;
2822
2823         if (bp->attn_state & asserted)
2824                 BNX2X_ERR("IGU ERROR\n");
2825
2826         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2827         aeu_mask = REG_RD(bp, aeu_addr);
2828
2829         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2830            aeu_mask, asserted);
2831         aeu_mask &= ~(asserted & 0xff);
2832         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2833
2834         REG_WR(bp, aeu_addr, aeu_mask);
2835         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2836
2837         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2838         bp->attn_state |= asserted;
2839         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2840
2841         if (asserted & ATTN_HARD_WIRED_MASK) {
2842                 if (asserted & ATTN_NIG_FOR_FUNC) {
2843
2844                         bnx2x_acquire_phy_lock(bp);
2845
2846                         /* save nig interrupt mask */
2847                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2848                         REG_WR(bp, nig_int_mask_addr, 0);
2849
2850                         bnx2x_link_attn(bp);
2851
2852                         /* handle unicore attn? */
2853                 }
2854                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2855                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2856
2857                 if (asserted & GPIO_2_FUNC)
2858                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2859
2860                 if (asserted & GPIO_3_FUNC)
2861                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2862
2863                 if (asserted & GPIO_4_FUNC)
2864                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2865
2866                 if (port == 0) {
2867                         if (asserted & ATTN_GENERAL_ATTN_1) {
2868                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2869                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2870                         }
2871                         if (asserted & ATTN_GENERAL_ATTN_2) {
2872                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2873                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2874                         }
2875                         if (asserted & ATTN_GENERAL_ATTN_3) {
2876                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2877                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2878                         }
2879                 } else {
2880                         if (asserted & ATTN_GENERAL_ATTN_4) {
2881                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2882                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2883                         }
2884                         if (asserted & ATTN_GENERAL_ATTN_5) {
2885                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2886                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2887                         }
2888                         if (asserted & ATTN_GENERAL_ATTN_6) {
2889                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2890                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2891                         }
2892                 }
2893
2894         } /* if hardwired */
2895
2896         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2897            asserted, hc_addr);
2898         REG_WR(bp, hc_addr, asserted);
2899
2900         /* now set back the mask */
2901         if (asserted & ATTN_NIG_FOR_FUNC) {
2902                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2903                 bnx2x_release_phy_lock(bp);
2904         }
2905 }
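/*
 * Assertion path summary: newly asserted bits are first masked in the
 * per-port AEU mask (under the HW lock) so they cannot re-fire, then
 * recorded in bp->attn_state.  Of the hard-wired attentions, the
 * NIG/link attention is handled under the PHY lock with the NIG
 * interrupt mask saved and restored around bnx2x_link_attn(), and the
 * general attentions are simply cleared.  Finally the asserted bits
 * are written to the HC ATTN_BITS_SET command register and the NIG
 * mask is put back.
 */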
2906
2907 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2908 {
2909         int port = BP_PORT(bp);
2910
2911         /* mark the failure */
2912         bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2913         bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2914         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2915                  bp->link_params.ext_phy_config);
2916
2917         /* log the failure */
2918         printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2919                " the driver to shut down the card to prevent permanent"
2920                " damage.  Please contact Dell Support for assistance\n",
2921                bp->dev->name);
2922 }
2923
2924 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2925 {
2926         int port = BP_PORT(bp);
2927         int reg_offset;
2928         u32 val, swap_val, swap_override;
2929
2930         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2931                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2932
2933         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2934
2935                 val = REG_RD(bp, reg_offset);
2936                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2937                 REG_WR(bp, reg_offset, val);
2938
2939                 BNX2X_ERR("SPIO5 hw attention\n");
2940
2941                 /* Fan failure attention */
2942                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2943                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2944                         /* Low power mode is controlled by GPIO 2 */
2945                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2946                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2947                         /* The PHY reset is controlled by GPIO 1 */
2948                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2949                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2950                         break;
2951
2952                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2953                         /* The PHY reset is controlled by GPIO 1 */
2954                         /* fake the port number to cancel the swap done in
2955                            set_gpio() */
2956                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2957                         swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2958                         port = (swap_val && swap_override) ^ 1;
2959                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2960                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2961                         break;
2962
2963                 default:
2964                         break;
2965                 }
2966                 bnx2x_fan_failure(bp);
2967         }
2968
2969         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2970                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2971                 bnx2x_acquire_phy_lock(bp);
2972                 bnx2x_handle_module_detect_int(&bp->link_params);
2973                 bnx2x_release_phy_lock(bp);
2974         }
2975
2976         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2977
2978                 val = REG_RD(bp, reg_offset);
2979                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2980                 REG_WR(bp, reg_offset, val);
2981
2982                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2983                           (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2984                 bnx2x_panic();
2985         }
2986 }
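/*
 * SPIO5 carries the fan-failure attention: the bit is masked in the
 * AEU enable register, the external PHY is put into reset / low power
 * via GPIOs (with the GPIO port recomputed from the NIG port-swap
 * straps for the BCM8727, as the comment above explains), and
 * bnx2x_fan_failure() records the failure in shared memory and logs
 * the shutdown.
 */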
2987
2988 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2989 {
2990         u32 val;
2991
2992         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2993
2994                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2995                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2996                 /* DORQ discard attention */
2997                 if (val & 0x2)
2998                         BNX2X_ERR("FATAL error from DORQ\n");
2999         }
3000
3001         if (attn & HW_INTERRUT_ASSERT_SET_1) {
3002
3003                 int port = BP_PORT(bp);
3004                 int reg_offset;
3005
3006                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3007                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3008
3009                 val = REG_RD(bp, reg_offset);
3010                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3011                 REG_WR(bp, reg_offset, val);
3012
3013                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
3014                           (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
3015                 bnx2x_panic();
3016         }
3017 }
3018
3019 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3020 {
3021         u32 val;
3022
3023         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3024
3025                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3026                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3027                 /* CFC error attention */
3028                 if (val & 0x2)
3029                         BNX2X_ERR("FATAL error from CFC\n");
3030         }
3031
3032         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3033
3034                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3035                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3036                 /* RQ_USDMDP_FIFO_OVERFLOW */
3037                 if (val & 0x18000)
3038                         BNX2X_ERR("FATAL error from PXP\n");
3039         }
3040
3041         if (attn & HW_INTERRUT_ASSERT_SET_2) {
3042
3043                 int port = BP_PORT(bp);
3044                 int reg_offset;
3045
3046                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3047                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3048
3049                 val = REG_RD(bp, reg_offset);
3050                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3051                 REG_WR(bp, reg_offset, val);
3052
3053                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3054                           (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3055                 bnx2x_panic();
3056         }
3057 }
3058
3059 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3060 {
3061         u32 val;
3062
3063         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3064
3065                 if (attn & BNX2X_PMF_LINK_ASSERT) {
3066                         int func = BP_FUNC(bp);
3067
3068                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3069                         bp->mf_config = SHMEM_RD(bp,
3070                                            mf_cfg.func_mf_config[func].config);
3071                         val = SHMEM_RD(bp, func_mb[func].drv_status);
3072                         if (val & DRV_STATUS_DCC_EVENT_MASK)
3073                                 bnx2x_dcc_event(bp,
3074                                             (val & DRV_STATUS_DCC_EVENT_MASK));
3075                         bnx2x__link_status_update(bp);
3076                         if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3077                                 bnx2x_pmf_update(bp);
3078
3079                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3080
3081                         BNX2X_ERR("MC assert!\n");
3082                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3083                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3084                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3085                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3086                         bnx2x_panic();
3087
3088                 } else if (attn & BNX2X_MCP_ASSERT) {
3089
3090                         BNX2X_ERR("MCP assert!\n");
3091                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3092                         bnx2x_fw_dump(bp);
3093
3094                 } else
3095                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3096         }
3097
3098         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3099                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3100                 if (attn & BNX2X_GRC_TIMEOUT) {
3101                         val = CHIP_IS_E1H(bp) ?
3102                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3103                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
3104                 }
3105                 if (attn & BNX2X_GRC_RSV) {
3106                         val = CHIP_IS_E1H(bp) ?
3107                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3108                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
3109                 }
3110                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3111         }
3112 }
3113
3114 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3115 {
3116         struct attn_route attn;
3117         struct attn_route group_mask;
3118         int port = BP_PORT(bp);
3119         int index;
3120         u32 reg_addr;
3121         u32 val;
3122         u32 aeu_mask;
3123
3124         /* need to take HW lock because MCP or other port might also
3125            try to handle this event */
3126         bnx2x_acquire_alr(bp);
3127
3128         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3129         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3130         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3131         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3132         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3133            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3134
3135         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3136                 if (deasserted & (1 << index)) {
3137                         group_mask = bp->attn_group[index];
3138
3139                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3140                            index, group_mask.sig[0], group_mask.sig[1],
3141                            group_mask.sig[2], group_mask.sig[3]);
3142
3143                         bnx2x_attn_int_deasserted3(bp,
3144                                         attn.sig[3] & group_mask.sig[3]);
3145                         bnx2x_attn_int_deasserted1(bp,
3146                                         attn.sig[1] & group_mask.sig[1]);
3147                         bnx2x_attn_int_deasserted2(bp,
3148                                         attn.sig[2] & group_mask.sig[2]);
3149                         bnx2x_attn_int_deasserted0(bp,
3150                                         attn.sig[0] & group_mask.sig[0]);
3151
3152                         if ((attn.sig[0] & group_mask.sig[0] &
3153                                                 HW_PRTY_ASSERT_SET_0) ||
3154                             (attn.sig[1] & group_mask.sig[1] &
3155                                                 HW_PRTY_ASSERT_SET_1) ||
3156                             (attn.sig[2] & group_mask.sig[2] &
3157                                                 HW_PRTY_ASSERT_SET_2))
3158                                 BNX2X_ERR("FATAL HW block parity attention\n");
3159                 }
3160         }
3161
3162         bnx2x_release_alr(bp);
3163
3164         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3165
3166         val = ~deasserted;
3167         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3168            val, reg_addr);
3169         REG_WR(bp, reg_addr, val);
3170
3171         if (~bp->attn_state & deasserted)
3172                 BNX2X_ERR("IGU ERROR\n");
3173
3174         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3175                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
3176
3177         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3178         aeu_mask = REG_RD(bp, reg_addr);
3179
3180         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
3181            aeu_mask, deasserted);
3182         aeu_mask |= (deasserted & 0xff);
3183         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3184
3185         REG_WR(bp, reg_addr, aeu_mask);
3186         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3187
3188         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3189         bp->attn_state &= ~deasserted;
3190         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3191 }
3192
3193 static void bnx2x_attn_int(struct bnx2x *bp)
3194 {
3195         /* read local copy of bits */
3196         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3197                                                                 attn_bits);
3198         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3199                                                                 attn_bits_ack);
3200         u32 attn_state = bp->attn_state;
3201
3202         /* look for changed bits */
3203         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
3204         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
3205
3206         DP(NETIF_MSG_HW,
3207            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
3208            attn_bits, attn_ack, asserted, deasserted);
3209
3210         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3211                 BNX2X_ERR("BAD attention state\n");
3212
3213         /* handle bits that were raised */
3214         if (asserted)
3215                 bnx2x_attn_int_asserted(bp, asserted);
3216
3217         if (deasserted)
3218                 bnx2x_attn_int_deasserted(bp, deasserted);
3219 }
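/*
 * A bit is considered newly asserted only when the chip reports it set
 * (attn_bits = 1) while it is neither acknowledged (attn_ack = 0) nor
 * already tracked (attn_state = 0), and newly deasserted in the mirror
 * case (0/1/1).  The sanity check above flags the inconsistent case
 * where attn_bits and attn_ack agree but bp->attn_state disagrees.
 */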
3220
3221 static void bnx2x_sp_task(struct work_struct *work)
3222 {
3223         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3224         u16 status;
3225
3226
3227         /* Return here if interrupt is disabled */
3228         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3229                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3230                 return;
3231         }
3232
3233         status = bnx2x_update_dsb_idx(bp);
3234 /*      if (status == 0)                                     */
3235 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
3236
3237         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
3238
3239         /* HW attentions */
3240         if (status & 0x1)
3241                 bnx2x_attn_int(bp);
3242
3243         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3244                      IGU_INT_NOP, 1);
3245         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3246                      IGU_INT_NOP, 1);
3247         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3248                      IGU_INT_NOP, 1);
3249         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3250                      IGU_INT_NOP, 1);
3251         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3252                      IGU_INT_ENABLE, 1);
3253
3254 }
3255
3256 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3257 {
3258         struct net_device *dev = dev_instance;
3259         struct bnx2x *bp = netdev_priv(dev);
3260
3261         /* Return here if interrupt is disabled */
3262         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3263                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3264                 return IRQ_HANDLED;
3265         }
3266
3267         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3268
3269 #ifdef BNX2X_STOP_ON_ERROR
3270         if (unlikely(bp->panic))
3271                 return IRQ_HANDLED;
3272 #endif
3273
3274 #ifdef BCM_CNIC
3275         {
3276                 struct cnic_ops *c_ops;
3277
3278                 rcu_read_lock();
3279                 c_ops = rcu_dereference(bp->cnic_ops);
3280                 if (c_ops)
3281                         c_ops->cnic_handler(bp->cnic_data, NULL);
3282                 rcu_read_unlock();
3283         }
3284 #endif
3285         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3286
3287         return IRQ_HANDLED;
3288 }
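/*
 * The slow-path MSI-X vector only acknowledges the default status
 * block with IGU_INT_DISABLE, lets the cnic handler run if one is
 * registered, and defers the real work to bnx2x_sp_task() on the
 * bnx2x_wq workqueue; the final TSTORM ack in bnx2x_sp_task() is the
 * one that re-enables the interrupt (IGU_INT_ENABLE).
 */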
3289
3290 /* end of slow path */
3291
3292 /* Statistics */
3293
3294 /****************************************************************************
3295 * Macros
3296 ****************************************************************************/
3297
3298 /* sum[hi:lo] += add[hi:lo] */
3299 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3300         do { \
3301                 s_lo += a_lo; \
3302                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3303         } while (0)
3304
3305 /* difference = minuend - subtrahend */
3306 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3307         do { \
3308                 if (m_lo < s_lo) { \
3309                         /* underflow */ \
3310                         d_hi = m_hi - s_hi; \
3311                         if (d_hi > 0) { \
3312                                 /* we can 'loan' 1 */ \
3313                                 d_hi--; \
3314                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3315                         } else { \
3316                                 /* m_hi <= s_hi */ \
3317                                 d_hi = 0; \
3318                                 d_lo = 0; \
3319                         } \
3320                 } else { \
3321                         /* m_lo >= s_lo */ \
3322                         if (m_hi < s_hi) { \
3323                                 d_hi = 0; \
3324                                 d_lo = 0; \
3325                         } else { \
3326                                 /* m_hi >= s_hi */ \
3327                                 d_hi = m_hi - s_hi; \
3328                                 d_lo = m_lo - s_lo; \
3329                         } \
3330                 } \
3331         } while (0)
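/*
 * A minimal, self-contained sketch (illustrative only, not part of the
 * driver) of how ADD_64/DIFF_64 emulate 64-bit arithmetic on split
 * hi:lo u32 pairs: the low word carries into the high word on overflow
 * and borrows from it on underflow.
 */
static void bnx2x_stats64_example(void)
{
        u32 s_hi = 0, s_lo = 0xffffffff;        /* value 0x00000000ffffffff */
        u32 d_hi, d_lo;

        /* 0xffffffff + 1 carries: s becomes hi:lo = 1:0x00000000 */
        ADD_64(s_hi, 0, s_lo, 1);

        /* 0x100000000 - 1 borrows: d becomes hi:lo = 0:0xffffffff */
        DIFF_64(d_hi, s_hi, 0, d_lo, s_lo, 1);
}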
3332
3333 #define UPDATE_STAT64(s, t) \
3334         do { \
3335                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3336                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3337                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3338                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3339                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3340                        pstats->mac_stx[1].t##_lo, diff.lo); \
3341         } while (0)
3342
3343 #define UPDATE_STAT64_NIG(s, t) \
3344         do { \
3345                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3346                         diff.lo, new->s##_lo, old->s##_lo); \
3347                 ADD_64(estats->t##_hi, diff.hi, \
3348                        estats->t##_lo, diff.lo); \
3349         } while (0)
3350
3351 /* sum[hi:lo] += add */
3352 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3353         do { \
3354                 s_lo += a; \
3355                 s_hi += (s_lo < a) ? 1 : 0; \
3356         } while (0)
3357
3358 #define UPDATE_EXTEND_STAT(s) \
3359         do { \
3360                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3361                               pstats->mac_stx[1].s##_lo, \
3362                               new->s); \
3363         } while (0)
3364
3365 #define UPDATE_EXTEND_TSTAT(s, t) \
3366         do { \
3367                 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3368                 old_tclient->s = tclient->s; \
3369                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3370         } while (0)
3371
3372 #define UPDATE_EXTEND_USTAT(s, t) \
3373         do { \
3374                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3375                 old_uclient->s = uclient->s; \
3376                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3377         } while (0)
3378
3379 #define UPDATE_EXTEND_XSTAT(s, t) \
3380         do { \
3381                 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3382                 old_xclient->s = xclient->s; \
3383                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3384         } while (0)
3385
3386 /* minuend -= subtrahend */
3387 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3388         do { \
3389                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3390         } while (0)
3391
3392 /* minuend[hi:lo] -= subtrahend */
3393 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3394         do { \
3395                 SUB_64(m_hi, 0, m_lo, s); \
3396         } while (0)
3397
3398 #define SUB_EXTEND_USTAT(s, t) \
3399         do { \
3400                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3401                 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3402         } while (0)
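/*
 * In the helpers above, mac_stx[0] holds the latest raw MAC snapshot
 * and mac_stx[1] the running totals (UPDATE_STAT64), UPDATE_EXTEND_STAT
 * accumulates an EMAC counter straight into mac_stx[1], and the
 * UPDATE_EXTEND_{T,U,X}STAT / SUB_EXTEND_USTAT variants fold the delta
 * of a 32-bit storm counter into the per-queue hi:lo statistics.
 */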
3403
3404 /*
3405  * General service functions
3406  */
3407
3408 static inline long bnx2x_hilo(u32 *hiref)
3409 {
3410         u32 lo = *(hiref + 1);
3411 #if (BITS_PER_LONG == 64)
3412         u32 hi = *hiref;
3413
3414         return HILO_U64(hi, lo);
3415 #else
3416         return lo;
3417 #endif
3418 }
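/*
 * bnx2x_hilo() expects a pointer to the _hi word of a hi:lo statistics
 * pair (the _lo word follows it in memory).  On 64-bit kernels the two
 * halves are combined, presumably via HILO_U64 as ((u64)hi << 32) | lo;
 * on 32-bit kernels only the low 32 bits are returned, so very large
 * counters are truncated there.
 */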
3419
3420 /*
3421  * Init service functions
3422  */
3423
3424 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3425 {
3426         if (!bp->stats_pending) {
3427                 struct eth_query_ramrod_data ramrod_data = {0};
3428                 int i, rc;
3429
3430                 ramrod_data.drv_counter = bp->stats_counter++;
3431                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3432                 for_each_queue(bp, i)
3433                         ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3434
3435                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3436                                    ((u32 *)&ramrod_data)[1],
3437                                    ((u32 *)&ramrod_data)[0], 0);
3438                 if (rc == 0) {
3439                         /* stats ramrod has its own slot on the spq */
3440                         bp->spq_left++;
3441                         bp->stats_pending = 1;
3442                 }
3443         }
3444 }
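/*
 * The statistics query ramrod has a dedicated slot on the slow-path
 * queue, so the credit taken by bnx2x_sp_post() is handed back
 * immediately; stats_pending then prevents a new query from being
 * posted while the previous one is still outstanding.
 */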
3445
3446 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3447 {
3448         struct dmae_command *dmae = &bp->stats_dmae;
3449         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3450
3451         *stats_comp = DMAE_COMP_VAL;
3452         if (CHIP_REV_IS_SLOW(bp))
3453                 return;
3454
3455         /* loader */
3456         if (bp->executer_idx) {
3457                 int loader_idx = PMF_DMAE_C(bp);
3458
3459                 memset(dmae, 0, sizeof(struct dmae_command));
3460
3461                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3462                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3463                                 DMAE_CMD_DST_RESET |
3464 #ifdef __BIG_ENDIAN
3465                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3466 #else
3467                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3468 #endif
3469                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3470                                                DMAE_CMD_PORT_0) |
3471                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3472                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3473                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3474                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3475                                      sizeof(struct dmae_command) *
3476                                      (loader_idx + 1)) >> 2;
3477                 dmae->dst_addr_hi = 0;
3478                 dmae->len = sizeof(struct dmae_command) >> 2;
3479                 if (CHIP_IS_E1(bp))
3480                         dmae->len--;
3481                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3482                 dmae->comp_addr_hi = 0;
3483                 dmae->comp_val = 1;
3484
3485                 *stats_comp = 0;
3486                 bnx2x_post_dmae(bp, dmae, loader_idx);
3487
3488         } else if (bp->func_stx) {
3489                 *stats_comp = 0;
3490                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3491         }
3492 }
3493
3494 static int bnx2x_stats_comp(struct bnx2x *bp)
3495 {
3496         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3497         int cnt = 10;
3498
3499         might_sleep();
3500         while (*stats_comp != DMAE_COMP_VAL) {
3501                 if (!cnt) {
3502                         BNX2X_ERR("timeout waiting for stats to finish\n");
3503                         break;
3504                 }
3505                 cnt--;
3506                 msleep(1);
3507         }
3508         return 1;
3509 }
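/*
 * bnx2x_stats_comp() waits (up to 10 x 1 ms) for the DMAE engine to
 * write DMAE_COMP_VAL into the stats_comp word of the slow-path
 * buffer, which only the last command of a statistics DMAE chain does.
 */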
3510
3511 /*
3512  * Statistics service functions
3513  */
3514
3515 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3516 {
3517         struct dmae_command *dmae;
3518         u32 opcode;
3519         int loader_idx = PMF_DMAE_C(bp);
3520         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3521
3522         /* sanity */
3523         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3524                 BNX2X_ERR("BUG!\n");
3525                 return;
3526         }
3527
3528         bp->executer_idx = 0;
3529
3530         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3531                   DMAE_CMD_C_ENABLE |
3532                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3533 #ifdef __BIG_ENDIAN
3534                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3535 #else
3536                   DMAE_CMD_ENDIANITY_DW_SWAP |
3537 #endif
3538                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3539                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3540
3541         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3542         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3543         dmae->src_addr_lo = bp->port.port_stx >> 2;
3544         dmae->src_addr_hi = 0;
3545         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3546         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3547         dmae->len = DMAE_LEN32_RD_MAX;
3548         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3549         dmae->comp_addr_hi = 0;
3550         dmae->comp_val = 1;
3551
3552         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3553         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3554         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3555         dmae->src_addr_hi = 0;
3556         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3557                                    DMAE_LEN32_RD_MAX * 4);
3558         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3559                                    DMAE_LEN32_RD_MAX * 4);
3560         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3561         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3562         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3563         dmae->comp_val = DMAE_COMP_VAL;
3564
3565         *stats_comp = 0;
3566         bnx2x_hw_stats_post(bp);
3567         bnx2x_stats_comp(bp);
3568 }
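/*
 * When this function takes over as PMF, the accumulated port
 * statistics are pulled out of shared memory (bp->port.port_stx) in
 * two DMAE reads because the block is larger than a single transfer of
 * DMAE_LEN32_RD_MAX dwords; only the second read signals completion
 * through stats_comp.
 */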
3569
3570 static void bnx2x_port_stats_init(struct bnx2x *bp)
3571 {
3572         struct dmae_command *dmae;
3573         int port = BP_PORT(bp);
3574         int vn = BP_E1HVN(bp);
3575         u32 opcode;
3576         int loader_idx = PMF_DMAE_C(bp);
3577         u32 mac_addr;
3578         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3579
3580         /* sanity */
3581         if (!bp->link_vars.link_up || !bp->port.pmf) {
3582                 BNX2X_ERR("BUG!\n");
3583                 return;
3584         }
3585
3586         bp->executer_idx = 0;
3587
3588         /* MCP */
3589         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3590                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3591                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3592 #ifdef __BIG_ENDIAN
3593                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3594 #else
3595                   DMAE_CMD_ENDIANITY_DW_SWAP |
3596 #endif
3597                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3598                   (vn << DMAE_CMD_E1HVN_SHIFT));
3599
3600         if (bp->port.port_stx) {
3601
3602                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3603                 dmae->opcode = opcode;
3604                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3605                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3606                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3607                 dmae->dst_addr_hi = 0;
3608                 dmae->len = sizeof(struct host_port_stats) >> 2;
3609                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3610                 dmae->comp_addr_hi = 0;
3611                 dmae->comp_val = 1;
3612         }
3613
3614         if (bp->func_stx) {
3615
3616                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3617                 dmae->opcode = opcode;
3618                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3619                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3620                 dmae->dst_addr_lo = bp->func_stx >> 2;
3621                 dmae->dst_addr_hi = 0;
3622                 dmae->len = sizeof(struct host_func_stats) >> 2;
3623                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3624                 dmae->comp_addr_hi = 0;
3625                 dmae->comp_val = 1;
3626         }
3627
3628         /* MAC */
3629         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3630                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3631                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3632 #ifdef __BIG_ENDIAN
3633                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3634 #else
3635                   DMAE_CMD_ENDIANITY_DW_SWAP |
3636 #endif
3637                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3638                   (vn << DMAE_CMD_E1HVN_SHIFT));
3639
3640         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3641
3642                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3643                                    NIG_REG_INGRESS_BMAC0_MEM);
3644
3645                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3646                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3647                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3648                 dmae->opcode = opcode;
3649                 dmae->src_addr_lo = (mac_addr +
3650                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3651                 dmae->src_addr_hi = 0;
3652                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3653                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3654                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3655                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3656                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3657                 dmae->comp_addr_hi = 0;
3658                 dmae->comp_val = 1;
3659
3660                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3661                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3662                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3663                 dmae->opcode = opcode;
3664                 dmae->src_addr_lo = (mac_addr +
3665                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3666                 dmae->src_addr_hi = 0;
3667                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3668                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3669                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3670                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3671                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3672                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3673                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3674                 dmae->comp_addr_hi = 0;
3675                 dmae->comp_val = 1;
3676
3677         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3678
3679                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3680
3681                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3682                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3683                 dmae->opcode = opcode;
3684                 dmae->src_addr_lo = (mac_addr +
3685                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3686                 dmae->src_addr_hi = 0;
3687                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3688                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3689                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3690                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3691                 dmae->comp_addr_hi = 0;
3692                 dmae->comp_val = 1;
3693
3694                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3695                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3696                 dmae->opcode = opcode;
3697                 dmae->src_addr_lo = (mac_addr +
3698                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3699                 dmae->src_addr_hi = 0;
3700                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3701                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3702                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3703                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3704                 dmae->len = 1;
3705                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3706                 dmae->comp_addr_hi = 0;
3707                 dmae->comp_val = 1;
3708
3709                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3710                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3711                 dmae->opcode = opcode;
3712                 dmae->src_addr_lo = (mac_addr +
3713                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3714                 dmae->src_addr_hi = 0;
3715                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3716                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3717                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3718                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3719                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3720                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3721                 dmae->comp_addr_hi = 0;
3722                 dmae->comp_val = 1;
3723         }
3724
3725         /* NIG */
3726         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3727         dmae->opcode = opcode;
3728         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3729                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3730         dmae->src_addr_hi = 0;
3731         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3732         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3733         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3734         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3735         dmae->comp_addr_hi = 0;
3736         dmae->comp_val = 1;
3737
3738         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3739         dmae->opcode = opcode;
3740         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3741                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3742         dmae->src_addr_hi = 0;
3743         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3744                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3745         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3746                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3747         dmae->len = (2*sizeof(u32)) >> 2;
3748         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3749         dmae->comp_addr_hi = 0;
3750         dmae->comp_val = 1;
3751
3752         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3753         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3754                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3755                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3756 #ifdef __BIG_ENDIAN
3757                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3758 #else
3759                         DMAE_CMD_ENDIANITY_DW_SWAP |
3760 #endif
3761                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3762                         (vn << DMAE_CMD_E1HVN_SHIFT));
3763         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3764                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3765         dmae->src_addr_hi = 0;
3766         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3767                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3768         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3769                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3770         dmae->len = (2*sizeof(u32)) >> 2;
3771         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3772         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3773         dmae->comp_val = DMAE_COMP_VAL;
3774
3775         *stats_comp = 0;
3776 }
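/*
 * bnx2x_port_stats_init() only builds the DMAE command chain: write-out
 * of the host port/function statistics to the MCP areas, read-back of
 * the BMAC or EMAC hardware counters, and read-back of the NIG
 * counters.  Every command except the last completes into a DMAE "go"
 * register; the final NIG command writes DMAE_COMP_VAL into stats_comp,
 * and bnx2x_hw_stats_post() actually kicks the chain off.
 */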
3777
3778 static void bnx2x_func_stats_init(struct bnx2x *bp)
3779 {
3780         struct dmae_command *dmae = &bp->stats_dmae;
3781         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3782
3783         /* sanity */
3784         if (!bp->func_stx) {
3785                 BNX2X_ERR("BUG!\n");
3786                 return;
3787         }
3788
3789         bp->executer_idx = 0;
3790         memset(dmae, 0, sizeof(struct dmae_command));
3791
3792         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3793                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3794                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3795 #ifdef __BIG_ENDIAN
3796                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3797 #else
3798                         DMAE_CMD_ENDIANITY_DW_SWAP |
3799 #endif
3800                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3801                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3802         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3803         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3804         dmae->dst_addr_lo = bp->func_stx >> 2;
3805         dmae->dst_addr_hi = 0;
3806         dmae->len = sizeof(struct host_func_stats) >> 2;
3807         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3808         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3809         dmae->comp_val = DMAE_COMP_VAL;
3810
3811         *stats_comp = 0;
3812 }
3813
3814 static void bnx2x_stats_start(struct bnx2x *bp)
3815 {
3816         if (bp->port.pmf)
3817                 bnx2x_port_stats_init(bp);
3818
3819         else if (bp->func_stx)
3820                 bnx2x_func_stats_init(bp);
3821
3822         bnx2x_hw_stats_post(bp);
3823         bnx2x_storm_stats_post(bp);
3824 }
3825
3826 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3827 {
3828         bnx2x_stats_comp(bp);
3829         bnx2x_stats_pmf_update(bp);
3830         bnx2x_stats_start(bp);
3831 }
3832
3833 static void bnx2x_stats_restart(struct bnx2x *bp)
3834 {
3835         bnx2x_stats_comp(bp);
3836         bnx2x_stats_start(bp);
3837 }
3838
3839 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3840 {
3841         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3842         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3843         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3844         struct {
3845                 u32 lo;
3846                 u32 hi;
3847         } diff;
3848
3849         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3850         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3851         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3852         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3853         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3854         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3855         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3856         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3857         UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3858         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3859         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3860         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3861         UPDATE_STAT64(tx_stat_gt127,
3862                                 tx_stat_etherstatspkts65octetsto127octets);
3863         UPDATE_STAT64(tx_stat_gt255,
3864                                 tx_stat_etherstatspkts128octetsto255octets);
3865         UPDATE_STAT64(tx_stat_gt511,
3866                                 tx_stat_etherstatspkts256octetsto511octets);
3867         UPDATE_STAT64(tx_stat_gt1023,
3868                                 tx_stat_etherstatspkts512octetsto1023octets);
3869         UPDATE_STAT64(tx_stat_gt1518,
3870                                 tx_stat_etherstatspkts1024octetsto1522octets);
3871         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3872         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3873         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3874         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3875         UPDATE_STAT64(tx_stat_gterr,
3876                                 tx_stat_dot3statsinternalmactransmiterrors);
3877         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3878
3879         estats->pause_frames_received_hi =
3880                                 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3881         estats->pause_frames_received_lo =
3882                                 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3883
3884         estats->pause_frames_sent_hi =
3885                                 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3886         estats->pause_frames_sent_lo =
3887                                 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3888 }
3889
3890 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3891 {
3892         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3893         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3894         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3895
3896         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3897         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3898         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3899         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3900         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3901         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3902         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3903         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3904         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3905         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3906         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3907         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3908         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3909         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3910         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3911         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3912         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3913         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3914         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3915         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3916         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3917         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3918         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3919         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3920         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3921         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3922         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3923         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3924         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3925         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3926         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3927
3928         estats->pause_frames_received_hi =
3929                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3930         estats->pause_frames_received_lo =
3931                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3932         ADD_64(estats->pause_frames_received_hi,
3933                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3934                estats->pause_frames_received_lo,
3935                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3936
3937         estats->pause_frames_sent_hi =
3938                         pstats->mac_stx[1].tx_stat_outxonsent_hi;
3939         estats->pause_frames_sent_lo =
3940                         pstats->mac_stx[1].tx_stat_outxonsent_lo;
3941         ADD_64(estats->pause_frames_sent_hi,
3942                pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3943                estats->pause_frames_sent_lo,
3944                pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3945 }
3946
3947 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3948 {
3949         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3950         struct nig_stats *old = &(bp->port.old_nig_stats);
3951         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3952         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3953         struct {
3954                 u32 lo;
3955                 u32 hi;
3956         } diff;
3957         u32 nig_timer_max;
3958
3959         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3960                 bnx2x_bmac_stats_update(bp);
3961
3962         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3963                 bnx2x_emac_stats_update(bp);
3964
3965         else { /* unreached */
3966                 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3967                 return -1;
3968         }
3969
3970         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3971                       new->brb_discard - old->brb_discard);
3972         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3973                       new->brb_truncate - old->brb_truncate);
3974
3975         UPDATE_STAT64_NIG(egress_mac_pkt0,
3976                                         etherstatspkts1024octetsto1522octets);
3977         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3978
3979         memcpy(old, new, sizeof(struct nig_stats));
3980
3981         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3982                sizeof(struct mac_stx));
3983         estats->brb_drop_hi = pstats->brb_drop_hi;
3984         estats->brb_drop_lo = pstats->brb_drop_lo;
3985
3986         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3987
3988         nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3989         if (nig_timer_max != estats->nig_timer_max) {
3990                 estats->nig_timer_max = nig_timer_max;
3991                 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3992         }
3993
3994         return 0;
3995 }
3996
3997 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3998 {
3999         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
4000         struct tstorm_per_port_stats *tport =
4001                                         &stats->tstorm_common.port_statistics;
4002         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
4003         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4004         int i;
4005
4006         memcpy(&(fstats->total_bytes_received_hi),
4007                &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
4008                sizeof(struct host_func_stats) - 2*sizeof(u32));
4009         estats->error_bytes_received_hi = 0;
4010         estats->error_bytes_received_lo = 0;
4011         estats->etherstatsoverrsizepkts_hi = 0;
4012         estats->etherstatsoverrsizepkts_lo = 0;
4013         estats->no_buff_discard_hi = 0;
4014         estats->no_buff_discard_lo = 0;
4015
4016         for_each_rx_queue(bp, i) {
4017                 struct bnx2x_fastpath *fp = &bp->fp[i];
4018                 int cl_id = fp->cl_id;
4019                 struct tstorm_per_client_stats *tclient =
4020                                 &stats->tstorm_common.client_statistics[cl_id];
4021                 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4022                 struct ustorm_per_client_stats *uclient =
4023                                 &stats->ustorm_common.client_statistics[cl_id];
4024                 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4025                 struct xstorm_per_client_stats *xclient =
4026                                 &stats->xstorm_common.client_statistics[cl_id];
4027                 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4028                 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4029                 u32 diff;
4030
4031                 /* are storm stats valid? */
4032                 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
4033                                                         bp->stats_counter) {
4034                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4035                            "  xstorm counter (%d) != stats_counter (%d)\n",
4036                            i, xclient->stats_counter, bp->stats_counter);
4037                         return -1;
4038                 }
4039                 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
4040                                                         bp->stats_counter) {
4041                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4042                            "  tstorm counter (%d) != stats_counter (%d)\n",
4043                            i, tclient->stats_counter, bp->stats_counter);
4044                         return -2;
4045                 }
4046                 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4047                                                         bp->stats_counter) {
4048                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4049                            "  ustorm counter (%d) != stats_counter (%d)\n",
4050                            i, uclient->stats_counter, bp->stats_counter);
4051                         return -4;
4052                 }
4053
4054                 qstats->total_bytes_received_hi =
4055                         le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4056                 qstats->total_bytes_received_lo =
4057                         le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4058
4059                 ADD_64(qstats->total_bytes_received_hi,
4060                        le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4061                        qstats->total_bytes_received_lo,
4062                        le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4063
4064                 ADD_64(qstats->total_bytes_received_hi,
4065                        le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4066                        qstats->total_bytes_received_lo,
4067                        le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4068
4069                 qstats->valid_bytes_received_hi =
4070                                         qstats->total_bytes_received_hi;
4071                 qstats->valid_bytes_received_lo =
4072                                         qstats->total_bytes_received_lo;
4073
4074                 qstats->error_bytes_received_hi =
4075                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
4076                 qstats->error_bytes_received_lo =
4077                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
4078
4079                 ADD_64(qstats->total_bytes_received_hi,
4080                        qstats->error_bytes_received_hi,
4081                        qstats->total_bytes_received_lo,
4082                        qstats->error_bytes_received_lo);
4083
4084                 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4085                                         total_unicast_packets_received);
4086                 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4087                                         total_multicast_packets_received);
4088                 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4089                                         total_broadcast_packets_received);
4090                 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4091                                         etherstatsoverrsizepkts);
4092                 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4093
4094                 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4095                                         total_unicast_packets_received);
4096                 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4097                                         total_multicast_packets_received);
4098                 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4099                                         total_broadcast_packets_received);
4100                 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4101                 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4102                 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4103
4104                 qstats->total_bytes_transmitted_hi =
4105                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4106                 qstats->total_bytes_transmitted_lo =
4107                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4108
4109                 ADD_64(qstats->total_bytes_transmitted_hi,
4110                        le32_to_cpu(xclient->multicast_bytes_sent.hi),
4111                        qstats->total_bytes_transmitted_lo,
4112                        le32_to_cpu(xclient->multicast_bytes_sent.lo));
4113
4114                 ADD_64(qstats->total_bytes_transmitted_hi,
4115                        le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4116                        qstats->total_bytes_transmitted_lo,
4117                        le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4118
4119                 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4120                                         total_unicast_packets_transmitted);
4121                 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4122                                         total_multicast_packets_transmitted);
4123                 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4124                                         total_broadcast_packets_transmitted);
4125
4126                 old_tclient->checksum_discard = tclient->checksum_discard;
4127                 old_tclient->ttl0_discard = tclient->ttl0_discard;
4128
4129                 ADD_64(fstats->total_bytes_received_hi,
4130                        qstats->total_bytes_received_hi,
4131                        fstats->total_bytes_received_lo,
4132                        qstats->total_bytes_received_lo);
4133                 ADD_64(fstats->total_bytes_transmitted_hi,
4134                        qstats->total_bytes_transmitted_hi,
4135                        fstats->total_bytes_transmitted_lo,
4136                        qstats->total_bytes_transmitted_lo);
4137                 ADD_64(fstats->total_unicast_packets_received_hi,
4138                        qstats->total_unicast_packets_received_hi,
4139                        fstats->total_unicast_packets_received_lo,
4140                        qstats->total_unicast_packets_received_lo);
4141                 ADD_64(fstats->total_multicast_packets_received_hi,
4142                        qstats->total_multicast_packets_received_hi,
4143                        fstats->total_multicast_packets_received_lo,
4144                        qstats->total_multicast_packets_received_lo);
4145                 ADD_64(fstats->total_broadcast_packets_received_hi,
4146                        qstats->total_broadcast_packets_received_hi,
4147                        fstats->total_broadcast_packets_received_lo,
4148                        qstats->total_broadcast_packets_received_lo);
4149                 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4150                        qstats->total_unicast_packets_transmitted_hi,
4151                        fstats->total_unicast_packets_transmitted_lo,
4152                        qstats->total_unicast_packets_transmitted_lo);
4153                 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4154                        qstats->total_multicast_packets_transmitted_hi,
4155                        fstats->total_multicast_packets_transmitted_lo,
4156                        qstats->total_multicast_packets_transmitted_lo);
4157                 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4158                        qstats->total_broadcast_packets_transmitted_hi,
4159                        fstats->total_broadcast_packets_transmitted_lo,
4160                        qstats->total_broadcast_packets_transmitted_lo);
4161                 ADD_64(fstats->valid_bytes_received_hi,
4162                        qstats->valid_bytes_received_hi,
4163                        fstats->valid_bytes_received_lo,
4164                        qstats->valid_bytes_received_lo);
4165
4166                 ADD_64(estats->error_bytes_received_hi,
4167                        qstats->error_bytes_received_hi,
4168                        estats->error_bytes_received_lo,
4169                        qstats->error_bytes_received_lo);
4170                 ADD_64(estats->etherstatsoverrsizepkts_hi,
4171                        qstats->etherstatsoverrsizepkts_hi,
4172                        estats->etherstatsoverrsizepkts_lo,
4173                        qstats->etherstatsoverrsizepkts_lo);
4174                 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4175                        estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4176         }
4177
4178         ADD_64(fstats->total_bytes_received_hi,
4179                estats->rx_stat_ifhcinbadoctets_hi,
4180                fstats->total_bytes_received_lo,
4181                estats->rx_stat_ifhcinbadoctets_lo);
4182
4183         memcpy(estats, &(fstats->total_bytes_received_hi),
4184                sizeof(struct host_func_stats) - 2*sizeof(u32));
4185
4186         ADD_64(estats->etherstatsoverrsizepkts_hi,
4187                estats->rx_stat_dot3statsframestoolong_hi,
4188                estats->etherstatsoverrsizepkts_lo,
4189                estats->rx_stat_dot3statsframestoolong_lo);
4190         ADD_64(estats->error_bytes_received_hi,
4191                estats->rx_stat_ifhcinbadoctets_hi,
4192                estats->error_bytes_received_lo,
4193                estats->rx_stat_ifhcinbadoctets_lo);
4194
4195         if (bp->port.pmf) {
4196                 estats->mac_filter_discard =
4197                                 le32_to_cpu(tport->mac_filter_discard);
4198                 estats->xxoverflow_discard =
4199                                 le32_to_cpu(tport->xxoverflow_discard);
4200                 estats->brb_truncate_discard =
4201                                 le32_to_cpu(tport->brb_truncate_discard);
4202                 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4203         }
4204
4205         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4206
4207         bp->stats_pending = 0;
4208
4209         return 0;
4210 }
4211
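     /*
      * Fold the accumulated 64-bit (hi/lo) ethernet statistics into the
      * generic struct net_device_stats that the net core exposes.  Note
      * that rx_dropped also folds in the per-queue checksum_discard
      * counters kept in each fastpath's old_tclient copy.
      */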
4212 static void bnx2x_net_stats_update(struct bnx2x *bp)
4213 {
4214         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4215         struct net_device_stats *nstats = &bp->dev->stats;
4216         int i;
4217
4218         nstats->rx_packets =
4219                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4220                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4221                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4222
4223         nstats->tx_packets =
4224                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4225                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4226                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4227
4228         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4229
4230         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4231
4232         nstats->rx_dropped = estats->mac_discard;
4233         for_each_rx_queue(bp, i)
4234                 nstats->rx_dropped +=
4235                         le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4236
4237         nstats->tx_dropped = 0;
4238
4239         nstats->multicast =
4240                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4241
4242         nstats->collisions =
4243                 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4244
4245         nstats->rx_length_errors =
4246                 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4247                 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4248         nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4249                                  bnx2x_hilo(&estats->brb_truncate_hi);
4250         nstats->rx_crc_errors =
4251                 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4252         nstats->rx_frame_errors =
4253                 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4254         nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4255         nstats->rx_missed_errors = estats->xxoverflow_discard;
4256
4257         nstats->rx_errors = nstats->rx_length_errors +
4258                             nstats->rx_over_errors +
4259                             nstats->rx_crc_errors +
4260                             nstats->rx_frame_errors +
4261                             nstats->rx_fifo_errors +
4262                             nstats->rx_missed_errors;
4263
4264         nstats->tx_aborted_errors =
4265                 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4266                 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4267         nstats->tx_carrier_errors =
4268                 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4269         nstats->tx_fifo_errors = 0;
4270         nstats->tx_heartbeat_errors = 0;
4271         nstats->tx_window_errors = 0;
4272
4273         nstats->tx_errors = nstats->tx_aborted_errors +
4274                             nstats->tx_carrier_errors +
4275             bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4276 }
4277
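     /*
      * Sum the purely driver-maintained per-queue counters (Xoff events,
      * discarded error packets, failed skb allocations, HW checksum
      * errors) into the global eth_stats block.
      */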
4278 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4279 {
4280         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4281         int i;
4282
4283         estats->driver_xoff = 0;
4284         estats->rx_err_discard_pkt = 0;
4285         estats->rx_skb_alloc_failed = 0;
4286         estats->hw_csum_err = 0;
4287         for_each_rx_queue(bp, i) {
4288                 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4289
4290                 estats->driver_xoff += qstats->driver_xoff;
4291                 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4292                 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4293                 estats->hw_csum_err += qstats->hw_csum_err;
4294         }
4295 }
4296
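     /*
      * UPDATE handler of the statistics state machine: bail out unless
      * the previous DMAE transfer has completed, refresh the HW stats
      * (PMF only) and the storm stats, then derive the netdev and driver
      * views.  Three consecutive stale storm updates are treated as fatal.
      */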
4297 static void bnx2x_stats_update(struct bnx2x *bp)
4298 {
4299         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4300
4301         if (*stats_comp != DMAE_COMP_VAL)
4302                 return;
4303
4304         if (bp->port.pmf)
4305                 bnx2x_hw_stats_update(bp);
4306
4307         if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4308                 BNX2X_ERR("storm stats were not updated for 3 consecutive times\n");
4309                 bnx2x_panic();
4310                 return;
4311         }
4312
4313         bnx2x_net_stats_update(bp);
4314         bnx2x_drv_stats_update(bp);
4315
4316         if (bp->msglevel & NETIF_MSG_TIMER) {
4317                 struct bnx2x_fastpath *fp0_rx = bp->fp;
4318                 struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
4319                 struct tstorm_per_client_stats *old_tclient =
4320                                                         &bp->fp->old_tclient;
4321                 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4322                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4323                 struct net_device_stats *nstats = &bp->dev->stats;
4324                 int i;
4325
4326                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4327                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
4328                                   "  tx pkt (%lx)\n",
4329                        bnx2x_tx_avail(fp0_tx),
4330                        le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4331                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
4332                                   "  rx pkt (%lx)\n",
4333                        (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4334                              fp0_rx->rx_comp_cons),
4335                        le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4336                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
4337                                   "brb truncate %u\n",
4338                        (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4339                        qstats->driver_xoff,
4340                        estats->brb_drop_lo, estats->brb_truncate_lo);
4341                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
4342                         "packets_too_big_discard %lu  no_buff_discard %lu  "
4343                         "mac_discard %u  mac_filter_discard %u  "
4344                         "xxoverflow_discard %u  brb_truncate_discard %u  "
4345                         "ttl0_discard %u\n",
4346                        le32_to_cpu(old_tclient->checksum_discard),
4347                        bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4348                        bnx2x_hilo(&qstats->no_buff_discard_hi),
4349                        estats->mac_discard, estats->mac_filter_discard,
4350                        estats->xxoverflow_discard, estats->brb_truncate_discard,
4351                        le32_to_cpu(old_tclient->ttl0_discard));
4352
4353                 for_each_queue(bp, i) {
4354                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4355                                bnx2x_fp(bp, i, tx_pkt),
4356                                bnx2x_fp(bp, i, rx_pkt),
4357                                bnx2x_fp(bp, i, rx_calls));
4358                 }
4359         }
4360
4361         bnx2x_hw_stats_post(bp);
4362         bnx2x_storm_stats_post(bp);
4363 }
4364
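     /*
      * Build the DMAE commands that flush the final port and function
      * statistics to their shared-memory areas when statistics collection
      * is being stopped; the commands are executed by the subsequent
      * bnx2x_hw_stats_post() call in bnx2x_stats_stop().
      */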
4365 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4366 {
4367         struct dmae_command *dmae;
4368         u32 opcode;
4369         int loader_idx = PMF_DMAE_C(bp);
4370         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4371
4372         bp->executer_idx = 0;
4373
4374         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4375                   DMAE_CMD_C_ENABLE |
4376                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4377 #ifdef __BIG_ENDIAN
4378                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
4379 #else
4380                   DMAE_CMD_ENDIANITY_DW_SWAP |
4381 #endif
4382                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4383                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4384
4385         if (bp->port.port_stx) {
4386
4387                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4388                 if (bp->func_stx)
4389                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4390                 else
4391                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4392                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4393                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4394                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4395                 dmae->dst_addr_hi = 0;
4396                 dmae->len = sizeof(struct host_port_stats) >> 2;
4397                 if (bp->func_stx) {
4398                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4399                         dmae->comp_addr_hi = 0;
4400                         dmae->comp_val = 1;
4401                 } else {
4402                         dmae->comp_addr_lo =
4403                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4404                         dmae->comp_addr_hi =
4405                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4406                         dmae->comp_val = DMAE_COMP_VAL;
4407
4408                         *stats_comp = 0;
4409                 }
4410         }
4411
4412         if (bp->func_stx) {
4413
4414                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4415                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4416                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4417                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4418                 dmae->dst_addr_lo = bp->func_stx >> 2;
4419                 dmae->dst_addr_hi = 0;
4420                 dmae->len = sizeof(struct host_func_stats) >> 2;
4421                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4422                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4423                 dmae->comp_val = DMAE_COMP_VAL;
4424
4425                 *stats_comp = 0;
4426         }
4427 }
4428
4429 static void bnx2x_stats_stop(struct bnx2x *bp)
4430 {
4431         int update = 0;
4432
4433         bnx2x_stats_comp(bp);
4434
4435         if (bp->port.pmf)
4436                 update = (bnx2x_hw_stats_update(bp) == 0);
4437
4438         update |= (bnx2x_storm_stats_update(bp) == 0);
4439
4440         if (update) {
4441                 bnx2x_net_stats_update(bp);
4442
4443                 if (bp->port.pmf)
4444                         bnx2x_port_stats_stop(bp);
4445
4446                 bnx2x_hw_stats_post(bp);
4447                 bnx2x_stats_comp(bp);
4448         }
4449 }
4450
4451 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4452 {
4453 }
4454
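     /*
      * Statistics state machine: indexed by [current state][event], each
      * entry names the action to run and the next state.  Roughly:
      *
      *   DISABLED + LINK_UP -> bnx2x_stats_start,  go to ENABLED
      *   ENABLED  + UPDATE  -> bnx2x_stats_update, stay in ENABLED
      *   ENABLED  + STOP    -> bnx2x_stats_stop,   back to DISABLED
      *
      * PMF events re-sync statistics ownership without changing state.
      */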
4455 static const struct {
4456         void (*action)(struct bnx2x *bp);
4457         enum bnx2x_stats_state next_state;
4458 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4459 /* state        event   */
4460 {
4461 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4462 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
4463 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4464 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4465 },
4466 {
4467 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
4468 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
4469 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
4470 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
4471 }
4472 };
4473
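     /*
      * Drive the state machine above for a single event: run the action,
      * record the next state (with a write barrier so other contexts see
      * the change), and trace the transition.
      */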
4474 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4475 {
4476         enum bnx2x_stats_state state = bp->stats_state;
4477
4478         bnx2x_stats_stm[state][event].action(bp);
4479         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4480
4481         /* Make sure the state has been "changed" */
4482         smp_wmb();
4483
4484         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4485                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4486                    state, event, bp->stats_state);
4487 }
4488
4489 static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4490 {
4491         struct dmae_command *dmae;
4492         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4493
4494         /* sanity */
4495         if (!bp->port.pmf || !bp->port.port_stx) {
4496                 BNX2X_ERR("BUG!\n");
4497                 return;
4498         }
4499
4500         bp->executer_idx = 0;
4501
4502         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4503         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4504                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4505                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4506 #ifdef __BIG_ENDIAN
4507                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4508 #else
4509                         DMAE_CMD_ENDIANITY_DW_SWAP |
4510 #endif
4511                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4512                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4513         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4514         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4515         dmae->dst_addr_lo = bp->port.port_stx >> 2;
4516         dmae->dst_addr_hi = 0;
4517         dmae->len = sizeof(struct host_port_stats) >> 2;
4518         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4519         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4520         dmae->comp_val = DMAE_COMP_VAL;
4521
4522         *stats_comp = 0;
4523         bnx2x_hw_stats_post(bp);
4524         bnx2x_stats_comp(bp);
4525 }
4526
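     /*
      * PMF only: for every vnic sharing this port, temporarily point
      * bp->func_stx at that function's fw_mb_param address and post its
      * base function statistics, restoring our own func_stx afterwards.
      */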
4527 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4528 {
4529         int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4530         int port = BP_PORT(bp);
4531         int func;
4532         u32 func_stx;
4533
4534         /* sanity */
4535         if (!bp->port.pmf || !bp->func_stx) {
4536                 BNX2X_ERR("BUG!\n");
4537                 return;
4538         }
4539
4540         /* save our func_stx */
4541         func_stx = bp->func_stx;
4542
4543         for (vn = VN_0; vn < vn_max; vn++) {
4544                 func = 2*vn + port;
4545
4546                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4547                 bnx2x_func_stats_init(bp);
4548                 bnx2x_hw_stats_post(bp);
4549                 bnx2x_stats_comp(bp);
4550         }
4551
4552         /* restore our func_stx */
4553         bp->func_stx = func_stx;
4554 }
4555
4556 static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4557 {
4558         struct dmae_command *dmae = &bp->stats_dmae;
4559         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4560
4561         /* sanity */
4562         if (!bp->func_stx) {
4563                 BNX2X_ERR("BUG!\n");
4564                 return;
4565         }
4566
4567         bp->executer_idx = 0;
4568         memset(dmae, 0, sizeof(struct dmae_command));
4569
4570         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4571                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4572                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4573 #ifdef __BIG_ENDIAN
4574                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4575 #else
4576                         DMAE_CMD_ENDIANITY_DW_SWAP |
4577 #endif
4578                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4579                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4580         dmae->src_addr_lo = bp->func_stx >> 2;
4581         dmae->src_addr_hi = 0;
4582         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4583         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4584         dmae->len = sizeof(struct host_func_stats) >> 2;
4585         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4586         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4587         dmae->comp_val = DMAE_COMP_VAL;
4588
4589         *stats_comp = 0;
4590         bnx2x_hw_stats_post(bp);
4591         bnx2x_stats_comp(bp);
4592 }
4593
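     /*
      * One-time statistics setup at load time: read the port/function
      * stats addresses from shared memory (unless running without the
      * MCP), snapshot the NIG counters, clear all per-queue and global
      * software counters, and seed the base statistics blocks.
      */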
4594 static void bnx2x_stats_init(struct bnx2x *bp)
4595 {
4596         int port = BP_PORT(bp);
4597         int func = BP_FUNC(bp);
4598         int i;
4599
4600         bp->stats_pending = 0;
4601         bp->executer_idx = 0;
4602         bp->stats_counter = 0;
4603
4604         /* port and func stats for management */
4605         if (!BP_NOMCP(bp)) {
4606                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4607                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4608
4609         } else {
4610                 bp->port.port_stx = 0;
4611                 bp->func_stx = 0;
4612         }
4613         DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
4614            bp->port.port_stx, bp->func_stx);
4615
4616         /* port stats */
4617         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4618         bp->port.old_nig_stats.brb_discard =
4619                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4620         bp->port.old_nig_stats.brb_truncate =
4621                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4622         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4623                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4624         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4625                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4626
4627         /* function stats */
4628         for_each_queue(bp, i) {
4629                 struct bnx2x_fastpath *fp = &bp->fp[i];
4630
4631                 memset(&fp->old_tclient, 0,
4632                        sizeof(struct tstorm_per_client_stats));
4633                 memset(&fp->old_uclient, 0,
4634                        sizeof(struct ustorm_per_client_stats));
4635                 memset(&fp->old_xclient, 0,
4636                        sizeof(struct xstorm_per_client_stats));
4637                 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
4638         }
4639
4640         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4641         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
4642
4643         bp->stats_state = STATS_STATE_DISABLED;
4644
4645         if (bp->port.pmf) {
4646                 if (bp->port.port_stx)
4647                         bnx2x_port_stats_base_init(bp);
4648
4649                 if (bp->func_stx)
4650                         bnx2x_func_stats_base_init(bp);
4651
4652         } else if (bp->func_stx)
4653                 bnx2x_func_stats_base_update(bp);
4654 }
4655
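     /*
      * Periodic driver timer: in poll mode service queue 0, exchange the
      * driver/MCP heartbeat pulses, and feed a STATS_EVENT_UPDATE into
      * the statistics state machine while the device is up.  Re-arms
      * itself every bp->current_interval as long as the netdev is running.
      */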
4656 static void bnx2x_timer(unsigned long data)
4657 {
4658         struct bnx2x *bp = (struct bnx2x *) data;
4659
4660         if (!netif_running(bp->dev))
4661                 return;
4662
4663         if (atomic_read(&bp->intr_sem) != 0)
4664                 goto timer_restart;
4665
4666         if (poll) {
4667                 struct bnx2x_fastpath *fp = &bp->fp[0];
4668                 int rc;
4669
4670                 bnx2x_tx_int(fp);
4671                 rc = bnx2x_rx_int(fp, 1000);
4672         }
4673
4674         if (!BP_NOMCP(bp)) {
4675                 int func = BP_FUNC(bp);
4676                 u32 drv_pulse;
4677                 u32 mcp_pulse;
4678
4679                 ++bp->fw_drv_pulse_wr_seq;
4680                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4681                 /* TBD - add SYSTEM_TIME */
4682                 drv_pulse = bp->fw_drv_pulse_wr_seq;
4683                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4684
4685                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4686                              MCP_PULSE_SEQ_MASK);
4687                 /* The delta between driver pulse and mcp response
4688                  * should be 1 (before mcp response) or 0 (after mcp response)
4689                  */
4690                 if ((drv_pulse != mcp_pulse) &&
4691                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4692                         /* someone lost a heartbeat... */
4693                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4694                                   drv_pulse, mcp_pulse);
4695                 }
4696         }
4697
4698         if ((bp->state == BNX2X_STATE_OPEN) ||
4699             (bp->state == BNX2X_STATE_DISABLED))
4700                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4701
4702 timer_restart:
4703         mod_timer(&bp->timer, jiffies + bp->current_interval);
4704 }
4705
4706 /* end of Statistics */
4707
4708 /* nic init */
4709
4710 /*
4711  * nic init service functions
4712  */
4713
4714 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4715 {
4716         int port = BP_PORT(bp);
4717
4718         /* "CSTORM" */
4719         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4720                         CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4721                         CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4722         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4723                         CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4724                         CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
4725 }
4726
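     /*
      * Program a per-queue (non-default) host status block: point its
      * USTORM and CSTORM sections at their DMA addresses, tag them with
      * the owning function, start with host coalescing disabled on every
      * index, and enable the IGU for this sb_id.
      */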
4727 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4728                           dma_addr_t mapping, int sb_id)
4729 {
4730         int port = BP_PORT(bp);
4731         int func = BP_FUNC(bp);
4732         int index;
4733         u64 section;
4734
4735         /* USTORM */
4736         section = ((u64)mapping) + offsetof(struct host_status_block,
4737                                             u_status_block);
4738         sb->u_status_block.status_block_id = sb_id;
4739
4740         REG_WR(bp, BAR_CSTRORM_INTMEM +
4741                CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4742         REG_WR(bp, BAR_CSTRORM_INTMEM +
4743                ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
4744                U64_HI(section));
4745         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4746                 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
4747
4748         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4749                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4750                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
4751
4752         /* CSTORM */
4753         section = ((u64)mapping) + offsetof(struct host_status_block,
4754                                             c_status_block);
4755         sb->c_status_block.status_block_id = sb_id;
4756
4757         REG_WR(bp, BAR_CSTRORM_INTMEM +
4758                CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
4759         REG_WR(bp, BAR_CSTRORM_INTMEM +
4760                ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
4761                U64_HI(section));
4762         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4763                 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
4764
4765         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4766                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4767                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
4768
4769         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4770 }
4771
4772 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4773 {
4774         int func = BP_FUNC(bp);
4775
4776         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
4777                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4778                         sizeof(struct tstorm_def_status_block)/4);
4779         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4780                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4781                         sizeof(struct cstorm_def_status_block_u)/4);
4782         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4783                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4784                         sizeof(struct cstorm_def_status_block_c)/4);
4785         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
4786                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4787                         sizeof(struct xstorm_def_status_block)/4);
4788 }
4789
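     /*
      * Program the default status block: latch the four attention group
      * signal masks from the AEU, point the HC attention message area and
      * each storm's default-SB section at this block, and start with host
      * coalescing disabled on all indices.
      */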
4790 static void bnx2x_init_def_sb(struct bnx2x *bp,
4791                               struct host_def_status_block *def_sb,
4792                               dma_addr_t mapping, int sb_id)
4793 {
4794         int port = BP_PORT(bp);
4795         int func = BP_FUNC(bp);
4796         int index, val, reg_offset;
4797         u64 section;
4798
4799         /* ATTN */
4800         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4801                                             atten_status_block);
4802         def_sb->atten_status_block.status_block_id = sb_id;
4803
4804         bp->attn_state = 0;
4805
4806         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4807                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4808
4809         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4810                 bp->attn_group[index].sig[0] = REG_RD(bp,
4811                                                      reg_offset + 0x10*index);
4812                 bp->attn_group[index].sig[1] = REG_RD(bp,
4813                                                reg_offset + 0x4 + 0x10*index);
4814                 bp->attn_group[index].sig[2] = REG_RD(bp,
4815                                                reg_offset + 0x8 + 0x10*index);
4816                 bp->attn_group[index].sig[3] = REG_RD(bp,
4817                                                reg_offset + 0xc + 0x10*index);
4818         }
4819
4820         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4821                              HC_REG_ATTN_MSG0_ADDR_L);
4822
4823         REG_WR(bp, reg_offset, U64_LO(section));
4824         REG_WR(bp, reg_offset + 4, U64_HI(section));
4825
4826         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4827
4828         val = REG_RD(bp, reg_offset);
4829         val |= sb_id;
4830         REG_WR(bp, reg_offset, val);
4831
4832         /* USTORM */
4833         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4834                                             u_def_status_block);
4835         def_sb->u_def_status_block.status_block_id = sb_id;
4836
4837         REG_WR(bp, BAR_CSTRORM_INTMEM +
4838                CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4839         REG_WR(bp, BAR_CSTRORM_INTMEM +
4840                ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
4841                U64_HI(section));
4842         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4843                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
4844
4845         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4846                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4847                          CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
4848
4849         /* CSTORM */
4850         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4851                                             c_def_status_block);
4852         def_sb->c_def_status_block.status_block_id = sb_id;
4853
4854         REG_WR(bp, BAR_CSTRORM_INTMEM +
4855                CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
4856         REG_WR(bp, BAR_CSTRORM_INTMEM +
4857                ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
4858                U64_HI(section));
4859         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4860                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
4861
4862         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4863                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4864                          CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
4865
4866         /* TSTORM */
4867         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4868                                             t_def_status_block);
4869         def_sb->t_def_status_block.status_block_id = sb_id;
4870
4871         REG_WR(bp, BAR_TSTRORM_INTMEM +
4872                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4873         REG_WR(bp, BAR_TSTRORM_INTMEM +
4874                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4875                U64_HI(section));
4876         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4877                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4878
4879         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4880                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4881                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4882
4883         /* XSTORM */
4884         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4885                                             x_def_status_block);
4886         def_sb->x_def_status_block.status_block_id = sb_id;
4887
4888         REG_WR(bp, BAR_XSTRORM_INTMEM +
4889                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4890         REG_WR(bp, BAR_XSTRORM_INTMEM +
4891                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4892                U64_HI(section));
4893         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4894                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4895
4896         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4897                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4898                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4899
4900         bp->stats_pending = 0;
4901         bp->set_mac_pending = 0;
4902
4903         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4904 }
4905
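     /*
      * Push the current rx_ticks/tx_ticks coalescing parameters into the
      * CSTORM timeout fields of every queue's status block; a value of 0
      * disables host coalescing for that index.  The /12 scaling appears
      * to convert microseconds into the HC timer resolution.
      */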
4906 static void bnx2x_update_coalesce(struct bnx2x *bp)
4907 {
4908         int port = BP_PORT(bp);
4909         int i;
4910
4911         for_each_queue(bp, i) {
4912                 int sb_id = bp->fp[i].sb_id;
4913
4914                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4915                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4916                         CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4917                                                       U_SB_ETH_RX_CQ_INDEX),
4918                         bp->rx_ticks/12);
4919                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4920                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4921                                                        U_SB_ETH_RX_CQ_INDEX),
4922                          (bp->rx_ticks/12) ? 0 : 1);
4923
4924                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4925                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4926                         CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4927                                                       C_SB_ETH_TX_CQ_INDEX),
4928                         bp->tx_ticks/12);
4929                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4930                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4931                                                        C_SB_ETH_TX_CQ_INDEX),
4932                          (bp->tx_ticks/12) ? 0 : 1);
4933         }
4934 }
4935
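     /*
      * Release up to 'last' TPA aggregation bins of a queue, unmapping
      * the buffer only when an aggregation was still in the START state.
      */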
4936 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4937                                        struct bnx2x_fastpath *fp, int last)
4938 {
4939         int i;
4940
4941         for (i = 0; i < last; i++) {
4942                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4943                 struct sk_buff *skb = rx_buf->skb;
4944
4945                 if (skb == NULL) {
4946                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4947                         continue;
4948                 }
4949
4950                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4951                         pci_unmap_single(bp->pdev,
4952                                          pci_unmap_addr(rx_buf, mapping),
4953                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4954
4955                 dev_kfree_skb(skb);
4956                 rx_buf->skb = NULL;
4957         }
4958 }
4959
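     /*
      * Set up the Rx side of every queue: pre-allocate the TPA skb pool
      * (when TPA is enabled), chain the "next page" entries of the SGE,
      * BD and CQ rings, fill the rings with buffers, and publish the
      * initial producers to the chip.  Allocation failures degrade
      * gracefully - TPA is disabled or the ring is left shorter - rather
      * than failing the load.
      */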
4960 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4961 {
4962         int func = BP_FUNC(bp);
4963         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4964                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4965         u16 ring_prod, cqe_ring_prod;
4966         int i, j;
4967
4968         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4969         DP(NETIF_MSG_IFUP,
4970            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4971
4972         if (bp->flags & TPA_ENABLE_FLAG) {
4973
4974                 for_each_rx_queue(bp, j) {
4975                         struct bnx2x_fastpath *fp = &bp->fp[j];
4976
4977                         for (i = 0; i < max_agg_queues; i++) {
4978                                 fp->tpa_pool[i].skb =
4979                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4980                                 if (!fp->tpa_pool[i].skb) {
4981                                         BNX2X_ERR("Failed to allocate TPA "
4982                                                   "skb pool for queue[%d] - "
4983                                                   "disabling TPA on this "
4984                                                   "queue!\n", j);
4985                                         bnx2x_free_tpa_pool(bp, fp, i);
4986                                         fp->disable_tpa = 1;
4987                                         break;
4988                                 }
4989                                 pci_unmap_addr_set((struct sw_rx_bd *)
4990                                                         &fp->tpa_pool[i],
4991                                                    mapping, 0);
4992                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4993                         }
4994                 }
4995         }
4996
4997         for_each_rx_queue(bp, j) {
4998                 struct bnx2x_fastpath *fp = &bp->fp[j];
4999
5000                 fp->rx_bd_cons = 0;
5001                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
5002                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
5003
5004                 /* Mark queue as Rx */
5005                 fp->is_rx_queue = 1;
5006
5007                 /* "next page" elements initialization */
5008                 /* SGE ring */
5009                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
5010                         struct eth_rx_sge *sge;
5011
5012                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
5013                         sge->addr_hi =
5014                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
5015                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5016                         sge->addr_lo =
5017                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
5018                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5019                 }
5020
5021                 bnx2x_init_sge_ring_bit_mask(fp);
5022
5023                 /* RX BD ring */
5024                 for (i = 1; i <= NUM_RX_RINGS; i++) {
5025                         struct eth_rx_bd *rx_bd;
5026
5027                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5028                         rx_bd->addr_hi =
5029                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
5030                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5031                         rx_bd->addr_lo =
5032                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
5033                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5034                 }
5035
5036                 /* CQ ring */
5037                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5038                         struct eth_rx_cqe_next_page *nextpg;
5039
5040                         nextpg = (struct eth_rx_cqe_next_page *)
5041                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5042                         nextpg->addr_hi =
5043                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5044                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5045                         nextpg->addr_lo =
5046                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5047                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5048                 }
5049
5050                 /* Allocate SGEs and initialize the ring elements */
5051                 for (i = 0, ring_prod = 0;
5052                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
5053
5054                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5055                                 BNX2X_ERR("was only able to allocate "
5056                                           "%d rx sges\n", i);
5057                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5058                                 /* Cleanup already allocated elements */
5059                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
5060                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
5061                                 fp->disable_tpa = 1;
5062                                 ring_prod = 0;
5063                                 break;
5064                         }
5065                         ring_prod = NEXT_SGE_IDX(ring_prod);
5066                 }
5067                 fp->rx_sge_prod = ring_prod;
5068
5069                 /* Allocate BDs and initialize BD ring */
5070                 fp->rx_comp_cons = 0;
5071                 cqe_ring_prod = ring_prod = 0;
5072                 for (i = 0; i < bp->rx_ring_size; i++) {
5073                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5074                                 BNX2X_ERR("was only able to allocate "
5075                                           "%d rx skbs on queue[%d]\n", i, j);
5076                                 fp->eth_q_stats.rx_skb_alloc_failed++;
5077                                 break;
5078                         }
5079                         ring_prod = NEXT_RX_IDX(ring_prod);
5080                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5081                         WARN_ON(ring_prod <= i);
5082                 }
5083
5084                 fp->rx_bd_prod = ring_prod;
5085                 /* must not have more available CQEs than BDs */
5086                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5087                                        cqe_ring_prod);
5088                 fp->rx_pkt = fp->rx_calls = 0;
5089
5090                 /* Warning!
5091                  * This will generate an interrupt (to the TSTORM),
5092                  * so it must only be done after the chip is initialized.
5093                  */
5094                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5095                                      fp->rx_sge_prod);
5096                 if (j != 0)
5097                         continue;
5098
5099                 REG_WR(bp, BAR_USTRORM_INTMEM +
5100                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5101                        U64_LO(fp->rx_comp_mapping));
5102                 REG_WR(bp, BAR_USTRORM_INTMEM +
5103                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5104                        U64_HI(fp->rx_comp_mapping));
5105         }
5106 }
5107
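     /*
      * Chain the "next page" BD of every Tx ring page and reset the Tx
      * producer/consumer indices and doorbell data of each queue.
      */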
5108 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5109 {
5110         int i, j;
5111
5112         for_each_tx_queue(bp, j) {
5113                 struct bnx2x_fastpath *fp = &bp->fp[j];
5114
5115                 for (i = 1; i <= NUM_TX_RINGS; i++) {
5116                         struct eth_tx_next_bd *tx_next_bd =
5117                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5118
5119                         tx_next_bd->addr_hi =
5120                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5121                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5122                         tx_next_bd->addr_lo =
5123                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5124                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5125                 }
5126
5127                 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5128                 fp->tx_db.data.zero_fill1 = 0;
5129                 fp->tx_db.data.prod = 0;
5130
5131                 fp->tx_pkt_prod = 0;
5132                 fp->tx_pkt_cons = 0;
5133                 fp->tx_bd_prod = 0;
5134                 fp->tx_bd_cons = 0;
5135                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5136                 fp->tx_pkt = 0;
5137         }
5138
5139         /* clean tx statistics */
5140         for_each_rx_queue(bp, i)
5141                 bnx2x_fp(bp, i, tx_pkt) = 0;
5142 }
5143
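     /*
      * Initialize the slow-path (ramrod) queue: reset the producer state
      * and tell the XSTORM where the SPQ page lives.
      */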
5144 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5145 {
5146         int func = BP_FUNC(bp);
5147
5148         spin_lock_init(&bp->spq_lock);
5149
5150         bp->spq_left = MAX_SPQ_PENDING;
5151         bp->spq_prod_idx = 0;
5152         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5153         bp->spq_prod_bd = bp->spq;
5154         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5155
5156         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5157                U64_LO(bp->spq_mapping));
5158         REG_WR(bp,
5159                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5160                U64_HI(bp->spq_mapping));
5161
5162         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5163                bp->spq_prod_idx);
5164 }
5165
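     /*
      * Fill the per-connection ethernet context for every queue: the
      * USTORM section of each Rx queue describes its BD/SGE rings, buffer
      * sizes and TPA settings, while the CSTORM/XSTORM sections of each
      * Tx queue point at the Tx BD ring and enable per-client statistics.
      */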
5166 static void bnx2x_init_context(struct bnx2x *bp)
5167 {
5168         int i;
5169
5170         for_each_rx_queue(bp, i) {
5171                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5172                 struct bnx2x_fastpath *fp = &bp->fp[i];
5173                 u8 cl_id = fp->cl_id;
5174
5175                 context->ustorm_st_context.common.sb_index_numbers =
5176                                                 BNX2X_RX_SB_INDEX_NUM;
5177                 context->ustorm_st_context.common.clientId = cl_id;
5178                 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5179                 context->ustorm_st_context.common.flags =
5180                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5181                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5182                 context->ustorm_st_context.common.statistics_counter_id =
5183                                                 cl_id;
5184                 context->ustorm_st_context.common.mc_alignment_log_size =
5185                                                 BNX2X_RX_ALIGN_SHIFT;
5186                 context->ustorm_st_context.common.bd_buff_size =
5187                                                 bp->rx_buf_size;
5188                 context->ustorm_st_context.common.bd_page_base_hi =
5189                                                 U64_HI(fp->rx_desc_mapping);
5190                 context->ustorm_st_context.common.bd_page_base_lo =
5191                                                 U64_LO(fp->rx_desc_mapping);
5192                 if (!fp->disable_tpa) {
5193                         context->ustorm_st_context.common.flags |=
5194                                 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5195                         context->ustorm_st_context.common.sge_buff_size =
5196                                 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5197                                          (u32)0xffff);
5198                         context->ustorm_st_context.common.sge_page_base_hi =
5199                                                 U64_HI(fp->rx_sge_mapping);
5200                         context->ustorm_st_context.common.sge_page_base_lo =
5201                                                 U64_LO(fp->rx_sge_mapping);
5202
5203                         context->ustorm_st_context.common.max_sges_for_packet =
5204                                 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5205                         context->ustorm_st_context.common.max_sges_for_packet =
5206                                 ((context->ustorm_st_context.common.
5207                                   max_sges_for_packet + PAGES_PER_SGE - 1) &
5208                                  (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5209                 }
5210
5211                 context->ustorm_ag_context.cdu_usage =
5212                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5213                                                CDU_REGION_NUMBER_UCM_AG,
5214                                                ETH_CONNECTION_TYPE);
5215
5216                 context->xstorm_ag_context.cdu_reserved =
5217                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5218                                                CDU_REGION_NUMBER_XCM_AG,
5219                                                ETH_CONNECTION_TYPE);
5220         }
5221
5222         for_each_tx_queue(bp, i) {
5223                 struct bnx2x_fastpath *fp = &bp->fp[i];
5224                 struct eth_context *context =
5225                         bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);
5226
5227                 context->cstorm_st_context.sb_index_number =
5228                                                 C_SB_ETH_TX_CQ_INDEX;
5229                 context->cstorm_st_context.status_block_id = fp->sb_id;
5230
5231                 context->xstorm_st_context.tx_bd_page_base_hi =
5232                                                 U64_HI(fp->tx_desc_mapping);
5233                 context->xstorm_st_context.tx_bd_page_base_lo =
5234                                                 U64_LO(fp->tx_desc_mapping);
5235                 context->xstorm_st_context.statistics_data = (fp->cl_id |
5236                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5237         }
5238 }
5239
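     /*
      * Fill the TSTORM RSS indirection table, spreading the entries
      * round-robin across the Rx queues' client ids.  Skipped entirely
      * when RSS is disabled.
      */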
5240 static void bnx2x_init_ind_table(struct bnx2x *bp)
5241 {
5242         int func = BP_FUNC(bp);
5243         int i;
5244
5245         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5246                 return;
5247
5248         DP(NETIF_MSG_IFUP,
5249            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
5250         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5251                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5252                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5253                         bp->fp->cl_id + (i % bp->num_rx_queues));
5254 }
5255
5256 static void bnx2x_set_client_config(struct bnx2x *bp)
5257 {
5258         struct tstorm_eth_client_config tstorm_client = {0};
5259         int port = BP_PORT(bp);
5260         int i;
5261
5262         tstorm_client.mtu = bp->dev->mtu;
5263         tstorm_client.config_flags =
5264                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5265                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5266 #ifdef BCM_VLAN
5267         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5268                 tstorm_client.config_flags |=
5269                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5270                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5271         }
5272 #endif
5273
5274         for_each_queue(bp, i) {
5275                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5276
5277                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5278                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5279                        ((u32 *)&tstorm_client)[0]);
5280                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5281                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5282                        ((u32 *)&tstorm_client)[1]);
5283         }
5284
5285         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5286            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5287 }
5288
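     /*
      * Translate the driver rx_mode (none/normal/allmulti/promisc) into
      * the TSTORM MAC filtering masks and the NIG LLH mask, then refresh
      * the per-client configuration unless Rx is being shut off.
      */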
5289 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5290 {
5291         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5292         int mode = bp->rx_mode;
5293         int mask = bp->rx_mode_cl_mask;
5294         int func = BP_FUNC(bp);
5295         int port = BP_PORT(bp);
5296         int i;
5297         /* All but management unicast packets should pass to the host as well */
5298         u32 llh_mask =
5299                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5300                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5301                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5302                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5303
5304         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
5305
5306         switch (mode) {
5307         case BNX2X_RX_MODE_NONE: /* no Rx */
5308                 tstorm_mac_filter.ucast_drop_all = mask;
5309                 tstorm_mac_filter.mcast_drop_all = mask;
5310                 tstorm_mac_filter.bcast_drop_all = mask;
5311                 break;
5312
5313         case BNX2X_RX_MODE_NORMAL:
5314                 tstorm_mac_filter.bcast_accept_all = mask;
5315                 break;
5316
5317         case BNX2X_RX_MODE_ALLMULTI:
5318                 tstorm_mac_filter.mcast_accept_all = mask;
5319                 tstorm_mac_filter.bcast_accept_all = mask;
5320                 break;
5321
5322         case BNX2X_RX_MODE_PROMISC:
5323                 tstorm_mac_filter.ucast_accept_all = mask;
5324                 tstorm_mac_filter.mcast_accept_all = mask;
5325                 tstorm_mac_filter.bcast_accept_all = mask;
5326                 /* pass management unicast packets as well */
5327                 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5328                 break;
5329
5330         default:
5331                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5332                 break;
5333         }
5334
5335         REG_WR(bp,
5336                (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5337                llh_mask);
5338
5339         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5340                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5341                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5342                        ((u32 *)&tstorm_mac_filter)[i]);
5343
5344 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5345                    ((u32 *)&tstorm_mac_filter)[i]); */
5346         }
5347
5348         if (mode != BNX2X_RX_MODE_NONE)
5349                 bnx2x_set_client_config(bp);
5350 }
5351
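/* Chip-wide (COMMON) internal RAM init: clear the USTORM aggregation data
 * area by hand (see the in-function comment below).
 */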
5352 static void bnx2x_init_internal_common(struct bnx2x *bp)
5353 {
5354         int i;
5355
5356         /* Zero this manually as its initialization is
5357            currently missing in the initTool */
5358         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5359                 REG_WR(bp, BAR_USTRORM_INTMEM +
5360                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
5361 }
5362
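/* Per-port internal RAM init: program the BNX2X_BTR value into the per-port
 * HC_BTR locations of the CSTORM (U and C indices), TSTORM and XSTORM.
 */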
5363 static void bnx2x_init_internal_port(struct bnx2x *bp)
5364 {
5365         int port = BP_PORT(bp);
5366
5367         REG_WR(bp,
5368                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5369         REG_WR(bp,
5370                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5371         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5372         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5373 }
5374
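/* Per-function internal RAM init: RSS/TPA configuration, initial Rx mode,
 * per-client statistics reset, statistics query addresses, Rx CQE page bases
 * and aggregation limits, dropless flow control thresholds (E1H only) and
 * the per-port congestion management (rate shaping/fairness) context.
 */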
5375 static void bnx2x_init_internal_func(struct bnx2x *bp)
5376 {
5377         struct tstorm_eth_function_common_config tstorm_config = {0};
5378         struct stats_indication_flags stats_flags = {0};
5379         int port = BP_PORT(bp);
5380         int func = BP_FUNC(bp);
5381         int i, j;
5382         u32 offset;
5383         u16 max_agg_size;
5384
5385         if (is_multi(bp)) {
5386                 tstorm_config.config_flags = MULTI_FLAGS(bp);
5387                 tstorm_config.rss_result_mask = MULTI_MASK;
5388         }
5389
5390         /* Enable TPA if needed */
5391         if (bp->flags & TPA_ENABLE_FLAG)
5392                 tstorm_config.config_flags |=
5393                         TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5394
5395         if (IS_E1HMF(bp))
5396                 tstorm_config.config_flags |=
5397                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5398
5399         tstorm_config.leading_client_id = BP_L_ID(bp);
5400
5401         REG_WR(bp, BAR_TSTRORM_INTMEM +
5402                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5403                (*(u32 *)&tstorm_config));
5404
5405         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5406         bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
5407         bnx2x_set_storm_rx_mode(bp);
5408
5409         for_each_queue(bp, i) {
5410                 u8 cl_id = bp->fp[i].cl_id;
5411
5412                 /* reset xstorm per client statistics */
5413                 offset = BAR_XSTRORM_INTMEM +
5414                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5415                 for (j = 0;
5416                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5417                         REG_WR(bp, offset + j*4, 0);
5418
5419                 /* reset tstorm per client statistics */
5420                 offset = BAR_TSTRORM_INTMEM +
5421                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5422                 for (j = 0;
5423                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5424                         REG_WR(bp, offset + j*4, 0);
5425
5426                 /* reset ustorm per client statistics */
5427                 offset = BAR_USTRORM_INTMEM +
5428                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5429                 for (j = 0;
5430                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5431                         REG_WR(bp, offset + j*4, 0);
5432         }
5433
5434         /* Init statistics related context */
5435         stats_flags.collect_eth = 1;
5436
5437         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5438                ((u32 *)&stats_flags)[0]);
5439         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5440                ((u32 *)&stats_flags)[1]);
5441
5442         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5443                ((u32 *)&stats_flags)[0]);
5444         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5445                ((u32 *)&stats_flags)[1]);
5446
5447         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5448                ((u32 *)&stats_flags)[0]);
5449         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5450                ((u32 *)&stats_flags)[1]);
5451
5452         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5453                ((u32 *)&stats_flags)[0]);
5454         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5455                ((u32 *)&stats_flags)[1]);
5456
5457         REG_WR(bp, BAR_XSTRORM_INTMEM +
5458                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5459                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5460         REG_WR(bp, BAR_XSTRORM_INTMEM +
5461                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5462                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5463
5464         REG_WR(bp, BAR_TSTRORM_INTMEM +
5465                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5466                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5467         REG_WR(bp, BAR_TSTRORM_INTMEM +
5468                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5469                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5470
5471         REG_WR(bp, BAR_USTRORM_INTMEM +
5472                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5473                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5474         REG_WR(bp, BAR_USTRORM_INTMEM +
5475                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5476                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5477
5478         if (CHIP_IS_E1H(bp)) {
5479                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5480                         IS_E1HMF(bp));
5481                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5482                         IS_E1HMF(bp));
5483                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5484                         IS_E1HMF(bp));
5485                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5486                         IS_E1HMF(bp));
5487
5488                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5489                          bp->e1hov);
5490         }
5491
5492         /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
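        /* max_agg_size is in bytes: up to min(8, MAX_SKB_FRAGS) SGE entries of
         * SGE_PAGE_SIZE * PAGES_PER_SGE bytes each, clamped to 0xffff because
         * the value is written to a 16-bit field below */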
5493         max_agg_size =
5494                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5495                           SGE_PAGE_SIZE * PAGES_PER_SGE),
5496                     (u32)0xffff);
5497         for_each_rx_queue(bp, i) {
5498                 struct bnx2x_fastpath *fp = &bp->fp[i];
5499
5500                 REG_WR(bp, BAR_USTRORM_INTMEM +
5501                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5502                        U64_LO(fp->rx_comp_mapping));
5503                 REG_WR(bp, BAR_USTRORM_INTMEM +
5504                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5505                        U64_HI(fp->rx_comp_mapping));
5506
5507                 /* Next page */
5508                 REG_WR(bp, BAR_USTRORM_INTMEM +
5509                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5510                        U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5511                 REG_WR(bp, BAR_USTRORM_INTMEM +
5512                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5513                        U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5514
5515                 REG_WR16(bp, BAR_USTRORM_INTMEM +
5516                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5517                          max_agg_size);
5518         }
5519
5520         /* dropless flow control */
5521         if (CHIP_IS_E1H(bp)) {
5522                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5523
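                /* default BD/CQE watermarks for dropless flow control; the SGE
                 * watermarks default to 0 and are raised below only when TPA
                 * is in use */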
5524                 rx_pause.bd_thr_low = 250;
5525                 rx_pause.cqe_thr_low = 250;
5526                 rx_pause.cos = 1;
5527                 rx_pause.sge_thr_low = 0;
5528                 rx_pause.bd_thr_high = 350;
5529                 rx_pause.cqe_thr_high = 350;
5530                 rx_pause.sge_thr_high = 0;
5531
5532                 for_each_rx_queue(bp, i) {
5533                         struct bnx2x_fastpath *fp = &bp->fp[i];
5534
5535                         if (!fp->disable_tpa) {
5536                                 rx_pause.sge_thr_low = 150;
5537                                 rx_pause.sge_thr_high = 250;
5538                         }
5539
5540
5541                         offset = BAR_USTRORM_INTMEM +
5542                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5543                                                                    fp->cl_id);
5544                         for (j = 0;
5545                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5546                              j++)
5547                                 REG_WR(bp, offset + j*4,
5548                                        ((u32 *)&rx_pause)[j]);
5549                 }
5550         }
5551
5552         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5553
5554         /* Init rate shaping and fairness contexts */
5555         if (IS_E1HMF(bp)) {
5556                 int vn;
5557
5558                 /* During init there is no active link.
5559                    Until link is up, set the link rate to 10Gbps */
5560                 bp->link_vars.line_speed = SPEED_10000;
5561                 bnx2x_init_port_minmax(bp);
5562
5563                 if (!BP_NOMCP(bp))
5564                         bp->mf_config =
5565                               SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
5566                 bnx2x_calc_vn_weight_sum(bp);
5567
5568                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5569                         bnx2x_init_vn_minmax(bp, 2*vn + port);
5570
5571                 /* Enable rate shaping and fairness */
5572                 bp->cmng.flags.cmng_enables |=
5573                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5574
5575         } else {
5576                 /* rate shaping and fairness are disabled */
5577                 DP(NETIF_MSG_IFUP,
5578                    "single function mode  minmax will be disabled\n");
5579         }
5580
5581
5582         /* Store it to internal memory */
5583         if (bp->port.pmf)
5584                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5585                         REG_WR(bp, BAR_XSTRORM_INTMEM +
5586                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5587                                ((u32 *)(&bp->cmng))[i]);
5588 }
5589
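/* Dispatch internal memory init according to the MCP load response: a COMMON
 * load also runs the PORT and FUNCTION stages, and a PORT load also runs the
 * FUNCTION stage, via the intentional ("no break") fall-through below.
 */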
5590 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5591 {
5592         switch (load_code) {
5593         case FW_MSG_CODE_DRV_LOAD_COMMON:
5594                 bnx2x_init_internal_common(bp);
5595                 /* no break */
5596
5597         case FW_MSG_CODE_DRV_LOAD_PORT:
5598                 bnx2x_init_internal_port(bp);
5599                 /* no break */
5600
5601         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5602                 bnx2x_init_internal_func(bp);
5603                 break;
5604
5605         default:
5606                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5607                 break;
5608         }
5609 }
5610
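/* Top-level NIC init: set up the fastpath structures and their status blocks,
 * the default status block, coalescing, the Rx/Tx/slowpath rings, contexts,
 * internal memory and the indirection table, then enable interrupts.
 */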
5611 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5612 {
5613         int i;
5614
5615         for_each_queue(bp, i) {
5616                 struct bnx2x_fastpath *fp = &bp->fp[i];
5617
5618                 fp->bp = bp;
5619                 fp->state = BNX2X_FP_STATE_CLOSED;
5620                 fp->index = i;
5621                 fp->cl_id = BP_L_ID(bp) + i;
5622 #ifdef BCM_CNIC
5623                 fp->sb_id = fp->cl_id + 1;
5624 #else
5625                 fp->sb_id = fp->cl_id;
5626 #endif
5627                 /* A Tx queue shares the client ID of its paired Rx queue (SBs stay separate) */
5628                 if (i >= bp->num_rx_queues)
5629                         fp->cl_id -= bp->num_rx_queues;
5630                 DP(NETIF_MSG_IFUP,
5631                    "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
5632                    i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5633                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5634                               fp->sb_id);
5635                 bnx2x_update_fpsb_idx(fp);
5636         }
5637
5638         /* ensure status block indices were read */
5639         rmb();
5640
5641
5642         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5643                           DEF_SB_ID);
5644         bnx2x_update_dsb_idx(bp);
5645         bnx2x_update_coalesce(bp);
5646         bnx2x_init_rx_rings(bp);
5647         bnx2x_init_tx_ring(bp);
5648         bnx2x_init_sp_ring(bp);
5649         bnx2x_init_context(bp);
5650         bnx2x_init_internal(bp, load_code);
5651         bnx2x_init_ind_table(bp);
5652         bnx2x_stats_init(bp);
5653
5654         /* At this point, we are ready for interrupts */
5655         atomic_set(&bp->intr_sem, 0);
5656
5657         /* flush all before enabling interrupts */
5658         mb();
5659         mmiowb();
5660
5661         bnx2x_int_enable(bp);
5662
5663         /* Check for SPIO5 */
5664         bnx2x_attn_int_deasserted0(bp,
5665                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5666                                    AEU_INPUTS_ATTN_BITS_SPIO5);
5667 }
5668
5669 /* end of nic init */
5670
5671 /*
5672  * gzip service functions
5673  */
5674
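/* Allocate the DMA-coherent FW_BUF_SIZE buffer and the zlib stream (plus
 * inflate workspace) used by bnx2x_gunzip() to decompress data into
 * device-visible memory.
 */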
5675 static int bnx2x_gunzip_init(struct bnx2x *bp)
5676 {
5677         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5678                                               &bp->gunzip_mapping);
5679         if (bp->gunzip_buf  == NULL)
5680                 goto gunzip_nomem1;
5681
5682         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5683         if (bp->strm  == NULL)
5684                 goto gunzip_nomem2;
5685
5686         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5687                                       GFP_KERNEL);
5688         if (bp->strm->workspace == NULL)
5689                 goto gunzip_nomem3;
5690
5691         return 0;
5692
5693 gunzip_nomem3:
5694         kfree(bp->strm);
5695         bp->strm = NULL;
5696
5697 gunzip_nomem2:
5698         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5699                             bp->gunzip_mapping);
5700         bp->gunzip_buf = NULL;
5701
5702 gunzip_nomem1:
5703         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5704                " decompression\n", bp->dev->name);
5705         return -ENOMEM;
5706 }
5707
5708 static void bnx2x_gunzip_end(struct bnx2x *bp)
5709 {
5710         kfree(bp->strm->workspace);
5711
5712         kfree(bp->strm);
5713         bp->strm = NULL;
5714
5715         if (bp->gunzip_buf) {
5716                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5717                                     bp->gunzip_mapping);
5718                 bp->gunzip_buf = NULL;
5719         }
5720 }
5721
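/* Decompress a gzip image into bp->gunzip_buf: verify the gzip magic and
 * deflate method, skip the fixed 10-byte header and the optional original
 * file name, then raw-inflate (negative windowBits) the payload.
 */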
5722 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5723 {
5724         int n, rc;
5725
5726         /* check gzip header */
5727         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5728                 BNX2X_ERR("Bad gzip header\n");
5729                 return -EINVAL;
5730         }
5731
5732         n = 10;
5733
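/* gzip FLG bit 3 (FNAME): a NUL-terminated original file name follows the
 * fixed header */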
5734 #define FNAME                           0x8
5735
5736         if (zbuf[3] & FNAME)
5737                 while ((zbuf[n++] != 0) && (n < len));
5738
5739         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5740         bp->strm->avail_in = len - n;
5741         bp->strm->next_out = bp->gunzip_buf;
5742         bp->strm->avail_out = FW_BUF_SIZE;
5743
5744         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5745         if (rc != Z_OK)
5746                 return rc;