77867161968b1ee33059b48846b4a6fb69c83f5a
[linux-2.6.git] / drivers / net / bnx2x.c
1 /* bnx2x.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Eliezer Tamir <eliezert@broadcom.com>
10  * Based on code from Michael Chan's bnx2 driver
11  * UDP CSUM errata workaround by Arik Gendelman
12  * Slowpath rework by Vladislav Zolotarov
13  * Statistics and Link management by Yitchak Gertner
14  *
15  */
16
17 /* define this to make the driver freeze on error
18  * to allow getting debug info
19  * (you will need to reboot afterwards)
20  */
21 /*#define BNX2X_STOP_ON_ERROR*/
22
23 #include <linux/module.h>
24 #include <linux/moduleparam.h>
25 #include <linux/kernel.h>
26 #include <linux/device.h>  /* for dev_info() */
27 #include <linux/timer.h>
28 #include <linux/errno.h>
29 #include <linux/ioport.h>
30 #include <linux/slab.h>
31 #include <linux/vmalloc.h>
32 #include <linux/interrupt.h>
33 #include <linux/pci.h>
34 #include <linux/init.h>
35 #include <linux/netdevice.h>
36 #include <linux/etherdevice.h>
37 #include <linux/skbuff.h>
38 #include <linux/dma-mapping.h>
39 #include <linux/bitops.h>
40 #include <linux/irq.h>
41 #include <linux/delay.h>
42 #include <asm/byteorder.h>
43 #include <linux/time.h>
44 #include <linux/ethtool.h>
45 #include <linux/mii.h>
46 #ifdef NETIF_F_HW_VLAN_TX
47         #include <linux/if_vlan.h>
48         #define BCM_VLAN 1
49 #endif
50 #include <net/ip.h>
51 #include <net/tcp.h>
52 #include <net/checksum.h>
53 #include <linux/workqueue.h>
54 #include <linux/crc32.h>
55 #include <linux/prefetch.h>
56 #include <linux/zlib.h>
57 #include <linux/version.h>
58 #include <linux/io.h>
59
60 #include "bnx2x_reg.h"
61 #include "bnx2x_fw_defs.h"
62 #include "bnx2x_hsi.h"
63 #include "bnx2x.h"
64 #include "bnx2x_init.h"
65
#define DRV_MODULE_VERSION      "0.40.15"
#define DRV_MODULE_RELDATE      "$DateTime: 2007/11/15 07:28:37 $"
/* minimum bootcode (management firmware) version this driver works with */
#define BNX2X_BC_VER            0x040009

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT              (5*HZ)

/* banner printed once at probe time */
static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771X 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir <eliezert@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_INFO(cvs_version, "$Revision: #356 $");
82
83 static int use_inta;
84 static int poll;
85 static int onefunc;
86 static int nomcp;
87 static int debug;
88 static int use_multi;
89
90 module_param(use_inta, int, 0);
91 module_param(poll, int, 0);
92 module_param(onefunc, int, 0);
93 module_param(debug, int, 0);
94 MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
95 MODULE_PARM_DESC(poll, "use polling (for debug)");
96 MODULE_PARM_DESC(onefunc, "enable only first function");
97 MODULE_PARM_DESC(nomcp, "ignore management CPU (Implies onefunc)");
98 MODULE_PARM_DESC(debug, "default debug msglevel");
99
100 #ifdef BNX2X_MULTI
101 module_param(use_multi, int, 0);
102 MODULE_PARM_DESC(use_multi, "use per-CPU queues");
103 #endif
104
/* board types; the value doubles as the driver_data index into board_info[] */
enum bnx2x_board_type {
        BCM57710 = 0,
};

/* indexed by board_t, above */
static struct {
        char *name;	/* human-readable board name */
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" }
};

/* PCI IDs claimed by this driver */
static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
123
124 /****************************************************************************
125 * General service functions
126 ****************************************************************************/
127
/* used only at init
 * locking is done by mcp
 */
/* Write @val to chip address @addr through the PCI config-space
 * indirection window (GRC address/data register pair).  The window is
 * pointed back at the vendor-id offset afterwards so that unrelated
 * config-space reads cannot hit a live GRC address.
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}
138
#ifdef BNX2X_IND_RD
/* Read a chip register through the PCI config-space indirection
 * window; counterpart of bnx2x_reg_wr_ind() (init-time only, locking
 * done by the mcp).
 */
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        /* restore the window to a harmless offset */
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}
#endif
152
/* "GO" registers of the 16 DMAE channels, indexed by channel number;
 * writing 1 to a GO register starts the command loaded for that channel
 */
static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
159
160 /* copy command into DMAE command memory and set DMAE command go */
161 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
162                             int idx)
163 {
164         u32 cmd_offset;
165         int i;
166
167         cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
168         for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
169                 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
170
171 /*              DP(NETIF_MSG_DMAE, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
172                    idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i)); */
173         }
174         REG_WR(bp, dmae_reg_go_c[idx], 1);
175 }
176
/* Use the DMAE engine to copy @len32 dwords from host memory at
 * @dma_addr to GRC (chip) address @dst_addr, then busy-wait for the
 * completion value the engine writes back to slowpath memory.
 * NOTE(review): uses the single per-bp dmae command buffer and wb_comp
 * word - presumably callers are serialized; confirm at call sites.
 */
static void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr,
                             u32 dst_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->dmae;
        int port = bp->port;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int timeout = 200;

        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
        dmae->src_addr_lo = U64_LO(dma_addr);
        dmae->src_addr_hi = U64_HI(dma_addr);
        dmae->dst_addr_lo = dst_addr >> 2;	/* GRC destination is in dwords */
        dmae->dst_addr_hi = 0;
        dmae->len = len32;
        /* completion is written back to the slowpath wb_comp word */
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = BNX2X_WB_COMP_VAL;

        /* clear the completion word before starting the transfer */
        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, port * 8);

        udelay(5);
        /* adjust timeout for emulation/FPGA */
        if (CHIP_REV_IS_SLOW(bp))
                timeout *= 100;
        /* poll until the engine writes the completion value back */
        while (*wb_comp != BNX2X_WB_COMP_VAL) {
                udelay(5);
                if (!timeout) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                timeout--;
        }
}
238
#ifdef BNX2X_DMAE_RD
/* Use the DMAE engine to copy @len32 dwords from GRC (chip) address
 * @src_addr into the slowpath wb_data buffer, then busy-wait for the
 * completion write-back.  Mirror image of bnx2x_write_dmae().
 * NOTE(review): unlike the write path, the timeout is not scaled for
 * emulation/FPGA (no CHIP_REV_IS_SLOW adjustment) - verify intended.
 */
static void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->dmae;
        int port = bp->port;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int timeout = 200;

        /* clear the destination buffer so stale data is detectable */
        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
        dmae->src_addr_lo = src_addr >> 2;	/* GRC source is in dwords */
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = BNX2X_WB_COMP_VAL;

        /* clear the completion word before starting the transfer */
        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, port * 8);

        udelay(5);
        while (*wb_comp != BNX2X_WB_COMP_VAL) {
                udelay(5);
                if (!timeout) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                timeout--;
        }
}
#endif
298
/* Scan the assert lists of the four storm processors (X, T, C, U) and
 * print every valid assert entry found.  Returns the total number of
 * asserts.  The XSTORM_ASSERT_* offsets are reused for every storm;
 * only the internal-memory base address differs per storm.
 */
static int bnx2x_mc_assert(struct bnx2x *bp)
{
        int i, j;
        int rc = 0;
        char last_idx;
        const char storm[] = {"XTCU"};	/* one tag letter per storm */
        const u32 intmem_base[] = {
                BAR_XSTRORM_INTMEM,
                BAR_TSTRORM_INTMEM,
                BAR_CSTRORM_INTMEM,
                BAR_USTRORM_INTMEM
        };

        /* Go through all instances of all SEMIs */
        for (i = 0; i < 4; i++) {
                last_idx = REG_RD8(bp, XSTORM_ASSERT_LIST_INDEX_OFFSET +
                                   intmem_base[i]);
                BNX2X_ERR("DATA %cSTORM_ASSERT_LIST_INDEX 0x%x\n",
                          storm[i], last_idx);

                /* print the asserts */
                for (j = 0; j < STROM_ASSERT_ARRAY_SIZE; j++) {
                        u32 row0, row1, row2, row3;

                        /* each assert entry is four consecutive dwords */
                        row0 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) +
                                      intmem_base[i]);
                        row1 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 4 +
                                      intmem_base[i]);
                        row2 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 8 +
                                      intmem_base[i]);
                        row3 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 12 +
                                      intmem_base[i]);

                        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                                BNX2X_ERR("DATA %cSTORM_ASSERT_INDEX 0x%x ="
                                          " 0x%08x 0x%08x 0x%08x 0x%08x\n",
                                          storm[i], j, row3, row2, row1, row0);
                                rc++;
                        } else {
                                /* first invalid entry ends this storm's list */
                                break;
                        }
                }
        }
        return rc;
}
344
345 static void bnx2x_fw_dump(struct bnx2x *bp)
346 {
347         u32 mark, offset;
348         u32 data[9];
349         int word;
350
351         mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
352         printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);
353
354         for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
355                 for (word = 0; word < 8; word++)
356                         data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
357                                                   offset + 4*word));
358                 data[8] = 0x0;
359                 printk(KERN_ERR PFX "%s", (char *)data);
360         }
361         for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
362                 for (word = 0; word < 8; word++)
363                         data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
364                                                   offset + 4*word));
365                 data[8] = 0x0;
366                 printk(KERN_ERR PFX "%s", (char *)data);
367         }
368         printk("\n" KERN_ERR PFX "end of fw dump\n");
369 }
370
/* Dump driver ring/state information to the log for post-mortem
 * debugging.  Called on a driver panic; also freezes statistics so the
 * crash-time counters are preserved.
 */
static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        BNX2X_ERR("begin crash dump -----------------\n");

        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

                /* snapshot of this queue's producers/consumers/indices */
                BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)"
                          "  *rx_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  fp_c_idx(%x)  fp_u_idx(%x)"
                          "  bd data(%x,%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, *fp->tx_cons_sb, *fp->rx_cons_sb,
                          fp->rx_comp_prod, fp->rx_comp_cons, fp->fp_c_idx,
                          fp->fp_u_idx, hw_prods->packets_prod,
                          hw_prods->bds_prod);

                /* tx packet ring, a window around the hw tx consumer */
                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j < end; j++) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
                                  sw_bd->skb, sw_bd->first_bd);
                }

                /* tx descriptor ring, a window around the bd consumer */
                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j < end; j++) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }

                /* rx descriptor ring, a window around the rx consumer */
                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j < end; j++) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  j, rx_bd[0], rx_bd[1], sw_bd->skb);
                }

                /* rx completion queue, a window around the cq consumer */
                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j < end; j++) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
                                  j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_t_idx(%u)"
                  "  def_x_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_t_idx, bp->def_x_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);


        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");

        /* freeze statistics so the crash-time state is preserved */
        bp->stats_state = STATS_STATE_DISABLE;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n");
}
444
445 static void bnx2x_enable_int(struct bnx2x *bp)
446 {
447         int port = bp->port;
448         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
449         u32 val = REG_RD(bp, addr);
450         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
451
452         if (msix) {
453                 val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
454                 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
455                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
456         } else {
457                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
458                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
459                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
460                 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
461         }
462
463         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  msi %d\n",
464            val, port, addr, msix);
465
466         REG_WR(bp, addr, val);
467 }
468
469 static void bnx2x_disable_int(struct bnx2x *bp)
470 {
471         int port = bp->port;
472         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
473         u32 val = REG_RD(bp, addr);
474
475         val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
476                  HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
477                  HC_CONFIG_0_REG_INT_LINE_EN_0 |
478                  HC_CONFIG_0_REG_ATTN_BIT_EN_0);
479
480         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
481            val, port, addr);
482
483         REG_WR(bp, addr, val);
484         if (REG_RD(bp, addr) != val)
485                 BNX2X_ERR("BUG! proper val not read from IGU!\n");
486 }
487
/* Disable interrupts and wait until every in-flight ISR and the slow
 * path work item have finished.  intr_sem is bumped first so any ISR
 * that still fires will see interrupts are being torn down.
 */
static void bnx2x_disable_int_sync(struct bnx2x *bp)
{

        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i;

        atomic_inc(&bp->intr_sem);
        /* prevent the HW from sending interrupts */
        bnx2x_disable_int(bp);

        /* make sure all ISRs are done */
        if (msix) {
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i].vector);

                /* one more for the Slow Path IRQ
                 * (relies on i being one past the last queue after the
                 * loop, i.e. the slow path vector follows the fastpath
                 * vectors in msix_table - TODO confirm table layout)
                 */
                synchronize_irq(bp->msix_table[i].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_work_sync(&bp->sp_task);

}
512
513 /* fast path code */
514
515 /*
516  * general service functions
517  */
518
/* Acknowledge a status block to the IGU: report the last seen @index
 * for status block @id / @storm, with @op selecting the interrupt mode
 * and @update saying whether the index should be latched.  The whole
 * igu_ack_register struct is pushed to the chip as one raw dword.
 */
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_PORT_BASE * bp->port) * 8;
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        /* the struct is written to the chip as a single 32-bit value */
        REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack));
}
536
537 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
538 {
539         struct host_status_block *fpsb = fp->status_blk;
540         u16 rc = 0;
541
542         barrier(); /* status block is written to by the chip */
543         if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
544                 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
545                 rc |= 1;
546         }
547         if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
548                 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
549                 rc |= 2;
550         }
551         return rc;
552 }
553
554 static inline int bnx2x_has_work(struct bnx2x_fastpath *fp)
555 {
556         u16 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
557
558         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
559                 rx_cons_sb++;
560
561         if ((rx_cons_sb != fp->rx_comp_cons) ||
562             (le16_to_cpu(*fp->tx_cons_sb) != fp->tx_pkt_cons))
563                 return 1;
564
565         return 0;
566 }
567
/* Read (and thereby acknowledge) the IGU SIMD mask for this port; the
 * returned bits indicate which status blocks have pending work.
 */
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_PORT_BASE * bp->port) * 8;
        u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr);

#ifdef IGU_DEBUG
#warning IGU_DEBUG active
        if (result == 0) {
                BNX2X_ERR("read %x from IGU\n", result);
                REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0);
        }
#endif
        /* NOTE(review): result is u32 but the return type is u16, so
         * the value is truncated - presumably only the low 16 bits are
         * meaningful; confirm against the IGU SIMD register layout
         */
        return result;
}
585
586
587 /*
588  * fast path service functions
589  */
590
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_bd *tx_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = tx_buf->first_bd;
        int nbd;	/* bds remaining to walk for this packet */

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_bd = &fp->tx_desc_ring[bd_idx];
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                         BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

        /* the first bd was just unmapped, so account for it up front */
        nbd = le16_to_cpu(tx_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
        if (nbd > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("bad nbd!\n");
                bnx2x_panic();
        }
#endif

        /* Skip a parse bd and the TSO split header bd
           since they have no mapping */
        if (nbd)
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
                                           ETH_TX_BD_FLAGS_TCP_CSUM |
                                           ETH_TX_BD_FLAGS_SW_LSO)) {
                /* csum/LSO packets carry an extra (unmapped) parse bd */
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                tx_bd = &fp->tx_desc_ring[bd_idx];
                /* is this a TSO split header bd? */
                if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
                        if (--nbd)
                                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                }
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_bd = &fp->tx_desc_ring[bd_idx];
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                               BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        BUG_TRAP(skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return bd_idx;
}
657
/* Return the number of tx descriptors currently available to
 * start_xmit().  NUM_TX_BD - NUM_TX_RINGS is the number of usable bds
 * (one bd per page is reserved as a "next page" pointer), and the
 * prod/TX_DESC_CNT, cons/TX_DESC_CNT terms subtract those reserved
 * bds from the raw prod - cons distance.
 */
static inline u32 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        u16 used;
        u32 prod;
        u32 cons;

        /* Tell compiler that prod and cons can change */
        barrier();
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* biased so that the prod-wrapped-past-cons case comes out
         * non-negative; the bias is removed below when prod >= cons
         */
        used = (NUM_TX_BD - NUM_TX_RINGS + prod - cons +
                (cons / TX_DESC_CNT) - (prod / TX_DESC_CNT));

        if (prod >= cons) {
                /* used = prod - cons - prod/size + cons/size */
                used -= NUM_TX_BD - NUM_TX_RINGS;
        }

        BUG_TRAP(used <= fp->bp->tx_ring_size);
        BUG_TRAP((fp->bp->tx_ring_size - used) <= MAX_TX_AVAIL);

        return (fp->bp->tx_ring_size - used);
}
682
/* Reclaim completed tx packets: walk from the driver's software
 * consumer up to the hardware consumer from the status block, freeing
 * each sent packet, then wake the tx queue if it was stopped and
 * enough descriptors became available.  @work caps the number of
 * packets handled in this call.
 */
static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
        struct bnx2x *bp = fp->bp;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
        int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %d\n",
                   hw_cons, sw_cons, pkt_cons);

                /* free the packet and remember the last bd it used */
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
                done++;

                if (done == work)
                        break;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* Need to make the tx_cons update visible to start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        /* TBD need a thresh? */
        if (unlikely(netif_queue_stopped(bp->dev))) {

                netif_tx_lock(bp->dev);

                /* re-check under the tx lock to close the race with
                 * start_xmit() stopping the queue concurrently
                 */
                if (netif_queue_stopped(bp->dev) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_wake_queue(bp->dev);

                netif_tx_unlock(bp->dev);

        }
}
743
/* Handle a slow-path (ramrod) completion seen on a fastpath CQ.
 * Advances either the per-fastpath state machine (for the non-leading
 * multi queues) or the global bp->state machine, keyed on which
 * command completed in which state.
 */
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(NETIF_MSG_RX_STATUS,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           fp->index, cid, command, bp->state, rr_cqe->ramrod_cqe.type);

        /* a ramrod completed - a slow path queue slot is free again */
        bp->spq_left++;

        if (fp->index) {
                /* non-leading (multi) queue: per-fastpath transitions */
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply(%d)  state is %x\n",
                                  command, fp->state);
                }
                mb(); /* force bnx2x_wait_ramrod to see the change */
                return;
        }

        /* leading queue: global driver state transitions */
        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_PORT_DEL | BNX2X_STATE_CLOSING_WAIT4_DELETE):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_DELETED;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                break;

        default:
                BNX2X_ERR("unexpected ramrod (%d)  state is %x\n",
                          command, bp->state);
        }

        mb(); /* force bnx2x_wait_ramrod to see the change */
}
813
/* Allocate and DMA-map a fresh rx skb and install it in the rx rings
 * at @index.  Returns 0 on success, -ENOMEM on allocation or mapping
 * failure (the ring entry is then left untouched).
 */
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        /* NOTE(review): allocation uses rx_buf_size but the mapping
         * length is rx_buf_use_size - presumably use_size <= buf_size;
         * confirm where these fields are initialized
         */
        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(mapping))) {

                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        /* publish the dma address to the hardware descriptor */
        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}
842
/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        /* hand the head of the buffer back to the device; only the
         * leading rx_offset + RX_COPY_THRESH bytes are re-synced
         */
        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       bp->rx_offset + RX_COPY_THRESH,
                                       PCI_DMA_FROMDEVICE);

        /* move skb, dma mapping and hw descriptor from cons to prod */
        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}
867
868 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
869 {
870         struct bnx2x *bp = fp->bp;
871         u16 bd_cons, bd_prod, comp_ring_cons;
872         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
873         int rx_pkt = 0;
874
875 #ifdef BNX2X_STOP_ON_ERROR
876         if (unlikely(bp->panic))
877                 return 0;
878 #endif
879
880         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
881         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
882                 hw_comp_cons++;
883
884         bd_cons = fp->rx_bd_cons;
885         bd_prod = fp->rx_bd_prod;
886         sw_comp_cons = fp->rx_comp_cons;
887         sw_comp_prod = fp->rx_comp_prod;
888
889         /* Memory barrier necessary as speculative reads of the rx
890          * buffer can be ahead of the index in the status block
891          */
892         rmb();
893
894         DP(NETIF_MSG_RX_STATUS,
895            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
896            fp->index, hw_comp_cons, sw_comp_cons);
897
898         while (sw_comp_cons != hw_comp_cons) {
899                 unsigned int len, pad;
900                 struct sw_rx_bd *rx_buf;
901                 struct sk_buff *skb;
902                 union eth_rx_cqe *cqe;
903
904                 comp_ring_cons = RCQ_BD(sw_comp_cons);
905                 bd_prod = RX_BD(bd_prod);
906                 bd_cons = RX_BD(bd_cons);
907
908                 cqe = &fp->rx_comp_ring[comp_ring_cons];
909
910                 DP(NETIF_MSG_RX_STATUS, "hw_comp_cons %u  sw_comp_cons %u"
911                    "  comp_ring (%u)  bd_ring (%u,%u)\n",
912                    hw_comp_cons, sw_comp_cons,
913                    comp_ring_cons, bd_prod, bd_cons);
914                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
915                    "  queue %x  vlan %x  len %x\n",
916                    cqe->fast_path_cqe.type,
917                    cqe->fast_path_cqe.error_type_flags,
918                    cqe->fast_path_cqe.status_flags,
919                    cqe->fast_path_cqe.rss_hash_result,
920                    cqe->fast_path_cqe.vlan_tag, cqe->fast_path_cqe.pkt_len);
921
922                 /* is this a slowpath msg? */
923                 if (unlikely(cqe->fast_path_cqe.type)) {
924                         bnx2x_sp_event(fp, cqe);
925                         goto next_cqe;
926
927                 /* this is an rx packet */
928                 } else {
929                         rx_buf = &fp->rx_buf_ring[bd_cons];
930                         skb = rx_buf->skb;
931
932                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
933                         pad = cqe->fast_path_cqe.placement_offset;
934
935                         pci_dma_sync_single_for_device(bp->pdev,
936                                         pci_unmap_addr(rx_buf, mapping),
937                                                        pad + RX_COPY_THRESH,
938                                                        PCI_DMA_FROMDEVICE);
939                         prefetch(skb);
940                         prefetch(((char *)(skb)) + 128);
941
942                         /* is this an error packet? */
943                         if (unlikely(cqe->fast_path_cqe.error_type_flags &
944                                                         ETH_RX_ERROR_FALGS)) {
945                         /* do we sometimes forward error packets anyway? */
946                                 DP(NETIF_MSG_RX_ERR,
947                                    "ERROR flags(%u) Rx packet(%u)\n",
948                                    cqe->fast_path_cqe.error_type_flags,
949                                    sw_comp_cons);
950                                 /* TBD make sure MC counts this as a drop */
951                                 goto reuse_rx;
952                         }
953
954                         /* Since we don't have a jumbo ring
955                          * copy small packets if mtu > 1500
956                          */
957                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
958                             (len <= RX_COPY_THRESH)) {
959                                 struct sk_buff *new_skb;
960
961                                 new_skb = netdev_alloc_skb(bp->dev,
962                                                            len + pad);
963                                 if (new_skb == NULL) {
964                                         DP(NETIF_MSG_RX_ERR,
965                                            "ERROR packet dropped "
966                                            "because of alloc failure\n");
967                                         /* TBD count this as a drop? */
968                                         goto reuse_rx;
969                                 }
970
971                                 /* aligned copy */
972                                 skb_copy_from_linear_data_offset(skb, pad,
973                                                     new_skb->data + pad, len);
974                                 skb_reserve(new_skb, pad);
975                                 skb_put(new_skb, len);
976
977                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
978
979                                 skb = new_skb;
980
981                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
982                                 pci_unmap_single(bp->pdev,
983                                         pci_unmap_addr(rx_buf, mapping),
984                                                  bp->rx_buf_use_size,
985                                                  PCI_DMA_FROMDEVICE);
986                                 skb_reserve(skb, pad);
987                                 skb_put(skb, len);
988
989                         } else {
990                                 DP(NETIF_MSG_RX_ERR,
991                                    "ERROR packet dropped because "
992                                    "of alloc failure\n");
993 reuse_rx:
994                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
995                                 goto next_rx;
996                         }
997
998                         skb->protocol = eth_type_trans(skb, bp->dev);
999
1000                         skb->ip_summed = CHECKSUM_NONE;
1001                         if (bp->rx_csum && BNX2X_RX_SUM_OK(cqe))
1002                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1003
1004                         /* TBD do we pass bad csum packets in promisc */
1005                 }
1006
1007 #ifdef BCM_VLAN
1008                 if ((le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags)
1009                                 & PARSING_FLAGS_NUMBER_OF_NESTED_VLANS)
1010                     && (bp->vlgrp != NULL))
1011                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1012                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1013                 else
1014 #endif
1015                 netif_receive_skb(skb);
1016
1017                 bp->dev->last_rx = jiffies;
1018
1019 next_rx:
1020                 rx_buf->skb = NULL;
1021
1022                 bd_cons = NEXT_RX_IDX(bd_cons);
1023                 bd_prod = NEXT_RX_IDX(bd_prod);
1024 next_cqe:
1025                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1026                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1027                 rx_pkt++;
1028
1029                 if ((rx_pkt == budget))
1030                         break;
1031         } /* while */
1032
1033         fp->rx_bd_cons = bd_cons;
1034         fp->rx_bd_prod = bd_prod;
1035         fp->rx_comp_cons = sw_comp_cons;
1036         fp->rx_comp_prod = sw_comp_prod;
1037
1038         REG_WR(bp, BAR_TSTRORM_INTMEM +
1039                TSTORM_RCQ_PROD_OFFSET(bp->port, fp->index), sw_comp_prod);
1040
1041         mmiowb(); /* keep prod updates ordered */
1042
1043         fp->rx_pkt += rx_pkt;
1044         fp->rx_calls++;
1045
1046         return rx_pkt;
1047 }
1048
1049 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1050 {
1051         struct bnx2x_fastpath *fp = fp_cookie;
1052         struct bnx2x *bp = fp->bp;
1053         struct net_device *dev = bp->dev;
1054         int index = fp->index;
1055
1056         DP(NETIF_MSG_INTR, "got an msix interrupt on [%d]\n", index);
1057         bnx2x_ack_sb(bp, index, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1058
1059 #ifdef BNX2X_STOP_ON_ERROR
1060         if (unlikely(bp->panic))
1061                 return IRQ_HANDLED;
1062 #endif
1063
1064         prefetch(fp->rx_cons_sb);
1065         prefetch(fp->tx_cons_sb);
1066         prefetch(&fp->status_blk->c_status_block.status_block_index);
1067         prefetch(&fp->status_blk->u_status_block.status_block_index);
1068
1069         netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));
1070         return IRQ_HANDLED;
1071 }
1072
1073 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1074 {
1075         struct net_device *dev = dev_instance;
1076         struct bnx2x *bp = netdev_priv(dev);
1077         u16 status = bnx2x_ack_int(bp);
1078
1079         if (unlikely(status == 0)) {
1080                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1081                 return IRQ_NONE;
1082         }
1083
1084         DP(NETIF_MSG_INTR, "got an interrupt status is %u\n", status);
1085
1086 #ifdef BNX2X_STOP_ON_ERROR
1087         if (unlikely(bp->panic))
1088                 return IRQ_HANDLED;
1089 #endif
1090
1091         /* Return here if interrupt is shared and is disabled */
1092         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1093                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1094                 return IRQ_HANDLED;
1095         }
1096
1097         if (status & 0x2) {
1098                 struct bnx2x_fastpath *fp = &bp->fp[0];
1099
1100                 prefetch(fp->rx_cons_sb);
1101                 prefetch(fp->tx_cons_sb);
1102                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1103                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1104
1105                 netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));
1106
1107                 status &= ~0x2;
1108                 if (!status)
1109                         return IRQ_HANDLED;
1110         }
1111
1112         if (unlikely(status & 0x1)) {
1113
1114                 schedule_work(&bp->sp_task);
1115
1116                 status &= ~0x1;
1117                 if (!status)
1118                         return IRQ_HANDLED;
1119         }
1120
1121         DP(NETIF_MSG_INTR, "got an unknown interrupt! (status is %u)\n",
1122            status);
1123
1124         return IRQ_HANDLED;
1125 }
1126
1127 /* end of fast path */
1128
1129 /* PHY/MAC */
1130
1131 /*
1132  * General service functions
1133  */
1134
1135 static void bnx2x_leds_set(struct bnx2x *bp, unsigned int speed)
1136 {
1137         int port = bp->port;
1138
1139         NIG_WR(NIG_REG_LED_MODE_P0 + port*4,
1140                ((bp->hw_config & SHARED_HW_CFG_LED_MODE_MASK) >>
1141                 SHARED_HW_CFG_LED_MODE_SHIFT));
1142         NIG_WR(NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);
1143
1144         /* Set blinking rate to ~15.9Hz */
1145         NIG_WR(NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
1146                LED_BLINK_RATE_VAL);
1147         NIG_WR(NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 + port*4, 1);
1148
1149         /* On Ax chip versions for speeds less than 10G
1150            LED scheme is different */
1151         if ((CHIP_REV(bp) == CHIP_REV_Ax) && (speed < SPEED_10000)) {
1152                 NIG_WR(NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 1);
1153                 NIG_WR(NIG_REG_LED_CONTROL_TRAFFIC_P0 + port*4, 0);
1154                 NIG_WR(NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 + port*4, 1);
1155         }
1156 }
1157
/* Return the port LEDs to their default state: clear the 10G LED and
 * put the LED mode back to MAC1 control. */
static void bnx2x_leds_unset(struct bnx2x *bp)
{
        int port = bp->port;

        NIG_WR(NIG_REG_LED_10G_P0 + port*4, 0);
        NIG_WR(NIG_REG_LED_MODE_P0 + port*4, SHARED_HW_CFG_LED_MAC1);
}
1165
1166 static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
1167 {
1168         u32 val = REG_RD(bp, reg);
1169
1170         val |= bits;
1171         REG_WR(bp, reg, val);
1172         return val;
1173 }
1174
1175 static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits)
1176 {
1177         u32 val = REG_RD(bp, reg);
1178
1179         val &= ~bits;
1180         REG_WR(bp, reg, val);
1181         return val;
1182 }
1183
1184 static int bnx2x_mdio22_write(struct bnx2x *bp, u32 reg, u32 val)
1185 {
1186         int rc;
1187         u32 tmp, i;
1188         int port = bp->port;
1189         u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
1190
1191 /*      DP(NETIF_MSG_HW, "phy_addr 0x%x  reg 0x%x  val 0x%08x\n",
1192            bp->phy_addr, reg, val); */
1193
1194         if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
1195
1196                 tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1197                 tmp &= ~EMAC_MDIO_MODE_AUTO_POLL;
1198                 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
1199                 REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1200                 udelay(40);
1201         }
1202
1203         tmp = ((bp->phy_addr << 21) | (reg << 16) |
1204                (val & EMAC_MDIO_COMM_DATA) |
1205                EMAC_MDIO_COMM_COMMAND_WRITE_22 |
1206                EMAC_MDIO_COMM_START_BUSY);
1207         EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, tmp);
1208
1209         for (i = 0; i < 50; i++) {
1210                 udelay(10);
1211
1212                 tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
1213                 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
1214                         udelay(5);
1215                         break;
1216                 }
1217         }
1218
1219         if (tmp & EMAC_MDIO_COMM_START_BUSY) {
1220                 BNX2X_ERR("write phy register failed\n");
1221
1222                 rc = -EBUSY;
1223         } else {
1224                 rc = 0;
1225         }
1226
1227         if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
1228
1229                 tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1230                 tmp |= EMAC_MDIO_MODE_AUTO_POLL;
1231                 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
1232         }
1233
1234         return rc;
1235 }
1236
/* Read clause 22 PHY register @reg through the EMAC MDIO block of this
 * port.  The value is returned in *ret_val (0 on failure); returns 0
 * on success or -EBUSY if the transaction does not complete within the
 * poll window.  HW auto-polling of the bus is suspended for the
 * duration and restored afterwards. */
static int bnx2x_mdio22_read(struct bnx2x *bp, u32 reg, u32 *ret_val)
{
        int port = bp->port;
        u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
        u32 val, i;
        int rc;

        /* suspend HW auto-polling so it cannot race with our
         * manual transaction */
        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {

                val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
                val &= ~EMAC_MDIO_MODE_AUTO_POLL;
                EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
                REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
                udelay(40);
        }

        /* launch the clause 22 read transaction */
        val = ((bp->phy_addr << 21) | (reg << 16) |
               EMAC_MDIO_COMM_COMMAND_READ_22 |
               EMAC_MDIO_COMM_START_BUSY);
        EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, val);

        /* poll for completion (up to ~500us); on completion only the
         * data bits of the COMM register are kept */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
                if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
                        val &= EMAC_MDIO_COMM_DATA;
                        break;
                }
        }

        if (val & EMAC_MDIO_COMM_START_BUSY) {
                BNX2X_ERR("read phy register failed\n");

                *ret_val = 0x0;
                rc = -EBUSY;
        } else {
                *ret_val = val;
                rc = 0;
        }

        /* restore HW auto-polling if it was enabled */
        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {

                val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
                val |= EMAC_MDIO_MODE_AUTO_POLL;
                EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
        }

        return rc;
}
1290
/* Write @val to a clause 45 PHY register: @reg selects the MMD/device,
 * @addr the register address within it.  The transaction is two-phase
 * (address, then write data), each phase polled for completion.
 * Returns 0 on success or -EBUSY on timeout of either phase.  HW
 * auto-polling is suspended for the duration and restored afterwards. */
static int bnx2x_mdio45_write(struct bnx2x *bp, u32 reg, u32 addr, u32 val)
{
        int rc = 0;
        u32 tmp, i;
        int port = bp->port;
        u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;

        /* suspend HW auto-polling so it cannot race with us */
        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {

                tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
                tmp &= ~EMAC_MDIO_MODE_AUTO_POLL;
                EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
                REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
                udelay(40);
        }

        /* set clause 45 mode */
        tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
        tmp |= EMAC_MDIO_MODE_CLAUSE_45;
        EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);

        /* address phase: latch the register address into the PHY */
        tmp = ((bp->phy_addr << 21) | (reg << 16) | addr |
               EMAC_MDIO_COMM_COMMAND_ADDRESS |
               EMAC_MDIO_COMM_START_BUSY);
        EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, tmp);

        /* poll for the address phase to complete (up to ~500us) */
        for (i = 0; i < 50; i++) {
                udelay(10);

                tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
                if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }

        if (tmp & EMAC_MDIO_COMM_START_BUSY) {
                BNX2X_ERR("write phy register failed\n");

                rc = -EBUSY;
        } else {
                /* data phase: issue the write and poll for completion */
                tmp = ((bp->phy_addr << 21) | (reg << 16) | val |
                       EMAC_MDIO_COMM_COMMAND_WRITE_45 |
                       EMAC_MDIO_COMM_START_BUSY);
                EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, tmp);

                for (i = 0; i < 50; i++) {
                        udelay(10);

                        tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
                        if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
                                udelay(5);
                                break;
                        }
                }

                if (tmp & EMAC_MDIO_COMM_START_BUSY) {
                        BNX2X_ERR("write phy register failed\n");

                        rc = -EBUSY;
                }
        }

        /* unset clause 45 mode */
        tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
        tmp &= ~EMAC_MDIO_MODE_CLAUSE_45;
        EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);

        /* restore HW auto-polling if it was enabled */
        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {

                tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
                tmp |= EMAC_MDIO_MODE_AUTO_POLL;
                EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
        }

        return rc;
}
1370
/* Read a clause 45 PHY register: @reg selects the MMD/device, @addr
 * the register address within it.  The transaction is two-phase
 * (address, then read data), each phase polled for completion.  The
 * value is returned in *ret_val (0 on failure); returns 0 on success
 * or -EBUSY on timeout of either phase.  HW auto-polling is suspended
 * for the duration and restored afterwards. */
static int bnx2x_mdio45_read(struct bnx2x *bp, u32 reg, u32 addr,
                             u32 *ret_val)
{
        int port = bp->port;
        u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
        u32 val, i;
        int rc = 0;

        /* suspend HW auto-polling so it cannot race with us */
        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {

                val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
                val &= ~EMAC_MDIO_MODE_AUTO_POLL;
                EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
                REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
                udelay(40);
        }

        /* set clause 45 mode */
        val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
        val |= EMAC_MDIO_MODE_CLAUSE_45;
        EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);

        /* address phase: latch the register address into the PHY */
        val = ((bp->phy_addr << 21) | (reg << 16) | addr |
               EMAC_MDIO_COMM_COMMAND_ADDRESS |
               EMAC_MDIO_COMM_START_BUSY);
        EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, val);

        /* poll for the address phase to complete (up to ~500us) */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
                if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }

        if (val & EMAC_MDIO_COMM_START_BUSY) {
                BNX2X_ERR("read phy register failed\n");

                *ret_val = 0;
                rc = -EBUSY;
        } else {
                /* data phase: issue the read and poll for the result;
                 * on completion only the data bits are kept */
                val = ((bp->phy_addr << 21) | (reg << 16) |
                       EMAC_MDIO_COMM_COMMAND_READ_45 |
                       EMAC_MDIO_COMM_START_BUSY);
                EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, val);

                for (i = 0; i < 50; i++) {
                        udelay(10);

                        val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
                        if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
                                val &= EMAC_MDIO_COMM_DATA;
                                break;
                        }
                }

                if (val & EMAC_MDIO_COMM_START_BUSY) {
                        BNX2X_ERR("read phy register failed\n");

                        val = 0;
                        rc = -EBUSY;
                }

                *ret_val = val;
        }

        /* unset clause 45 mode */
        val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
        val &= ~EMAC_MDIO_MODE_CLAUSE_45;
        EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);

        /* restore HW auto-polling if it was enabled */
        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {

                val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
                val |= EMAC_MDIO_MODE_AUTO_POLL;
                EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
        }

        return rc;
}
1455
1456 static int bnx2x_mdio45_vwrite(struct bnx2x *bp, u32 reg, u32 addr, u32 val)
1457 {
1458         int i;
1459         u32 rd_val;
1460
1461         might_sleep();
1462         for (i = 0; i < 10; i++) {
1463                 bnx2x_mdio45_write(bp, reg, addr, val);
1464                 msleep(5);
1465                 bnx2x_mdio45_read(bp, reg, addr, &rd_val);
1466                 /* if the read value is not the same as the value we wrote,
1467                    we should write it again */
1468                 if (rd_val == val)
1469                         return 0;
1470         }
1471         BNX2X_ERR("MDIO write in CL45 failed\n");
1472         return -EBUSY;
1473 }
1474
1475 /*
1476  * link management
1477  */
1478
/* Resolve the pause (flow control) mode for the current link and store
 * it in bp->flow_ctrl.
 *
 * When pause was autonegotiated (AN complete, not SGMII, direct XGXS
 * PHY) the local and link-partner pause advertisement registers are
 * combined and resolved via the lookup table below (IEEE 802.3
 * Annex 28B style); otherwise the requested forced mode is applied. */
static void bnx2x_flow_ctrl_resolve(struct bnx2x *bp, u32 gp_status)
{
        u32 ld_pause;   /* local driver */
        u32 lp_pause;   /* link partner */
        u32 pause_result;

        bp->flow_ctrl = 0;

        /* resolve from gp_status in case of AN complete and not sgmii */
        if ((bp->req_autoneg & AUTONEG_FLOW_CTRL) &&
            (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) &&
            (!(bp->phy_flags & PHY_SGMII_FLAG)) &&
            (XGXS_EXT_PHY_TYPE(bp) == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)) {

                MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
                bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
                                  &ld_pause);
                bnx2x_mdio22_read(bp,
                        MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
                                  &lp_pause);
                /* pack local ability into bits 3:2 and partner ability
                   into bits 1:0 of pause_result for the lookup below */
                pause_result = (ld_pause &
                                MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
                pause_result |= (lp_pause &
                                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
                DP(NETIF_MSG_LINK, "pause_result 0x%x\n", pause_result);

                switch (pause_result) {                 /* ASYM P ASYM P */
                case 0xb:                               /*   1  0   1  1 */
                        bp->flow_ctrl = FLOW_CTRL_TX;
                        break;

                case 0xe:                               /*   1  1   1  0 */
                        bp->flow_ctrl = FLOW_CTRL_RX;
                        break;

                case 0x5:                               /*   0  1   0  1 */
                case 0x7:                               /*   0  1   1  1 */
                case 0xd:                               /*   1  1   0  1 */
                case 0xf:                               /*   1  1   1  1 */
                        bp->flow_ctrl = FLOW_CTRL_BOTH;
                        break;

                default:
                        /* no common pause ability - leave flow_ctrl 0 */
                        break;
                }

        } else { /* forced mode */
                switch (bp->req_flow_ctrl) {
                case FLOW_CTRL_AUTO:
                        /* AUTO without an AN result: both directions,
                           but TX only above 4500-byte MTU (presumably an
                           RX-buffer sizing constraint - TODO confirm) */
                        if (bp->dev->mtu <= 4500)
                                bp->flow_ctrl = FLOW_CTRL_BOTH;
                        else
                                bp->flow_ctrl = FLOW_CTRL_TX;
                        break;

                case FLOW_CTRL_TX:
                case FLOW_CTRL_RX:
                case FLOW_CTRL_BOTH:
                        bp->flow_ctrl = bp->req_flow_ctrl;
                        break;

                case FLOW_CTRL_NONE:
                default:
                        break;
                }
        }
        DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", bp->flow_ctrl);
}
1547
/* Translate the XGXS "GP status" word into driver link state:
 * bp->link_up, bp->line_speed, bp->duplex, bp->flow_ctrl and the
 * bp->link_status bit field. */
static void bnx2x_link_settings_status(struct bnx2x *bp, u32 gp_status)
{
        bp->link_status = 0;

        if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) {
                DP(NETIF_MSG_LINK, "link up\n");

                bp->link_up = 1;
                bp->link_status |= LINK_STATUS_LINK_UP;

                if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS)
                        bp->duplex = DUPLEX_FULL;
                else
                        bp->duplex = DUPLEX_HALF;

                /* duplex must be resolved before pause resolution */
                bnx2x_flow_ctrl_resolve(bp, gp_status);

                /* decode the negotiated speed; each case also sets the
                 * matching speed/duplex bit in link_status */
                switch (gp_status & GP_STATUS_SPEED_MASK) {
                case GP_STATUS_10M:
                        bp->line_speed = SPEED_10;
                        if (bp->duplex == DUPLEX_FULL)
                                bp->link_status |= LINK_10TFD;
                        else
                                bp->link_status |= LINK_10THD;
                        break;

                case GP_STATUS_100M:
                        bp->line_speed = SPEED_100;
                        if (bp->duplex == DUPLEX_FULL)
                                bp->link_status |= LINK_100TXFD;
                        else
                                bp->link_status |= LINK_100TXHD;
                        break;

                case GP_STATUS_1G:
                case GP_STATUS_1G_KX:
                        bp->line_speed = SPEED_1000;
                        if (bp->duplex == DUPLEX_FULL)
                                bp->link_status |= LINK_1000TFD;
                        else
                                bp->link_status |= LINK_1000THD;
                        break;

                case GP_STATUS_2_5G:
                        bp->line_speed = SPEED_2500;
                        if (bp->duplex == DUPLEX_FULL)
                                bp->link_status |= LINK_2500TFD;
                        else
                                bp->link_status |= LINK_2500THD;
                        break;

                case GP_STATUS_5G:
                case GP_STATUS_6G:
                        /* the chip can report these but the driver has no
                         * matching speed definitions - treat as error */
                        BNX2X_ERR("link speed unsupported  gp_status 0x%x\n",
                                  gp_status);
                        break;

                case GP_STATUS_10G_KX4:
                case GP_STATUS_10G_HIG:
                case GP_STATUS_10G_CX4:
                        bp->line_speed = SPEED_10000;
                        bp->link_status |= LINK_10GTFD;
                        break;

                case GP_STATUS_12G_HIG:
                        bp->line_speed = SPEED_12000;
                        bp->link_status |= LINK_12GTFD;
                        break;

                case GP_STATUS_12_5G:
                        bp->line_speed = SPEED_12500;
                        bp->link_status |= LINK_12_5GTFD;
                        break;

                case GP_STATUS_13G:
                        bp->line_speed = SPEED_13000;
                        bp->link_status |= LINK_13GTFD;
                        break;

                case GP_STATUS_15G:
                        bp->line_speed = SPEED_15000;
                        bp->link_status |= LINK_15GTFD;
                        break;

                case GP_STATUS_16G:
                        bp->line_speed = SPEED_16000;
                        bp->link_status |= LINK_16GTFD;
                        break;

                default:
                        BNX2X_ERR("link speed unsupported  gp_status 0x%x\n",
                                  gp_status);
                        break;
                }

                bp->link_status |= LINK_STATUS_SERDES_LINK;

                /* report autoneg completion / parallel detection */
                if (bp->req_autoneg & AUTONEG_SPEED) {
                        bp->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;

                        if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE)
                                bp->link_status |=
                                        LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;

                        if (bp->autoneg & AUTONEG_PARALLEL)
                                bp->link_status |=
                                        LINK_STATUS_PARALLEL_DETECTION_USED;
                }

                if (bp->flow_ctrl & FLOW_CTRL_TX)
                       bp->link_status |= LINK_STATUS_TX_FLOW_CONTROL_ENABLED;

                if (bp->flow_ctrl & FLOW_CTRL_RX)
                       bp->link_status |= LINK_STATUS_RX_FLOW_CONTROL_ENABLED;

        } else { /* link_down */
                DP(NETIF_MSG_LINK, "link down\n");

                bp->link_up = 0;

                bp->line_speed = 0;
                bp->duplex = DUPLEX_FULL;
                bp->flow_ctrl = 0;
        }

        DP(NETIF_MSG_LINK, "gp_status 0x%x  link_up %d\n"
           DP_LEVEL "  line_speed %d  duplex %d  flow_ctrl 0x%x"
                    "  link_status 0x%x\n",
           gp_status, bp->link_up, bp->line_speed, bp->duplex, bp->flow_ctrl,
           bp->link_status);
}
1679
1680 static void bnx2x_link_int_ack(struct bnx2x *bp, int is_10g)
1681 {
1682         int port = bp->port;
1683
1684         /* first reset all status
1685          * we assume only one line will be change at a time */
1686         bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
1687                        (NIG_XGXS0_LINK_STATUS |
1688                         NIG_SERDES0_LINK_STATUS |
1689                         NIG_STATUS_INTERRUPT_XGXS0_LINK10G));
1690         if (bp->link_up) {
1691                 if (is_10g) {
1692                         /* Disable the 10G link interrupt
1693                          * by writing 1 to the status register
1694                          */
1695                         DP(NETIF_MSG_LINK, "10G XGXS link up\n");
1696                         bnx2x_bits_en(bp,
1697                                       NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
1698                                       NIG_STATUS_INTERRUPT_XGXS0_LINK10G);
1699
1700                 } else if (bp->phy_flags & PHY_XGXS_FLAG) {
1701                         /* Disable the link interrupt
1702                          * by writing 1 to the relevant lane
1703                          * in the status register
1704                          */
1705                         DP(NETIF_MSG_LINK, "1G XGXS link up\n");
1706                         bnx2x_bits_en(bp,
1707                                       NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
1708                                       ((1 << bp->ser_lane) <<
1709                                        NIG_XGXS0_LINK_STATUS_SIZE));
1710
1711                 } else { /* SerDes */
1712                         DP(NETIF_MSG_LINK, "SerDes link up\n");
1713                         /* Disable the link interrupt
1714                          * by writing 1 to the status register
1715                          */
1716                         bnx2x_bits_en(bp,
1717                                       NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
1718                                       NIG_SERDES0_LINK_STATUS);
1719                 }
1720
1721         } else { /* link_down */
1722         }
1723 }
1724
/* Check whether the external PHY (if any) reports link up.
 * Returns non-zero when the external PHY is up, or 1 unconditionally
 * for "direct" configurations where there is no external PHY to ask.
 */
static int bnx2x_ext_phy_is_link_up(struct bnx2x *bp)
{
	u32 ext_phy_type;
	u32 ext_phy_addr;
	u32 local_phy;
	u32 val = 0;
	u32 rx_sd, pcs_status;

	if (bp->phy_flags & PHY_XGXS_FLAG) {
		/* temporarily redirect MDIO accesses to the external PHY
		 * address; the original address is restored below */
		local_phy = bp->phy_addr;
		ext_phy_addr = ((bp->ext_phy_config &
				 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
				PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
		bp->phy_addr = (u8)ext_phy_addr;

		ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			DP(NETIF_MSG_LINK, "XGXS Direct\n");
			val = 1;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			DP(NETIF_MSG_LINK, "XGXS 8705\n");
			/* NOTE(review): LASI status is read twice -
			 * presumably the register is latched and the first
			 * read clears stale state; confirm vs. data sheet */
			bnx2x_mdio45_read(bp, EXT_PHY_OPT_WIS_DEVAD,
					  EXT_PHY_OPT_LASI_STATUS, &val);
			DP(NETIF_MSG_LINK, "8705 LASI status is %d\n", val);

			bnx2x_mdio45_read(bp, EXT_PHY_OPT_WIS_DEVAD,
					  EXT_PHY_OPT_LASI_STATUS, &val);
			DP(NETIF_MSG_LINK, "8705 LASI status is %d\n", val);

			/* link is bit 0 of the PMD rx signal detect */
			bnx2x_mdio45_read(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
					  EXT_PHY_OPT_PMD_RX_SD, &rx_sd);
			val = (rx_sd & 0x1);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			DP(NETIF_MSG_LINK, "XGXS 8706\n");
			/* NOTE(review): double LASI read - same latched-
			 * register assumption as the 8705 above */
			bnx2x_mdio45_read(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
					  EXT_PHY_OPT_LASI_STATUS, &val);
			DP(NETIF_MSG_LINK, "8706 LASI status is %d\n", val);

			bnx2x_mdio45_read(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
					  EXT_PHY_OPT_LASI_STATUS, &val);
			DP(NETIF_MSG_LINK, "8706 LASI status is %d\n", val);

			bnx2x_mdio45_read(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
					  EXT_PHY_OPT_PMD_RX_SD, &rx_sd);
			bnx2x_mdio45_read(bp, EXT_PHY_OPT_PCS_DEVAD,
					 EXT_PHY_OPT_PCS_STATUS, &pcs_status);
			DP(NETIF_MSG_LINK, "8706 rx_sd 0x%x"
			   "  pcs_status 0x%x\n", rx_sd, pcs_status);
			/* link is up if both bit 0 of pmd_rx and
			 * bit 0 of pcs_status are set
			 */
			val = (rx_sd & pcs_status);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->ext_phy_config);
			val = 0;
			break;
		}
		/* restore the local PHY address */
		bp->phy_addr = local_phy;

	} else { /* SerDes */
		ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			DP(NETIF_MSG_LINK, "SerDes Direct\n");
			val = 1;
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			/* 5482 link state is not polled here; assume up */
			DP(NETIF_MSG_LINK, "SerDes 5482\n");
			val = 1;
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
			   bp->ext_phy_config);
			val = 0;
			break;
		}
	}

	return val;
}
1815
/* Bring up the BigMAC (used for 10G and above).
 * is_lb: non-zero additionally puts the MAC in loopback mode.
 * Resets the block, programs the MAC address, MTU and flow-control
 * registers via DMAE wide-bus writes, then switches the NIG datapath
 * from the EMAC to the BMAC.  Register-write order follows the
 * hardware bring-up sequence and must not be changed casually.
 */
static void bnx2x_bmac_enable(struct bnx2x *bp, int is_lb)
{
	int port = bp->port;
	u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
			       NIG_REG_INGRESS_BMAC0_MEM;
	u32 wb_write[2];	/* 64-bit value for REG_WR_DMAE (lo, hi) */
	u32 val;

	DP(NETIF_MSG_LINK, "enabling BigMAC\n");
	/* reset and unreset the BigMac */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
	       (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
	msleep(5);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
	       (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));

	/* enable access for bmac registers */
	NIG_WR(NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);

	/* XGXS control */
	wb_write[0] = 0x3c;
	wb_write[1] = 0;
	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
		    wb_write, 2);

	/* tx MAC SA: bytes 2-5 in the low word, bytes 0-1 in the high */
	wb_write[0] = ((bp->dev->dev_addr[2] << 24) |
		       (bp->dev->dev_addr[3] << 16) |
		       (bp->dev->dev_addr[4] << 8) |
			bp->dev->dev_addr[5]);
	wb_write[1] = ((bp->dev->dev_addr[0] << 8) |
			bp->dev->dev_addr[1]);
	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR,
		    wb_write, 2);

	/* tx control */
	val = 0xc0;
	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= 0x800000;	/* enable tx pause */
	wb_write[0] = val;
	wb_write[1] = 0;
	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_CONTROL, wb_write, 2);

	/* set tx mtu */
	wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; /* -CRC */
	wb_write[1] = 0;
	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_write, 2);

	/* mac control */
	val = 0x3;
	if (is_lb) {
		val |= 0x4;	/* loopback bit */
		DP(NETIF_MSG_LINK, "enable bmac loopback\n");
	}
	wb_write[0] = val;
	wb_write[1] = 0;
	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
		    wb_write, 2);

	/* rx control set to don't strip crc */
	val = 0x14;
	if (bp->flow_ctrl & FLOW_CTRL_RX)
		val |= 0x20;	/* honor received pause frames */
	wb_write[0] = val;
	wb_write[1] = 0;
	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_write, 2);

	/* set rx mtu */
	wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
	wb_write[1] = 0;
	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_write, 2);

	/* set cnt max size */
	wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; /* -VLAN */
	wb_write[1] = 0;
	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE,
		    wb_write, 2);

	/* configure safc */
	wb_write[0] = 0x1000200;
	wb_write[1] = 0;
	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
		    wb_write, 2);

	/* fix for emulation */
	if (CHIP_REV(bp) == CHIP_REV_EMUL) {
		wb_write[0] = 0xf000;
		wb_write[1] = 0;
		REG_WR_DMAE(bp,
			    bmac_addr + BIGMAC_REGISTER_TX_PAUSE_THRESHOLD,
			    wb_write, 2);
	}

	/* reset old bmac stats */
	memset(&bp->old_bmac, 0, sizeof(struct bmac_stats));

	NIG_WR(NIG_REG_XCM0_OUT_EN + port*4, 0x0);

	/* select XGXS */
	NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0x1);
	NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0);

	/* disable the NIG in/out to the emac */
	NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0x0);
	NIG_WR(NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, 0x0);
	NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x0);

	/* enable the NIG in/out to the bmac */
	NIG_WR(NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0);

	NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0x1);
	/* pause out only when tx flow control is negotiated */
	val = 0;
	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val = 1;
	NIG_WR(NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, val);
	NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0x1);

	bp->phy_flags |= PHY_BMAC_FLAG;

	bp->stats_state = STATS_STATE_ENABLE;
}
1937
1938 static void bnx2x_emac_enable(struct bnx2x *bp)
1939 {
1940         int port = bp->port;
1941         u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
1942         u32 val;
1943         int timeout;
1944
1945         DP(NETIF_MSG_LINK, "enabling EMAC\n");
1946         /* reset and unreset the emac core */
1947         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
1948                (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
1949         msleep(5);
1950         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
1951                (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
1952
1953         /* enable emac and not bmac */
1954         NIG_WR(NIG_REG_EGRESS_EMAC0_PORT + port*4, 1);
1955
1956         /* for paladium */
1957         if (CHIP_REV(bp) == CHIP_REV_EMUL) {
1958                 /* Use lane 1 (of lanes 0-3) */
1959                 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
1960                 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
1961         }
1962         /* for fpga */
1963         else if (CHIP_REV(bp) == CHIP_REV_FPGA) {
1964                 /* Use lane 1 (of lanes 0-3) */
1965                 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
1966                 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
1967         }
1968         /* ASIC */
1969         else {
1970                 if (bp->phy_flags & PHY_XGXS_FLAG) {
1971                         DP(NETIF_MSG_LINK, "XGXS\n");
1972                         /* select the master lanes (out of 0-3) */
1973                         NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4,
1974                                bp->ser_lane);
1975                         /* select XGXS */
1976                         NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
1977
1978                 } else { /* SerDes */
1979                         DP(NETIF_MSG_LINK, "SerDes\n");
1980                         /* select SerDes */
1981                         NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
1982                 }
1983         }
1984
1985         /* enable emac */
1986         NIG_WR(NIG_REG_NIG_EMAC0_EN + port*4, 1);
1987
1988         /* init emac - use read-modify-write */
1989         /* self clear reset */
1990         val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
1991         EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET));
1992
1993         timeout = 200;
1994         while (val & EMAC_MODE_RESET) {
1995                 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
1996                 DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
1997                 if (!timeout) {
1998                         BNX2X_ERR("EMAC timeout!\n");
1999                         break;
2000                 }
2001                 timeout--;
2002         }
2003
2004         /* reset tx part */
2005         EMAC_WR(EMAC_REG_EMAC_TX_MODE, EMAC_TX_MODE_RESET);
2006
2007         timeout = 200;
2008         while (val & EMAC_TX_MODE_RESET) {
2009                 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_TX_MODE);
2010                 DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
2011                 if (!timeout) {
2012                         BNX2X_ERR("EMAC timeout!\n");
2013                         break;
2014                 }
2015                 timeout--;
2016         }
2017
2018         if (CHIP_REV_IS_SLOW(bp)) {
2019                 /* config GMII mode */
2020                 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2021                 EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII));
2022
2023         } else { /* ASIC */
2024                 /* pause enable/disable */
2025                 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
2026                                EMAC_RX_MODE_FLOW_EN);
2027                 if (bp->flow_ctrl & FLOW_CTRL_RX)
2028                         bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
2029                                       EMAC_RX_MODE_FLOW_EN);
2030
2031                 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
2032                                EMAC_TX_MODE_EXT_PAUSE_EN);
2033                 if (bp->flow_ctrl & FLOW_CTRL_TX)
2034                         bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
2035                                       EMAC_TX_MODE_EXT_PAUSE_EN);
2036         }
2037
2038         /* KEEP_VLAN_TAG, promiscuous */
2039         val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
2040         val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
2041         EMAC_WR(EMAC_REG_EMAC_RX_MODE, val);
2042
2043         /* identify magic packets */
2044         val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2045         EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_MPKT));
2046
2047         /* enable emac for jumbo packets */
2048         EMAC_WR(EMAC_REG_EMAC_RX_MTU_SIZE,
2049                 (EMAC_RX_MTU_SIZE_JUMBO_ENA |
2050                  (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD))); /* -VLAN */
2051
2052         /* strip CRC */
2053         NIG_WR(NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1);
2054
2055         val = ((bp->dev->dev_addr[0] << 8) |
2056                 bp->dev->dev_addr[1]);
2057         EMAC_WR(EMAC_REG_EMAC_MAC_MATCH, val);
2058
2059         val = ((bp->dev->dev_addr[2] << 24) |
2060                (bp->dev->dev_addr[3] << 16) |
2061                (bp->dev->dev_addr[4] << 8) |
2062                 bp->dev->dev_addr[5]);
2063         EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val);
2064
2065         /* disable the NIG in/out to the bmac */
2066         NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0x0);
2067         NIG_WR(NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, 0x0);
2068         NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0x0);
2069
2070         /* enable the NIG in/out to the emac */
2071         NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0x1);
2072         val = 0;
2073         if (bp->flow_ctrl & FLOW_CTRL_TX)
2074                 val = 1;
2075         NIG_WR(NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val);
2076         NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1);
2077
2078         if (CHIP_REV(bp) == CHIP_REV_FPGA) {
2079                 /* take the BigMac out of reset */
2080                 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
2081                        (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2082
2083                 /* enable access for bmac registers */
2084                 NIG_WR(NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
2085         }
2086
2087         bp->phy_flags |= PHY_EMAC_FLAG;
2088
2089         bp->stats_state = STATS_STATE_ENABLE;
2090 }
2091
2092 static void bnx2x_emac_program(struct bnx2x *bp)
2093 {
2094         u16 mode = 0;
2095         int port = bp->port;
2096
2097         DP(NETIF_MSG_LINK, "setting link speed & duplex\n");
2098         bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
2099                        (EMAC_MODE_25G_MODE |
2100                         EMAC_MODE_PORT_MII_10M |
2101                         EMAC_MODE_HALF_DUPLEX));
2102         switch (bp->line_speed) {
2103         case SPEED_10:
2104                 mode |= EMAC_MODE_PORT_MII_10M;
2105                 break;
2106
2107         case SPEED_100:
2108                 mode |= EMAC_MODE_PORT_MII;
2109                 break;
2110
2111         case SPEED_1000:
2112                 mode |= EMAC_MODE_PORT_GMII;
2113                 break;
2114
2115         case SPEED_2500:
2116                 mode |= (EMAC_MODE_25G_MODE | EMAC_MODE_PORT_GMII);
2117                 break;
2118
2119         default:
2120                 /* 10G not valid for EMAC */
2121                 BNX2X_ERR("Invalid line_speed 0x%x\n", bp->line_speed);
2122                 break;
2123         }
2124
2125         if (bp->duplex == DUPLEX_HALF)
2126                 mode |= EMAC_MODE_HALF_DUPLEX;
2127         bnx2x_bits_en(bp, GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
2128                       mode);
2129
2130         bnx2x_leds_set(bp, bp->line_speed);
2131 }
2132
2133 static void bnx2x_set_sgmii_tx_driver(struct bnx2x *bp)
2134 {
2135         u32 lp_up2;
2136         u32 tx_driver;
2137
2138         /* read precomp */
2139         MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_OVER_1G);
2140         bnx2x_mdio22_read(bp, MDIO_OVER_1G_LP_UP2, &lp_up2);
2141
2142         MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_TX0);
2143         bnx2x_mdio22_read(bp, MDIO_TX0_TX_DRIVER, &tx_driver);
2144
2145         /* bits [10:7] at lp_up2, positioned at [15:12] */
2146         lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
2147                    MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT) <<
2148                   MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT);
2149
2150         if ((lp_up2 != 0) &&
2151             (lp_up2 != (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK))) {
2152                 /* replace tx_driver bits [15:12] */
2153                 tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
2154                 tx_driver |= lp_up2;
2155                 bnx2x_mdio22_write(bp, MDIO_TX0_TX_DRIVER, tx_driver);
2156         }
2157 }
2158
/* Reconfigure the PBF (packet buffer) credits and pause behavior for
 * the newly negotiated link speed and flow control, then re-enable
 * the port.  The port is disabled around the credit update.
 */
static void bnx2x_pbf_update(struct bnx2x *bp)
{
	int port = bp->port;
	u32 init_crd, crd;
	u32 count = 1000;	/* up to 1000 polls of 5ms each */
	u32 pause = 0;

	/* disable port */
	REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);

	/* wait for init credit */
	init_crd = REG_RD(bp, PBF_REG_P0_INIT_CRD + port*4);
	crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
	DP(NETIF_MSG_LINK, "init_crd 0x%x  crd 0x%x\n", init_crd, crd);

	/* poll until the credit count drains back to the init value */
	while ((init_crd != crd) && count) {
		msleep(5);

		crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
		count--;
	}
	crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
	if (init_crd != crd)
		BNX2X_ERR("BUG! init_crd 0x%x != crd 0x%x\n", init_crd, crd);

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		pause = 1;
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, pause);
	if (pause) {
		/* update threshold */
		REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
		/* update init credit */
		init_crd = 778;		/* (800-18-4) */

	} else {
		/* threshold in 16-byte units */
		u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)/16;

		/* update threshold */
		REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh);
		/* update init credit - the extra credit depends on the
		 * line speed */
		switch (bp->line_speed) {
		case SPEED_10:
		case SPEED_100:
		case SPEED_1000:
			init_crd = thresh + 55 - 22;
			break;

		case SPEED_2500:
			init_crd = thresh + 138 - 22;
			break;

		case SPEED_10000:
			init_crd = thresh + 553 - 22;
			break;

		default:
			BNX2X_ERR("Invalid line_speed 0x%x\n",
				  bp->line_speed);
			break;
		}
	}
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, init_crd);
	DP(NETIF_MSG_LINK, "PBF updated to speed %d credit %d\n",
	   bp->line_speed, init_crd);

	/* probe the credit changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x0);

	/* enable port */
	REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0);
}
2232
2233 static void bnx2x_update_mng(struct bnx2x *bp)
2234 {
2235         if (!nomcp)
2236                 SHMEM_WR(bp, drv_fw_mb[bp->port].link_status,
2237                          bp->link_status);
2238 }
2239
/* Update the net-device carrier state and print the link state
 * (speed, duplex, flow control) to the kernel log.
 */
static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		/* append ", receive & transmit flow control ON" /
		 * ", receive flow control ON" / ", transmit flow
		 * control ON" as negotiated */
		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}
2270
/* Handle a link-up transition: update PBF credits, stop draining the
 * NIG egress, notify management firmware and log the new state.
 */
static void bnx2x_link_up(struct bnx2x *bp)
{
	int port = bp->port;

	/* PBF - link up */
	bnx2x_pbf_update(bp);

	/* disable drain */
	NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);

	/* update shared memory */
	bnx2x_update_mng(bp);

	/* indicate link up */
	bnx2x_link_report(bp);
}
2287
/* Handle a link-down transition: stop statistics, mark both MACs as
 * inactive, put the BigMac in reset, drain the NIG egress, notify
 * management firmware and log the state.
 */
static void bnx2x_link_down(struct bnx2x *bp)
{
	int port = bp->port;

	/* notify stats */
	if (bp->stats_state != STATS_STATE_DISABLE) {
		bp->stats_state = STATS_STATE_STOP;
		DP(BNX2X_MSG_STATS, "stats_state - STOP\n");
	}

	/* indicate link down - neither MAC is active */
	bp->phy_flags &= ~(PHY_BMAC_FLAG | PHY_EMAC_FLAG);

	/* reset BigMac */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
	       (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));

	/* ignore drain flag interrupt */
	/* activate nig drain */
	NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);

	/* update shared memory */
	bnx2x_update_mng(bp);

	/* indicate link down */
	bnx2x_link_report(bp);
}
2315
2316 static void bnx2x_init_mac_stats(struct bnx2x *bp);
2317
/* This function is called upon link interrupt.
 * It reads the negotiated link settings from the XGXS GP status
 * register, acknowledges the NIG interrupt, and - depending on the
 * resulting speed and external-PHY state - brings up the BMAC (10G+)
 * or the EMAC, or tears the link down.
 */
static void bnx2x_link_update(struct bnx2x *bp)
{
	u32 gp_status;
	int port = bp->port;
	int i;
	int link_10g;

	DP(NETIF_MSG_LINK, "port %x, is xgxs %x, stat_mask 0x%x,"
	   " int_mask 0x%x, saved_mask 0x%x, MI_INT %x, SERDES_LINK %x,"
	   " 10G %x, XGXS_LINK %x\n", port, (bp->phy_flags & PHY_XGXS_FLAG),
	   REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4),
	   REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4), bp->nig_mask,
	   REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
	   REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c),
	   REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
	   REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)
	);

	might_sleep();
	MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_GP_STATUS);
	/* avoid fast toggling - re-read the status 10 times over
	 * ~100ms and act on the final value */
	for (i = 0 ; i < 10 ; i++) {
		msleep(10);
		bnx2x_mdio22_read(bp, MDIO_GP_STATUS_TOP_AN_STATUS1,
				  &gp_status);
	}

	/* decode gp_status into line_speed/duplex/flow_ctrl/link_up */
	bnx2x_link_settings_status(bp, gp_status);

	/* anything 10 and over uses the bmac */
	link_10g = ((bp->line_speed >= SPEED_10000) &&
		    (bp->line_speed <= SPEED_16000));

	bnx2x_link_int_ack(bp, link_10g);

	/* link is up only if both local phy and external phy are up */
	if (bp->link_up && bnx2x_ext_phy_is_link_up(bp)) {
		if (link_10g) {
			bnx2x_bmac_enable(bp, 0);
			bnx2x_leds_set(bp, SPEED_10000);

		} else {
			bnx2x_emac_enable(bp);
			bnx2x_emac_program(bp);

			/* AN complete? */
			if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
				if (!(bp->phy_flags & PHY_SGMII_FLAG))
					bnx2x_set_sgmii_tx_driver(bp);
			}
		}
		bnx2x_link_up(bp);

	} else { /* link down */
		bnx2x_leds_unset(bp);
		bnx2x_link_down(bp);
	}

	bnx2x_init_mac_stats(bp);
}
2379
2380 /*
2381  * Init service functions
2382  */
2383
2384 static void bnx2x_set_aer_mmd(struct bnx2x *bp)
2385 {
2386         u16 offset = (bp->phy_flags & PHY_XGXS_FLAG) ?
2387                                         (bp->phy_addr + bp->ser_lane) : 0;
2388
2389         MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_AER_BLOCK);
2390         bnx2x_mdio22_write(bp, MDIO_AER_BLOCK_AER_REG, 0x3800 + offset);
2391 }
2392
2393 static void bnx2x_set_master_ln(struct bnx2x *bp)
2394 {
2395         u32 new_master_ln;
2396
2397         /* set the master_ln for AN */
2398         MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
2399         bnx2x_mdio22_read(bp, MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
2400                           &new_master_ln);
2401         bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
2402                            (new_master_ln | bp->ser_lane));
2403 }
2404
2405 static void bnx2x_reset_unicore(struct bnx2x *bp)
2406 {
2407         u32 mii_control;
2408         int i;
2409
2410         MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2411         bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
2412         /* reset the unicore */
2413         bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
2414                            (mii_control | MDIO_COMBO_IEEO_MII_CONTROL_RESET));
2415
2416         /* wait for the reset to self clear */
2417         for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) {
2418                 udelay(5);
2419
2420                 /* the reset erased the previous bank value */
2421                 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2422                 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
2423                                   &mii_control);
2424
2425                 if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) {
2426                         udelay(5);
2427                         return;
2428                 }
2429         }
2430
2431         BNX2X_ERR("BUG! unicore is still in reset!\n");
2432 }
2433
2434 static void bnx2x_set_swap_lanes(struct bnx2x *bp)
2435 {
2436         /* Each two bits represents a lane number:
2437            No swap is 0123 => 0x1b no need to enable the swap */
2438
2439         MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
2440         if (bp->rx_lane_swap != 0x1b) {
2441                 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_RX_LN_SWAP,
2442                                    (bp->rx_lane_swap |
2443                                     MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
2444                                    MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
2445         } else {
2446                 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
2447         }
2448
2449         if (bp->tx_lane_swap != 0x1b) {
2450                 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TX_LN_SWAP,
2451                                    (bp->tx_lane_swap |
2452                                     MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
2453         } else {
2454                 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
2455         }
2456 }
2457
2458 static void bnx2x_set_parallel_detection(struct bnx2x *bp)
2459 {
2460         u32 control2;
2461
2462         MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
2463         bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
2464                           &control2);
2465
2466         if (bp->autoneg & AUTONEG_PARALLEL) {
2467                 control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
2468         } else {
2469                 control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
2470         }
2471         bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
2472                            control2);
2473
2474         if (bp->phy_flags & PHY_XGXS_FLAG) {
2475                 DP(NETIF_MSG_LINK, "XGXS\n");
2476                 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_10G_PARALLEL_DETECT);
2477
2478                 bnx2x_mdio22_write(bp,
2479                                    MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
2480                                MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
2481
2482                 bnx2x_mdio22_read(bp,
2483                                  MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
2484                                   &control2);
2485
2486                 if (bp->autoneg & AUTONEG_PARALLEL) {
2487                         control2 |=
2488                     MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
2489                 } else {
2490                         control2 &=
2491                    ~MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
2492                 }
2493                 bnx2x_mdio22_write(bp,
2494                                  MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
2495                                    control2);
2496         }
2497 }
2498
/* Configure all autoneg-related registers according to bp->req_autoneg /
 * bp->autoneg: CL37 aneg, SGMII/fiber autodetect, TetonII+BAM next-page
 * aneg, and finally CL73 aneg (including the CL73 advertised speed).
 * Each feature is enabled only when AUTONEG_SPEED is requested AND the
 * corresponding capability bit is set; otherwise it is explicitly
 * disabled.
 */
static void bnx2x_set_autoneg(struct bnx2x *bp)
{
        u32 reg_val;

        /* CL37 Autoneg */
        MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
        bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
        if ((bp->req_autoneg & AUTONEG_SPEED) &&
            (bp->autoneg & AUTONEG_CL37)) {
                /* CL37 Autoneg Enabled */
                reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN;
        } else {
                /* CL37 Autoneg Disabled */
                reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
                             MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN);
        }
        bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);

        /* Enable/Disable Autodetection */
        MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
        bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
        /* signal-detect enable is always cleared here */
        reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN;

        if ((bp->req_autoneg & AUTONEG_SPEED) &&
            (bp->autoneg & AUTONEG_SGMII_FIBER_AUTODET)) {
                reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
        } else {
                reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
        }
        bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);

        /* Enable TetonII and BAM autoneg */
        MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_BAM_NEXT_PAGE);
        bnx2x_mdio22_read(bp, MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
                          &reg_val);
        if ((bp->req_autoneg & AUTONEG_SPEED) &&
            (bp->autoneg & AUTONEG_CL37) && (bp->autoneg & AUTONEG_BAM)) {
                /* Enable BAM aneg Mode and TetonII aneg Mode */
                reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
                            MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
        } else {
                /* TetonII and BAM Autoneg Disabled */
                reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
                             MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
        }
        bnx2x_mdio22_write(bp, MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
                           reg_val);

        /* Enable Clause 73 Aneg */
        if ((bp->req_autoneg & AUTONEG_SPEED) &&
            (bp->autoneg & AUTONEG_CL73)) {
                /* Enable BAM Station Manager */
                MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_USERB0);
                bnx2x_mdio22_write(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL1,
                                   (MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
                        MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN |
                        MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN));

                /* Merge CL73 and CL37 aneg resolution */
                bnx2x_mdio22_read(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL3,
                                  &reg_val);
                bnx2x_mdio22_write(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL3,
                                   (reg_val |
                        MDIO_CL73_USERB0_CL73_BAM_CTRL3_USE_CL73_HCD_MR));

                /* Set the CL73 AN speed */
                MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB1);
                bnx2x_mdio22_read(bp, MDIO_CL73_IEEEB1_AN_ADV2, &reg_val);
                /* In the SerDes we support only the 1G.
                   In the XGXS we support the 10G KX4
                   but we currently do not support the KR */
                if (bp->phy_flags & PHY_XGXS_FLAG) {
                        DP(NETIF_MSG_LINK, "XGXS\n");
                        /* 10G KX4 */
                        reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
                } else {
                        DP(NETIF_MSG_LINK, "SerDes\n");
                        /* 1000M KX */
                        reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
                }
                bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB1_AN_ADV2, reg_val);

                /* CL73 Autoneg Enabled */
                reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;
        } else {
                /* CL73 Autoneg Disabled */
                reg_val = 0;
        }
        /* either path writes the CL73 AN control last */
        MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
        bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
}
2590
2591 /* program SerDes, forced speed */
2592 static void bnx2x_program_serdes(struct bnx2x *bp)
2593 {
2594         u32 reg_val;
2595
2596         /* program duplex, disable autoneg */
2597         MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2598         bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
2599         reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX |
2600                      MDIO_COMBO_IEEO_MII_CONTROL_AN_EN);
2601         if (bp->req_duplex == DUPLEX_FULL)
2602                 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
2603         bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
2604
2605         /* program speed
2606            - needed only if the speed is greater than 1G (2.5G or 10G) */
2607         if (bp->req_line_speed > SPEED_1000) {
2608                 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
2609                 bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_MISC1, &reg_val);
2610                 /* clearing the speed value before setting the right speed */
2611                 reg_val &= ~MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK;
2612                 reg_val |= (MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M |
2613                             MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
2614                 if (bp->req_line_speed == SPEED_10000)
2615                         reg_val |=
2616                                 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4;
2617                 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_MISC1, reg_val);
2618         }
2619 }
2620
2621 static void bnx2x_set_brcm_cl37_advertisment(struct bnx2x *bp)
2622 {
2623         u32 val = 0;
2624
2625         /* configure the 48 bits for BAM AN */
2626         MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_OVER_1G);
2627
2628         /* set extended capabilities */
2629         if (bp->advertising & ADVERTISED_2500baseT_Full)
2630                 val |= MDIO_OVER_1G_UP1_2_5G;
2631         if (bp->advertising & ADVERTISED_10000baseT_Full)
2632                 val |= MDIO_OVER_1G_UP1_10G;
2633         bnx2x_mdio22_write(bp, MDIO_OVER_1G_UP1, val);
2634
2635         bnx2x_mdio22_write(bp, MDIO_OVER_1G_UP3, 0);
2636 }
2637
2638 static void bnx2x_set_ieee_aneg_advertisment(struct bnx2x *bp)
2639 {
2640         u32 an_adv;
2641
2642         /* for AN, we are always publishing full duplex */
2643         an_adv = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
2644
2645         /* set pause */
2646         switch (bp->pause_mode) {
2647         case PAUSE_SYMMETRIC:
2648                 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC;
2649                 break;
2650         case PAUSE_ASYMMETRIC:
2651                 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
2652                 break;
2653         case PAUSE_BOTH:
2654                 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
2655                 break;
2656         case PAUSE_NONE:
2657                 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
2658                 break;
2659         }
2660
2661         MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2662         bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_AUTO_NEG_ADV, an_adv);
2663 }
2664
2665 static void bnx2x_restart_autoneg(struct bnx2x *bp)
2666 {
2667         if (bp->autoneg & AUTONEG_CL73) {
2668                 /* enable and restart clause 73 aneg */
2669                 u32 an_ctrl;
2670
2671                 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
2672                 bnx2x_mdio22_read(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
2673                                   &an_ctrl);
2674                 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
2675                                    (an_ctrl |
2676                                     MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
2677                                 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
2678
2679         } else {
2680                 /* Enable and restart BAM/CL37 aneg */
2681                 u32 mii_control;
2682
2683                 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2684                 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
2685                                   &mii_control);
2686                 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
2687                                    (mii_control |
2688                                     MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
2689                                     MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
2690         }
2691 }
2692
/* Set up the unicore for SGMII operation: switch it from fiber to SGMII
 * slave mode, then either force speed/duplex (when AUTONEG_SPEED is not
 * requested) or enable and restart autoneg.
 */
static void bnx2x_initialize_sgmii_process(struct bnx2x *bp)
{
        u32 control1;

        /* in SGMII mode, the unicore is always slave */
        MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
        bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
                          &control1);
        control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
        /* set sgmii mode (and not fiber) */
        control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
                      MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
                      MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
        bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
                           control1);

        /* if forced speed */
        if (!(bp->req_autoneg & AUTONEG_SPEED)) {
                /* set speed, disable autoneg */
                u32 mii_control;

                MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
                bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
                                  &mii_control);
                /* clear AN enable plus the manual SGMII speed/duplex
                   bits before setting the requested values */
                mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
                               MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK |
                                 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);

                switch (bp->req_line_speed) {
                case SPEED_100:
                        mii_control |=
                                MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100;
                        break;
                case SPEED_1000:
                        mii_control |=
                                MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000;
                        break;
                case SPEED_10:
                        /* there is nothing to set for 10M */
                        break;
                default:
                        /* invalid speed for SGMII */
                        DP(NETIF_MSG_LINK, "Invalid req_line_speed 0x%x\n",
                           bp->req_line_speed);
                        break;
                }

                /* setting the full duplex */
                if (bp->req_duplex == DUPLEX_FULL)
                        mii_control |=
                                MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
                bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
                                   mii_control);

        } else { /* AN mode */
                /* enable and restart AN */
                bnx2x_restart_autoneg(bp);
        }
}
2752
2753 static void bnx2x_link_int_enable(struct bnx2x *bp)
2754 {
2755         int port = bp->port;
2756
2757         /* setting the status to report on link up
2758            for either XGXS or SerDes */
2759         bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
2760                        (NIG_XGXS0_LINK_STATUS |
2761                         NIG_STATUS_INTERRUPT_XGXS0_LINK10G |
2762                         NIG_SERDES0_LINK_STATUS));
2763
2764         if (bp->phy_flags & PHY_XGXS_FLAG) {
2765                 /* TBD -
2766                  * in force mode (not AN) we can enable just the relevant
2767                  * interrupt
2768                  * Even in AN we might enable only one according to the AN
2769                  * speed mask
2770                  */
2771                 bnx2x_bits_en(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
2772                               (NIG_MASK_XGXS0_LINK_STATUS |
2773                                NIG_MASK_XGXS0_LINK10G));
2774                 DP(NETIF_MSG_LINK, "enable XGXS interrupt\n");
2775
2776         } else { /* SerDes */
2777                 bnx2x_bits_en(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
2778                               NIG_MASK_SERDES0_LINK_STATUS);
2779                 DP(NETIF_MSG_LINK, "enable SerDes interrupt\n");
2780         }
2781 }
2782
2783 static void bnx2x_ext_phy_init(struct bnx2x *bp)
2784 {
2785         int port = bp->port;
2786         u32 ext_phy_type;
2787         u32 ext_phy_addr;
2788         u32 local_phy;
2789
2790         if (bp->phy_flags & PHY_XGXS_FLAG) {
2791                 local_phy = bp->phy_addr;
2792                 ext_phy_addr = ((bp->ext_phy_config &
2793                                  PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
2794                                 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
2795
2796                 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
2797                 switch (ext_phy_type) {
2798                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
2799                         DP(NETIF_MSG_LINK, "XGXS Direct\n");
2800                         break;
2801
2802                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
2803                         DP(NETIF_MSG_LINK, "XGXS 8705\n");
2804                         bnx2x_bits_en(bp,
2805                                       NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
2806                                       NIG_MASK_MI_INT);
2807                         DP(NETIF_MSG_LINK, "enabled external phy int\n");
2808
2809                         bp->phy_addr = ext_phy_type;
2810                         bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
2811                                             EXT_PHY_OPT_PMD_MISC_CNTL,
2812                                             0x8288);
2813                         bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
2814                                             EXT_PHY_OPT_PHY_IDENTIFIER,
2815                                             0x7fbf);
2816                         bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
2817                                             EXT_PHY_OPT_CMU_PLL_BYPASS,
2818                                             0x0100);
2819                         bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_WIS_DEVAD,
2820                                             EXT_PHY_OPT_LASI_CNTL, 0x1);
2821                         break;
2822
2823                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
2824                         DP(NETIF_MSG_LINK, "XGXS 8706\n");
2825                         bnx2x_bits_en(bp,
2826                                       NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
2827                                       NIG_MASK_MI_INT);
2828                         DP(NETIF_MSG_LINK, "enabled external phy int\n");
2829
2830                         bp->phy_addr = ext_phy_type;
2831                         bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
2832                                             EXT_PHY_OPT_PMD_DIGITAL_CNT,
2833                                             0x400);
2834                         bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
2835                                             EXT_PHY_OPT_LASI_CNTL, 0x1);
2836                         break;
2837
2838                 default:
2839                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
2840                            bp->ext_phy_config);
2841                         break;
2842                 }
2843                 bp->phy_addr = local_phy;
2844
2845         } else { /* SerDes */
2846 /*              ext_phy_addr = ((bp->ext_phy_config &
2847                                  PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK) >>
2848                                 PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT);
2849 */
2850                 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
2851                 switch (ext_phy_type) {
2852                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
2853                         DP(NETIF_MSG_LINK, "SerDes Direct\n");
2854                         break;
2855
2856                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
2857                         DP(NETIF_MSG_LINK, "SerDes 5482\n");
2858                         bnx2x_bits_en(bp,
2859                                       NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
2860                                       NIG_MASK_MI_INT);
2861                         DP(NETIF_MSG_LINK, "enabled external phy int\n");
2862                         break;
2863
2864                 default:
2865                         DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
2866                            bp->ext_phy_config);
2867                         break;
2868                 }
2869         }
2870 }
2871
2872 static void bnx2x_ext_phy_reset(struct bnx2x *bp)
2873 {
2874         u32 ext_phy_type;
2875         u32 ext_phy_addr;
2876         u32 local_phy;
2877
2878         if (bp->phy_flags & PHY_XGXS_FLAG) {
2879                 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
2880                 switch (ext_phy_type) {
2881                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
2882                         DP(NETIF_MSG_LINK, "XGXS Direct\n");
2883                         break;
2884
2885                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
2886                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
2887                         DP(NETIF_MSG_LINK, "XGXS 8705/6\n");
2888                         local_phy = bp->phy_addr;
2889                         ext_phy_addr = ((bp->ext_phy_config &
2890                                         PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
2891                                         PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
2892                         bp->phy_addr = (u8)ext_phy_addr;
2893                         bnx2x_mdio45_write(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
2894                                            EXT_PHY_OPT_CNTL, 0xa040);
2895                         bp->phy_addr = local_phy;
2896                         break;
2897
2898                 default:
2899                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
2900                            bp->ext_phy_config);
2901                         break;
2902                 }
2903
2904         } else { /* SerDes */
2905                 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
2906                 switch (ext_phy_type) {
2907                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
2908                         DP(NETIF_MSG_LINK, "SerDes Direct\n");
2909                         break;
2910
2911                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
2912                         DP(NETIF_MSG_LINK, "SerDes 5482\n");
2913                         break;
2914
2915                 default:
2916                         DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
2917                            bp->ext_phy_config);
2918                         break;
2919                 }
2920         }
2921 }
2922
/* Full link bring-up sequence: mask attentions, reset the external phy
 * and the unicore, configure lane swapping and parallel detection, then
 * program either the 1G/2.5G/10G fiber path (forced or autoneg) or the
 * SGMII path, and finally re-enable the link interrupts and init the
 * external phy.  The ordering of these steps matters - several settings
 * are erased by the unicore reset and must be re-applied afterwards.
 */
static void bnx2x_link_initialize(struct bnx2x *bp)
{
        int port = bp->port;

        /* disable attentions */
        bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
                       (NIG_MASK_XGXS0_LINK_STATUS |
                        NIG_MASK_XGXS0_LINK10G |
                        NIG_MASK_SERDES0_LINK_STATUS |
                        NIG_MASK_MI_INT));

        bnx2x_ext_phy_reset(bp);

        bnx2x_set_aer_mmd(bp);

        if (bp->phy_flags & PHY_XGXS_FLAG)
                bnx2x_set_master_ln(bp);

        /* reset the SerDes and wait for reset bit return low */
        bnx2x_reset_unicore(bp);

        bnx2x_set_aer_mmd(bp);

        /* setting the masterLn_def again after the reset */
        if (bp->phy_flags & PHY_XGXS_FLAG) {
                bnx2x_set_master_ln(bp);
                bnx2x_set_swap_lanes(bp);
        }

        /* Set Parallel Detect */
        if (bp->req_autoneg & AUTONEG_SPEED)
                bnx2x_set_parallel_detection(bp);

        /* on XGXS, a forced speed below 1G means SGMII operation */
        if (bp->phy_flags & PHY_XGXS_FLAG) {
                if (bp->req_line_speed &&
                    bp->req_line_speed < SPEED_1000) {
                        bp->phy_flags |= PHY_SGMII_FLAG;
                } else {
                        bp->phy_flags &= ~PHY_SGMII_FLAG;
                }
        }

        if (!(bp->phy_flags & PHY_SGMII_FLAG)) {
                u16 bank, rx_eq;

                /* rx equalizer value comes from the hw config */
                rx_eq = ((bp->serdes_config &
                          PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK) >>
                         PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT);

                DP(NETIF_MSG_LINK, "setting rx eq to %d\n", rx_eq);
                /* program the same rx eq boost into every RX lane bank */
                for (bank = MDIO_REG_BANK_RX0; bank <= MDIO_REG_BANK_RX_ALL;
                            bank += (MDIO_REG_BANK_RX1 - MDIO_REG_BANK_RX0)) {
                        MDIO_SET_REG_BANK(bp, bank);
                        bnx2x_mdio22_write(bp, MDIO_RX0_RX_EQ_BOOST,
                                           ((rx_eq &
                                MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK) |
                                MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL));
                }

                /* forced speed requested? */
                if (!(bp->req_autoneg & AUTONEG_SPEED)) {
                        DP(NETIF_MSG_LINK, "not SGMII, no AN\n");

                        /* disable autoneg */
                        bnx2x_set_autoneg(bp);

                        /* program speed and duplex */
                        bnx2x_program_serdes(bp);

                } else { /* AN_mode */
                        DP(NETIF_MSG_LINK, "not SGMII, AN\n");

                        /* AN enabled */
                        bnx2x_set_brcm_cl37_advertisment(bp);

                        /* program duplex & pause advertisement (for aneg) */
                        bnx2x_set_ieee_aneg_advertisment(bp);

                        /* enable autoneg */
                        bnx2x_set_autoneg(bp);

                        /* enable and restart AN */
                        bnx2x_restart_autoneg(bp);
                }

        } else { /* SGMII mode */
                DP(NETIF_MSG_LINK, "SGMII\n");

                bnx2x_initialize_sgmii_process(bp);
        }

        /* enable the interrupt */
        bnx2x_link_int_enable(bp);

        /* init ext phy and enable link state int */
        bnx2x_ext_phy_init(bp);
}
3020
3021 static void bnx2x_phy_deassert(struct bnx2x *bp)
3022 {
3023         int port = bp->port;
3024         u32 val;
3025
3026         if (bp->phy_flags & PHY_XGXS_FLAG) {
3027                 DP(NETIF_MSG_LINK, "XGXS\n");
3028                 val = XGXS_RESET_BITS;
3029
3030         } else { /* SerDes */
3031                 DP(NETIF_MSG_LINK, "SerDes\n");
3032                 val = SERDES_RESET_BITS;
3033         }
3034
3035         val = val << (port*16);
3036
3037         /* reset and unreset the SerDes/XGXS */
3038         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
3039         msleep(5);
3040         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
3041 }
3042
3043 static int bnx2x_phy_init(struct bnx2x *bp)
3044 {
3045         DP(NETIF_MSG_LINK, "started\n");
3046         if (CHIP_REV(bp) == CHIP_REV_FPGA) {
3047                 bp->phy_flags |= PHY_EMAC_FLAG;
3048                 bp->link_up = 1;
3049                 bp->line_speed = SPEED_10000;
3050                 bp->duplex = DUPLEX_FULL;
3051                 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + bp->port*4, 0);
3052                 bnx2x_emac_enable(bp);
3053                 bnx2x_link_report(bp);
3054                 return 0;
3055
3056         } else if (CHIP_REV(bp) == CHIP_REV_EMUL) {
3057                 bp->phy_flags |= PHY_BMAC_FLAG;
3058                 bp->link_up = 1;
3059                 bp->line_speed = SPEED_10000;
3060                 bp->duplex = DUPLEX_FULL;
3061                 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + bp->port*4, 0);
3062                 bnx2x_bmac_enable(bp, 0);
3063                 bnx2x_link_report(bp);
3064                 return 0;
3065
3066         } else {
3067                 bnx2x_phy_deassert(bp);
3068                 bnx2x_link_initialize(bp);
3069         }
3070
3071         return 0;
3072 }
3073
/* Take the link down for this port: mask link attentions, reset the
 * external phy and the SerDes/XGXS core, disable the NIG MAC interfaces
 * and enable egress drain so pending packets are discarded.
 */
static void bnx2x_link_reset(struct bnx2x *bp)
{
        int port = bp->port;

        /* disable attentions */
        bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
                       (NIG_MASK_XGXS0_LINK_STATUS |
                        NIG_MASK_XGXS0_LINK10G |
                        NIG_MASK_SERDES0_LINK_STATUS |
                        NIG_MASK_MI_INT));

        bnx2x_ext_phy_reset(bp);

        /* reset the SerDes/XGXS */
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
               (0x1ff << (port*16)));

        /* reset EMAC / BMAC and disable NIG interfaces */
        NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0);
        NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0);

        NIG_WR(NIG_REG_NIG_EMAC0_EN + port*4, 0);
        NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0);
        NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);

        /* drain egress traffic while the link is down */
        NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
}
3101
3102 #ifdef BNX2X_XGXS_LB
/* Put the XGXS in local loopback: for 10G via the CL73 AN control with
 * the loopback bit, for 1G via the combo MII control loopback bit.
 * Only built when BNX2X_XGXS_LB is defined (debug aid).
 */
static void bnx2x_set_xgxs_loopback(struct bnx2x *bp, int is_10g)
{
        int port = bp->port;

        if (is_10g) {
                u32 md_devad;

                DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");

                /* change the uni_phy_addr in the nig */
                /* NOTE(review): REG_RD is called here with an output
                 * pointer as a third argument; confirm this matches the
                 * REG_RD macro signature - this path is normally
                 * compiled out, so a mismatch would go unnoticed */
                REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18),
                       &md_devad);
                NIG_WR(NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5);

                /* change the aer mmd */
                MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_AER_BLOCK);
                bnx2x_mdio22_write(bp, MDIO_AER_BLOCK_AER_REG, 0x2800);

                /* config combo IEEE0 control reg for loopback */
                MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
                bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
                                   0x6041);

                /* set aer mmd back */
                bnx2x_set_aer_mmd(bp);

                /* and md_devad */
                NIG_WR(NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, md_devad);

        } else {
                u32 mii_control;

                DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");

                MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
                bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
                                  &mii_control);
                bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
                                   (mii_control |
                                    MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK));
        }
}
3145 #endif
3146
3147 /* end of PHY/MAC */
3148
3149 /* slow path */
3150
3151 /*
3152  * General service functions
3153  */
3154
3155 /* the slow path queue is odd since completions arrive on the fastpath ring */
/* Post one slow-path queue entry (under spq_lock) and ring the XSTORM
 * producer doorbell.  Returns 0 on success, -EBUSY when the ring is
 * full (and panics), -EIO when the driver already panicked.
 * Note: completions for these entries arrive on the fastpath ring.
 */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
                         u32 data_hi, u32 data_lo, int common)
{
        int port = bp->port;

        DP(NETIF_MSG_TIMER,
           "spe (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
           (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
           (void *)bp->spq_prod_bd - (void *)bp->spq), command,
           HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return -EIO;
#endif

        spin_lock(&bp->spq_lock);

        if (!bp->spq_left) {
                BNX2X_ERR("BUG! SPQ ring full!\n");
                spin_unlock(&bp->spq_lock);
                bnx2x_panic();
                return -EBUSY;
        }
        /* CID needs the port number to be encoded in it */
        bp->spq_prod_bd->hdr.conn_and_cmd_data =
                        cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
                                     HW_CID(bp, cid)));
        bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
        /* common ramrods are flagged in the header type field */
        if (common)
                bp->spq_prod_bd->hdr.type |=
                        cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

        bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
        bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);

        bp->spq_left--;

        /* advance the producer, wrapping to the ring start at the end */
        if (bp->spq_prod_bd == bp->spq_last_bd) {
                bp->spq_prod_bd = bp->spq;
                bp->spq_prod_idx = 0;
                DP(NETIF_MSG_TIMER, "end of spq\n");

        } else {
                bp->spq_prod_bd++;
                bp->spq_prod_idx++;
        }

        /* ring the doorbell: publish the new producer index to XSTORM */
        REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(port),
               bp->spq_prod_idx);

        spin_unlock(&bp->spq_lock);
        return 0;
}
3210
3211 /* acquire split MCP access lock register */
3212 static int bnx2x_lock_alr(struct bnx2x *bp)
3213 {
3214         int rc = 0;
3215         u32 i, j, val;
3216
3217         might_sleep();
3218         i = 100;
3219         for (j = 0; j < i*10; j++) {
3220                 val = (1UL << 31);
3221                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
3222                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
3223                 if (val & (1L << 31))
3224                         break;
3225
3226                 msleep(5);
3227         }
3228
3229         if (!(val & (1L << 31))) {
3230                 BNX2X_ERR("Cannot acquire nvram interface\n");
3231
3232                 rc = -EBUSY;
3233         }
3234
3235         return rc;
3236 }
3237
3238 /* Release split MCP access lock register */
3239 static void bnx2x_unlock_alr(struct bnx2x *bp)
3240 {
3241         u32 val = 0;
3242
3243         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
3244 }
3245
3246 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
3247 {
3248         struct host_def_status_block *def_sb = bp->def_status_blk;
3249         u16 rc = 0;
3250
3251         barrier(); /* status block is written to by the chip */
3252
3253         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
3254                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
3255                 rc |= 1;
3256         }
3257         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
3258                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
3259                 rc |= 2;
3260         }
3261         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
3262                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
3263                 rc |= 4;
3264         }
3265         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
3266                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
3267                 rc |= 8;
3268         }
3269         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
3270                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
3271                 rc |= 16;
3272         }
3273         return rc;
3274 }
3275
3276 /*
3277  * slow path service functions
3278  */
3279
/* Handle newly asserted attention bits: mask them in the AEU, record
 * them in attn_state, service the hard-wired attentions (NIG/link,
 * SW timer, GPIOs, general attentions) and finally write the set to
 * the IGU attn-bits-set register.
 */
static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = bp->port;
	u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_PORT_BASE * port) * 8;
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				   NIG_REG_MASK_INTERRUPT_PORT0;

	/* an asserted bit must currently be unmasked and not yet recorded */
	if (~bp->aeu_mask & (asserted & 0xff))
		BNX2X_ERR("IGU ERROR\n");
	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
	   bp->aeu_mask, asserted);
	/* mask the newly asserted sources in the AEU */
	bp->aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "after masking: aeu_mask %x\n", bp->aeu_mask);

	REG_WR(bp, aeu_addr, bp->aeu_mask);

	bp->attn_state |= asserted;

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {
			u32 nig_status_port;
			u32 nig_int_addr = port ?
					NIG_REG_STATUS_INTERRUPT_PORT1 :
					NIG_REG_STATUS_INTERRUPT_PORT0;

			/* save and clear the NIG interrupt mask while the
			   link event is processed */
			bp->nig_mask = REG_RD(bp, nig_mask_addr);
			REG_WR(bp, nig_mask_addr, 0);

			/* NOTE(review): value read but otherwise unused -
			   the read may clear/latch the status; confirm */
			nig_status_port = REG_RD(bp, nig_int_addr);
			bnx2x_link_update(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		/* general attentions 1-3 belong to port 0, 4-6 to port 1;
		   acknowledge by clearing the corresponding AEU register */
		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n",
	   asserted, BAR_IGU_INTMEM + igu_addr);
	REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC)
		REG_WR(bp, nig_mask_addr, bp->nig_mask);
}
3368
3369 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3370 {
3371         int port = bp->port;
3372         int index;
3373         struct attn_route attn;
3374         struct attn_route group_mask;
3375         u32 reg_addr;
3376         u32 val;
3377
3378         /* need to take HW lock because MCP or other port might also
3379            try to handle this event */
3380         bnx2x_lock_alr(bp);
3381
3382         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3383         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3384         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3385         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3386         DP(NETIF_MSG_HW, "attn %llx\n", (unsigned long long)attn.sig[0]);
3387
3388         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3389                 if (deasserted & (1 << index)) {
3390                         group_mask = bp->attn_group[index];
3391
3392                         DP(NETIF_MSG_HW, "group[%d]: %llx\n", index,
3393                            (unsigned long long)group_mask.sig[0]);
3394
3395                         if (attn.sig[3] & group_mask.sig[3] &
3396                             EVEREST_GEN_ATTN_IN_USE_MASK) {
3397
3398                                 if (attn.sig[3] & BNX2X_MC_ASSERT_BITS) {
3399
3400                                         BNX2X_ERR("MC assert!\n");
3401                                         bnx2x_panic();
3402
3403                                 } else if (attn.sig[3] & BNX2X_MCP_ASSERT) {
3404
3405                                         BNX2X_ERR("MCP assert!\n");
3406                                         REG_WR(bp,
3407                                              MISC_REG_AEU_GENERAL_ATTN_11, 0);
3408                                         bnx2x_mc_assert(bp);
3409
3410                                 } else {
3411                                         BNX2X_ERR("UNKOWEN HW ASSERT!\n");
3412                                 }
3413                         }
3414
3415                         if (attn.sig[1] & group_mask.sig[1] &
3416                             BNX2X_DOORQ_ASSERT) {
3417
3418                                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3419                                 BNX2X_ERR("DB hw attention 0x%x\n", val);
3420                                 /* DORQ discard attention */
3421                                 if (val & 0x2)
3422                                         BNX2X_ERR("FATAL error from DORQ\n");
3423                         }
3424
3425                         if (attn.sig[2] & group_mask.sig[2] &
3426                             AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3427
3428                                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3429                                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3430                                 /* CFC error attention */
3431                                 if (val & 0x2)
3432                                         BNX2X_ERR("FATAL error from CFC\n");
3433                         }
3434
3435                         if (attn.sig[2] & group_mask.sig[2] &
3436                             AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3437
3438                                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3439                                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3440                                 /* RQ_USDMDP_FIFO_OVERFLOW */
3441                                 if (val & 0x18000)
3442                                         BNX2X_ERR("FATAL error from PXP\n");
3443                         }
3444
3445                         if (attn.sig[3] & group_mask.sig[3] &
3446                             EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3447
3448                                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
3449                                        0x7ff);
3450                                 DP(NETIF_MSG_HW, "got latched bits 0x%x\n",
3451                                    attn.sig[3]);
3452                         }
3453
3454                         if ((attn.sig[0] & group_mask.sig[0] &
3455                                                 HW_INTERRUT_ASSERT_SET_0) ||
3456                             (attn.sig[1] & group_mask.sig[1] &
3457                                                 HW_INTERRUT_ASSERT_SET_1) ||
3458                             (attn.sig[2] & group_mask.sig[2] &
3459                                                 HW_INTERRUT_ASSERT_SET_2))
3460                                 BNX2X_ERR("FATAL HW block attention\n");
3461
3462                         if ((attn.sig[0] & group_mask.sig[0] &
3463                                                 HW_PRTY_ASSERT_SET_0) ||
3464                             (attn.sig[1] & group_mask.sig[1] &
3465                                                 HW_PRTY_ASSERT_SET_1) ||
3466                             (attn.sig[2] & group_mask.sig[2] &
3467                                                 HW_PRTY_ASSERT_SET_2))
3468                                 BNX2X_ERR("FATAL HW block parity attention\n");
3469                 }
3470         }
3471
3472         bnx2x_unlock_alr(bp);
3473
3474         reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_PORT_BASE * port) * 8;
3475
3476         val = ~deasserted;
3477 /*      DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
3478            val, BAR_IGU_INTMEM + reg_addr); */
3479         REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val);
3480
3481         if (bp->aeu_mask & (deasserted & 0xff))
3482                 BNX2X_ERR("IGU BUG\n");
3483         if (~bp->attn_state & deasserted)
3484                 BNX2X_ERR("IGU BUG\n");
3485
3486         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3487                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
3488
3489         DP(NETIF_MSG_HW, "aeu_mask %x\n", bp->aeu_mask);
3490         bp->aeu_mask |= (deasserted & 0xff);
3491
3492         DP(NETIF_MSG_HW, "new mask %x\n", bp->aeu_mask);
3493         REG_WR(bp, reg_addr, bp->aeu_mask);
3494
3495         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3496         bp->attn_state &= ~deasserted;
3497         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3498 }
3499
3500 static void bnx2x_attn_int(struct bnx2x *bp)
3501 {
3502         /* read local copy of bits */
3503         u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
3504         u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
3505         u32 attn_state = bp->attn_state;
3506
3507         /* look for changed bits */
3508         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
3509         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
3510
3511         DP(NETIF_MSG_HW,
3512            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
3513            attn_bits, attn_ack, asserted, deasserted);
3514
3515         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3516                 BNX2X_ERR("bad attention state\n");
3517
3518         /* handle bits that were raised */
3519         if (asserted)
3520                 bnx2x_attn_int_asserted(bp, asserted);
3521
3522         if (deasserted)
3523                 bnx2x_attn_int_deasserted(bp, deasserted);
3524 }
3525
/* Deferred (process context) slow path handler, scheduled from the
 * slow path MSI-X interrupt.  Determines which default status block
 * indices advanced, services HW attentions and CStorm events, then
 * acks all five status block segments in the IGU, re-enabling the
 * interrupt on the last ack.
 */
static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	/* bitmask of segments whose index moved (1 - attn, 2 - CStorm, ...) */
	status = bnx2x_update_dsb_idx(bp);
	if (status == 0)
		BNX2X_ERR("spurious slowpath interrupt!\n");

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	if (status & 0x1) {
		/* HW attentions */
		bnx2x_attn_int(bp);
	}

	/* CStorm events: query_stats, cfc delete ramrods */
	if (status & 0x2)
		bp->stat_pending = 0;

	/* NOTE(review): def_att_idx is acked raw while the other four
	 * indices go through le16_to_cpu - confirm attn_bits_index is
	 * already kept in CPU byte order */
	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	/* the last ack re-enables the slow path interrupt */
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
}
3563
3564 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3565 {
3566         struct net_device *dev = dev_instance;
3567         struct bnx2x *bp = netdev_priv(dev);
3568
3569         /* Return here if interrupt is disabled */
3570         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3571                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3572                 return IRQ_HANDLED;
3573         }
3574
3575         bnx2x_ack_sb(bp, 16, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
3576
3577 #ifdef BNX2X_STOP_ON_ERROR
3578         if (unlikely(bp->panic))
3579                 return IRQ_HANDLED;
3580 #endif
3581
3582         schedule_work(&bp->sp_task);
3583
3584         return IRQ_HANDLED;
3585 }
3586
3587 /* end of slow path */
3588
3589 /* Statistics */
3590
3591 /****************************************************************************
3592 * Macros
3593 ****************************************************************************/
3594
/* Accumulate the delta of hw counter 's' into estats member 't' and
 * remember the new raw value (expects 'new', 'old' and 'estats' in scope).
 */
#define UPDATE_STAT(s, t) \
	do { \
		estats->t += new->s - old->s; \
		old->s = new->s; \
	} while (0)
3600
/* sum[hi:lo] += add[hi:lo] (64-bit add split over two u32 halves) */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		/* ?: binds looser than +, so the carry term must be \
		 * parenthesized; the old form dropped a_hi entirely */ \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)
3607
/* difference = minuend - subtrahend, as two u32 halves:
 * d[hi:lo] = m[hi:lo] - s[hi:lo]; the result is clamped to 0 when the
 * subtrahend is larger (counters only ever shrink on wrap/reset).
 */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) {	/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) {	/* we can 'loan' 1 */ \
				d_hi--; \
				/* borrow: lo = m_lo - s_lo mod 2^32 */ \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else {	/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else {		/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
			    d_hi = 0; \
			    d_lo = 0; \
			} else {	/* m_hi >= s_hi */ \
			    d_hi = m_hi - s_hi; \
			    d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)
3630
/* minuend -= subtrahend, in place (clamps to 0 on underflow, see DIFF_64) */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
	do { \
		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
	} while (0)
3636
/* Accumulate the 64-bit delta of hw counter s_hi:s_lo into estats
 * member t_hi:t_lo and remember the new raw value (expects 'new',
 * 'old', 'estats' and a struct regp 'diff' in scope).
 */
#define UPDATE_STAT64(s_hi, t_hi, s_lo, t_lo) \
	do { \
		DIFF_64(diff.hi, new->s_hi, old->s_hi, \
			diff.lo, new->s_lo, old->s_lo); \
		old->s_hi = new->s_hi; \
		old->s_lo = new->s_lo; \
		ADD_64(estats->t_hi, diff.hi, \
		       estats->t_lo, diff.lo); \
	} while (0)
3646
/* sum[hi:lo] += add (32-bit addend; carry propagated into the high half) */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)
3653
/* Add the 32-bit hw counter 'new->s' into the 64-bit estats pair
 * t_hi:t_lo (expects 'new' and 'estats' in scope).
 */
#define UPDATE_EXTEND_STAT(s, t_hi, t_lo) \
	do { \
		ADD_EXTEND_64(estats->t_hi, estats->t_lo, new->s); \
	} while (0)
3658
/* Add the delta of the le32 tclient counter 's' since the last sample
 * into the 64-bit estats pair t_hi:t_lo and remember the new raw value
 * (expects 'tclient', 'old_tclient', 'estats' and a u32 'diff' in scope).
 */
#define UPDATE_EXTEND_TSTAT(s, t_hi, t_lo) \
	do { \
		diff = le32_to_cpu(tclient->s) - old_tclient->s; \
		old_tclient->s = le32_to_cpu(tclient->s); \
		ADD_EXTEND_64(estats->t_hi, estats->t_lo, diff); \
	} while (0)
3665
3666 /*
3667  * General service functions
3668  */
3669
/* Return the 64-bit value stored at 'hiref' as a hi/lo u32 pair
 * (high word first).  On 32-bit kernels only the low word fits in a
 * long, so the high word is dropped there.
 */
static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}
3681
3682 /*
3683  * Init service functions
3684  */
3685
/* Build the chain of DMAE commands that harvests statistics:
 * optionally a PCI->GRC copy of the eth stats to the firmware mailbox
 * (fw_mb), then - only while the link is up - GRC->PCI copies of the
 * BMAC or EMAC hw counters, and finally the NIG stats.  Commands are
 * written to the slow path buffer starting at executer_idx 0.
 */
static void bnx2x_init_mac_stats(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = bp->port;
	int loader_idx = port * 8;
	u32 opcode;
	u32 mac_addr;

	bp->executer_idx = 0;
	if (bp->fw_mb) {
		/* MCP: copy eth_stats (past the leading u32) up to
		   mac_stx_end into the firmware mailbox area */
		opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));

		/* with link up the completion is written to a GRC "go"
		   register so the next command in the chain is kicked */
		if (bp->link_up)
			opcode |= (DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE);

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, eth_stats) +
					   sizeof(u32));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, eth_stats) +
					   sizeof(u32));
		dmae->dst_addr_lo = bp->fw_mb >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = (offsetof(struct bnx2x_eth_stats, mac_stx_end) -
			     sizeof(u32)) >> 2;
		if (bp->link_up) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			/* no chaining - no completion */
			dmae->comp_addr_lo = 0;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 0;
		}
	}

	if (!bp->link_up) {
		/* no need to collect statistics in link down */
		return;
	}

	/* common opcode for the GRC->PCI hw counter copies below */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));

	if (bp->phy_flags & PHY_BMAC_FLAG) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
					offsetof(struct bmac_stats, rx_gr64));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
					offsetof(struct bmac_stats, rx_gr64));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->phy_flags & PHY_EMAC_FLAG) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
					   offsetof(struct emac_stats,
						    rx_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
					   offsetof(struct emac_stats,
						    rx_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
					   offsetof(struct emac_stats,
						    tx_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
					   offsetof(struct emac_stats,
						    tx_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG: last command in the chain - its completion writes the
	   marker value into nig.done in host memory */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig));
	dmae->len = (sizeof(struct nig_stats) - 2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig) +
				    offsetof(struct nig_stats, done));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig) +
				    offsetof(struct nig_stats, done));
	dmae->comp_val = 0xffffffff;
}
3857
/* Reset statistics bookkeeping and program the storm firmware with the
 * per-port collect flags and the host address of the fw_stats buffer.
 */
static void bnx2x_init_stats(struct bnx2x *bp)
{
	int port = bp->port;

	bp->stats_state = STATS_STATE_DISABLE;
	bp->executer_idx = 0;

	/* baseline value for later BRB discard delta computation */
	bp->old_brb_discard = REG_RD(bp,
				     NIG_REG_STAT0_BRB_DISCARD + port*0x38);

	/* clear the shadow copies used for delta accumulation */
	memset(&bp->old_bmac, 0, sizeof(struct bmac_stats));
	memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));

	/* stats flags: 1 in XSTORM and TSTORM, 0 in CSTORM
	   (each flag is a 64 bit field, hence the '+ 4' high word) */
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port), 1);
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port), 1);
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port), 0);
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);

	/* tell XSTORM and TSTORM where to DMA the stats query results */
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
}
3898
3899 static void bnx2x_stop_stats(struct bnx2x *bp)
3900 {
3901         might_sleep();
3902         if (bp->stats_state != STATS_STATE_DISABLE) {
3903                 int timeout = 10;
3904
3905                 bp->stats_state = STATS_STATE_STOP;
3906                 DP(BNX2X_MSG_STATS, "stats_state - STOP\n");
3907
3908                 while (bp->stats_state != STATS_STATE_DISABLE) {
3909                         if (!timeout) {
3910                                 BNX2X_ERR("timeout waiting for stats stop\n");
3911                                 break;
3912                         }
3913                         timeout--;
3914                         msleep(100);
3915                 }
3916         }
3917         DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n");
3918 }
3919
3920 /*
3921  * Statistics service functions
3922  */
3923
/* Fold the freshly transferred BMAC hardware counters into the
 * driver's cumulative ethernet statistics.
 *
 * NOTE(review): the UPDATE_STAT/UPDATE_STAT64 macros appear to
 * reference the locals 'new', 'old', 'diff' and 'estats' by name -
 * do not rename them; verify against the macro definitions.
 */
static void bnx2x_update_bmac_stats(struct bnx2x *bp)
{
	struct regp diff;
	struct regp sum;
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac);
	struct bmac_stats *old = &bp->old_bmac;
	struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);

	/* sum accumulates the multicast + broadcast packet deltas */
	sum.hi = 0;
	sum.lo = 0;

	UPDATE_STAT64(tx_gtbyt.hi, total_bytes_transmitted_hi,
		      tx_gtbyt.lo, total_bytes_transmitted_lo);

	UPDATE_STAT64(tx_gtmca.hi, total_multicast_packets_transmitted_hi,
		      tx_gtmca.lo, total_multicast_packets_transmitted_lo);
	ADD_64(sum.hi, diff.hi, sum.lo, diff.lo);

	UPDATE_STAT64(tx_gtgca.hi, total_broadcast_packets_transmitted_hi,
		      tx_gtgca.lo, total_broadcast_packets_transmitted_lo);
	ADD_64(sum.hi, diff.hi, sum.lo, diff.lo);

	/* tx_gtpkt counts all transmitted packets; subtract the
	 * multicast + broadcast sum to leave only unicast */
	UPDATE_STAT64(tx_gtpkt.hi, total_unicast_packets_transmitted_hi,
		      tx_gtpkt.lo, total_unicast_packets_transmitted_lo);
	SUB_64(estats->total_unicast_packets_transmitted_hi, sum.hi,
	       estats->total_unicast_packets_transmitted_lo, sum.lo);

	UPDATE_STAT(tx_gtxpf.lo, pause_xoff_frames_transmitted);
	UPDATE_STAT(tx_gt64.lo, frames_transmitted_64_bytes);
	UPDATE_STAT(tx_gt127.lo, frames_transmitted_65_127_bytes);
	UPDATE_STAT(tx_gt255.lo, frames_transmitted_128_255_bytes);
	UPDATE_STAT(tx_gt511.lo, frames_transmitted_256_511_bytes);
	UPDATE_STAT(tx_gt1023.lo, frames_transmitted_512_1023_bytes);
	UPDATE_STAT(tx_gt1518.lo, frames_transmitted_1024_1522_bytes);
	/* the four jumbo-size HW buckets are all folded into the single
	 * 1523-9022 software counter */
	UPDATE_STAT(tx_gt2047.lo, frames_transmitted_1523_9022_bytes);
	UPDATE_STAT(tx_gt4095.lo, frames_transmitted_1523_9022_bytes);
	UPDATE_STAT(tx_gt9216.lo, frames_transmitted_1523_9022_bytes);
	UPDATE_STAT(tx_gt16383.lo, frames_transmitted_1523_9022_bytes);

	UPDATE_STAT(rx_grfcs.lo, crc_receive_errors);
	UPDATE_STAT(rx_grund.lo, runt_packets_received);
	UPDATE_STAT(rx_grovr.lo, stat_Dot3statsFramesTooLong);
	UPDATE_STAT(rx_grxpf.lo, pause_xoff_frames_received);
	UPDATE_STAT(rx_grxcf.lo, control_frames_received);
	/* UPDATE_STAT(rx_grxpf.lo, control_frames_received); */
	UPDATE_STAT(rx_grfrg.lo, error_runt_packets_received);
	UPDATE_STAT(rx_grjbr.lo, error_jabber_packets_received);

	UPDATE_STAT64(rx_grerb.hi, stat_IfHCInBadOctets_hi,
		      rx_grerb.lo, stat_IfHCInBadOctets_lo);
	UPDATE_STAT64(tx_gtufl.hi, stat_IfHCOutBadOctets_hi,
		      tx_gtufl.lo, stat_IfHCOutBadOctets_lo);
	UPDATE_STAT(tx_gterr.lo, stat_Dot3statsInternalMacTransmitErrors);
	/* UPDATE_STAT(rx_grxpf.lo, stat_XoffStateEntered); */
	/* mirror the received xoff frame count - the commented-out line
	 * above suggests no dedicated HW counter is used here */
	estats->stat_XoffStateEntered = estats->pause_xoff_frames_received;
}
3980
/* Fold the freshly transferred EMAC hardware counters into the
 * driver's cumulative ethernet statistics.
 *
 * NOTE(review): UPDATE_EXTEND_STAT appears to reference the locals
 * 'new' and 'estats' by name - do not rename them; verify against
 * the macro definition.
 */
static void bnx2x_update_emac_stats(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac);
	struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);

	/* 64-bit extended TX byte/packet counters */
	UPDATE_EXTEND_STAT(tx_ifhcoutoctets, total_bytes_transmitted_hi,
					     total_bytes_transmitted_lo);
	UPDATE_EXTEND_STAT(tx_ifhcoutucastpkts,
					total_unicast_packets_transmitted_hi,
					total_unicast_packets_transmitted_lo);
	UPDATE_EXTEND_STAT(tx_ifhcoutmulticastpkts,
				      total_multicast_packets_transmitted_hi,
				      total_multicast_packets_transmitted_lo);
	UPDATE_EXTEND_STAT(tx_ifhcoutbroadcastpkts,
				      total_broadcast_packets_transmitted_hi,
				      total_broadcast_packets_transmitted_lo);

	/* plain 32-bit TX counters are accumulated directly */
	estats->pause_xon_frames_transmitted += new->tx_outxonsent;
	estats->pause_xoff_frames_transmitted += new->tx_outxoffsent;
	estats->single_collision_transmit_frames +=
				new->tx_dot3statssinglecollisionframes;
	estats->multiple_collision_transmit_frames +=
				new->tx_dot3statsmultiplecollisionframes;
	estats->late_collision_frames += new->tx_dot3statslatecollisions;
	estats->excessive_collision_frames +=
				new->tx_dot3statsexcessivecollisions;
	estats->frames_transmitted_64_bytes += new->tx_etherstatspkts64octets;
	estats->frames_transmitted_65_127_bytes +=
				new->tx_etherstatspkts65octetsto127octets;
	estats->frames_transmitted_128_255_bytes +=
				new->tx_etherstatspkts128octetsto255octets;
	estats->frames_transmitted_256_511_bytes +=
				new->tx_etherstatspkts256octetsto511octets;
	estats->frames_transmitted_512_1023_bytes +=
				new->tx_etherstatspkts512octetsto1023octets;
	estats->frames_transmitted_1024_1522_bytes +=
				new->tx_etherstatspkts1024octetsto1522octet;
	estats->frames_transmitted_1523_9022_bytes +=
				new->tx_etherstatspktsover1522octets;

	/* RX error and pause counters */
	estats->crc_receive_errors += new->rx_dot3statsfcserrors;
	estats->alignment_errors += new->rx_dot3statsalignmenterrors;
	estats->false_carrier_detections += new->rx_falsecarriererrors;
	estats->runt_packets_received += new->rx_etherstatsundersizepkts;
	estats->stat_Dot3statsFramesTooLong += new->rx_dot3statsframestoolong;
	estats->pause_xon_frames_received += new->rx_xonpauseframesreceived;
	estats->pause_xoff_frames_received += new->rx_xoffpauseframesreceived;
	estats->control_frames_received += new->rx_maccontrolframesreceived;
	estats->error_runt_packets_received += new->rx_etherstatsfragments;
	estats->error_jabber_packets_received += new->rx_etherstatsjabbers;

	UPDATE_EXTEND_STAT(rx_ifhcinbadoctets, stat_IfHCInBadOctets_hi,
					       stat_IfHCInBadOctets_lo);
	UPDATE_EXTEND_STAT(tx_ifhcoutbadoctets, stat_IfHCOutBadOctets_hi,
						stat_IfHCOutBadOctets_lo);
	estats->stat_Dot3statsInternalMacTransmitErrors +=
				new->tx_dot3statsinternalmactransmiterrors;
	estats->stat_Dot3StatsCarrierSenseErrors +=
				new->rx_dot3statscarriersenseerrors;
	estats->stat_Dot3StatsDeferredTransmissions +=
				new->tx_dot3statsdeferredtransmissions;
	estats->stat_FlowControlDone += new->tx_flowcontroldone;
	estats->stat_XoffStateEntered += new->rx_xoffstateentered;
}
4045
/* Harvest the statistics delivered by firmware (tstorm/xstorm) and by
 * the NIG DMAE transfer into the driver's eth_stats block.
 *
 * Returns 0 on success; -1, -2 or -3 when the NIG, tstorm or xstorm
 * completion marker (respectively) shows the counters have not been
 * refreshed since the last query - nothing is copied in that case.
 *
 * NOTE(review): 'old_tclient' and 'diff' look unused here but are
 * presumably referenced by name inside UPDATE_EXTEND_TSTAT - confirm
 * against the macro definition before touching them.
 */
static int bnx2x_update_storm_stats(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_common_stats *tstats = &stats->tstorm_common;
	struct tstorm_per_client_stats *tclient =
						&tstats->client_statistics[0];
	struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
	struct xstorm_common_stats *xstats = &stats->xstorm_common;
	struct nig_stats *nstats = bnx2x_sp(bp, nig);
	struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
	u32 diff;

	/* are DMAE stats valid? */
	if (nstats->done != 0xffffffff) {
		DP(BNX2X_MSG_STATS, "stats not updated by dmae\n");
		return -1;
	}

	/* are storm stats valid? */
	if (tstats->done.hi != 0xffffffff) {
		DP(BNX2X_MSG_STATS, "stats not updated by tstorm\n");
		return -2;
	}
	if (xstats->done.hi != 0xffffffff) {
		DP(BNX2X_MSG_STATS, "stats not updated by xstorm\n");
		return -3;
	}

	/* valid bytes exclude error bytes; total includes them */
	estats->total_bytes_received_hi =
	estats->valid_bytes_received_hi =
				le32_to_cpu(tclient->total_rcv_bytes.hi);
	estats->total_bytes_received_lo =
	estats->valid_bytes_received_lo =
				le32_to_cpu(tclient->total_rcv_bytes.lo);
	ADD_64(estats->total_bytes_received_hi,
	       le32_to_cpu(tclient->rcv_error_bytes.hi),
	       estats->total_bytes_received_lo,
	       le32_to_cpu(tclient->rcv_error_bytes.lo));

	UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received_hi,
					total_unicast_packets_received_lo);
	UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received_hi,
					total_multicast_packets_received_lo);
	UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received_hi,
					total_broadcast_packets_received_lo);

	/* per-size RX buckets are not available from the storms */
	estats->frames_received_64_bytes = MAC_STX_NA;
	estats->frames_received_65_127_bytes = MAC_STX_NA;
	estats->frames_received_128_255_bytes = MAC_STX_NA;
	estats->frames_received_256_511_bytes = MAC_STX_NA;
	estats->frames_received_512_1023_bytes = MAC_STX_NA;
	estats->frames_received_1024_1522_bytes = MAC_STX_NA;
	estats->frames_received_1523_9022_bytes = MAC_STX_NA;

	estats->x_total_sent_bytes_hi =
				le32_to_cpu(xstats->total_sent_bytes.hi);
	estats->x_total_sent_bytes_lo =
				le32_to_cpu(xstats->total_sent_bytes.lo);
	estats->x_total_sent_pkts = le32_to_cpu(xstats->total_sent_pkts);

	estats->t_rcv_unicast_bytes_hi =
				le32_to_cpu(tclient->rcv_unicast_bytes.hi);
	estats->t_rcv_unicast_bytes_lo =
				le32_to_cpu(tclient->rcv_unicast_bytes.lo);
	estats->t_rcv_broadcast_bytes_hi =
				le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
	estats->t_rcv_broadcast_bytes_lo =
				le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
	estats->t_rcv_multicast_bytes_hi =
				le32_to_cpu(tclient->rcv_multicast_bytes.hi);
	estats->t_rcv_multicast_bytes_lo =
				le32_to_cpu(tclient->rcv_multicast_bytes.lo);
	estats->t_total_rcv_pkt = le32_to_cpu(tclient->total_rcv_pkts);

	estats->checksum_discard = le32_to_cpu(tclient->checksum_discard);
	estats->packets_too_big_discard =
				le32_to_cpu(tclient->packets_too_big_discard);
	estats->jabber_packets_received = estats->packets_too_big_discard +
					  estats->stat_Dot3statsFramesTooLong;
	estats->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
	estats->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
	estats->mac_discard = le32_to_cpu(tclient->mac_discard);
	estats->mac_filter_discard = le32_to_cpu(tstats->mac_filter_discard);
	estats->xxoverflow_discard = le32_to_cpu(tstats->xxoverflow_discard);
	estats->brb_truncate_discard =
				le32_to_cpu(tstats->brb_truncate_discard);

	/* NIG BRB discard is a free-running HW counter - accumulate the
	 * delta against the last snapshot */
	estats->brb_discard += nstats->brb_discard - bp->old_brb_discard;
	bp->old_brb_discard = nstats->brb_discard;

	estats->brb_packet = nstats->brb_packet;
	estats->brb_truncate = nstats->brb_truncate;
	estats->flow_ctrl_discard = nstats->flow_ctrl_discard;
	estats->flow_ctrl_octets = nstats->flow_ctrl_octets;
	estats->flow_ctrl_packet = nstats->flow_ctrl_packet;
	estats->mng_discard = nstats->mng_discard;
	estats->mng_octet_inp = nstats->mng_octet_inp;
	estats->mng_octet_out = nstats->mng_octet_out;
	estats->mng_packet_inp = nstats->mng_packet_inp;
	estats->mng_packet_out = nstats->mng_packet_out;
	estats->pbf_octets = nstats->pbf_octets;
	estats->pbf_packet = nstats->pbf_packet;
	estats->safc_inp = nstats->safc_inp;

	/* re-arm the validity markers for the next query */
	xstats->done.hi = 0;
	tstats->done.hi = 0;
	nstats->done = 0;

	return 0;
}
4159
4160 static void bnx2x_update_net_stats(struct bnx2x *bp)
4161 {
4162         struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
4163         struct net_device_stats *nstats = &bp->dev->stats;
4164
4165         nstats->rx_packets =
4166                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4167                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4168                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4169
4170         nstats->tx_packets =
4171                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4172                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4173                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4174
4175         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4176
4177         nstats->tx_bytes =
4178                 bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4179
4180         nstats->rx_dropped = estats->checksum_discard +
4181                                    estats->mac_discard;
4182         nstats->tx_dropped = 0;
4183
4184         nstats->multicast =
4185                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
4186
4187         nstats->collisions =
4188                 estats->single_collision_transmit_frames +
4189                 estats->multiple_collision_transmit_frames +
4190                 estats->late_collision_frames +
4191                 estats->excessive_collision_frames;
4192
4193         nstats->rx_length_errors = estats->runt_packets_received +
4194                                    estats->jabber_packets_received;
4195         nstats->rx_over_errors = estats->no_buff_discard;
4196         nstats->rx_crc_errors = estats->crc_receive_errors;
4197         nstats->rx_frame_errors = estats->alignment_errors;
4198         nstats->rx_fifo_errors = estats->brb_discard +
4199                                        estats->brb_truncate_discard;
4200         nstats->rx_missed_errors = estats->xxoverflow_discard;
4201
4202         nstats->rx_errors = nstats->rx_length_errors +
4203                             nstats->rx_over_errors +
4204                             nstats->rx_crc_errors +
4205                             nstats->rx_frame_errors +
4206                             nstats->rx_fifo_errors;
4207
4208         nstats->tx_aborted_errors = estats->late_collision_frames +
4209                                           estats->excessive_collision_frames;
4210         nstats->tx_carrier_errors = estats->false_carrier_detections;
4211         nstats->tx_fifo_errors = 0;
4212         nstats->tx_heartbeat_errors = 0;
4213         nstats->tx_window_errors = 0;
4214
4215         nstats->tx_errors = nstats->tx_aborted_errors +
4216                             nstats->tx_carrier_errors;
4217
4218         estats->mac_stx_start = ++estats->mac_stx_end;
4219 }
4220
/* Periodic statistics worker (driven by bnx2x_timer):
 *  - harvest the storm/NIG counters and, on success, the active MAC's
 *    counters, then refresh the net_device statistics;
 *  - optionally dump a debug snapshot (NETIF_MSG_TIMER);
 *  - re-trigger the DMAE executer chain via the loader command;
 *  - post the next statistics ramrod to the firmware.
 */
static void bnx2x_update_stats(struct bnx2x *bp)
{
	int i;

	/* 0 return means storm stats were valid and have been copied */
	if (!bnx2x_update_storm_stats(bp)) {

		if (bp->phy_flags & PHY_BMAC_FLAG) {
			bnx2x_update_bmac_stats(bp);

		} else if (bp->phy_flags & PHY_EMAC_FLAG) {
			bnx2x_update_emac_stats(bp);

		} else { /* unreached */
			BNX2X_ERR("no MAC active\n");
			return;
		}

		bnx2x_update_net_stats(bp);
	}

	if (bp->msglevel & NETIF_MSG_TIMER) {
		struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
		struct net_device_stats *nstats = &bp->dev->stats;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
				  "  tx pkt (%lx)\n",
		       bnx2x_tx_avail(bp->fp),
		       *bp->fp->tx_cons_sb, nstats->tx_packets);
		printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
				  "  rx pkt (%lx)\n",
		       (u16)(*bp->fp->rx_cons_sb - bp->fp->rx_comp_cons),
		       *bp->fp->rx_cons_sb, nstats->rx_packets);
		printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u\n",
		       netif_queue_stopped(bp->dev)? "Xoff" : "Xon",
		       estats->driver_xoff, estats->brb_discard);
		printk(KERN_DEBUG "tstats: checksum_discard %u  "
			"packets_too_big_discard %u  no_buff_discard %u  "
			"mac_discard %u  mac_filter_discard %u  "
			"xxovrflow_discard %u  brb_truncate_discard %u  "
			"ttl0_discard %u\n",
		       estats->checksum_discard,
		       estats->packets_too_big_discard,
		       estats->no_buff_discard, estats->mac_discard,
		       estats->mac_filter_discard, estats->xxoverflow_discard,
		       estats->brb_truncate_discard, estats->ttl0_discard);

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(BNX2X_MSG_STATS, "state is %x, returning\n", bp->state);
		return;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	/* loader: a DMAE command that copies the first queued command
	 * from the slowpath buffer into the HW command memory and kicks
	 * its GO register, restarting the executer chain */
	if (bp->executer_idx) {
		struct dmae_command *dmae = &bp->dmae;
		int port = bp->port;
		int loader_idx = port * 8;

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		dmae->len--;	/* !!! for A0/1 only */
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		bnx2x_post_dmae(bp, dmae, loader_idx);
	}

	/* a pending STOP request completes here */
	if (bp->stats_state != STATS_STATE_ENABLE) {
		bp->stats_state = STATS_STATE_DISABLE;
		return;
	}

	if (bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0, 0, 0, 0) == 0) {
		/* stats ramrod has it's own slot on the spe */
		bp->spq_left++;
		bp->stat_pending = 1;
	}
}
4329
4330 static void bnx2x_timer(unsigned long data)
4331 {
4332         struct bnx2x *bp = (struct bnx2x *) data;
4333
4334         if (!netif_running(bp->dev))
4335                 return;
4336
4337         if (atomic_read(&bp->intr_sem) != 0)
4338                 goto bnx2x_restart_timer;
4339
4340         if (poll) {
4341                 struct bnx2x_fastpath *fp = &bp->fp[0];
4342                 int rc;
4343
4344                 bnx2x_tx_int(fp, 1000);
4345                 rc = bnx2x_rx_int(fp, 1000);
4346         }
4347
4348         if (!nomcp && (bp->bc_ver >= 0x040003)) {
4349                 int port = bp->port;
4350                 u32 drv_pulse;
4351                 u32 mcp_pulse;
4352
4353                 ++bp->fw_drv_pulse_wr_seq;
4354                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4355                 /* TBD - add SYSTEM_TIME */
4356                 drv_pulse = bp->fw_drv_pulse_wr_seq;
4357                 SHMEM_WR(bp, drv_fw_mb[port].drv_pulse_mb, drv_pulse);
4358
4359                 mcp_pulse = (SHMEM_RD(bp, drv_fw_mb[port].mcp_pulse_mb) &
4360                              MCP_PULSE_SEQ_MASK);
4361                 /* The delta between driver pulse and mcp response
4362                  * should be 1 (before mcp response) or 0 (after mcp response)
4363                  */
4364                 if ((drv_pulse != mcp_pulse) &&
4365                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4366                         /* someone lost a heartbeat... */
4367                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4368                                   drv_pulse, mcp_pulse);
4369                 }
4370         }
4371
4372         if (bp->stats_state == STATS_STATE_DISABLE)
4373                 goto bnx2x_restart_timer;
4374
4375         bnx2x_update_stats(bp);
4376
4377 bnx2x_restart_timer:
4378         mod_timer(&bp->timer, jiffies + bp->current_interval);
4379 }
4380
4381 /* end of Statistics */
4382
4383 /* nic init */
4384
4385 /*
4386  * nic init service functions
4387  */
4388
/* Initialize a (non-default) host status block: point the USTORM and
 * CSTORM firmware at its DMA address, start with host coalescing
 * disabled on every index, and ack the IGU to enable interrupts for
 * this status block.
 */
static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int id)
{
	int port = bp->port;
	u64 section;
	int index;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_SB_HOST_SB_ADDR_OFFSET(port, id), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, id)) + 4),
	       U64_HI(section));

	/* 0x1 = host coalescing disabled for this index */
	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, id, index), 0x1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_OFFSET(port, id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, id)) + 4),
	       U64_HI(section));

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, id, index), 0x1);

	bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
4428
/* Initialize the default status block: read the AEU attention signal
 * groups and mask for this port, program the HC with the attention
 * message address and the status block number, then point each of the
 * four storms (U/C/T/X) at its section of the block with host
 * coalescing initially disabled on every index.  Finishes by acking
 * the IGU to enable interrupts for the block.
 */
static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int id)
{
	int port = bp->port;
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = id;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	/* cache the four enable registers of each of the 3 AEU groups
	 * (0x10 bytes apart per group, 4 bytes per register) */
	for (index = 0; index < 3; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						     reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	bp->aeu_mask = REG_RD(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					  MISC_REG_AEU_MASK_ATTN_FUNC_0));

	/* tell the HC where the attention section lives */
	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
	       U64_HI(section));
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port),
	       BNX2X_BTR);

	/* 0x1 = host coalescing disabled for this index */
	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
	       U64_HI(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port),
	       BNX2X_BTR);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
	       U64_HI(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port),
	       BNX2X_BTR);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
	       U64_HI(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port),
	       BNX2X_BTR);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);

	bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
4541
/* Program the per-queue host coalescing timeouts from the current
 * rx/tx interrupt tick settings; a tick value of 0 disables
 * coalescing on that index.
 *
 * NOTE(review): the /12 presumably converts the tick setting into the
 * HC timeout units - confirm against the HC documentation.
 */
static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = bp->port;
	int i;

	for_each_queue(bp, i) {

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HC_TIMEOUT_OFFSET(port, i,
						   HC_INDEX_U_ETH_RX_CQ_CONS),
			bp->rx_ticks_int/12);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, i,
						   HC_INDEX_U_ETH_RX_CQ_CONS),
			 bp->rx_ticks_int ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_OFFSET(port, i,
						   HC_INDEX_C_ETH_TX_CQ_CONS),
			bp->tx_ticks_int/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, i,
						   HC_INDEX_C_ETH_TX_CQ_CONS),
			 bp->tx_ticks_int ? 0 : 1);
	}
}
4570
/* Initialize the Rx BD rings and Rx completion queues (RCQ) of every
 * fastpath queue, pre-fill the rings with freshly allocated skbs and
 * publish the initial producer values to the chip.
 */
static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	u16 ring_prod;
	int i, j;
	int port = bp->port;

	/* usable Rx buffer length = MTU + Rx offset + Ethernet overhead;
	   the allocated buffer adds 64 bytes of slack on top of that */
	bp->rx_buf_use_size = bp->dev->mtu;

	bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
	bp->rx_buf_size = bp->rx_buf_use_size + 64;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;

		/* chain the BD pages into a ring: the last two BDs of each
		   page (hence "- 2") hold the address of the next page,
		   with the last page wrapping back to page 0 */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));

		}

		/* chain the RCQ pages the same way: the last CQE of each
		   page is a "next page" element */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					  BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					  BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* rx completion queue */
		fp->rx_comp_cons = ring_prod = 0;

		/* pre-allocate skbs; a partial fill is tolerated (logged),
		   the ring simply starts with fewer buffers */
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs\n", i);
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			BUG_TRAP(ring_prod > i);
		}

		fp->rx_bd_prod = fp->rx_comp_prod = ring_prod;
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning! this will generate an interrupt (to the TSTORM) */
		/* must only be done when chip is initialized */
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_RCQ_PROD_OFFSET(port, j), ring_prod);
		if (j != 0)
			continue;

		/* queue 0 only: program the USTORM memory-workaround
		   address with this queue's RCQ base */
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(port),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(port) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}
4645
4646 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4647 {
4648         int i, j;
4649
4650         for_each_queue(bp, j) {
4651                 struct bnx2x_fastpath *fp = &bp->fp[j];
4652
4653                 for (i = 1; i <= NUM_TX_RINGS; i++) {
4654                         struct eth_tx_bd *tx_bd =
4655                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4656
4657                         tx_bd->addr_hi =
4658                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4659                                            BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4660                         tx_bd->addr_lo =
4661                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4662                                            BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4663                 }
4664
4665                 fp->tx_pkt_prod = 0;
4666                 fp->tx_pkt_cons = 0;
4667                 fp->tx_bd_prod = 0;
4668                 fp->tx_bd_cons = 0;
4669                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4670                 fp->tx_pkt = 0;
4671         }
4672 }
4673
4674 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4675 {
4676         int port = bp->port;
4677
4678         spin_lock_init(&bp->spq_lock);
4679
4680         bp->spq_left = MAX_SPQ_PENDING;
4681         bp->spq_prod_idx = 0;
4682         bp->dsb_sp_prod_idx = 0;
4683         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4684         bp->spq_prod_bd = bp->spq;
4685         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4686
4687         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PAGE_BASE_OFFSET(port),
4688                U64_LO(bp->spq_mapping));
4689         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PAGE_BASE_OFFSET(port) + 4,
4690                U64_HI(bp->spq_mapping));
4691
4692         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(port),
4693                bp->spq_prod_idx);
4694 }
4695
4696 static void bnx2x_init_context(struct bnx2x *bp)
4697 {
4698         int i;
4699
4700         for_each_queue(bp, i) {
4701                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4702                 struct bnx2x_fastpath *fp = &bp->fp[i];
4703
4704                 context->xstorm_st_context.tx_bd_page_base_hi =
4705                                                 U64_HI(fp->tx_desc_mapping);
4706                 context->xstorm_st_context.tx_bd_page_base_lo =
4707                                                 U64_LO(fp->tx_desc_mapping);
4708                 context->xstorm_st_context.db_data_addr_hi =
4709                                                 U64_HI(fp->tx_prods_mapping);
4710                 context->xstorm_st_context.db_data_addr_lo =
4711                                                 U64_LO(fp->tx_prods_mapping);
4712
4713                 context->ustorm_st_context.rx_bd_page_base_hi =
4714                                                 U64_HI(fp->rx_desc_mapping);
4715                 context->ustorm_st_context.rx_bd_page_base_lo =
4716                                                 U64_LO(fp->rx_desc_mapping);
4717                 context->ustorm_st_context.status_block_id = i;
4718                 context->ustorm_st_context.sb_index_number =
4719                                                 HC_INDEX_U_ETH_RX_CQ_CONS;
4720                 context->ustorm_st_context.rcq_base_address_hi =
4721                                                 U64_HI(fp->rx_comp_mapping);
4722                 context->ustorm_st_context.rcq_base_address_lo =
4723                                                 U64_LO(fp->rx_comp_mapping);
4724                 context->ustorm_st_context.flags =
4725                                 USTORM_ETH_ST_CONTEXT_ENABLE_MC_ALIGNMENT;
4726                 context->ustorm_st_context.mc_alignment_size = 64;
4727                 context->ustorm_st_context.num_rss = bp->num_queues;
4728
4729                 context->cstorm_st_context.sb_index_number =
4730                                                 HC_INDEX_C_ETH_TX_CQ_CONS;
4731                 context->cstorm_st_context.status_block_id = i;
4732
4733                 context->xstorm_ag_context.cdu_reserved =
4734                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4735                                                CDU_REGION_NUMBER_XCM_AG,
4736                                                ETH_CONNECTION_TYPE);
4737                 context->ustorm_ag_context.cdu_usage =
4738                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4739                                                CDU_REGION_NUMBER_UCM_AG,
4740                                                ETH_CONNECTION_TYPE);
4741         }
4742 }
4743
4744 static void bnx2x_init_ind_table(struct bnx2x *bp)
4745 {
4746         int port = bp->port;
4747         int i;
4748
4749         if (!is_multi(bp))
4750                 return;
4751
4752         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4753                 REG_WR8(bp, TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
4754                         i % bp->num_queues);
4755
4756         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4757 }
4758
4759 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4760 {
4761         int mode = bp->rx_mode;
4762         int port = bp->port;
4763         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4764         int i;
4765
4766         DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);
4767
4768         switch (mode) {
4769         case BNX2X_RX_MODE_NONE: /* no Rx */
4770                 tstorm_mac_filter.ucast_drop_all = 1;
4771                 tstorm_mac_filter.mcast_drop_all = 1;
4772                 tstorm_mac_filter.bcast_drop_all = 1;
4773                 break;
4774         case BNX2X_RX_MODE_NORMAL:
4775                 tstorm_mac_filter.bcast_accept_all = 1;
4776                 break;
4777         case BNX2X_RX_MODE_ALLMULTI:
4778                 tstorm_mac_filter.mcast_accept_all = 1;
4779                 tstorm_mac_filter.bcast_accept_all = 1;
4780                 break;
4781         case BNX2X_RX_MODE_PROMISC:
4782                 tstorm_mac_filter.ucast_accept_all = 1;
4783                 tstorm_mac_filter.mcast_accept_all = 1;
4784                 tstorm_mac_filter.bcast_accept_all = 1;
4785                 break;
4786         default:
4787                 BNX2X_ERR("bad rx mode (%d)\n", mode);
4788         }
4789
4790         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4791                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4792                        TSTORM_MAC_FILTER_CONFIG_OFFSET(port) + i * 4,
4793                        ((u32 *)&tstorm_mac_filter)[i]);
4794
4795 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4796                    ((u32 *)&tstorm_mac_filter)[i]); */
4797         }
4798 }
4799
4800 static void bnx2x_set_client_config(struct bnx2x *bp, int client_id)
4801 {
4802 #ifdef BCM_VLAN
4803         int mode = bp->rx_mode;
4804 #endif
4805         int port = bp->port;
4806         struct tstorm_eth_client_config tstorm_client = {0};
4807
4808         tstorm_client.mtu = bp->dev->mtu;
4809         tstorm_client.statistics_counter_id = 0;
4810         tstorm_client.config_flags =
4811                 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4812 #ifdef BCM_VLAN
4813         if (mode && bp->vlgrp) {
4814                 tstorm_client.config_flags |=
4815                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4816                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4817         }
4818 #endif
4819         tstorm_client.drop_flags = (TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR |
4820                                     TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR |
4821                                     TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR |
4822                                     TSTORM_ETH_CLIENT_CONFIG_DROP_MAC_ERR);
4823
4824         REG_WR(bp, BAR_TSTRORM_INTMEM +
4825                TSTORM_CLIENT_CONFIG_OFFSET(port, client_id),
4826                ((u32 *)&tstorm_client)[0]);
4827         REG_WR(bp, BAR_TSTRORM_INTMEM +
4828                TSTORM_CLIENT_CONFIG_OFFSET(port, client_id) + 4,
4829                ((u32 *)&tstorm_client)[1]);
4830
4831 /*      DP(NETIF_MSG_IFUP, "tstorm_client: 0x%08x 0x%08x\n",
4832            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]); */
4833 }
4834
4835 static void bnx2x_init_internal(struct bnx2x *bp)
4836 {
4837         int port = bp->port;
4838         struct tstorm_eth_function_common_config tstorm_config = {0};
4839         struct stats_indication_flags stats_flags = {0};
4840         int i;
4841
4842         if (is_multi(bp)) {
4843                 tstorm_config.config_flags = MULTI_FLAGS;
4844                 tstorm_config.rss_result_mask = MULTI_MASK;
4845         }
4846
4847         REG_WR(bp, BAR_TSTRORM_INTMEM +
4848                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(port),
4849                (*(u32 *)&tstorm_config));
4850
4851 /*      DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n",
4852            (*(u32 *)&tstorm_config)); */
4853
4854         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4855         bnx2x_set_storm_rx_mode(bp);
4856
4857         for_each_queue(bp, i)
4858                 bnx2x_set_client_config(bp, i);
4859
4860
4861         stats_flags.collect_eth = cpu_to_le32(1);
4862
4863         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port),
4864                ((u32 *)&stats_flags)[0]);
4865         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port) + 4,
4866                ((u32 *)&stats_flags)[1]);
4867
4868         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port),
4869                ((u32 *)&stats_flags)[0]);
4870         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port) + 4,
4871                ((u32 *)&stats_flags)[1]);
4872
4873         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port),
4874                ((u32 *)&stats_flags)[0]);
4875         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port) + 4,
4876                ((u32 *)&stats_flags)[1]);
4877
4878 /*      DP(NETIF_MSG_IFUP, "stats_flags: 0x%08x 0x%08x\n",
4879            ((u32 *)&stats_flags)[0], ((u32 *)&stats_flags)[1]); */
4880 }
4881
4882 static void bnx2x_nic_init(struct bnx2x *bp)
4883 {
4884         int i;
4885
4886         for_each_queue(bp, i) {
4887                 struct bnx2x_fastpath *fp = &bp->fp[i];
4888
4889                 fp->state = BNX2X_FP_STATE_CLOSED;
4890                 DP(NETIF_MSG_IFUP, "bnx2x_init_sb(%p,%p,%d);\n",
4891                    bp, fp->status_blk, i);
4892                 fp->index = i;
4893                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping, i);
4894         }
4895
4896         bnx2x_init_def_sb(bp, bp->def_status_blk,
4897                           bp->def_status_blk_mapping, 0x10);
4898         bnx2x_update_coalesce(bp);
4899         bnx2x_init_rx_rings(bp);
4900         bnx2x_init_tx_ring(bp);
4901         bnx2x_init_sp_ring(bp);
4902         bnx2x_init_context(bp);
4903         bnx2x_init_internal(bp);
4904         bnx2x_init_stats(bp);
4905         bnx2x_init_ind_table(bp);
4906         bnx2x_enable_int(bp);
4907
4908 }
4909
4910 /* end of nic init */
4911
4912 /*
4913  * gzip service functions
4914  */
4915
4916 static int bnx2x_gunzip_init(struct bnx2x *bp)
4917 {
4918         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4919                                               &bp->gunzip_mapping);
4920         if (bp->gunzip_buf  == NULL)
4921                 goto gunzip_nomem1;
4922
4923         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4924         if (bp->strm  == NULL)
4925                 goto gunzip_nomem2;
4926
4927         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4928                                       GFP_KERNEL);
4929         if (bp->strm->workspace == NULL)
4930                 goto gunzip_nomem3;
4931
4932         return 0;
4933
4934 gunzip_nomem3:
4935         kfree(bp->strm);
4936         bp->strm = NULL;
4937
4938 gunzip_nomem2:
4939         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4940                             bp->gunzip_mapping);
4941         bp->gunzip_buf = NULL;
4942
4943 gunzip_nomem1:
4944         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
4945                " uncompression\n", bp->dev->name);
4946         return -ENOMEM;
4947 }
4948
4949 static void bnx2x_gunzip_end(struct bnx2x *bp)
4950 {
4951         kfree(bp->strm->workspace);
4952
4953         kfree(bp->strm);
4954         bp->strm = NULL;
4955
4956         if (bp->gunzip_buf) {
4957                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4958                                     bp->gunzip_mapping);
4959                 bp->gunzip_buf = NULL;
4960         }
4961 }
4962
4963 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4964 {
4965         int n, rc;
4966
4967         /* check gzip header */
4968         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4969                 return -EINVAL;
4970
4971         n = 10;
4972
4973 #define FNAME                           0x8
4974
4975         if (zbuf[3] & FNAME)
4976                 while ((zbuf[n++] != 0) && (n < len));
4977
4978         bp->strm->next_in = zbuf + n;
4979         bp->strm->avail_in = len - n;
4980         bp->strm->next_out = bp->gunzip_buf;
4981         bp->strm->avail_out = FW_BUF_SIZE;
4982
4983         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4984         if (rc != Z_OK)
4985                 return rc;
4986
4987         rc = zlib_inflate(bp->strm, Z_FINISH);
4988         if ((rc != Z_OK) && (rc != Z_STREAM_END))
4989                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4990                        bp->dev->name, bp->strm->msg);
4991
4992         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4993         if (bp->gunzip_outlen & 0x3)
4994                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4995                                     " gunzip_outlen (%d) not aligned\n",
4996                        bp->dev->name, bp->gunzip_outlen);
4997         bp->gunzip_outlen >>= 2;
4998
4999         zlib_inflateEnd(bp->strm);
5000
5001         if (rc == Z_STREAM_END)
5002                 return 0;
5003
5004         return rc;
5005 }
5006
5007 /* nic load/unload */
5008
5009 /*
5010  * general service functions
5011  */
5012
/* send a NIG loopback debug packet: two 16-byte debug words are
 * written to the NIG debug-packet register (via DMAE or three
 * indirect writes), the first flagged SOP, the second EOP with a
 * non-IP protocol value.  Used by bnx2x_int_mem_test() below.
 */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
#ifdef USE_DMAE
	u32 wb_write[3];
#endif

	/* Ethernet source and destination addresses */
#ifdef USE_DMAE
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
#else
	REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB, 0x55555555);
	REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 4, 0x55555555);
	/* SOP */
	REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 8, 0x20);
#endif

	/* NON-IP protocol */
#ifdef USE_DMAE
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
#else
	REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB, 0x09000000);
	REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 4, 0x55555555);
	/* EOP, eop_bvalid = 0 */
	REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 8, 0x10);
#endif
}
5046
/* some of the internal memories
 * are not directly readable from the driver
 * to test them we send debug packets
 * through the NIG loopback and watch the NIG/PRS packet counters.
 * Returns 0 on success, a negative step-specific code on the first
 * failing stage.
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	/* emulation/FPGA platforms run much slower - scale the poll
	   timeouts accordingly */
	switch (CHIP_REV(bp)) {
	case CHIP_REV_EMUL:
		factor = 200;
		break;
	case CHIP_REV_FPGA:
		factor = 120;
		break;
	default:
		factor = 1;
		break;
	}

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);

	/*  Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {
#ifdef BNX2X_DMAE_RD
		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
#else
		val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
		REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
#endif
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);

		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x3);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x3);
	msleep(50);
	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {
#ifdef BNX2X_DMAE_RD
		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
#else
		val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
		REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
#endif
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
#ifndef BCM_ISCSI
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}
5212
/* Clear (unmask) the attention interrupt masks of the HW blocks so
 * block-level errors are reported.  Note the non-zero values: PXP2
 * keeps 0x480000 masked and PBF keeps bits 3-4 masked; the SEM and
 * MISC masks are deliberately left commented out.
 */
static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	REG_WR(bp, PXP2_REG_PXP2_INT_MASK, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
}
5248
5249 static int bnx2x_function_init(struct bnx2x *bp, int mode)
5250 {
5251         int func = bp->port;
5252         int port = func ? PORT1 : PORT0;
5253         u32 val, i;
5254 #ifdef USE_DMAE
5255         u32 wb_write[2];
5256 #endif
5257
5258         DP(BNX2X_MSG_MCP, "function is %d  mode is %x\n", func, mode);
5259         if ((func != 0) && (func != 1)) {
5260                 BNX2X_ERR("BAD function number (%d)\n", func);
5261                 return -ENODEV;
5262         }
5263
5264         bnx2x_gunzip_init(bp);
5265
5266         if (mode & 0x1) {       /* init common */
5267                 DP(BNX2X_MSG_MCP, "starting common init  func %d  mode %x\n",
5268                    func, mode);
5269                 REG_WR(bp, MISC_REG_RESET_REG_1, 0xffffffff);
5270                 REG_WR(bp, MISC_REG_RESET_REG_2, 0xfffc);
5271                 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5272
5273                 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5274                 msleep(30);
5275                 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5276
5277                 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5278                 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5279
5280                 bnx2x_init_pxp(bp);
5281
5282                 if (CHIP_REV(bp) == CHIP_REV_Ax) {
5283                         /* enable HW interrupt from PXP on USDM
5284                            overflow bit 16 on INT_MASK_0 */
5285                         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5286                 }
5287
5288 #ifdef __BIG_ENDIAN
5289                 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5290                 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5291                 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5292                 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5293                 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5294                 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
5295
5296 /*              REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5297                 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5298                 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5299                 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5300                 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5301 #endif
5302
5303 #ifndef BCM_ISCSI
5304                 /* set NIC mode */
5305                 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5306 #endif
5307
5308                 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 5);
5309 #ifdef BCM_ISCSI
5310                 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5311                 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5312                 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5313 #endif
5314
5315                 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5316
5317                 /* let the HW do it's magic ... */
5318                 msleep(100);
5319                 /* finish PXP init
5320                    (can be moved up if we want to use the DMAE) */
5321                 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5322                 if (val != 1) {
5323                         BNX2X_ERR("PXP2 CFG failed\n");
5324                         return -EBUSY;
5325                 }
5326
5327                 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5328                 if (val != 1) {
5329                         BNX2X_ERR("PXP2 RD_INIT failed\n");
5330                         return -EBUSY;
5331                 }
5332
5333                 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5334                 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5335
5336                 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5337
5338                 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5339                 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5340                 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5341                 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5342
5343 #ifdef BNX2X_DMAE_RD
5344                 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5345                 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5346                 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5347                 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5348 #else
5349                 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER);
5350                 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER + 4);
5351                 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER + 8);
5352                 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER);
5353                 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER + 4);
5354                 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER + 8);
5355                 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER);
5356                 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER + 4);
5357                 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER + 8);
5358                 REG_RD(bp, USEM_REG_PASSIVE_BUFFER);
5359                 REG_RD(bp, USEM_REG_PASSIVE_BUFFER + 4);
5360                 REG_RD(bp, USEM_REG_PASSIVE_BUFFER + 8);
5361 #endif
5362                 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5363                 /* soft reset pulse */
5364                 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5365                 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5366
5367 #ifdef BCM_ISCSI
5368                 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5369 #endif
5370                 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5371                 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_BITS);
5372                 if (CHIP_REV(bp) == CHIP_REV_Ax) {
5373                         /* enable hw interrupt from doorbell Q */
5374                         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5375                 }
5376
5377                 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5378
5379                 if (CHIP_REV_IS_SLOW(bp)) {
5380                         /* fix for emulation and FPGA for no pause */
5381                         REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5382                         REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5383                         REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5384                         REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5385                 }
5386
5387                 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5388
5389                 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5390                 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5391                 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5392                 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5393
5394                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
5395                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
5396                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
5397                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
5398
5399                 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5400                 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5401                 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5402                 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5403
5404                 /* sync semi rtc */
5405                 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5406                        0x80000000);
5407                 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5408                        0x80000000);
5409
5410                 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5411                 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5412                 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5413
5414                 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5415                 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5416                         REG_WR(bp, i, 0xc0cac01a);
5417                         /* TODO: replace with something meaningful */
5418                 }
5419                 /* SRCH COMMON comes here */
5420                 REG_WR(bp, SRC_REG_SOFT_RST, 0);
5421
5422                 if (sizeof(union cdu_context) != 1024) {
5423                         /* we currently assume that a context is 1024 bytes */
5424                         printk(KERN_ALERT PFX "please adjust the size of"
5425                                " cdu_context(%ld)\n",
5426                                (long)sizeof(union cdu_context));
5427                 }
5428                 val = (4 << 24) + (0 << 12) + 1024;
5429                 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5430                 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5431
5432                 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5433                 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5434
5435                 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5436                 bnx2x_init_block(bp, MISC_AEU_COMMON_START,
5437                                  MISC_AEU_COMMON_END);
5438                 /* RXPCS COMMON comes here */
5439                 /* EMAC0 COMMON comes here */
5440                 /* EMAC1 COMMON comes here */
5441                 /* DBU COMMON comes here */
5442                 /* DBG COMMON comes here */
5443                 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5444
5445                 if (CHIP_REV_IS_SLOW(bp))
5446                         msleep(200);
5447
5448                 /* finish CFC init */
5449                 val = REG_RD(bp, CFC_REG_LL_INIT_DONE);
5450                 if (val != 1) {
5451                         BNX2X_ERR("CFC LL_INIT failed\n");
5452                         return -EBUSY;
5453                 }
5454
5455                 val = REG_RD(bp, CFC_REG_AC_INIT_DONE);
5456                 if (val != 1) {
5457                         BNX2X_ERR("CFC AC_INIT failed\n");
5458                         return -EBUSY;
5459                 }
5460
5461                 val = REG_RD(bp, CFC_REG_CAM_INIT_DONE);
5462                 if (val != 1) {
5463                         BNX2X_ERR("CFC CAM_INIT failed\n");
5464                         return -EBUSY;
5465                 }
5466
5467                 REG_WR(bp, CFC_REG_DEBUG0, 0);
5468
5469                 /* read NIG statistic
5470                    to see if this is our first up since powerup */
5471 #ifdef BNX2X_DMAE_RD
5472                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5473                 val = *bnx2x_sp(bp, wb_data[0]);
5474 #else
5475                 val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
5476                 REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
5477 #endif
5478                 /* do internal memory self test */
5479                 if ((val == 0) && bnx2x_int_mem_test(bp)) {
5480                         BNX2X_ERR("internal mem selftest failed\n");
5481                         return -EBUSY;
5482                 }
5483
5484                 /* clear PXP2 attentions */
5485                 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR);
5486
5487                 enable_blocks_attention(bp);
5488                 /* enable_blocks_parity(bp); */
5489
5490         } /* end of common init */
5491
5492         /* per port init */
5493
5494         /* the phys address is shifted right 12 bits and has an added
5495            1=valid bit added to the 53rd bit
5496            then since this is a wide register(TM)
5497            we split it into two 32 bit writes
5498          */
5499 #define RQ_ONCHIP_AT_PORT_SIZE  384
5500 #define ONCHIP_ADDR1(x)   ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5501 #define ONCHIP_ADDR2(x)   ((u32)((1 << 20) | ((u64)x >> 44)))
5502 #define PXP_ONE_ILT(x)    ((x << 10) | x)
5503
5504         DP(BNX2X_MSG_MCP, "starting per-function init port is %x\n", func);
5505
5506         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + func*4, 0);
5507
5508         /* Port PXP comes here */
5509         /* Port PXP2 comes here */
5510
5511         /* Offset is
5512          * Port0  0
5513          * Port1  384 */
5514         i = func * RQ_ONCHIP_AT_PORT_SIZE;
5515 #ifdef USE_DMAE
5516         wb_write[0] = ONCHIP_ADDR1(bnx2x_sp_mapping(bp, context));
5517         wb_write[1] = ONCHIP_ADDR2(bnx2x_sp_mapping(bp, context));
5518         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5519 #else
5520         REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + i*8,
5521                    ONCHIP_ADDR1(bnx2x_sp_mapping(bp, context)));
5522         REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + i*8 + 4,
5523                    ONCHIP_ADDR2(bnx2x_sp_mapping(bp, context)));
5524 #endif
5525         REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4, PXP_ONE_ILT(i));
5526
5527 #ifdef BCM_ISCSI
5528         /* Port0  1
5529          * Port1  385 */
5530         i++;
5531         wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5532         wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5533         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5534         REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5535
5536         /* Port0  2
5537          * Port1  386 */
5538         i++;
5539         wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5540         wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5541         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5542         REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5543
5544         /* Port0  3
5545          * Port1  387 */
5546         i++;
5547         wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5548         wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5549         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5550         REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5551 #endif
5552
5553         /* Port TCM comes here */
5554         /* Port UCM comes here */
5555         /* Port CCM comes here */
5556         bnx2x_init_block(bp, func ? XCM_PORT1_START : XCM_PORT0_START,
5557                              func ? XCM_PORT1_END : XCM_PORT0_END);
5558
5559 #ifdef USE_DMAE
5560         wb_write[0] = 0;
5561         wb_write[1] = 0;
5562 #endif
5563         for (i = 0; i < 32; i++) {
5564                 REG_WR(bp, QM_REG_BASEADDR + (func*32 + i)*4, 1024 * 4 * i);
5565 #ifdef USE_DMAE
5566                 REG_WR_DMAE(bp, QM_REG_PTRTBL + (func*32 + i)*8, wb_write, 2);
5567 #else
5568                 REG_WR_IND(bp, QM_REG_PTRTBL + (func*32 + i)*8, 0);
5569                 REG_WR_IND(bp, QM_REG_PTRTBL + (func*32 + i)*8 + 4, 0);
5570 #endif
5571         }
5572         REG_WR(bp, QM_REG_CONNNUM_0 + func*4, 1024/16 - 1);
5573
5574         /* Port QM comes here */
5575
5576 #ifdef BCM_ISCSI
5577         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5578         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5579
5580         bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5581                              func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5582 #endif
5583         /* Port DQ comes here */
5584         /* Port BRB1 comes here */
5585         bnx2x_init_block(bp, func ? PRS_PORT1_START : PRS_PORT0_START,
5586                              func ? PRS_PORT1_END : PRS_PORT0_END);
5587         /* Port TSDM comes here */
5588         /* Port CSDM comes here */
5589         /* Port USDM comes here */
5590         /* Port XSDM comes here */
5591         bnx2x_init_block(bp, func ? TSEM_PORT1_START : TSEM_PORT0_START,
5592                              func ? TSEM_PORT1_END : TSEM_PORT0_END);
5593         bnx2x_init_block(bp, func ? USEM_PORT1_START : USEM_PORT0_START,
5594                              func ? USEM_PORT1_END : USEM_PORT0_END);
5595         bnx2x_init_block(bp, func ? CSEM_PORT1_START : CSEM_PORT0_START,
5596                              func ? CSEM_PORT1_END : CSEM_PORT0_END);
5597         bnx2x_init_block(bp, func ? XSEM_PORT1_START : XSEM_PORT0_START,
5598                              func ? XSEM_PORT1_END : XSEM_PORT0_END);
5599         /* Port UPB comes here */
5600         /* Port XSDM comes here */
5601         bnx2x_init_block(bp, func ? PBF_PORT1_START : PBF_PORT0_START,
5602                              func ? PBF_PORT1_END : PBF_PORT0_END);
5603
5604         /* configure PBF to work without PAUSE mtu 9000 */
5605         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + func*4, 0);
5606
5607         /* update threshold */
5608         REG_WR(bp, PBF_REG_P0_ARB_THRSH + func*4, (9040/16));
5609         /* update init credit */
5610         REG_WR(bp, PBF_REG_P0_INIT_CRD + func*4, (9040/16) + 553 - 22);
5611
5612         /* probe changes */
5613         REG_WR(bp, PBF_REG_INIT_P0 + func*4, 1);
5614         msleep(5);
5615         REG_WR(bp, PBF_REG_INIT_P0 + func*4, 0);
5616
5617 #ifdef BCM_ISCSI
5618         /* tell the searcher where the T2 table is */
5619         REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5620
5621         wb_write[0] = U64_LO(bp->t2_mapping);
5622         wb_write[1] = U64_HI(bp->t2_mapping);
5623         REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5624         wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5625         wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5626         REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5627
5628         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5629         /* Port SRCH comes here */
5630 #endif
5631         /* Port CDU comes here */
5632         /* Port CFC comes here */
5633         bnx2x_init_block(bp, func ? HC_PORT1_START : HC_PORT0_START,
5634                              func ? HC_PORT1_END : HC_PORT0_END);
5635         bnx2x_init_block(bp, func ? MISC_AEU_PORT1_START :
5636                                     MISC_AEU_PORT0_START,
5637                              func ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5638         /* Port PXPCS comes here */
5639         /* Port EMAC0 comes here */
5640         /* Port EMAC1 comes here */
5641         /* Port DBU comes here */
5642         /* Port DBG comes here */
5643         bnx2x_init_block(bp, func ? NIG_PORT1_START : NIG_PORT0_START,
5644                              func ? NIG_PORT1_END : NIG_PORT0_END);
5645         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + func*4, 1);
5646         /* Port MCP comes here */
5647         /* Port DMAE comes here */
5648
5649         bnx2x_link_reset(bp);
5650
5651         /* Reset PCIE errors for debug */
5652         REG_WR(bp, 0x2114, 0xffffffff);
5653         REG_WR(bp, 0x2120, 0xffffffff);
5654         REG_WR(bp, 0x2814, 0xffffffff);
5655
5656         /* !!! move to init_values.h */
5657         REG_WR(bp, XSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
5658         REG_WR(bp, USDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
5659         REG_WR(bp, CSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
5660         REG_WR(bp, TSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
5661
5662         REG_WR(bp, DBG_REG_PCI_REQ_CREDIT, 0x1);
5663         REG_WR(bp, TM_REG_PCIARB_CRDCNT_VAL, 0x1);
5664         REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5665         REG_WR(bp, CDU_REG_CDU_DEBUG, 0x0);
5666
5667         bnx2x_gunzip_end(bp);
5668
5669         if (!nomcp) {
5670                 port = bp->port;
5671
5672                 bp->fw_drv_pulse_wr_seq =
5673                                 (SHMEM_RD(bp, drv_fw_mb[port].drv_pulse_mb) &
5674                                  DRV_PULSE_SEQ_MASK);
5675                 bp->fw_mb = SHMEM_RD(bp, drv_fw_mb[port].fw_mb_param);
5676                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  fw_mb 0x%x\n",
5677                    bp->fw_drv_pulse_wr_seq, bp->fw_mb);
5678         } else {
5679                 bp->fw_mb = 0;
5680         }
5681
5682         return 0;
5683 }
5684
5685 /* send the MCP a request, block until there is a reply */
5686 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5687 {
5688         u32 rc = 0;
5689         u32 seq = ++bp->fw_seq;
5690         int port = bp->port;
5691
5692         SHMEM_WR(bp, drv_fw_mb[port].drv_mb_header, command|seq);
5693         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", command|seq);
5694
5695         /* let the FW do it's magic ... */
5696         msleep(100); /* TBD */
5697
5698         if (CHIP_REV_IS_SLOW(bp))
5699                 msleep(900);
5700
5701         rc = SHMEM_RD(bp, drv_fw_mb[port].fw_mb_header);
5702
5703         DP(BNX2X_MSG_MCP, "read (%x) seq is (%x) from FW MB\n", rc, seq);
5704
5705         /* is this a reply to our command? */
5706         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5707                 rc &= FW_MSG_CODE_MASK;
5708         } else {
5709                 /* FW BUG! */
5710                 BNX2X_ERR("FW failed to respond!\n");
5711                 bnx2x_fw_dump(bp);
5712                 rc = 0;
5713         }
5714         return rc;
5715 }
5716
5717 static void bnx2x_free_mem(struct bnx2x *bp)
5718 {
5719
5720 #define BNX2X_PCI_FREE(x, y, size) \
5721         do { \
5722                 if (x) { \
5723                         pci_free_consistent(bp->pdev, size, x, y); \
5724                         x = NULL; \
5725                         y = 0; \
5726                 } \
5727         } while (0)
5728
5729 #define BNX2X_FREE(x) \
5730         do { \
5731                 if (x) { \
5732                         vfree(x); \
5733                         x = NULL; \
5734                 } \
5735         } while (0)
5736
5737         int i;
5738
5739         /* fastpath */
5740         for_each_queue(bp, i) {
5741
5742                 /* Status blocks */
5743                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5744                                bnx2x_fp(bp, i, status_blk_mapping),
5745                                sizeof(struct host_status_block) +
5746                                sizeof(struct eth_tx_db_data));
5747
5748                 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5749                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5750                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5751                                bnx2x_fp(bp, i, tx_desc_mapping),
5752                                sizeof(struct eth_tx_bd) * NUM_TX_BD);
5753
5754                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5755                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5756                                bnx2x_fp(bp, i, rx_desc_mapping),
5757                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
5758
5759                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5760                                bnx2x_fp(bp, i, rx_comp_mapping),
5761                                sizeof(struct eth_fast_path_rx_cqe) *
5762                                NUM_RCQ_BD);
5763         }
5764
5765         BNX2X_FREE(bp->fp);
5766
5767         /* end of fastpath */
5768
5769         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5770                        (sizeof(struct host_def_status_block)));
5771
5772         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5773                        (sizeof(struct bnx2x_slowpath)));
5774
5775 #ifdef BCM_ISCSI
5776         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5777         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5778         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5779         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5780 #endif
5781         BNX2X_PCI_FREE(bp->spq,&n