/* drivers/net/bnx2x.c (from linux-2.6.git) */
1 /* bnx2x.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2008 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Eliezer Tamir <eliezert@broadcom.com>
10  * Based on code from Michael Chan's bnx2 driver
11  * UDP CSUM errata workaround by Arik Gendelman
12  * Slowpath rework by Vladislav Zolotarov
13  * Statistics and Link management by Yitchak Gertner
14  *
15  */
16
17 /* define this to make the driver freeze on error
18  * to allow getting debug info
19  * (you will need to reboot afterwards)
20  */
21 /*#define BNX2X_STOP_ON_ERROR*/
22
23 #include <linux/module.h>
24 #include <linux/moduleparam.h>
25 #include <linux/kernel.h>
26 #include <linux/device.h>  /* for dev_info() */
27 #include <linux/timer.h>
28 #include <linux/errno.h>
29 #include <linux/ioport.h>
30 #include <linux/slab.h>
31 #include <linux/vmalloc.h>
32 #include <linux/interrupt.h>
33 #include <linux/pci.h>
34 #include <linux/init.h>
35 #include <linux/netdevice.h>
36 #include <linux/etherdevice.h>
37 #include <linux/skbuff.h>
38 #include <linux/dma-mapping.h>
39 #include <linux/bitops.h>
40 #include <linux/irq.h>
41 #include <linux/delay.h>
42 #include <asm/byteorder.h>
43 #include <linux/time.h>
44 #include <linux/ethtool.h>
45 #include <linux/mii.h>
46 #ifdef NETIF_F_HW_VLAN_TX
47         #include <linux/if_vlan.h>
48         #define BCM_VLAN 1
49 #endif
50 #include <net/ip.h>
51 #include <net/tcp.h>
52 #include <net/checksum.h>
53 #include <linux/workqueue.h>
54 #include <linux/crc32.h>
55 #include <linux/prefetch.h>
56 #include <linux/zlib.h>
57 #include <linux/version.h>
58 #include <linux/io.h>
59
60 #include "bnx2x_reg.h"
61 #include "bnx2x_fw_defs.h"
62 #include "bnx2x_hsi.h"
63 #include "bnx2x.h"
64 #include "bnx2x_init.h"
65
66 #define DRV_MODULE_VERSION      "1.42.4"
67 #define DRV_MODULE_RELDATE      "2008/4/9"
68 #define BNX2X_BC_VER            0x040200
69
70 /* Time in jiffies before concluding the transmitter is hung. */
71 #define TX_TIMEOUT              (5*HZ)
72
73 static char version[] __devinitdata =
74         "Broadcom NetXtreme II 5771X 10Gigabit Ethernet Driver "
75         DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
76
77 MODULE_AUTHOR("Eliezer Tamir <eliezert@broadcom.com>");
78 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
79 MODULE_LICENSE("GPL");
80 MODULE_VERSION(DRV_MODULE_VERSION);
81
82 static int use_inta;
83 static int poll;
84 static int onefunc;
85 static int nomcp;
86 static int debug;
87 static int use_multi;
88
89 module_param(use_inta, int, 0);
90 module_param(poll, int, 0);
91 module_param(onefunc, int, 0);
92 module_param(debug, int, 0);
93 MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
94 MODULE_PARM_DESC(poll, "use polling (for debug)");
95 MODULE_PARM_DESC(onefunc, "enable only first function");
96 MODULE_PARM_DESC(nomcp, "ignore management CPU (Implies onefunc)");
97 MODULE_PARM_DESC(debug, "default debug msglevel");
98
99 #ifdef BNX2X_MULTI
100 module_param(use_multi, int, 0);
101 MODULE_PARM_DESC(use_multi, "use per-CPU queues");
102 #endif
103
/* Supported board types; used as the driver_data index into board_info[]. */
enum bnx2x_board_type {
	BCM57710 = 0,
};

/* indexed by board_t, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" }
};

/* PCI IDs this driver binds to; the last field carries the board type */
static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
122
123 /****************************************************************************
124 * General service functions
125 ****************************************************************************/
126
/* Write @val to device register @addr through the indirect GRC window
 * in PCI config space: point the window at the target, write the data,
 * then park the window back on the vendor-ID offset so a stray config
 * read cannot hit a live device register.
 *
 * used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	/* restore the window to a harmless location */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}
137
#ifdef BNX2X_IND_RD
/* Read device register @addr through the indirect GRC window in PCI
 * config space; counterpart of bnx2x_reg_wr_ind().  The window is
 * parked back on the vendor-ID offset before returning. */
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
#endif
151
/* GO registers for the 16 DMAE command slots, indexed by slot number.
 * Writing 1 to dmae_reg_go_c[idx] starts the command previously copied
 * into slot idx of the DMAE command memory (see bnx2x_post_dmae()). */
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
158
159 /* copy command into DMAE command memory and set DMAE command go */
160 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
161                             int idx)
162 {
163         u32 cmd_offset;
164         int i;
165
166         cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
167         for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
168                 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
169
170 /*              DP(NETIF_MSG_DMAE, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
171                    idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i)); */
172         }
173         REG_WR(bp, dmae_reg_go_c[idx], 1);
174 }
175
/* DMA @len32 dwords from host memory at @dma_addr to GRC address
 * @dst_addr, then busy-wait on the slowpath wb_comp dword until the
 * chip writes the completion value back.  Uses the per-port command
 * slot (port * 8); logs an error and gives up on timeout.
 */
static void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr,
			     u32 dst_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->dmae;
	int port = bp->port;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int timeout = 200;

	memset(dmae, 0, sizeof(struct dmae_command));

	/* PCI -> GRC copy; completion is DMAed back to host memory */
	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;	/* GRC address in dwords */
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = BNX2X_WB_COMP_VAL;

/*
	DP(NETIF_MSG_DMAE, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
*/
/*
	DP(NETIF_MSG_DMAE, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
*/

	/* arm the completion word before kicking the command */
	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, port * 8);

	udelay(5);
	/* adjust timeout for emulation/FPGA */
	if (CHIP_REV_IS_SLOW(bp))
		timeout *= 100;
	/* poll the completion dword the chip DMAs back to us */
	while (*wb_comp != BNX2X_WB_COMP_VAL) {
/*		DP(NETIF_MSG_DMAE, "wb_comp 0x%08x\n", *wb_comp); */
		udelay(5);
		if (!timeout) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		timeout--;
	}
}
237
#ifdef BNX2X_DMAE_RD
/* DMA @len32 dwords from GRC address @src_addr into the slowpath
 * wb_data buffer, then busy-wait on the wb_comp dword for completion.
 * Counterpart of bnx2x_write_dmae(); logs an error and gives up on
 * timeout.
 */
static void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->dmae;
	int port = bp->port;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int timeout = 200;

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	/* GRC -> PCI copy; completion is DMAed back to host memory */
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
	dmae->src_addr_lo = src_addr >> 2;	/* GRC address in dwords */
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = BNX2X_WB_COMP_VAL;

	/* arm the completion word before kicking the command */
	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, port * 8);

	udelay(5);
	/* adjust timeout for emulation/FPGA, exactly as bnx2x_write_dmae()
	 * does; without this the read path times out ~100x too early on
	 * slow chip revisions and falsely reports "dmae timeout!" */
	if (CHIP_REV_IS_SLOW(bp))
		timeout *= 100;
	while (*wb_comp != BNX2X_WB_COMP_VAL) {
		udelay(5);
		if (!timeout) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		timeout--;
	}
}
#endif
297
298 static int bnx2x_mc_assert(struct bnx2x *bp)
299 {
300         int i, j, rc = 0;
301         char last_idx;
302         const char storm[] = {"XTCU"};
303         const u32 intmem_base[] = {
304                 BAR_XSTRORM_INTMEM,
305                 BAR_TSTRORM_INTMEM,
306                 BAR_CSTRORM_INTMEM,
307                 BAR_USTRORM_INTMEM
308         };
309
310         /* Go through all instances of all SEMIs */
311         for (i = 0; i < 4; i++) {
312                 last_idx = REG_RD8(bp, XSTORM_ASSERT_LIST_INDEX_OFFSET +
313                                    intmem_base[i]);
314                 if (last_idx)
315                         BNX2X_LOG("DATA %cSTORM_ASSERT_LIST_INDEX 0x%x\n",
316                                   storm[i], last_idx);
317
318                 /* print the asserts */
319                 for (j = 0; j < STROM_ASSERT_ARRAY_SIZE; j++) {
320                         u32 row0, row1, row2, row3;
321
322                         row0 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) +
323                                       intmem_base[i]);
324                         row1 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 4 +
325                                       intmem_base[i]);
326                         row2 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 8 +
327                                       intmem_base[i]);
328                         row3 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 12 +
329                                       intmem_base[i]);
330
331                         if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
332                                 BNX2X_LOG("DATA %cSTORM_ASSERT_INDEX 0x%x ="
333                                           " 0x%08x 0x%08x 0x%08x 0x%08x\n",
334                                           storm[i], j, row3, row2, row1, row0);
335                                 rc++;
336                         } else {
337                                 break;
338                         }
339                 }
340         }
341         return rc;
342 }
343
344 static void bnx2x_fw_dump(struct bnx2x *bp)
345 {
346         u32 mark, offset;
347         u32 data[9];
348         int word;
349
350         mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
351         mark = ((mark + 0x3) & ~0x3);
352         printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
353
354         for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
355                 for (word = 0; word < 8; word++)
356                         data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
357                                                   offset + 4*word));
358                 data[8] = 0x0;
359                 printk(KERN_CONT "%s", (char *)data);
360         }
361         for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
362                 for (word = 0; word < 8; word++)
363                         data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
364                                                   offset + 4*word));
365                 data[8] = 0x0;
366                 printk(KERN_CONT "%s", (char *)data);
367         }
368         printk("\n" KERN_ERR PFX "end of fw dump\n");
369 }
370
/* Dump driver state for post-mortem debugging: per-queue producer and
 * consumer indices, windows of the tx/rx BD rings and the rx completion
 * ring around the current consumers, the default status-block indices,
 * and any firmware asserts.  Statistics are disabled at the end so the
 * dumped state stops changing.
 */
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
			  "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)"
			  "  *rx_cons_sb(%x)  rx_comp_prod(%x)"
			  "  rx_comp_cons(%x)  fp_c_idx(%x)  fp_u_idx(%x)"
			  "  bd data(%x,%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, *fp->tx_cons_sb, *fp->rx_cons_sb,
			  fp->rx_comp_prod, fp->rx_comp_cons, fp->fp_c_idx,
			  fp->fp_u_idx, hw_prods->packets_prod,
			  hw_prods->bds_prod);

		/* tx packet ring: a window around the sb consumer */
		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		/* tx BD ring: a window around the driver consumer */
		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		/* rx BD ring: a window around the sb consumer */
		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  j, rx_bd[0], rx_bd[1], sw_bd->skb);
		}

		/* rx completion queue: a window around the driver consumer */
		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
		  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
		  "  spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);


	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");

	/* freeze the stats so the dumped state stays meaningful */
	bp->stats_state = STATS_STATE_DISABLE;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n");
}
444
/* Enable host-coalescing interrupts for our port in either MSI-X or
 * INT#A mode.  In INT#A mode the config register is deliberately
 * written twice: first with the MSI/MSI-X enable bit set, then with it
 * cleared (errata A0.158 workaround) — do not collapse the two writes.
 */
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = bp->port;
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		/* Errata A0.158 workaround */
		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
		   val, port, addr, msix);

		/* first write: with MSI/MSI-X enable still set */
		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
	   val, port, addr, msix);

	/* final write: the value the HC actually runs with */
	REG_WR(bp, addr, val);
}
476
477 static void bnx2x_int_disable(struct bnx2x *bp)
478 {
479         int port = bp->port;
480         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
481         u32 val = REG_RD(bp, addr);
482
483         val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
484                  HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
485                  HC_CONFIG_0_REG_INT_LINE_EN_0 |
486                  HC_CONFIG_0_REG_ATTN_BIT_EN_0);
487
488         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
489            val, port, addr);
490
491         REG_WR(bp, addr, val);
492         if (REG_RD(bp, addr) != val)
493                 BNX2X_ERR("BUG! proper val not read from IGU!\n");
494 }
495
/* Disable interrupts and wait until every in-flight ISR and the
 * slowpath task have finished.  intr_sem is bumped first so any ISR
 * that does fire sees interrupts as logically disabled.
 */
static void bnx2x_int_disable_sync(struct bnx2x *bp)
{

	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i;

	atomic_inc(&bp->intr_sem);
	/* prevent the HW from sending interrupts */
	bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i].vector);

		/* one more for the Slow Path IRQ — relies on 'i' being
		 * left one past the last queue index after the loop */
		synchronize_irq(bp->msix_table[i].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_work_sync(&bp->sp_task);

}
520
521 /* fast path code */
522
523 /*
524  * general service functions
525  */
526
527 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 id,
528                                 u8 storm, u16 index, u8 op, u8 update)
529 {
530         u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_PORT_BASE * bp->port) * 8;
531         struct igu_ack_register igu_ack;
532
533         igu_ack.status_block_index = index;
534         igu_ack.sb_id_and_flags =
535                         ((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
536                          (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
537                          (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
538                          (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
539
540 /*      DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
541            (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr); */
542         REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack));
543 }
544
545 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
546 {
547         struct host_status_block *fpsb = fp->status_blk;
548         u16 rc = 0;
549
550         barrier(); /* status block is written to by the chip */
551         if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
552                 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
553                 rc |= 1;
554         }
555         if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
556                 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
557                 rc |= 2;
558         }
559         return rc;
560 }
561
562 static inline int bnx2x_has_work(struct bnx2x_fastpath *fp)
563 {
564         u16 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
565
566         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
567                 rx_cons_sb++;
568
569         if ((rx_cons_sb != fp->rx_comp_cons) ||
570             (le16_to_cpu(*fp->tx_cons_sb) != fp->tx_pkt_cons))
571                 return 1;
572
573         return 0;
574 }
575
/* Read the IGU SIMD mask register for our port; the read itself acks
 * the interrupt, and the returned bits indicate which status blocks
 * have pending work.
 *
 * NOTE(review): the register read is 32 bits but only the low 16 are
 * returned — presumably the upper bits are unused; confirm against the
 * IGU documentation.
 */
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_PORT_BASE * bp->port) * 8;
	u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr);

/*	DP(NETIF_MSG_INTR, "read 0x%08x from IGU addr 0x%x\n",
	   result, BAR_IGU_INTMEM + igu_addr); */

#ifdef IGU_DEBUG
#warning IGU_DEBUG active
	if (result == 0) {
		BNX2X_ERR("read %x from IGU\n", result);
		REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0);
	}
#endif
	return result;
}
593
594
595 /*
596  * fast path service functions
597  */
598
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 *
 * Walks the chain of tx BDs that belong to one packet: unmaps the
 * first (header) BD, skips the parse BD and the TSO split-header BD
 * (neither carries a DMA mapping), unmaps every fragment BD, then
 * frees the skb and clears the ring slot.
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = tx_buf->first_bd;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	/* nbd counts the remaining BDs of this packet after the first */
	nbd = le16_to_cpu(tx_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("bad nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	BUG_TRAP(skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return bd_idx;
}
665
/* Number of free tx BDs on the ring.  Lock-free: reads the producer
 * and consumer once (barrier() forces fresh loads) and tolerates the
 * producer racing ahead.
 */
static inline u32 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	u16 used;
	u32 prod;
	u32 cons;

	/* Tell compiler that prod and cons can change */
	barrier();
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* wrapped case: add the usable ring size (NUM_TX_BD minus the
	 * per-page "next" BDs) to prod - cons; the / TX_DESC_CNT terms
	 * presumably discount one slot per ring page crossed —
	 * TODO confirm against the ring layout in bnx2x.h */
	used = (NUM_TX_BD - NUM_TX_RINGS + prod - cons +
		(cons / TX_DESC_CNT) - (prod / TX_DESC_CNT));

	if (prod >= cons) {
		/* used = prod - cons - prod/size + cons/size */
		used -= NUM_TX_BD - NUM_TX_RINGS;
	}

	BUG_TRAP(used <= fp->bp->tx_ring_size);
	BUG_TRAP((fp->bp->tx_ring_size - used) <= MAX_TX_AVAIL);

	return (fp->bp->tx_ring_size - used);
}
690
/* Reclaim completed tx packets on @fp, up to @work packets per call.
 * Frees each packet's BD chain via bnx2x_free_tx_pkt(), advances the
 * consumers, and wakes the tx queue if it was stopped and enough BDs
 * are now free.
 */
static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	/* hw_cons comes from the status block the chip DMAs to us */
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %d\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		/* honor the work budget */
		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_cons update visible to start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_queue_stopped(bp->dev))) {

		/* take the tx lock so the re-check and wake are atomic
		 * with respect to start_xmit() */
		netif_tx_lock(bp->dev);

		if (netif_queue_stopped(bp->dev) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_wake_queue(bp->dev);

		netif_tx_unlock(bp->dev);

	}
}
751
/* Handle a slowpath (ramrod completion) CQE on @fp.  The switch keys
 * on (command | current state): per-queue FP state transitions for the
 * non-leading queues (fp->index != 0), global bp->state transitions
 * for the leading queue.  Unexpected combinations are logged, not
 * acted on.
 */
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(NETIF_MSG_RX_STATUS,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state, rr_cqe->ramrod_cqe.type);

	/* a ramrod completed, so a slowpath queue slot is free again */
	bp->spq_left++;

	if (fp->index) {
		/* non-leading (MULTI) queue: only setup/halt expected */
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply(%d)  state is %x\n",
				  command, fp->state);
		}
		mb(); /* force bnx2x_wait_ramrod to see the change */
		return;
	}

	/* leading queue: drive the global device state machine */
	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n",
		   cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFUP, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected ramrod (%d)  state is %x\n",
			  command, bp->state);
	}

	mb(); /* force bnx2x_wait_ramrod to see the change */
}
821
822 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
823                                      struct bnx2x_fastpath *fp, u16 index)
824 {
825         struct sk_buff *skb;
826         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
827         struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
828         dma_addr_t mapping;
829
830         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
831         if (unlikely(skb == NULL))
832                 return -ENOMEM;
833
834         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
835                                  PCI_DMA_FROMDEVICE);
836         if (unlikely(dma_mapping_error(mapping))) {
837
838                 dev_kfree_skb(skb);
839                 return -ENOMEM;
840         }
841
842         rx_buf->skb = skb;
843         pci_unmap_addr_set(rx_buf, mapping, mapping);
844
845         rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
846         rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
847
848         return 0;
849 }
850
851 /* note that we are not allocating a new skb,
852  * we are just moving one from cons to prod
853  * we are not creating a new mapping,
854  * so there is no need to check for dma_mapping_error().
855  */
856 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
857                                struct sk_buff *skb, u16 cons, u16 prod)
858 {
859         struct bnx2x *bp = fp->bp;
860         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
861         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
862         struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
863         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
864
865         pci_dma_sync_single_for_device(bp->pdev,
866                                        pci_unmap_addr(cons_rx_buf, mapping),
867                                        bp->rx_offset + RX_COPY_THRESH,
868                                        PCI_DMA_FROMDEVICE);
869
870         prod_rx_buf->skb = cons_rx_buf->skb;
871         pci_unmap_addr_set(prod_rx_buf, mapping,
872                            pci_unmap_addr(cons_rx_buf, mapping));
873         *prod_bd = *cons_bd;
874 }
875
876 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
877 {
878         struct bnx2x *bp = fp->bp;
879         u16 bd_cons, bd_prod, comp_ring_cons;
880         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
881         int rx_pkt = 0;
882
883 #ifdef BNX2X_STOP_ON_ERROR
884         if (unlikely(bp->panic))
885                 return 0;
886 #endif
887
888         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
889         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
890                 hw_comp_cons++;
891
892         bd_cons = fp->rx_bd_cons;
893         bd_prod = fp->rx_bd_prod;
894         sw_comp_cons = fp->rx_comp_cons;
895         sw_comp_prod = fp->rx_comp_prod;
896
897         /* Memory barrier necessary as speculative reads of the rx
898          * buffer can be ahead of the index in the status block
899          */
900         rmb();
901
902         DP(NETIF_MSG_RX_STATUS,
903            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
904            fp->index, hw_comp_cons, sw_comp_cons);
905
906         while (sw_comp_cons != hw_comp_cons) {
907                 unsigned int len, pad;
908                 struct sw_rx_bd *rx_buf;
909                 struct sk_buff *skb;
910                 union eth_rx_cqe *cqe;
911
912                 comp_ring_cons = RCQ_BD(sw_comp_cons);
913                 bd_prod = RX_BD(bd_prod);
914                 bd_cons = RX_BD(bd_cons);
915
916                 cqe = &fp->rx_comp_ring[comp_ring_cons];
917
918                 DP(NETIF_MSG_RX_STATUS, "hw_comp_cons %u  sw_comp_cons %u"
919                    "  comp_ring (%u)  bd_ring (%u,%u)\n",
920                    hw_comp_cons, sw_comp_cons,
921                    comp_ring_cons, bd_prod, bd_cons);
922                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
923                    "  queue %x  vlan %x  len %x\n",
924                    cqe->fast_path_cqe.type,
925                    cqe->fast_path_cqe.error_type_flags,
926                    cqe->fast_path_cqe.status_flags,
927                    cqe->fast_path_cqe.rss_hash_result,
928                    cqe->fast_path_cqe.vlan_tag, cqe->fast_path_cqe.pkt_len);
929
930                 /* is this a slowpath msg? */
931                 if (unlikely(cqe->fast_path_cqe.type)) {
932                         bnx2x_sp_event(fp, cqe);
933                         goto next_cqe;
934
935                 /* this is an rx packet */
936                 } else {
937                         rx_buf = &fp->rx_buf_ring[bd_cons];
938                         skb = rx_buf->skb;
939
940                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
941                         pad = cqe->fast_path_cqe.placement_offset;
942
943                         pci_dma_sync_single_for_device(bp->pdev,
944                                         pci_unmap_addr(rx_buf, mapping),
945                                                        pad + RX_COPY_THRESH,
946                                                        PCI_DMA_FROMDEVICE);
947                         prefetch(skb);
948                         prefetch(((char *)(skb)) + 128);
949
950                         /* is this an error packet? */
951                         if (unlikely(cqe->fast_path_cqe.error_type_flags &
952                                                         ETH_RX_ERROR_FALGS)) {
953                         /* do we sometimes forward error packets anyway? */
954                                 DP(NETIF_MSG_RX_ERR,
955                                    "ERROR flags(%u) Rx packet(%u)\n",
956                                    cqe->fast_path_cqe.error_type_flags,
957                                    sw_comp_cons);
958                                 /* TBD make sure MC counts this as a drop */
959                                 goto reuse_rx;
960                         }
961
962                         /* Since we don't have a jumbo ring
963                          * copy small packets if mtu > 1500
964                          */
965                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
966                             (len <= RX_COPY_THRESH)) {
967                                 struct sk_buff *new_skb;
968
969                                 new_skb = netdev_alloc_skb(bp->dev,
970                                                            len + pad);
971                                 if (new_skb == NULL) {
972                                         DP(NETIF_MSG_RX_ERR,
973                                            "ERROR packet dropped "
974                                            "because of alloc failure\n");
975                                         /* TBD count this as a drop? */
976                                         goto reuse_rx;
977                                 }
978
979                                 /* aligned copy */
980                                 skb_copy_from_linear_data_offset(skb, pad,
981                                                     new_skb->data + pad, len);
982                                 skb_reserve(new_skb, pad);
983                                 skb_put(new_skb, len);
984
985                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
986
987                                 skb = new_skb;
988
989                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
990                                 pci_unmap_single(bp->pdev,
991                                         pci_unmap_addr(rx_buf, mapping),
992                                                  bp->rx_buf_use_size,
993                                                  PCI_DMA_FROMDEVICE);
994                                 skb_reserve(skb, pad);
995                                 skb_put(skb, len);
996
997                         } else {
998                                 DP(NETIF_MSG_RX_ERR,
999                                    "ERROR packet dropped because "
1000                                    "of alloc failure\n");
1001 reuse_rx:
1002                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1003                                 goto next_rx;
1004                         }
1005
1006                         skb->protocol = eth_type_trans(skb, bp->dev);
1007
1008                         skb->ip_summed = CHECKSUM_NONE;
1009                         if (bp->rx_csum && BNX2X_RX_SUM_OK(cqe))
1010                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1011
1012                         /* TBD do we pass bad csum packets in promisc */
1013                 }
1014
1015 #ifdef BCM_VLAN
1016                 if ((le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags)
1017                                 & PARSING_FLAGS_NUMBER_OF_NESTED_VLANS)
1018                     && (bp->vlgrp != NULL))
1019                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1020                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1021                 else
1022 #endif
1023                 netif_receive_skb(skb);
1024
1025                 bp->dev->last_rx = jiffies;
1026
1027 next_rx:
1028                 rx_buf->skb = NULL;
1029
1030                 bd_cons = NEXT_RX_IDX(bd_cons);
1031                 bd_prod = NEXT_RX_IDX(bd_prod);
1032 next_cqe:
1033                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1034                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1035                 rx_pkt++;
1036
1037                 if ((rx_pkt == budget))
1038                         break;
1039         } /* while */
1040
1041         fp->rx_bd_cons = bd_cons;
1042         fp->rx_bd_prod = bd_prod;
1043         fp->rx_comp_cons = sw_comp_cons;
1044         fp->rx_comp_prod = sw_comp_prod;
1045
1046         REG_WR(bp, BAR_TSTRORM_INTMEM +
1047                TSTORM_RCQ_PROD_OFFSET(bp->port, fp->index), sw_comp_prod);
1048
1049         mmiowb(); /* keep prod updates ordered */
1050
1051         fp->rx_pkt += rx_pkt;
1052         fp->rx_calls++;
1053
1054         return rx_pkt;
1055 }
1056
1057 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1058 {
1059         struct bnx2x_fastpath *fp = fp_cookie;
1060         struct bnx2x *bp = fp->bp;
1061         struct net_device *dev = bp->dev;
1062         int index = fp->index;
1063
1064         DP(NETIF_MSG_INTR, "got an msix interrupt on [%d]\n", index);
1065         bnx2x_ack_sb(bp, index, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1066
1067 #ifdef BNX2X_STOP_ON_ERROR
1068         if (unlikely(bp->panic))
1069                 return IRQ_HANDLED;
1070 #endif
1071
1072         prefetch(fp->rx_cons_sb);
1073         prefetch(fp->tx_cons_sb);
1074         prefetch(&fp->status_blk->c_status_block.status_block_index);
1075         prefetch(&fp->status_blk->u_status_block.status_block_index);
1076
1077         netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));
1078         return IRQ_HANDLED;
1079 }
1080
1081 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1082 {
1083         struct net_device *dev = dev_instance;
1084         struct bnx2x *bp = netdev_priv(dev);
1085         u16 status = bnx2x_ack_int(bp);
1086
1087         if (unlikely(status == 0)) {
1088                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1089                 return IRQ_NONE;
1090         }
1091
1092         DP(NETIF_MSG_INTR, "got an interrupt status is %u\n", status);
1093
1094 #ifdef BNX2X_STOP_ON_ERROR
1095         if (unlikely(bp->panic))
1096                 return IRQ_HANDLED;
1097 #endif
1098
1099         /* Return here if interrupt is shared and is disabled */
1100         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1101                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1102                 return IRQ_HANDLED;
1103         }
1104
1105         if (status & 0x2) {
1106                 struct bnx2x_fastpath *fp = &bp->fp[0];
1107
1108                 prefetch(fp->rx_cons_sb);
1109                 prefetch(fp->tx_cons_sb);
1110                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1111                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1112
1113                 netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));
1114
1115                 status &= ~0x2;
1116                 if (!status)
1117                         return IRQ_HANDLED;
1118         }
1119
1120         if (unlikely(status & 0x1)) {
1121
1122                 schedule_work(&bp->sp_task);
1123
1124                 status &= ~0x1;
1125                 if (!status)
1126                         return IRQ_HANDLED;
1127         }
1128
1129         DP(NETIF_MSG_INTR, "got an unknown interrupt! (status is %u)\n",
1130            status);
1131
1132         return IRQ_HANDLED;
1133 }
1134
1135 /* end of fast path */
1136
1137 /* PHY/MAC */
1138
1139 /*
1140  * General service functions
1141  */
1142
1143 static void bnx2x_leds_set(struct bnx2x *bp, unsigned int speed)
1144 {
1145         int port = bp->port;
1146
1147         NIG_WR(NIG_REG_LED_MODE_P0 + port*4,
1148                ((bp->hw_config & SHARED_HW_CFG_LED_MODE_MASK) >>
1149                 SHARED_HW_CFG_LED_MODE_SHIFT));
1150         NIG_WR(NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);
1151
1152         /* Set blinking rate to ~15.9Hz */
1153         NIG_WR(NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
1154                LED_BLINK_RATE_VAL);
1155         NIG_WR(NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 + port*4, 1);
1156
1157         /* On Ax chip versions for speeds less than 10G
1158            LED scheme is different */
1159         if ((CHIP_REV(bp) == CHIP_REV_Ax) && (speed < SPEED_10000)) {
1160                 NIG_WR(NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 1);
1161                 NIG_WR(NIG_REG_LED_CONTROL_TRAFFIC_P0 + port*4, 0);
1162                 NIG_WR(NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 + port*4, 1);
1163         }
1164 }
1165
1166 static void bnx2x_leds_unset(struct bnx2x *bp)
1167 {
1168         int port = bp->port;
1169
1170         NIG_WR(NIG_REG_LED_10G_P0 + port*4, 0);
1171         NIG_WR(NIG_REG_LED_MODE_P0 + port*4, SHARED_HW_CFG_LED_MAC1);
1172 }
1173
1174 static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
1175 {
1176         u32 val = REG_RD(bp, reg);
1177
1178         val |= bits;
1179         REG_WR(bp, reg, val);
1180         return val;
1181 }
1182
1183 static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits)
1184 {
1185         u32 val = REG_RD(bp, reg);
1186
1187         val &= ~bits;
1188         REG_WR(bp, reg, val);
1189         return val;
1190 }
1191
1192 static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
1193 {
1194         u32 cnt;
1195         u32 lock_status;
1196         u32 resource_bit = (1 << resource);
1197         u8 func = bp->port;
1198
1199         /* Validating that the resource is within range */
1200         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1201                 DP(NETIF_MSG_HW,
1202                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1203                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1204                 return -EINVAL;
1205         }
1206
1207         /* Validating that the resource is not already taken */
1208         lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8);
1209         if (lock_status & resource_bit) {
1210                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1211                    lock_status, resource_bit);
1212                 return -EEXIST;
1213         }
1214
1215         /* Try for 1 second every 5ms */
1216         for (cnt = 0; cnt < 200; cnt++) {
1217                 /* Try to acquire the lock */
1218                 REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + func*8 + 4,
1219                        resource_bit);
1220                 lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8);
1221                 if (lock_status & resource_bit)
1222                         return 0;
1223
1224                 msleep(5);
1225         }
1226         DP(NETIF_MSG_HW, "Timeout\n");
1227         return -EAGAIN;
1228 }
1229
1230 static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource)
1231 {
1232         u32 lock_status;
1233         u32 resource_bit = (1 << resource);
1234         u8 func = bp->port;
1235
1236         /* Validating that the resource is within range */
1237         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1238                 DP(NETIF_MSG_HW,
1239                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1240                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1241                 return -EINVAL;
1242         }
1243
1244         /* Validating that the resource is currently taken */
1245         lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8);
1246         if (!(lock_status & resource_bit)) {
1247                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1248                    lock_status, resource_bit);
1249                 return -EFAULT;
1250         }
1251
1252         REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + func*8, resource_bit);
1253         return 0;
1254 }
1255
1256 static int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
1257 {
1258         /* The GPIO should be swapped if swap register is set and active */
1259         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1260                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ bp->port;
1261         int gpio_shift = gpio_num +
1262                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1263         u32 gpio_mask = (1 << gpio_shift);
1264         u32 gpio_reg;
1265
1266         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1267                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1268                 return -EINVAL;
1269         }
1270
1271         bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1272         /* read GPIO and mask except the float bits */
1273         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1274
1275         switch (mode) {
1276         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1277                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1278                    gpio_num, gpio_shift);
1279                 /* clear FLOAT and set CLR */
1280                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1281                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1282                 break;
1283
1284         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1285                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1286                    gpio_num, gpio_shift);
1287                 /* clear FLOAT and set SET */
1288                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1289                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1290                 break;
1291
1292         case MISC_REGISTERS_GPIO_INPUT_HI_Z :
1293                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1294                    gpio_num, gpio_shift);
1295                 /* set FLOAT */
1296                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1297                 break;
1298
1299         default:
1300                 break;
1301         }
1302
1303         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1304         bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);
1305
1306         return 0;
1307 }
1308
1309 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1310 {
1311         u32 spio_mask = (1 << spio_num);
1312         u32 spio_reg;
1313
1314         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1315             (spio_num > MISC_REGISTERS_SPIO_7)) {
1316                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1317                 return -EINVAL;
1318         }
1319
1320         bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1321         /* read SPIO and mask except the float bits */
1322         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1323
1324         switch (mode) {
1325         case MISC_REGISTERS_SPIO_OUTPUT_LOW :
1326                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1327                 /* clear FLOAT and set CLR */
1328                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1329                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1330                 break;
1331
1332         case MISC_REGISTERS_SPIO_OUTPUT_HIGH :
1333                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1334                 /* clear FLOAT and set SET */
1335                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1336                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1337                 break;
1338
1339         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1340                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1341                 /* set FLOAT */
1342                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1343                 break;
1344
1345         default:
1346                 break;
1347         }
1348
1349         REG_WR(bp, MISC_REG_SPIO, spio_reg);
1350         bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_SPIO);
1351
1352         return 0;
1353 }
1354
1355 static int bnx2x_mdio22_write(struct bnx2x *bp, u32 reg, u32 val)
1356 {
1357         int port = bp->port;
1358         u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
1359         u32 tmp;
1360         int i, rc;
1361
1362 /*      DP(NETIF_MSG_HW, "phy_addr 0x%x  reg 0x%x  val 0x%08x\n",
1363            bp->phy_addr, reg, val); */
1364
1365         if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
1366
1367                 tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1368                 tmp &= ~EMAC_MDIO_MODE_AUTO_POLL;
1369                 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
1370                 REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1371                 udelay(40);
1372         }
1373
1374         tmp = ((bp->phy_addr << 21) | (reg << 16) |
1375                (val & EMAC_MDIO_COMM_DATA) |
1376                EMAC_MDIO_COMM_COMMAND_WRITE_22 |
1377                EMAC_MDIO_COMM_START_BUSY);
1378         EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, tmp);
1379
1380         for (i = 0; i < 50; i++) {
1381                 udelay(10);
1382
1383                 tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
1384                 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
1385                         udelay(5);
1386                         break;
1387                 }
1388         }
1389
1390         if (tmp & EMAC_MDIO_COMM_START_BUSY) {
1391                 BNX2X_ERR("write phy register failed\n");
1392
1393                 rc = -EBUSY;
1394         } else {
1395                 rc = 0;
1396         }
1397
1398         if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
1399
1400                 tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1401                 tmp |= EMAC_MDIO_MODE_AUTO_POLL;
1402                 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
1403         }
1404
1405         return rc;
1406 }
1407
1408 static int bnx2x_mdio22_read(struct bnx2x *bp, u32 reg, u32 *ret_val)
1409 {
1410         int port = bp->port;
1411         u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
1412         u32 val;
1413         int i, rc;
1414
1415         if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
1416
1417                 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1418                 val &= ~EMAC_MDIO_MODE_AUTO_POLL;
1419                 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
1420                 REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1421                 udelay(40);
1422         }
1423
1424         val = ((bp->phy_addr << 21) | (reg << 16) |
1425                EMAC_MDIO_COMM_COMMAND_READ_22 |
1426                EMAC_MDIO_COMM_START_BUSY);
1427         EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, val);
1428
1429         for (i = 0; i < 50; i++) {
1430                 udelay(10);
1431
1432                 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
1433                 if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
1434                         val &= EMAC_MDIO_COMM_DATA;
1435                         break;
1436                 }
1437         }
1438
1439         if (val & EMAC_MDIO_COMM_START_BUSY) {
1440                 BNX2X_ERR("read phy register failed\n");
1441
1442                 *ret_val = 0x0;
1443                 rc = -EBUSY;
1444         } else {
1445                 *ret_val = val;
1446                 rc = 0;
1447         }
1448
1449         if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
1450
1451                 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1452                 val |= EMAC_MDIO_MODE_AUTO_POLL;
1453                 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
1454         }
1455
1456 /*      DP(NETIF_MSG_HW, "phy_addr 0x%x  reg 0x%x  ret_val 0x%08x\n",
1457            bp->phy_addr, reg, *ret_val); */
1458
1459         return rc;
1460 }
1461
1462 static int bnx2x_mdio45_ctrl_write(struct bnx2x *bp, u32 mdio_ctrl,
1463                                    u32 phy_addr, u32 reg, u32 addr, u32 val)
1464 {
1465         u32 tmp;
1466         int i, rc = 0;
1467
1468         /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
1469          * (a value of 49==0x31) and make sure that the AUTO poll is off
1470          */
1471         tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
1472         tmp &= ~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT);
1473         tmp |= (EMAC_MDIO_MODE_CLAUSE_45 |
1474                 (49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
1475         REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp);
1476         REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
1477         udelay(40);
1478
1479         /* address */
1480         tmp = ((phy_addr << 21) | (reg << 16) | addr |
1481                EMAC_MDIO_COMM_COMMAND_ADDRESS |
1482                EMAC_MDIO_COMM_START_BUSY);
1483         REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
1484
1485         for (i = 0; i < 50; i++) {
1486                 udelay(10);
1487
1488                 tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
1489                 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
1490                         udelay(5);
1491                         break;
1492                 }
1493         }
1494         if (tmp & EMAC_MDIO_COMM_START_BUSY) {
1495                 BNX2X_ERR("write phy register failed\n");
1496
1497                 rc = -EBUSY;
1498
1499         } else {
1500                 /* data */
1501                 tmp = ((phy_addr << 21) | (reg << 16) | val |
1502                        EMAC_MDIO_COMM_COMMAND_WRITE_45 |
1503                        EMAC_MDIO_COMM_START_BUSY);
1504                 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
1505
1506                 for (i = 0; i < 50; i++) {
1507                         udelay(10);
1508
1509                         tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
1510                         if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
1511                                 udelay(5);
1512                                 break;
1513                         }
1514                 }
1515
1516                 if (tmp & EMAC_MDIO_COMM_START_BUSY) {
1517                         BNX2X_ERR("write phy register failed\n");
1518
1519                         rc = -EBUSY;
1520                 }
1521         }
1522
1523         /* unset clause 45 mode, set the MDIO clock to a faster value
1524          * (0x13 => 6.25Mhz) and restore the AUTO poll if needed
1525          */
1526         tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
1527         tmp &= ~(EMAC_MDIO_MODE_CLAUSE_45 | EMAC_MDIO_MODE_CLOCK_CNT);
1528         tmp |= (0x13 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
1529         if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG)
1530                 tmp |= EMAC_MDIO_MODE_AUTO_POLL;
1531         REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp);
1532
1533         return rc;
1534 }
1535
1536 static int bnx2x_mdio45_write(struct bnx2x *bp, u32 phy_addr, u32 reg,
1537                               u32 addr, u32 val)
1538 {
1539         u32 emac_base = bp->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
1540
1541         return bnx2x_mdio45_ctrl_write(bp, emac_base, phy_addr,
1542                                        reg, addr, val);
1543 }
1544
1545 static int bnx2x_mdio45_ctrl_read(struct bnx2x *bp, u32 mdio_ctrl,
1546                                   u32 phy_addr, u32 reg, u32 addr,
1547                                   u32 *ret_val)
1548 {
1549         u32 val;
1550         int i, rc = 0;
1551
1552         /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
1553          * (a value of 49==0x31) and make sure that the AUTO poll is off
1554          */
1555         val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
1556         val &= ~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT);
1557         val |= (EMAC_MDIO_MODE_CLAUSE_45 |
1558                 (49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
1559         REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);
1560         REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
1561         udelay(40);
1562
1563         /* address */
1564         val = ((phy_addr << 21) | (reg << 16) | addr |
1565                EMAC_MDIO_COMM_COMMAND_ADDRESS |
1566                EMAC_MDIO_COMM_START_BUSY);
1567         REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
1568
1569         for (i = 0; i < 50; i++) {
1570                 udelay(10);
1571
1572                 val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
1573                 if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
1574                         udelay(5);
1575                         break;
1576                 }
1577         }
1578         if (val & EMAC_MDIO_COMM_START_BUSY) {
1579                 BNX2X_ERR("read phy register failed\n");
1580
1581                 *ret_val = 0;
1582                 rc = -EBUSY;
1583
1584         } else {
1585                 /* data */
1586                 val = ((phy_addr << 21) | (reg << 16) |
1587                        EMAC_MDIO_COMM_COMMAND_READ_45 |
1588                        EMAC_MDIO_COMM_START_BUSY);
1589                 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
1590
1591                 for (i = 0; i < 50; i++) {
1592                         udelay(10);
1593
1594                         val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
1595                         if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
1596                                 val &= EMAC_MDIO_COMM_DATA;
1597                                 break;
1598                         }
1599                 }
1600
1601                 if (val & EMAC_MDIO_COMM_START_BUSY) {
1602                         BNX2X_ERR("read phy register failed\n");
1603
1604                         val = 0;
1605                         rc = -EBUSY;
1606                 }
1607
1608                 *ret_val = val;
1609         }
1610
1611         /* unset clause 45 mode, set the MDIO clock to a faster value
1612          * (0x13 => 6.25Mhz) and restore the AUTO poll if needed
1613          */
1614         val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
1615         val &= ~(EMAC_MDIO_MODE_CLAUSE_45 | EMAC_MDIO_MODE_CLOCK_CNT);
1616         val |= (0x13 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
1617         if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG)
1618                 val |= EMAC_MDIO_MODE_AUTO_POLL;
1619         REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);
1620
1621         return rc;
1622 }
1623
1624 static int bnx2x_mdio45_read(struct bnx2x *bp, u32 phy_addr, u32 reg,
1625                              u32 addr, u32 *ret_val)
1626 {
1627         u32 emac_base = bp->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
1628
1629         return bnx2x_mdio45_ctrl_read(bp, emac_base, phy_addr,
1630                                       reg, addr, ret_val);
1631 }
1632
1633 static int bnx2x_mdio45_vwrite(struct bnx2x *bp, u32 phy_addr, u32 reg,
1634                                u32 addr, u32 val)
1635 {
1636         int i;
1637         u32 rd_val;
1638
1639         might_sleep();
1640         for (i = 0; i < 10; i++) {
1641                 bnx2x_mdio45_write(bp, phy_addr, reg, addr, val);
1642                 msleep(5);
1643                 bnx2x_mdio45_read(bp, phy_addr, reg, addr, &rd_val);
1644                 /* if the read value is not the same as the value we wrote,
1645                    we should write it again */
1646                 if (rd_val == val)
1647                         return 0;
1648         }
1649         BNX2X_ERR("MDIO write in CL45 failed\n");
1650         return -EBUSY;
1651 }
1652
1653 /*
1654  * link management
1655  */
1656
1657 static void bnx2x_pause_resolve(struct bnx2x *bp, u32 pause_result)
1658 {
1659         switch (pause_result) {                 /* ASYM P ASYM P */
1660         case 0xb:                               /*   1  0   1  1 */
1661                 bp->flow_ctrl = FLOW_CTRL_TX;
1662                 break;
1663
1664         case 0xe:                               /*   1  1   1  0 */
1665                 bp->flow_ctrl = FLOW_CTRL_RX;
1666                 break;
1667
1668         case 0x5:                               /*   0  1   0  1 */
1669         case 0x7:                               /*   0  1   1  1 */
1670         case 0xd:                               /*   1  1   0  1 */
1671         case 0xf:                               /*   1  1   1  1 */
1672                 bp->flow_ctrl = FLOW_CTRL_BOTH;
1673                 break;
1674
1675         default:
1676                 break;
1677         }
1678 }
1679
/* bnx2x_ext_phy_resove_fc - resolve flow control from the external
 * PHY's clause 45 autoneg registers.
 *
 * Returns 1 when the external PHY reports autoneg complete (in which
 * case bp->flow_ctrl has been resolved via bnx2x_pause_resolve()),
 * 0 otherwise.
 *
 * NOTE(review): "resove" is a historical typo in the name, kept so the
 * callers elsewhere in this file keep working.
 */
static u8 bnx2x_ext_phy_resove_fc(struct bnx2x *bp)
{
	u32 ext_phy_addr;
	u32 ld_pause;   /* local */
	u32 lp_pause;   /* link partner */
	u32 an_complete; /* AN complete */
	u32 pause_result;
	u8 ret = 0;

	/* external PHY MDIO address comes from the port HW config word */
	ext_phy_addr = ((bp->ext_phy_config &
			 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
					PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);

	/* read twice - presumably the status register is latched and
	 * the first read returns the stale sticky value; TODO confirm
	 * against the PHY datasheet */
	bnx2x_mdio45_read(bp, ext_phy_addr,
			  EXT_PHY_KR_AUTO_NEG_DEVAD,
			  EXT_PHY_KR_STATUS, &an_complete);
	bnx2x_mdio45_read(bp, ext_phy_addr,
			  EXT_PHY_KR_AUTO_NEG_DEVAD,
			  EXT_PHY_KR_STATUS, &an_complete);

	if (an_complete & EXT_PHY_KR_AUTO_NEG_COMPLETE) {
		ret = 1;
		bnx2x_mdio45_read(bp, ext_phy_addr,
				  EXT_PHY_KR_AUTO_NEG_DEVAD,
				  EXT_PHY_KR_AUTO_NEG_ADVERT, &ld_pause);
		bnx2x_mdio45_read(bp, ext_phy_addr,
				  EXT_PHY_KR_AUTO_NEG_DEVAD,
				  EXT_PHY_KR_LP_AUTO_NEG, &lp_pause);
		/* pack local and partner pause bits into the 4-bit code
		 * expected by bnx2x_pause_resolve() */
		pause_result = (ld_pause &
				EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_MASK) >> 8;
		pause_result |= (lp_pause &
				 EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_MASK) >> 10;
		DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x \n",
		   pause_result);
		bnx2x_pause_resolve(bp, pause_result);
	}
	return ret;
}
1719
/* bnx2x_flow_ctrl_resolve - settle bp->flow_ctrl for the new link.
 *
 * Resolution sources, in priority order:
 *   1) the XGXS gp_status autoneg result, when flow-control autoneg
 *      was requested, AN completed, the link is not SGMII and there is
 *      no external PHY ("direct");
 *   2) the external PHY's clause 45 AN registers, via
 *      bnx2x_ext_phy_resove_fc();
 *   3) otherwise the requested settings in bp->req_flow_ctrl - with
 *      MTU-dependent defaults when flow-control autoneg was requested
 *      but could not be resolved, or taken verbatim in forced mode.
 */
static void bnx2x_flow_ctrl_resolve(struct bnx2x *bp, u32 gp_status)
{
	u32 ld_pause;   /* local driver */
	u32 lp_pause;   /* link partner */
	u32 pause_result;

	bp->flow_ctrl = 0;

	/* resolve from gp_status in case of AN complete and not sgmii */
	if ((bp->req_autoneg & AUTONEG_FLOW_CTRL) &&
	    (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) &&
	    (!(bp->phy_flags & PHY_SGMII_FLAG)) &&
	    (XGXS_EXT_PHY_TYPE(bp) == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)) {

		MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
		bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
				  &ld_pause);
		bnx2x_mdio22_read(bp,
			MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
				  &lp_pause);
		/* pack local and partner pause bits into the 4-bit code
		 * expected by bnx2x_pause_resolve() */
		pause_result = (ld_pause &
				MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
		pause_result |= (lp_pause &
				 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
		DP(NETIF_MSG_LINK, "pause_result 0x%x\n", pause_result);
		bnx2x_pause_resolve(bp, pause_result);
	} else if (!(bp->req_autoneg & AUTONEG_FLOW_CTRL) ||
		   !(bnx2x_ext_phy_resove_fc(bp))) {
		/* forced speed */
		if (bp->req_autoneg & AUTONEG_FLOW_CTRL) {
			/* flow-control AN was requested but could not be
			 * resolved: fall back to MTU-dependent defaults.
			 * Rx pause is only kept with mtu <= 4500 -
			 * presumably an rx buffering limit; TODO confirm */
			switch (bp->req_flow_ctrl) {
			case FLOW_CTRL_AUTO:
				if (bp->dev->mtu <= 4500)
					bp->flow_ctrl = FLOW_CTRL_BOTH;
				else
					bp->flow_ctrl = FLOW_CTRL_TX;
				break;

			case FLOW_CTRL_TX:
				bp->flow_ctrl = FLOW_CTRL_TX;
				break;

			case FLOW_CTRL_RX:
				if (bp->dev->mtu <= 4500)
					bp->flow_ctrl = FLOW_CTRL_RX;
				break;

			case FLOW_CTRL_BOTH:
				if (bp->dev->mtu <= 4500)
					bp->flow_ctrl = FLOW_CTRL_BOTH;
				else
					bp->flow_ctrl = FLOW_CTRL_TX;
				break;

			case FLOW_CTRL_NONE:
			default:
				break;
			}
		} else { /* forced mode */
			/* no autoneg: take the requested mode verbatim;
			 * FLOW_CTRL_AUTO is invalid here and only logged */
			switch (bp->req_flow_ctrl) {
			case FLOW_CTRL_AUTO:
				DP(NETIF_MSG_LINK, "req_flow_ctrl 0x%x while"
						   " req_autoneg 0x%x\n",
				   bp->req_flow_ctrl, bp->req_autoneg);
				break;

			case FLOW_CTRL_TX:
			case FLOW_CTRL_RX:
			case FLOW_CTRL_BOTH:
				bp->flow_ctrl = bp->req_flow_ctrl;
				break;

			case FLOW_CTRL_NONE:
			default:
				break;
			}
		}
	}
	DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", bp->flow_ctrl);
}
1800
/* bnx2x_link_settings_status - decode the XGXS "GP status" word.
 *
 * Fills in the driver's view of the link from @gp_status:
 * bp->phy_link_up, bp->duplex, bp->line_speed, bp->flow_ctrl (via
 * bnx2x_flow_ctrl_resolve()) and the LINK_STATUS_* bitmap in
 * bp->link_status.  On link down all of these are reset to their
 * defaults.
 */
static void bnx2x_link_settings_status(struct bnx2x *bp, u32 gp_status)
{
	bp->link_status = 0;

	if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) {
		DP(NETIF_MSG_LINK, "phy link up\n");

		bp->phy_link_up = 1;
		bp->link_status |= LINK_STATUS_LINK_UP;

		if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS)
			bp->duplex = DUPLEX_FULL;
		else
			bp->duplex = DUPLEX_HALF;

		/* duplex must be known before resolving flow control */
		bnx2x_flow_ctrl_resolve(bp, gp_status);

		/* translate the hardware speed code into line_speed and
		 * the matching full/half-duplex link_status bit */
		switch (gp_status & GP_STATUS_SPEED_MASK) {
		case GP_STATUS_10M:
			bp->line_speed = SPEED_10;
			if (bp->duplex == DUPLEX_FULL)
				bp->link_status |= LINK_10TFD;
			else
				bp->link_status |= LINK_10THD;
			break;

		case GP_STATUS_100M:
			bp->line_speed = SPEED_100;
			if (bp->duplex == DUPLEX_FULL)
				bp->link_status |= LINK_100TXFD;
			else
				bp->link_status |= LINK_100TXHD;
			break;

		case GP_STATUS_1G:
		case GP_STATUS_1G_KX:
			bp->line_speed = SPEED_1000;
			if (bp->duplex == DUPLEX_FULL)
				bp->link_status |= LINK_1000TFD;
			else
				bp->link_status |= LINK_1000THD;
			break;

		case GP_STATUS_2_5G:
			bp->line_speed = SPEED_2500;
			if (bp->duplex == DUPLEX_FULL)
				bp->link_status |= LINK_2500TFD;
			else
				bp->link_status |= LINK_2500THD;
			break;

		/* 5G/6G are codes the hardware can report but the driver
		 * does not support */
		case GP_STATUS_5G:
		case GP_STATUS_6G:
			BNX2X_ERR("link speed unsupported  gp_status 0x%x\n",
				  gp_status);
			break;

		/* 10G and above are full duplex only */
		case GP_STATUS_10G_KX4:
		case GP_STATUS_10G_HIG:
		case GP_STATUS_10G_CX4:
			bp->line_speed = SPEED_10000;
			bp->link_status |= LINK_10GTFD;
			break;

		case GP_STATUS_12G_HIG:
			bp->line_speed = SPEED_12000;
			bp->link_status |= LINK_12GTFD;
			break;

		case GP_STATUS_12_5G:
			bp->line_speed = SPEED_12500;
			bp->link_status |= LINK_12_5GTFD;
			break;

		case GP_STATUS_13G:
			bp->line_speed = SPEED_13000;
			bp->link_status |= LINK_13GTFD;
			break;

		case GP_STATUS_15G:
			bp->line_speed = SPEED_15000;
			bp->link_status |= LINK_15GTFD;
			break;

		case GP_STATUS_16G:
			bp->line_speed = SPEED_16000;
			bp->link_status |= LINK_16GTFD;
			break;

		default:
			BNX2X_ERR("link speed unsupported  gp_status 0x%x\n",
				  gp_status);
			break;
		}

		bp->link_status |= LINK_STATUS_SERDES_LINK;

		if (bp->req_autoneg & AUTONEG_SPEED) {
			bp->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;

			if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE)
				bp->link_status |=
					LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;

			if (bp->autoneg & AUTONEG_PARALLEL)
				bp->link_status |=
					LINK_STATUS_PARALLEL_DETECTION_USED;
		}

		if (bp->flow_ctrl & FLOW_CTRL_TX)
		       bp->link_status |= LINK_STATUS_TX_FLOW_CONTROL_ENABLED;

		if (bp->flow_ctrl & FLOW_CTRL_RX)
		       bp->link_status |= LINK_STATUS_RX_FLOW_CONTROL_ENABLED;

	} else { /* link_down */
		DP(NETIF_MSG_LINK, "phy link down\n");

		bp->phy_link_up = 0;

		bp->line_speed = 0;
		bp->duplex = DUPLEX_FULL;
		bp->flow_ctrl = 0;
	}

	DP(NETIF_MSG_LINK, "gp_status 0x%x  phy_link_up %d\n"
	   DP_LEVEL "  line_speed %d  duplex %d  flow_ctrl 0x%x"
		    "  link_status 0x%x\n",
	   gp_status, bp->phy_link_up, bp->line_speed, bp->duplex,
	   bp->flow_ctrl, bp->link_status);
}
1932
/* bnx2x_link_int_ack - acknowledge a NIG link-change interrupt.
 *
 * Clears all three per-port NIG link status bits, then - only while
 * the PHY reports link up - sets the single bit that matches the
 * current mode (10G XGXS, 1G XGXS lane, or SerDes) back to 1, which
 * masks further interrupts from that source.
 */
static void bnx2x_link_int_ack(struct bnx2x *bp, int is_10g)
{
	int port = bp->port;

	/* first reset all status
	 * we assume only one line will be change at a time */
	bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
		       (NIG_STATUS_XGXS0_LINK10G |
			NIG_STATUS_XGXS0_LINK_STATUS |
			NIG_STATUS_SERDES0_LINK_STATUS));
	if (bp->phy_link_up) {
		if (is_10g) {
			/* Disable the 10G link interrupt
			 * by writing 1 to the status register
			 */
			DP(NETIF_MSG_LINK, "10G XGXS phy link up\n");
			bnx2x_bits_en(bp,
				      NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
				      NIG_STATUS_XGXS0_LINK10G);

		} else if (bp->phy_flags & PHY_XGXS_FLAG) {
			/* Disable the link interrupt
			 * by writing 1 to the relevant lane
			 * in the status register
			 */
			DP(NETIF_MSG_LINK, "1G XGXS phy link up\n");
			bnx2x_bits_en(bp,
				      NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
				      ((1 << bp->ser_lane) <<
				       NIG_STATUS_XGXS0_LINK_STATUS_SIZE));

		} else { /* SerDes */
			DP(NETIF_MSG_LINK, "SerDes phy link up\n");
			/* Disable the link interrupt
			 * by writing 1 to the status register
			 */
			bnx2x_bits_en(bp,
				      NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
				      NIG_STATUS_SERDES0_LINK_STATUS);
		}

	} else { /* link_down */
		/* nothing to re-arm: all status bits were cleared above */
	}
}
1977
/* bnx2x_ext_phy_is_link_up - query the external PHY for link state.
 *
 * Returns non-zero when the external PHY reports link up.  For the
 * "direct" configurations (no external PHY, both XGXS and SerDes) it
 * always returns 1 so the decision falls back to the internal status.
 *
 * NOTE(review): several LASI/status registers are read twice back to
 * back - presumably because they are latched and the first read
 * returns the sticky value; TODO confirm against the PHY datasheets.
 */
static int bnx2x_ext_phy_is_link_up(struct bnx2x *bp)
{
	u32 ext_phy_type;
	u32 ext_phy_addr;
	u32 val1 = 0, val2;	/* val1 doubles as the return value */
	u32 rx_sd, pcs_status;

	if (bp->phy_flags & PHY_XGXS_FLAG) {
		/* external PHY MDIO address from the port HW config */
		ext_phy_addr = ((bp->ext_phy_config &
				 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
				PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);

		ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			DP(NETIF_MSG_LINK, "XGXS Direct\n");
			val1 = 1;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			DP(NETIF_MSG_LINK, "XGXS 8705\n");
			bnx2x_mdio45_read(bp, ext_phy_addr,
					  EXT_PHY_OPT_WIS_DEVAD,
					  EXT_PHY_OPT_LASI_STATUS, &val1);
			DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);

			bnx2x_mdio45_read(bp, ext_phy_addr,
					  EXT_PHY_OPT_WIS_DEVAD,
					  EXT_PHY_OPT_LASI_STATUS, &val1);
			DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);

			bnx2x_mdio45_read(bp, ext_phy_addr,
					  EXT_PHY_OPT_PMA_PMD_DEVAD,
					  EXT_PHY_OPT_PMD_RX_SD, &rx_sd);
			DP(NETIF_MSG_LINK, "8705 rx_sd 0x%x\n", rx_sd);
			/* link up == rx signal detect bit 0 */
			val1 = (rx_sd & 0x1);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			DP(NETIF_MSG_LINK, "XGXS 8706\n");
			bnx2x_mdio45_read(bp, ext_phy_addr,
					  EXT_PHY_OPT_PMA_PMD_DEVAD,
					  EXT_PHY_OPT_LASI_STATUS, &val1);
			DP(NETIF_MSG_LINK, "8706 LASI status 0x%x\n", val1);

			bnx2x_mdio45_read(bp, ext_phy_addr,
					  EXT_PHY_OPT_PMA_PMD_DEVAD,
					  EXT_PHY_OPT_LASI_STATUS, &val1);
			DP(NETIF_MSG_LINK, "8706 LASI status 0x%x\n", val1);

			bnx2x_mdio45_read(bp, ext_phy_addr,
					  EXT_PHY_OPT_PMA_PMD_DEVAD,
					  EXT_PHY_OPT_PMD_RX_SD, &rx_sd);
			bnx2x_mdio45_read(bp, ext_phy_addr,
					  EXT_PHY_OPT_PCS_DEVAD,
					  EXT_PHY_OPT_PCS_STATUS, &pcs_status);
			bnx2x_mdio45_read(bp, ext_phy_addr,
					  EXT_PHY_AUTO_NEG_DEVAD,
					  EXT_PHY_OPT_AN_LINK_STATUS, &val2);

			DP(NETIF_MSG_LINK, "8706 rx_sd 0x%x"
			   "  pcs_status 0x%x 1Gbps link_status 0x%x 0x%x\n",
			   rx_sd, pcs_status, val2, (val2 & (1<<1)));
			/* link is up if both bit 0 of pmd_rx_sd and
			 * bit 0 of pcs_status are set, or if the
			 * autoneg bit 1 is set */
			val1 = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1)));
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			/* 8072 MDIO is shared - serialize with the HW lock */
			bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);

			/* clear the interrupt LASI status register */
			bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
					       ext_phy_addr,
					       EXT_PHY_KR_PCS_DEVAD,
					       EXT_PHY_KR_LASI_STATUS, &val2);
			bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
					       ext_phy_addr,
					       EXT_PHY_KR_PCS_DEVAD,
					       EXT_PHY_KR_LASI_STATUS, &val1);
			DP(NETIF_MSG_LINK, "KR LASI status 0x%x->0x%x\n",
			   val2, val1);
			/* Check the LASI */
			bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
					       ext_phy_addr,
					       EXT_PHY_KR_PMA_PMD_DEVAD,
					       0x9003, &val2);
			bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
					       ext_phy_addr,
					       EXT_PHY_KR_PMA_PMD_DEVAD,
					       0x9003, &val1);
			DP(NETIF_MSG_LINK, "KR 0x9003 0x%x->0x%x\n",
			   val2, val1);
			/* Check the link status */
			bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
					       ext_phy_addr,
					       EXT_PHY_KR_PCS_DEVAD,
					       EXT_PHY_KR_PCS_STATUS, &val2);
			DP(NETIF_MSG_LINK, "KR PCS status 0x%x\n", val2);
			/* Check the link status on 1.1.2 */
			bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
					  ext_phy_addr,
					  EXT_PHY_OPT_PMA_PMD_DEVAD,
					  EXT_PHY_KR_STATUS, &val2);
			bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
					  ext_phy_addr,
					  EXT_PHY_OPT_PMA_PMD_DEVAD,
					  EXT_PHY_KR_STATUS, &val1);
			DP(NETIF_MSG_LINK,
			   "KR PMA status 0x%x->0x%x\n", val2, val1);
			/* bit 2 of the PMA status is the link indication */
			val1 = ((val1 & 4) == 4);
			/* If 1G was requested assume the link is up */
			if (!(bp->req_autoneg & AUTONEG_SPEED) &&
			    (bp->req_line_speed == SPEED_1000))
				val1 = 1;
			bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			bnx2x_mdio45_read(bp, ext_phy_addr,
					  EXT_PHY_OPT_PMA_PMD_DEVAD,
					  EXT_PHY_OPT_LASI_STATUS, &val2);
			bnx2x_mdio45_read(bp, ext_phy_addr,
					  EXT_PHY_OPT_PMA_PMD_DEVAD,
					  EXT_PHY_OPT_LASI_STATUS, &val1);
			DP(NETIF_MSG_LINK,
			   "10G-base-T LASI status 0x%x->0x%x\n", val2, val1);
			bnx2x_mdio45_read(bp, ext_phy_addr,
					  EXT_PHY_OPT_PMA_PMD_DEVAD,
					  EXT_PHY_KR_STATUS, &val2);
			bnx2x_mdio45_read(bp, ext_phy_addr,
					  EXT_PHY_OPT_PMA_PMD_DEVAD,
					  EXT_PHY_KR_STATUS, &val1);
			DP(NETIF_MSG_LINK,
			   "10G-base-T PMA status 0x%x->0x%x\n", val2, val1);
			/* bit 2 of the PMA status is the link indication */
			val1 = ((val1 & 4) == 4);
			/* if link is up
			 * print the AN outcome of the SFX7101 PHY
			 */
			if (val1) {
				bnx2x_mdio45_read(bp, ext_phy_addr,
						  EXT_PHY_KR_AUTO_NEG_DEVAD,
						  0x21, &val2);
				DP(NETIF_MSG_LINK,
				   "SFX7101 AN status 0x%x->%s\n", val2,
				   (val2 & (1<<14)) ? "Master" : "Slave");
			}
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->ext_phy_config);
			val1 = 0;
			break;
		}

	} else { /* SerDes */
		ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			DP(NETIF_MSG_LINK, "SerDes Direct\n");
			val1 = 1;
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			DP(NETIF_MSG_LINK, "SerDes 5482\n");
			val1 = 1;
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
			   bp->ext_phy_config);
			val1 = 0;
			break;
		}
	}

	return val1;
}
2159
/* bnx2x_bmac_enable - bring up the BigMAC for 10G operation.
 *
 * Resets the per-port BigMAC block, programs its tx/rx configuration
 * (source MAC, MTU, pause thresholds, optional loopback when @is_lb),
 * then switches the NIG datapath from the EMAC to the BMAC and starts
 * statistics collection.  The register write ordering follows the HW
 * bring-up sequence and must not be rearranged.
 */
static void bnx2x_bmac_enable(struct bnx2x *bp, int is_lb)
{
	int port = bp->port;
	u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
			       NIG_REG_INGRESS_BMAC0_MEM;
	u32 wb_write[2];	/* BMAC registers are 64 bits wide */
	u32 val;

	DP(NETIF_MSG_LINK, "enabling BigMAC\n");
	/* reset and unreset the BigMac */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
	       (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
	msleep(5);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
	       (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));

	/* enable access for bmac registers */
	NIG_WR(NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);

	/* XGXS control */
	wb_write[0] = 0x3c;
	wb_write[1] = 0;
	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
		    wb_write, 2);

	/* tx MAC SA - station address, low 4 bytes in word 0,
	 * high 2 bytes in word 1 */
	wb_write[0] = ((bp->dev->dev_addr[2] << 24) |
		       (bp->dev->dev_addr[3] << 16) |
		       (bp->dev->dev_addr[4] << 8) |
			bp->dev->dev_addr[5]);
	wb_write[1] = ((bp->dev->dev_addr[0] << 8) |
			bp->dev->dev_addr[1]);
	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR,
		    wb_write, 2);

	/* tx control - 0x800000 enables tx pause frames */
	val = 0xc0;
	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= 0x800000;
	wb_write[0] = val;
	wb_write[1] = 0;
	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_CONTROL, wb_write, 2);

	/* set tx mtu */
	wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; /* -CRC */
	wb_write[1] = 0;
	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_write, 2);

	/* mac control - 0x4 selects MAC loopback */
	val = 0x3;
	if (is_lb) {
		val |= 0x4;
		DP(NETIF_MSG_LINK, "enable bmac loopback\n");
	}
	wb_write[0] = val;
	wb_write[1] = 0;
	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
		    wb_write, 2);

	/* rx control set to don't strip crc - 0x20 accepts pause frames */
	val = 0x14;
	if (bp->flow_ctrl & FLOW_CTRL_RX)
		val |= 0x20;
	wb_write[0] = val;
	wb_write[1] = 0;
	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_write, 2);

	/* set rx mtu */
	wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
	wb_write[1] = 0;
	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_write, 2);

	/* set cnt max size */
	wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; /* -VLAN */
	wb_write[1] = 0;
	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE,
		    wb_write, 2);

	/* configure safc */
	wb_write[0] = 0x1000200;
	wb_write[1] = 0;
	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
		    wb_write, 2);

	/* fix for emulation */
	if (CHIP_REV(bp) == CHIP_REV_EMUL) {
		wb_write[0] = 0xf000;
		wb_write[1] = 0;
		REG_WR_DMAE(bp,
			    bmac_addr + BIGMAC_REGISTER_TX_PAUSE_THRESHOLD,
			    wb_write, 2);
	}

	/* reset old bmac stats */
	memset(&bp->old_bmac, 0, sizeof(struct bmac_stats));

	NIG_WR(NIG_REG_XCM0_OUT_EN + port*4, 0x0);

	/* select XGXS */
	NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0x1);
	NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0);

	/* disable the NIG in/out to the emac */
	NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0x0);
	NIG_WR(NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, 0x0);
	NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x0);

	/* enable the NIG in/out to the bmac */
	NIG_WR(NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0);

	NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0x1);
	/* pause frames only leave the NIG when tx flow control is on */
	val = 0;
	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val = 1;
	NIG_WR(NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, val);
	NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0x1);

	bp->phy_flags |= PHY_BMAC_FLAG;

	bp->stats_state = STATS_STATE_ENABLE;
}
2281
/* bnx2x_bmac_rx_disable - stop the BigMAC from accepting rx traffic.
 *
 * Read-modify-writes the 64-bit BMAC_CONTROL register to clear its
 * rx-enable bit.  Skipped entirely when the BigMAC block is still in
 * reset (the register would not be accessible).  Sleeps 1ms afterwards
 * to let in-flight frames drain.
 */
static void bnx2x_bmac_rx_disable(struct bnx2x *bp)
{
	int port = bp->port;
	u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
			       NIG_REG_INGRESS_BMAC0_MEM;
	u32 wb_write[2];

	/* Only if the bmac is out of reset */
	if (REG_RD(bp, MISC_REG_RESET_REG_2) &
			(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)) {
		/* Clear Rx Enable bit in BMAC_CONTROL register */
#ifdef BNX2X_DMAE_RD
		/* DMAE read lands in the slowpath wb_data scratch area */
		bnx2x_read_dmae(bp, bmac_addr +
				BIGMAC_REGISTER_BMAC_CONTROL, 2);
		wb_write[0] = *bnx2x_sp(bp, wb_data[0]);
		wb_write[1] = *bnx2x_sp(bp, wb_data[1]);
#else
		wb_write[0] = REG_RD(bp,
				bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL);
		wb_write[1] = REG_RD(bp,
				bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL + 4);
#endif
		wb_write[0] &= ~BMAC_CONTROL_RX_ENABLE;
		REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
			    wb_write, 2);
		msleep(1);
	}
}
2310
2311 static void bnx2x_emac_enable(struct bnx2x *bp)
2312 {
2313         int port = bp->port;
2314         u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
2315         u32 val;
2316         int timeout;
2317
2318         DP(NETIF_MSG_LINK, "enabling EMAC\n");
2319         /* reset and unreset the emac core */
2320         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
2321                (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
2322         msleep(5);
2323         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
2324                (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
2325
2326         /* enable emac and not bmac */
2327         NIG_WR(NIG_REG_EGRESS_EMAC0_PORT + port*4, 1);
2328
2329         /* for paladium */
2330         if (CHIP_REV(bp) == CHIP_REV_EMUL) {
2331                 /* Use lane 1 (of lanes 0-3) */
2332                 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
2333                 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
2334         }
2335         /* for fpga */
2336         else if (CHIP_REV(bp) == CHIP_REV_FPGA) {
2337                 /* Use lane 1 (of lanes 0-3) */
2338                 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
2339                 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
2340         }
2341         /* ASIC */
2342         else {
2343                 if (bp->phy_flags & PHY_XGXS_FLAG) {
2344                         DP(NETIF_MSG_LINK, "XGXS\n");
2345                         /* select the master lanes (out of 0-3) */
2346                         NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4,
2347                                bp->ser_lane);
2348                         /* select XGXS */
2349                         NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
2350
2351                 } else { /* SerDes */
2352                         DP(NETIF_MSG_LINK, "SerDes\n");
2353                         /* select SerDes */
2354                         NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
2355                 }
2356         }
2357
2358         /* enable emac */
2359         NIG_WR(NIG_REG_NIG_EMAC0_EN + port*4, 1);
2360
2361         /* init emac - use read-modify-write */
2362         /* self clear reset */
2363         val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2364         EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET));
2365
2366         timeout = 200;
2367         while (val & EMAC_MODE_RESET) {
2368                 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2369                 DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
2370                 if (!timeout) {
2371                         BNX2X_ERR("EMAC timeout!\n");
2372                         break;
2373                 }
2374                 timeout--;
2375         }
2376
2377         /* reset tx part */
2378         EMAC_WR(EMAC_REG_EMAC_TX_MODE, EMAC_TX_MODE_RESET);
2379
2380         timeout = 200;
2381         while (val & EMAC_TX_MODE_RESET) {
2382                 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_TX_MODE);
2383                 DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
2384                 if (!timeout) {
2385                         BNX2X_ERR("EMAC timeout!\n");
2386                         break;
2387                 }
2388                 timeout--;
2389         }
2390
2391         if (CHIP_REV_IS_SLOW(bp)) {
2392                 /* config GMII mode */
2393                 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2394                 EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII));
2395
2396         } else { /* ASIC */
2397                 /* pause enable/disable */
2398                 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
2399                                EMAC_RX_MODE_FLOW_EN);
2400                 if (bp->flow_ctrl & FLOW_CTRL_RX)
2401                         bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
2402                                       EMAC_RX_MODE_FLOW_EN);
2403
2404                 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
2405                                EMAC_TX_MODE_EXT_PAUSE_EN);
2406                 if (bp->flow_ctrl & FLOW_CTRL_TX)
2407                         bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
2408                                       EMAC_TX_MODE_EXT_PAUSE_EN);
2409         }
2410
2411         /* KEEP_VLAN_TAG, promiscuous */
2412         val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
2413         val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
2414         EMAC_WR(EMAC_REG_EMAC_RX_MODE, val);
2415
2416         /* identify magic packets */
2417         val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2418         EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_MPKT));
2419
2420         /* enable emac for jumbo packets */
2421         EMAC_WR(EMAC_REG_EMAC_RX_MTU_SIZE,
2422                 (EMAC_RX_MTU_SIZE_JUMBO_ENA |
2423                  (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD))); /* -VLAN */
2424
2425         /* strip CRC */
2426         NIG_WR(NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1);
2427
2428         val = ((bp->dev->dev_addr[0] << 8) |
2429                 bp->dev->dev_addr[1]);
2430         EMAC_WR(EMAC_REG_EMAC_MAC_MATCH, val);
2431
2432         val = ((bp->dev->dev_addr[2] << 24) |
2433                (bp->dev->dev_addr[3] << 16) |
2434                (bp->dev->dev_addr[4] << 8) |
2435                 bp->dev->dev_addr[5]);
2436         EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val);
2437
2438         /* disable the NIG in/out to the bmac */
2439         NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0x0);
2440         NIG_WR(NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, 0x0);
2441         NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0x0);
2442
2443         /* enable the NIG in/out to the emac */
2444         NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0x1);
2445         val = 0;
2446         if (bp->flow_ctrl & FLOW_CTRL_TX)
2447                 val = 1;
2448         NIG_WR(NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val);
2449         NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1);
2450
2451         if (CHIP_REV(bp) == CHIP_REV_FPGA) {
2452                 /* take the BigMac out of reset */
2453                 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
2454                        (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2455
2456                 /* enable access for bmac registers */
2457                 NIG_WR(NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
2458         }
2459
2460         bp->phy_flags |= PHY_EMAC_FLAG;
2461
2462         bp->stats_state = STATS_STATE_ENABLE;
2463 }
2464
2465 static void bnx2x_emac_program(struct bnx2x *bp)
2466 {
2467         u16 mode = 0;
2468         int port = bp->port;
2469
2470         DP(NETIF_MSG_LINK, "setting link speed & duplex\n");
2471         bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
2472                        (EMAC_MODE_25G_MODE |
2473                         EMAC_MODE_PORT_MII_10M |
2474                         EMAC_MODE_HALF_DUPLEX));
2475         switch (bp->line_speed) {
2476         case SPEED_10:
2477                 mode |= EMAC_MODE_PORT_MII_10M;
2478                 break;
2479
2480         case SPEED_100:
2481                 mode |= EMAC_MODE_PORT_MII;
2482                 break;
2483
2484         case SPEED_1000:
2485                 mode |= EMAC_MODE_PORT_GMII;
2486                 break;
2487
2488         case SPEED_2500:
2489                 mode |= (EMAC_MODE_25G_MODE | EMAC_MODE_PORT_GMII);
2490                 break;
2491
2492         default:
2493                 /* 10G not valid for EMAC */
2494                 BNX2X_ERR("Invalid line_speed 0x%x\n", bp->line_speed);
2495                 break;
2496         }
2497
2498         if (bp->duplex == DUPLEX_HALF)
2499                 mode |= EMAC_MODE_HALF_DUPLEX;
2500         bnx2x_bits_en(bp, GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
2501                       mode);
2502
2503         bnx2x_leds_set(bp, bp->line_speed);
2504 }
2505
2506 static void bnx2x_set_sgmii_tx_driver(struct bnx2x *bp)
2507 {
2508         u32 lp_up2;
2509         u32 tx_driver;
2510
2511         /* read precomp */
2512         MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_OVER_1G);
2513         bnx2x_mdio22_read(bp, MDIO_OVER_1G_LP_UP2, &lp_up2);
2514
2515         MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_TX0);
2516         bnx2x_mdio22_read(bp, MDIO_TX0_TX_DRIVER, &tx_driver);
2517
2518         /* bits [10:7] at lp_up2, positioned at [15:12] */
2519         lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
2520                    MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT) <<
2521                   MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT);
2522
2523         if ((lp_up2 != 0) &&
2524             (lp_up2 != (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK))) {
2525                 /* replace tx_driver bits [15:12] */
2526                 tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
2527                 tx_driver |= lp_up2;
2528                 bnx2x_mdio22_write(bp, MDIO_TX0_TX_DRIVER, tx_driver);
2529         }
2530 }
2531
/* Reprogram the PBF (tx buffering block) for the current link speed and
 * rx flow control setting: quiesce the port, wait for in-flight credit
 * to drain back to the initial value, then install a new arbiter
 * threshold and initial credit before re-enabling the port.
 */
static void bnx2x_pbf_update(struct bnx2x *bp)
{
	int port = bp->port;
	u32 init_crd, crd;
	u32 count = 1000;	/* poll limit: 1000 * 5ms = ~5s */
	u32 pause = 0;

	/* disable port */
	REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);

	/* wait for init credit */
	init_crd = REG_RD(bp, PBF_REG_P0_INIT_CRD + port*4);
	crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
	DP(NETIF_MSG_LINK, "init_crd 0x%x  crd 0x%x\n", init_crd, crd);

	/* poll until all outstanding credit has returned */
	while ((init_crd != crd) && count) {
		msleep(5);

		crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
		count--;
	}
	/* final read - if still mismatched the drain timed out */
	crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
	if (init_crd != crd)
		BNX2X_ERR("BUG! init_crd 0x%x != crd 0x%x\n", init_crd, crd);

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		pause = 1;
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, pause);
	if (pause) {
		/* update threshold */
		REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
		/* update init credit */
		init_crd = 778;		/* (800-18-4) */

	} else {
		/* threshold is the jumbo frame size in 16-byte units */
		u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)/16;

		/* update threshold */
		REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh);
		/* update init credit */
		/* NOTE(review): the 55/138/553 terms appear to be
		 * per-speed in-flight allowances minus a fixed 22 -
		 * confirm against the PBF hardware spec */
		switch (bp->line_speed) {
		case SPEED_10:
		case SPEED_100:
		case SPEED_1000:
			init_crd = thresh + 55 - 22;
			break;

		case SPEED_2500:
			init_crd = thresh + 138 - 22;
			break;

		case SPEED_10000:
			init_crd = thresh + 553 - 22;
			break;

		default:
			BNX2X_ERR("Invalid line_speed 0x%x\n",
				  bp->line_speed);
			break;
		}
	}
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, init_crd);
	DP(NETIF_MSG_LINK, "PBF updated to speed %d credit %d\n",
	   bp->line_speed, init_crd);

	/* probe the credit changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x0);

	/* enable port */
	REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0);
}
2605
2606 static void bnx2x_update_mng(struct bnx2x *bp)
2607 {
2608         if (!nomcp)
2609                 SHMEM_WR(bp, port_mb[bp->port].link_status,
2610                          bp->link_status);
2611 }
2612
2613 static void bnx2x_link_report(struct bnx2x *bp)
2614 {
2615         if (bp->link_up) {
2616                 netif_carrier_on(bp->dev);
2617                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2618
2619                 printk("%d Mbps ", bp->line_speed);
2620
2621                 if (bp->duplex == DUPLEX_FULL)
2622                         printk("full duplex");
2623                 else
2624                         printk("half duplex");
2625
2626                 if (bp->flow_ctrl) {
2627                         if (bp->flow_ctrl & FLOW_CTRL_RX) {
2628                                 printk(", receive ");
2629                                 if (bp->flow_ctrl & FLOW_CTRL_TX)
2630                                         printk("& transmit ");
2631                         } else {
2632                                 printk(", transmit ");
2633                         }
2634                         printk("flow control ON");
2635                 }
2636                 printk("\n");
2637
2638         } else { /* link_down */
2639                 netif_carrier_off(bp->dev);
2640                 printk(KERN_INFO PFX "%s NIC Link is Down\n", bp->dev->name);
2641         }
2642 }
2643
2644 static void bnx2x_link_up(struct bnx2x *bp)
2645 {
2646         int port = bp->port;
2647
2648         /* PBF - link up */
2649         bnx2x_pbf_update(bp);
2650
2651         /* disable drain */
2652         NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
2653
2654         /* update shared memory */
2655         bnx2x_update_mng(bp);
2656
2657         /* indicate link up */
2658         bnx2x_link_report(bp);
2659 }
2660
2661 static void bnx2x_link_down(struct bnx2x *bp)
2662 {
2663         int port = bp->port;
2664
2665         /* notify stats */
2666         if (bp->stats_state != STATS_STATE_DISABLE) {
2667                 bp->stats_state = STATS_STATE_STOP;
2668                 DP(BNX2X_MSG_STATS, "stats_state - STOP\n");
2669         }
2670
2671         /* indicate no mac active */
2672         bp->phy_flags &= ~(PHY_BMAC_FLAG | PHY_EMAC_FLAG);
2673
2674         /* update shared memory */
2675         bnx2x_update_mng(bp);
2676
2677         /* activate nig drain */
2678         NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
2679
2680         /* reset BigMac */
2681         bnx2x_bmac_rx_disable(bp);
2682         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
2683                (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2684
2685         /* indicate link down */
2686         bnx2x_link_report(bp);
2687 }
2688
2689 static void bnx2x_init_mac_stats(struct bnx2x *bp);
2690
/* This function is called upon link interrupt */
/* Re-evaluate the link state after a link interrupt: sample the XGXS
 * GP status (debounced), resolve speed/duplex/flow-control, ack the
 * interrupt, enable the matching MAC (BMAC for >=10G, EMAC otherwise)
 * and propagate the up/down transition.  May sleep (msleep below).
 */
static void bnx2x_link_update(struct bnx2x *bp)
{
	int port = bp->port;
	int i;
	u32 gp_status;
	int link_10g;	/* non-zero when the speed requires the BigMac */

	DP(NETIF_MSG_LINK, "port %x, %s, int_status 0x%x,"
	   " int_mask 0x%x, saved_mask 0x%x, MI_INT %x, SERDES_LINK %x,"
	   " 10G %x, XGXS_LINK %x\n", port,
	   (bp->phy_flags & PHY_XGXS_FLAG)? "XGXS":"SerDes",
	   REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4),
	   REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4), bp->nig_mask,
	   REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
	   REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c),
	   REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
	   REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)
	);

	might_sleep();
	MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_GP_STATUS);
	/* avoid fast toggling */
	/* read 10 times over ~100ms; only the last sample is used */
	for (i = 0; i < 10; i++) {
		msleep(10);
		bnx2x_mdio22_read(bp, MDIO_GP_STATUS_TOP_AN_STATUS1,
				  &gp_status);
	}

	bnx2x_link_settings_status(bp, gp_status);

	/* anything 10 and over uses the bmac */
	link_10g = ((bp->line_speed >= SPEED_10000) &&
		    (bp->line_speed <= SPEED_16000));

	bnx2x_link_int_ack(bp, link_10g);

	/* link is up only if both local phy and external phy are up */
	bp->link_up = (bp->phy_link_up && bnx2x_ext_phy_is_link_up(bp));
	if (bp->link_up) {
		if (link_10g) {
			bnx2x_bmac_enable(bp, 0);
			bnx2x_leds_set(bp, SPEED_10000);

		} else {
			bnx2x_emac_enable(bp);
			bnx2x_emac_program(bp);

			/* AN complete? */
			if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
				/* non-SGMII only: mirror the partner's
				 * pre-emphasis request */
				if (!(bp->phy_flags & PHY_SGMII_FLAG))
					bnx2x_set_sgmii_tx_driver(bp);
			}
		}
		bnx2x_link_up(bp);

	} else { /* link down */
		bnx2x_leds_unset(bp);
		bnx2x_link_down(bp);
	}

	bnx2x_init_mac_stats(bp);
}
2754
2755 /*
2756  * Init service functions
2757  */
2758
2759 static void bnx2x_set_aer_mmd(struct bnx2x *bp)
2760 {
2761         u16 offset = (bp->phy_flags & PHY_XGXS_FLAG) ?
2762                                         (bp->phy_addr + bp->ser_lane) : 0;
2763
2764         MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_AER_BLOCK);
2765         bnx2x_mdio22_write(bp, MDIO_AER_BLOCK_AER_REG, 0x3800 + offset);
2766 }
2767
2768 static void bnx2x_set_master_ln(struct bnx2x *bp)
2769 {
2770         u32 new_master_ln;
2771
2772         /* set the master_ln for AN */
2773         MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
2774         bnx2x_mdio22_read(bp, MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
2775                           &new_master_ln);
2776         bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
2777                            (new_master_ln | bp->ser_lane));
2778 }
2779
2780 static void bnx2x_reset_unicore(struct bnx2x *bp)
2781 {
2782         u32 mii_control;
2783         int i;
2784
2785         MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2786         bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
2787         /* reset the unicore */
2788         bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
2789                            (mii_control | MDIO_COMBO_IEEO_MII_CONTROL_RESET));
2790
2791         /* wait for the reset to self clear */
2792         for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) {
2793                 udelay(5);
2794
2795                 /* the reset erased the previous bank value */
2796                 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2797                 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
2798                                   &mii_control);
2799
2800                 if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) {
2801                         udelay(5);
2802                         return;
2803                 }
2804         }
2805
2806         BNX2X_ERR("BUG! %s (0x%x) is still in reset!\n",
2807                   (bp->phy_flags & PHY_XGXS_FLAG)? "XGXS":"SerDes",
2808                   bp->phy_addr);
2809 }
2810
2811 static void bnx2x_set_swap_lanes(struct bnx2x *bp)
2812 {
2813         /* Each two bits represents a lane number:
2814            No swap is 0123 => 0x1b no need to enable the swap */
2815
2816         MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
2817         if (bp->rx_lane_swap != 0x1b) {
2818                 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_RX_LN_SWAP,
2819                                    (bp->rx_lane_swap |
2820                                     MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
2821                                    MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
2822         } else {
2823                 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
2824         }
2825
2826         if (bp->tx_lane_swap != 0x1b) {
2827                 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TX_LN_SWAP,
2828                                    (bp->tx_lane_swap |
2829                                     MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
2830         } else {
2831                 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
2832         }
2833 }
2834
2835 static void bnx2x_set_parallel_detection(struct bnx2x *bp)
2836 {
2837         u32 control2;
2838
2839         MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
2840         bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
2841                           &control2);
2842
2843         if (bp->autoneg & AUTONEG_PARALLEL) {
2844                 control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
2845         } else {
2846                 control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
2847         }
2848         bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
2849                            control2);
2850
2851         if (bp->phy_flags & PHY_XGXS_FLAG) {
2852                 DP(NETIF_MSG_LINK, "XGXS\n");
2853                 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_10G_PARALLEL_DETECT);
2854
2855                 bnx2x_mdio22_write(bp,
2856                                 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
2857                                MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
2858
2859                 bnx2x_mdio22_read(bp,
2860                                 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
2861                                 &control2);
2862
2863                 if (bp->autoneg & AUTONEG_PARALLEL) {
2864                         control2 |=
2865                     MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
2866                 } else {
2867                         control2 &=
2868                    ~MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
2869                 }
2870                 bnx2x_mdio22_write(bp,
2871                                 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
2872                                 control2);
2873
2874                 /* Disable parallel detection of HiG */
2875                 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
2876                 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
2877                                 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
2878                                 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
2879         }
2880 }
2881
/* Program the autonegotiation mode across the relevant MDIO banks
 * according to bp->req_autoneg / bp->autoneg: CL37 aneg, signal-detect
 * autodetection, TetonII/BAM next-page aneg and CL73 aneg (with the
 * advertised KX/KX4 ability chosen by SerDes vs XGXS).
 */
static void bnx2x_set_autoneg(struct bnx2x *bp)
{
	u32 reg_val;

	/* CL37 Autoneg */
	MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
	bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
	if ((bp->req_autoneg & AUTONEG_SPEED) &&
	    (bp->autoneg & AUTONEG_CL37)) {
		/* CL37 Autoneg Enabled */
		reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN;
	} else {
		/* CL37 Autoneg Disabled */
		reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
			     MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN);
	}
	bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);

	/* Enable/Disable Autodetection */
	MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
	bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
	/* signal-detect enable is always cleared here */
	reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN;

	if ((bp->req_autoneg & AUTONEG_SPEED) &&
	    (bp->autoneg & AUTONEG_SGMII_FIBER_AUTODET)) {
		reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
	} else {
		reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
	}
	bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);

	/* Enable TetonII and BAM autoneg */
	MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_BAM_NEXT_PAGE);
	bnx2x_mdio22_read(bp, MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
			  &reg_val);
	if ((bp->req_autoneg & AUTONEG_SPEED) &&
	    (bp->autoneg & AUTONEG_CL37) && (bp->autoneg & AUTONEG_BAM)) {
		/* Enable BAM aneg Mode and TetonII aneg Mode */
		reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
			    MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
	} else {
		/* TetonII and BAM Autoneg Disabled */
		reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
			     MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
	}
	bnx2x_mdio22_write(bp, MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
			   reg_val);

	/* Enable Clause 73 Aneg */
	if ((bp->req_autoneg & AUTONEG_SPEED) &&
	    (bp->autoneg & AUTONEG_CL73)) {
		/* Enable BAM Station Manager */
		MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_USERB0);
		bnx2x_mdio22_write(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL1,
				   (MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
			MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN |
			MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN));

		/* Merge CL73 and CL37 aneg resolution */
		bnx2x_mdio22_read(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL3,
				  &reg_val);
		bnx2x_mdio22_write(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL3,
				   (reg_val |
			MDIO_CL73_USERB0_CL73_BAM_CTRL3_USE_CL73_HCD_MR));

		/* Set the CL73 AN speed */
		MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB1);
		bnx2x_mdio22_read(bp, MDIO_CL73_IEEEB1_AN_ADV2, &reg_val);
		/* In the SerDes we support only the 1G.
		   In the XGXS we support the 10G KX4
		   but we currently do not support the KR */
		if (bp->phy_flags & PHY_XGXS_FLAG) {
			DP(NETIF_MSG_LINK, "XGXS\n");
			/* 10G KX4 */
			reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
		} else {
			DP(NETIF_MSG_LINK, "SerDes\n");
			/* 1000M KX */
			reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
		}
		bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB1_AN_ADV2, reg_val);

		/* CL73 Autoneg Enabled */
		reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;
	} else {
		/* CL73 Autoneg Disabled */
		reg_val = 0;
	}
	MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
	bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
}
2973
2974 /* program SerDes, forced speed */
2975 static void bnx2x_program_serdes(struct bnx2x *bp)
2976 {
2977         u32 reg_val;
2978
2979         /* program duplex, disable autoneg */
2980         MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2981         bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
2982         reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX |
2983                      MDIO_COMBO_IEEO_MII_CONTROL_AN_EN);
2984         if (bp->req_duplex == DUPLEX_FULL)
2985                 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
2986         bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
2987
2988         /* program speed
2989            - needed only if the speed is greater than 1G (2.5G or 10G) */
2990         if (bp->req_line_speed > SPEED_1000) {
2991                 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
2992                 bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_MISC1, &reg_val);
2993                 /* clearing the speed value before setting the right speed */
2994                 reg_val &= ~MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK;
2995                 reg_val |= (MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M |
2996                             MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
2997                 if (bp->req_line_speed == SPEED_10000)
2998                         reg_val |=
2999                                 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4;
3000                 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_MISC1, reg_val);
3001         }
3002 }
3003
3004 static void bnx2x_set_brcm_cl37_advertisment(struct bnx2x *bp)
3005 {
3006         u32 val = 0;
3007
3008         /* configure the 48 bits for BAM AN */
3009         MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_OVER_1G);
3010
3011         /* set extended capabilities */
3012         if (bp->advertising & ADVERTISED_2500baseX_Full)
3013                 val |= MDIO_OVER_1G_UP1_2_5G;
3014         if (bp->advertising & ADVERTISED_10000baseT_Full)
3015                 val |= MDIO_OVER_1G_UP1_10G;
3016         bnx2x_mdio22_write(bp, MDIO_OVER_1G_UP1, val);
3017
3018         bnx2x_mdio22_write(bp, MDIO_OVER_1G_UP3, 0);
3019 }
3020
3021 static void bnx2x_set_ieee_aneg_advertisment(struct bnx2x *bp)
3022 {
3023         u32 an_adv;
3024
3025         /* for AN, we are always publishing full duplex */
3026         an_adv = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
3027
3028         /* resolve pause mode and advertisement
3029          * Please refer to Table 28B-3 of the 802.3ab-1999 spec */
3030         if (bp->req_autoneg & AUTONEG_FLOW_CTRL) {
3031                 switch (bp->req_flow_ctrl) {
3032                 case FLOW_CTRL_AUTO:
3033                         if (bp->dev->mtu <= 4500) {
3034                                 an_adv |=
3035                                      MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3036                                 bp->advertising |= (ADVERTISED_Pause |
3037                                                     ADVERTISED_Asym_Pause);
3038                         } else {
3039                                 an_adv |=
3040                                MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3041                                 bp->advertising |= ADVERTISED_Asym_Pause;
3042                         }
3043                         break;
3044
3045                 case FLOW_CTRL_TX:
3046                         an_adv |=
3047                                MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3048                         bp->advertising |= ADVERTISED_Asym_Pause;
3049                         break;
3050
3051                 case FLOW_CTRL_RX:
3052                         if (bp->dev->mtu <= 4500) {
3053                                 an_adv |=
3054                                      MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3055                                 bp->advertising |= (ADVERTISED_Pause |
3056                                                     ADVERTISED_Asym_Pause);
3057                         } else {
3058                                 an_adv |=
3059                                      MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
3060                                 bp->advertising &= ~(ADVERTISED_Pause |
3061                                                      ADVERTISED_Asym_Pause);
3062                         }
3063                         break;
3064
3065                 case FLOW_CTRL_BOTH:
3066                         if (bp->dev->mtu <= 4500) {
3067                                 an_adv |=
3068                                      MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3069                                 bp->advertising |= (ADVERTISED_Pause |
3070                                                     ADVERTISED_Asym_Pause);
3071                         } else {
3072                                 an_adv |=
3073                                MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3074                                 bp->advertising |= ADVERTISED_Asym_Pause;
3075                         }
3076                         break;
3077
3078                 case FLOW_CTRL_NONE:
3079                 default:
3080                         an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
3081                         bp->advertising &= ~(ADVERTISED_Pause |
3082                                              ADVERTISED_Asym_Pause);
3083                         break;
3084                 }
3085         } else { /* forced mode */
3086                 switch (bp->req_flow_ctrl) {
3087                 case FLOW_CTRL_AUTO:
3088                         DP(NETIF_MSG_LINK, "req_flow_ctrl 0x%x while"
3089                                            " req_autoneg 0x%x\n",
3090                            bp->req_flow_ctrl, bp->req_autoneg);
3091                         break;
3092
3093                 case FLOW_CTRL_TX:
3094                         an_adv |=
3095                                MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3096                         bp->advertising |= ADVERTISED_Asym_Pause;
3097                         break;
3098
3099                 case FLOW_CTRL_RX:
3100                 case FLOW_CTRL_BOTH:
3101                         an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3102                         bp->advertising |= (ADVERTISED_Pause |
3103                                             ADVERTISED_Asym_Pause);
3104                         break;
3105
3106                 case FLOW_CTRL_NONE:
3107                 default:
3108                         an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
3109                         bp->advertising &= ~(ADVERTISED_Pause |
3110                                              ADVERTISED_Asym_Pause);
3111                         break;
3112                 }
3113         }
3114
3115         MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
3116         bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_AUTO_NEG_ADV, an_adv);
3117 }
3118
3119 static void bnx2x_restart_autoneg(struct bnx2x *bp)
3120 {
3121         if (bp->autoneg & AUTONEG_CL73) {
3122                 /* enable and restart clause 73 aneg */
3123                 u32 an_ctrl;
3124
3125                 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
3126                 bnx2x_mdio22_read(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
3127                                   &an_ctrl);
3128                 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
3129                                    (an_ctrl |
3130                                     MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
3131                                 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
3132
3133         } else {
3134                 /* Enable and restart BAM/CL37 aneg */
3135                 u32 mii_control;
3136
3137                 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
3138                 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3139                                   &mii_control);
3140                 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3141                                    (mii_control |
3142                                     MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
3143                                     MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
3144         }
3145 }
3146
3147 static void bnx2x_initialize_sgmii_process(struct bnx2x *bp)
3148 {
3149         u32 control1;
3150
3151         /* in SGMII mode, the unicore is always slave */
3152         MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
3153         bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
3154                           &control1);
3155         control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
3156         /* set sgmii mode (and not fiber) */
3157         control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
3158                       MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
3159                       MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
3160         bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
3161                            control1);
3162
3163         /* if forced speed */
3164         if (!(bp->req_autoneg & AUTONEG_SPEED)) {
3165                 /* set speed, disable autoneg */
3166                 u32 mii_control;
3167
3168                 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
3169                 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3170                                   &mii_control);
3171                 mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
3172                                MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK |
3173                                  MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);
3174
3175                 switch (bp->req_line_speed) {
3176                 case SPEED_100:
3177                         mii_control |=
3178                                 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100;
3179                         break;
3180                 case SPEED_1000:
3181                         mii_control |=
3182                                 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000;
3183                         break;
3184                 case SPEED_10:
3185                         /* there is nothing to set for 10M */
3186                         break;
3187                 default:
3188                         /* invalid speed for SGMII */
3189                         DP(NETIF_MSG_LINK, "Invalid req_line_speed 0x%x\n",
3190                            bp->req_line_speed);
3191                         break;
3192                 }
3193
3194                 /* setting the full duplex */
3195                 if (bp->req_duplex == DUPLEX_FULL)
3196                         mii_control |=
3197                                 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
3198                 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3199                                    mii_control);
3200
3201         } else { /* AN mode */
3202                 /* enable and restart AN */
3203                 bnx2x_restart_autoneg(bp);
3204         }
3205 }
3206
3207 static void bnx2x_link_int_enable(struct bnx2x *bp)
3208 {
3209         int port = bp->port;
3210         u32 ext_phy_type;
3211         u32 mask;
3212
3213         /* setting the status to report on link up
3214            for either XGXS or SerDes */
3215         bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
3216                        (NIG_STATUS_XGXS0_LINK10G |
3217                         NIG_STATUS_XGXS0_LINK_STATUS |
3218                         NIG_STATUS_SERDES0_LINK_STATUS));
3219
3220         if (bp->phy_flags & PHY_XGXS_FLAG) {
3221                 mask = (NIG_MASK_XGXS0_LINK10G |
3222                         NIG_MASK_XGXS0_LINK_STATUS);
3223                 DP(NETIF_MSG_LINK, "enabled XGXS interrupt\n");
3224                 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
3225                 if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
3226                     (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
3227                     (ext_phy_type !=
3228                                 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) {
3229                         mask |= NIG_MASK_MI_INT;
3230                         DP(NETIF_MSG_LINK, "enabled external phy int\n");
3231                 }
3232
3233         } else { /* SerDes */
3234                 mask = NIG_MASK_SERDES0_LINK_STATUS;
3235                 DP(NETIF_MSG_LINK, "enabled SerDes interrupt\n");
3236                 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
3237                 if ((ext_phy_type !=
3238                                 PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) &&
3239                     (ext_phy_type !=
3240                                 PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN)) {
3241                         mask |= NIG_MASK_MI_INT;
3242                         DP(NETIF_MSG_LINK, "enabled external phy int\n");
3243                 }
3244         }
3245         bnx2x_bits_en(bp,
3246                       NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
3247                       mask);
3248         DP(NETIF_MSG_LINK, "port %x, %s, int_status 0x%x,"
3249            " int_mask 0x%x, MI_INT %x, SERDES_LINK %x,"
3250            " 10G %x, XGXS_LINK %x\n", port,
3251            (bp->phy_flags & PHY_XGXS_FLAG)? "XGXS":"SerDes",
3252            REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4),
3253            REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
3254            REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
3255            REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c),
3256            REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
3257            REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)
3258         );
3259 }
3260
/* Boot the BCM8072 external PHY from its external SPI ROM and log the
 * firmware version it loaded.  This is a fixed vendor bring-up sequence:
 * the write order, delays and register values must not be changed.
 */
static void bnx2x_bcm8072_external_rom_boot(struct bnx2x *bp)
{
	/* external PHY MDIO address, taken from the port HW config word */
	u32 ext_phy_addr = ((bp->ext_phy_config &
			     PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
			    PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
	u32 fw_ver1, fw_ver2;

	/* Need to wait 200ms after reset */
	msleep(200);
	/* Boot port from external ROM
	 * Set ser_boot_ctl bit in the MISC_CTRL1 register
	 */
	bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
				EXT_PHY_KR_PMA_PMD_DEVAD,
				EXT_PHY_KR_MISC_CTRL1, 0x0001);

	/* Reset internal microprocessor */
	bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
				EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL,
				EXT_PHY_KR_ROM_RESET_INTERNAL_MP);
	/* set micro reset = 0 */
	bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
				EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL,
				EXT_PHY_KR_ROM_MICRO_RESET);
	/* Reset internal microprocessor again, per the bring-up sequence */
	bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
				EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL,
				EXT_PHY_KR_ROM_RESET_INTERNAL_MP);
	/* wait for 100ms for code download via SPI port */
	msleep(100);

	/* Clear ser_boot_ctl bit */
	bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
				EXT_PHY_KR_PMA_PMD_DEVAD,
				EXT_PHY_KR_MISC_CTRL1, 0x0000);
	/* Wait 100ms */
	msleep(100);

	/* Print the PHY FW version.  0xca19/0xca1a appear to be
	 * vendor-specific FW version registers - TODO confirm against the
	 * 8072 documentation.
	 */
	bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0, ext_phy_addr,
			       EXT_PHY_KR_PMA_PMD_DEVAD,
			       0xca19, &fw_ver1);
	bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0, ext_phy_addr,
			       EXT_PHY_KR_PMA_PMD_DEVAD,
			       0xca1a, &fw_ver2);
	DP(NETIF_MSG_LINK,
	   "8072 FW version 0x%x:0x%x\n", fw_ver1, fw_ver2);
}
3309
3310 static void bnx2x_bcm8072_force_10G(struct bnx2x *bp)
3311 {
3312         u32 ext_phy_addr = ((bp->ext_phy_config &
3313                              PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3314                             PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3315
3316         /* Force KR or KX */
3317         bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3318                                 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_CTRL,
3319                                 0x2040);
3320         bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3321                                 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_CTRL2,
3322                                 0x000b);
3323         bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3324                                 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_PMD_CTRL,
3325                                 0x0000);
3326         bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3327                                 EXT_PHY_KR_AUTO_NEG_DEVAD, EXT_PHY_KR_CTRL,
3328                                 0x0000);
3329 }
3330
/* Initialize the external PHY according to its configured type.
 *
 * For XGXS ports: wait for the PHY soft reset to clear (except the 8072,
 * which does this under its own MDIO hardware lock), then run the
 * type-specific bring-up: LASI enable, forced-speed or auto-negotiation
 * advertisement setup, and (8072) external ROM boot.
 * For SerDes ports: only the PHY type is logged; no programming is done.
 *
 * Register addresses/values written with raw hex constants are part of
 * vendor bring-up sequences for the specific PHYs.
 */
static void bnx2x_ext_phy_init(struct bnx2x *bp)
{
	u32 ext_phy_type;
	u32 ext_phy_addr;
	u32 cnt;
	u32 ctrl;
	u32 val = 0;	/* accumulates 1G (bit 5) / 10G (bit 7) AN adv bits */

	if (bp->phy_flags & PHY_XGXS_FLAG) {
		ext_phy_addr = ((bp->ext_phy_config &
				 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
				PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);

		ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
		/* Make sure that the soft reset is off (except for the 8072:
		 * due to the lock, it will be done inside the specific
		 * handling)
		 */
		if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
		    (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
		   (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN) &&
		    (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072)) {
			/* Wait for soft reset to get cleared up to 1 sec
			 * (bit 15 of the control register is the reset bit)
			 */
			for (cnt = 0; cnt < 1000; cnt++) {
				bnx2x_mdio45_read(bp, ext_phy_addr,
						  EXT_PHY_OPT_PMA_PMD_DEVAD,
						  EXT_PHY_OPT_CNTL, &ctrl);
				if (!(ctrl & (1<<15)))
					break;
				msleep(1);
			}
			DP(NETIF_MSG_LINK,
			   "control reg 0x%x (after %d ms)\n", ctrl, cnt);
		}

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			DP(NETIF_MSG_LINK, "XGXS Direct\n");
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			DP(NETIF_MSG_LINK, "XGXS 8705\n");

			/* 8705 bring-up: misc control, PHY identifier,
			 * CMU PLL bypass, then LASI enable on the WIS devad
			 */
			bnx2x_mdio45_vwrite(bp, ext_phy_addr,
					    EXT_PHY_OPT_PMA_PMD_DEVAD,
					    EXT_PHY_OPT_PMD_MISC_CNTL,
					    0x8288);
			bnx2x_mdio45_vwrite(bp, ext_phy_addr,
					    EXT_PHY_OPT_PMA_PMD_DEVAD,
					    EXT_PHY_OPT_PHY_IDENTIFIER,
					    0x7fbf);
			bnx2x_mdio45_vwrite(bp, ext_phy_addr,
					    EXT_PHY_OPT_PMA_PMD_DEVAD,
					    EXT_PHY_OPT_CMU_PLL_BYPASS,
					    0x0100);
			bnx2x_mdio45_vwrite(bp, ext_phy_addr,
					    EXT_PHY_OPT_WIS_DEVAD,
					    EXT_PHY_OPT_LASI_CNTL, 0x1);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			DP(NETIF_MSG_LINK, "XGXS 8706\n");

			if (!(bp->req_autoneg & AUTONEG_SPEED)) {
				/* Force speed */
				if (bp->req_line_speed == SPEED_10000) {
					DP(NETIF_MSG_LINK,
					   "XGXS 8706 force 10Gbps\n");
					bnx2x_mdio45_vwrite(bp, ext_phy_addr,
						EXT_PHY_OPT_PMA_PMD_DEVAD,
						EXT_PHY_OPT_PMD_DIGITAL_CNT,
						0x400);
				} else {
					/* Force 1Gbps */
					DP(NETIF_MSG_LINK,
					   "XGXS 8706 force 1Gbps\n");

					bnx2x_mdio45_vwrite(bp, ext_phy_addr,
						EXT_PHY_OPT_PMA_PMD_DEVAD,
						EXT_PHY_OPT_CNTL,
						0x0040);

					bnx2x_mdio45_vwrite(bp, ext_phy_addr,
						EXT_PHY_OPT_PMA_PMD_DEVAD,
						EXT_PHY_OPT_CNTL2,
						0x000D);
				}

				/* Enable LASI */
				bnx2x_mdio45_vwrite(bp, ext_phy_addr,
						    EXT_PHY_OPT_PMA_PMD_DEVAD,
						    EXT_PHY_OPT_LASI_CNTL,
						    0x1);
			} else {
				/* AUTONEG */
				/* Allow CL37 through CL73 */
				DP(NETIF_MSG_LINK, "XGXS 8706 AutoNeg\n");
				bnx2x_mdio45_vwrite(bp, ext_phy_addr,
						    EXT_PHY_AUTO_NEG_DEVAD,
						    EXT_PHY_OPT_AN_CL37_CL73,
						    0x040c);

				/* Enable Full-Duplex advertisement on CL37 */
				bnx2x_mdio45_vwrite(bp, ext_phy_addr,
						    EXT_PHY_AUTO_NEG_DEVAD,
						    EXT_PHY_OPT_AN_CL37_FD,
						    0x0020);
				/* Enable CL37 AN */
				bnx2x_mdio45_vwrite(bp, ext_phy_addr,
						    EXT_PHY_AUTO_NEG_DEVAD,
						    EXT_PHY_OPT_AN_CL37_AN,
						    0x1000);
				/* Advertise 10G/1G support */
				if (bp->advertising &
				    ADVERTISED_1000baseT_Full)
					val = (1<<5);
				if (bp->advertising &
				    ADVERTISED_10000baseT_Full)
					val |= (1<<7);

				bnx2x_mdio45_vwrite(bp, ext_phy_addr,
						    EXT_PHY_AUTO_NEG_DEVAD,
						    EXT_PHY_OPT_AN_ADV, val);
				/* Enable LASI */
				bnx2x_mdio45_vwrite(bp, ext_phy_addr,
						    EXT_PHY_OPT_PMA_PMD_DEVAD,
						    EXT_PHY_OPT_LASI_CNTL,
						    0x1);

				/* Enable clause 73 AN */
				bnx2x_mdio45_write(bp, ext_phy_addr,
						   EXT_PHY_AUTO_NEG_DEVAD,
						   EXT_PHY_OPT_CNTL,
						   0x1200);
			}
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			/* All 8072 MDIO access is serialized by a HW lock */
			bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
			/* Wait for soft reset to get cleared up to 1 sec */
			for (cnt = 0; cnt < 1000; cnt++) {
				bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
						ext_phy_addr,
						EXT_PHY_OPT_PMA_PMD_DEVAD,
						EXT_PHY_OPT_CNTL, &ctrl);
				if (!(ctrl & (1<<15)))
					break;
				msleep(1);
			}
			DP(NETIF_MSG_LINK,
			   "8072 control reg 0x%x (after %d ms)\n",
			   ctrl, cnt);

			bnx2x_bcm8072_external_rom_boot(bp);
			DP(NETIF_MSG_LINK, "Finshed loading 8072 KR ROM\n");

			/* enable LASI */
			bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
						ext_phy_addr,
						EXT_PHY_KR_PMA_PMD_DEVAD,
						0x9000, 0x0400);
			bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
						ext_phy_addr,
						EXT_PHY_KR_PMA_PMD_DEVAD,
						EXT_PHY_KR_LASI_CNTL, 0x0004);

			/* If this is forced speed, set to KR or KX
			 * (all other are not supported)
			 */
			if (!(bp->req_autoneg & AUTONEG_SPEED)) {
				if (bp->req_line_speed == SPEED_10000) {
					bnx2x_bcm8072_force_10G(bp);
					DP(NETIF_MSG_LINK,
					   "Forced speed 10G on 8072\n");
					/* unlock */
					bnx2x_hw_unlock(bp,
						HW_LOCK_RESOURCE_8072_MDIO);
					break;
				} else
					/* forced non-10G: advertise 1G only */
					val = (1<<5);
			} else {

				/* Advertise 10G/1G support */
				if (bp->advertising &
						ADVERTISED_1000baseT_Full)
					val = (1<<5);
				if (bp->advertising &
						ADVERTISED_10000baseT_Full)
					val |= (1<<7);
			}
			/* 0x11 appears to be the AN advertisement register
			 * on this devad - TODO confirm against 8072 docs
			 */
			bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
					ext_phy_addr,
					EXT_PHY_KR_AUTO_NEG_DEVAD,
					0x11, val);
			/* Add support for CL37 ( passive mode ) I */
			bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
						ext_phy_addr,
						EXT_PHY_KR_AUTO_NEG_DEVAD,
						0x8370, 0x040c);
			/* Add support for CL37 ( passive mode ) II */
			bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
						ext_phy_addr,
						EXT_PHY_KR_AUTO_NEG_DEVAD,
						0xffe4, 0x20);
			/* Add support for CL37 ( passive mode ) III */
			bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
						ext_phy_addr,
						EXT_PHY_KR_AUTO_NEG_DEVAD,
						0xffe0, 0x1000);
			/* Restart autoneg */
			msleep(500);
			bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
					ext_phy_addr,
					EXT_PHY_KR_AUTO_NEG_DEVAD,
					EXT_PHY_KR_CTRL, 0x1200);
			DP(NETIF_MSG_LINK, "8072 Autoneg Restart: "
			   "1G %ssupported  10G %ssupported\n",
			   (val & (1<<5)) ? "" : "not ",
			   (val & (1<<7)) ? "" : "not ");

			/* unlock */
			bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			DP(NETIF_MSG_LINK,
			   "Setting the SFX7101 LASI indication\n");
			bnx2x_mdio45_vwrite(bp, ext_phy_addr,
					    EXT_PHY_OPT_PMA_PMD_DEVAD,
					    EXT_PHY_OPT_LASI_CNTL, 0x1);
			DP(NETIF_MSG_LINK,
			   "Setting the SFX7101 LED to blink on traffic\n");
			bnx2x_mdio45_vwrite(bp, ext_phy_addr,
					    EXT_PHY_OPT_PMA_PMD_DEVAD,
					    0xC007, (1<<3));

			/* read modify write pause advertising */
			bnx2x_mdio45_read(bp, ext_phy_addr,
					  EXT_PHY_KR_AUTO_NEG_DEVAD,
					  EXT_PHY_KR_AUTO_NEG_ADVERT, &val);
			val &= ~EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_BOTH;
			/* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
			if (bp->advertising & ADVERTISED_Pause)
				val |= EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE;

			if (bp->advertising & ADVERTISED_Asym_Pause) {
				val |=
				 EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_ASYMMETRIC;
			}
			DP(NETIF_MSG_LINK, "SFX7101 AN advertize 0x%x\n", val);
			bnx2x_mdio45_vwrite(bp, ext_phy_addr,
					    EXT_PHY_KR_AUTO_NEG_DEVAD,
					    EXT_PHY_KR_AUTO_NEG_ADVERT, val);
			/* Restart autoneg (set the restart bit, 0x200) */
			bnx2x_mdio45_read(bp, ext_phy_addr,
					  EXT_PHY_KR_AUTO_NEG_DEVAD,
					  EXT_PHY_KR_CTRL, &val);
			val |= 0x200;
			bnx2x_mdio45_write(bp, ext_phy_addr,
					    EXT_PHY_KR_AUTO_NEG_DEVAD,
					    EXT_PHY_KR_CTRL, val);
			break;

		default:
			BNX2X_ERR("BAD XGXS ext_phy_config 0x%x\n",
				  bp->ext_phy_config);
			break;
		}

	} else { /* SerDes */
/*		ext_phy_addr = ((bp->ext_phy_config &
				 PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK) >>
				PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT);
*/
		/* SerDes external PHYs need no programming here; only the
		 * configured type is logged
		 */
		ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			DP(NETIF_MSG_LINK, "SerDes Direct\n");
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			DP(NETIF_MSG_LINK, "SerDes 5482\n");
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
			   bp->ext_phy_config);
			break;
		}
	}
}
3622
/* Reset the external PHY.  On most boards this pulses GPIO 1 low for 1ms;
 * then the type-specific reset register write is issued (8705/8706 via
 * plain MDIO, 8072 under its HW lock via the EMAC0 MDIO controller).
 *
 * Note: ext_phy_addr is extracted with the XGXS mask/shift unconditionally;
 * the SerDes branch below never uses it, so this is harmless there.
 */
static void bnx2x_ext_phy_reset(struct bnx2x *bp)
{
	u32 ext_phy_type;
	u32 ext_phy_addr = ((bp->ext_phy_config &
			     PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
			    PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
	u32 board = (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK);

	/* The PHY reset is controlled by GPIO 1
	 * Give it 1ms of reset pulse
	 * (skipped on the T1002G/T1003G boards)
	 */
	if ((board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1002G) &&
	    (board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1003G)) {
		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
			       MISC_REGISTERS_GPIO_OUTPUT_LOW);
		msleep(1);
		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
			       MISC_REGISTERS_GPIO_OUTPUT_HIGH);
	}

	if (bp->phy_flags & PHY_XGXS_FLAG) {
		ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			DP(NETIF_MSG_LINK, "XGXS Direct\n");
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			DP(NETIF_MSG_LINK, "XGXS 8705/8706\n");
			bnx2x_mdio45_write(bp, ext_phy_addr,
					   EXT_PHY_OPT_PMA_PMD_DEVAD,
					   EXT_PHY_OPT_CNTL, 0xa040);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			DP(NETIF_MSG_LINK, "XGXS 8072\n");
			/* write bit 15 to register 0 - presumably the
			 * PMA/PMD control soft-reset bit; done under the
			 * 8072 MDIO hardware lock
			 */
			bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
			bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
						ext_phy_addr,
						EXT_PHY_KR_PMA_PMD_DEVAD,
						0, 1<<15);
			bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			DP(NETIF_MSG_LINK, "XGXS SFX7101\n");
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->ext_phy_config);
			break;
		}

	} else { /* SerDes */
		/* SerDes external PHYs: nothing beyond the GPIO pulse */
		ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			DP(NETIF_MSG_LINK, "SerDes Direct\n");
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			DP(NETIF_MSG_LINK, "SerDes 5482\n");
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
			   bp->ext_phy_config);
			break;
		}
	}
}
3696
/* Bring up the link on the internal SerDes/XGXS unicore.
 *
 * Masks link attentions while reconfiguring, resets the external PHY and
 * the unicore, programs either forced speed or autonegotiation (SGMII or
 * 1000X/CL37 depending on phy_flags), initializes the external PHY and
 * finally re-enables the link interrupt.
 */
static void bnx2x_link_initialize(struct bnx2x *bp)
{
	int port = bp->port;

	/* disable attentions */
	bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
		       (NIG_MASK_XGXS0_LINK_STATUS |
			NIG_MASK_XGXS0_LINK10G |
			NIG_MASK_SERDES0_LINK_STATUS |
			NIG_MASK_MI_INT));

	/* Activate the external PHY */
	bnx2x_ext_phy_reset(bp);

	bnx2x_set_aer_mmd(bp);

	if (bp->phy_flags & PHY_XGXS_FLAG)
		bnx2x_set_master_ln(bp);

	/* reset the SerDes and wait for reset bit return low */
	bnx2x_reset_unicore(bp);

	/* set the AER selection again
	 * (NOTE(review): presumably cleared by the unicore reset - confirm) */
	bnx2x_set_aer_mmd(bp);

	/* setting the masterLn_def again after the reset */
	if (bp->phy_flags & PHY_XGXS_FLAG) {
		bnx2x_set_master_ln(bp);
		bnx2x_set_swap_lanes(bp);
	}

	/* Set Parallel Detect */
	if (bp->req_autoneg & AUTONEG_SPEED)
		bnx2x_set_parallel_detection(bp);

	/* XGXS requested to run below 1G works in SGMII mode - track that
	 * in phy_flags so the blocks below pick the right programming path */
	if (bp->phy_flags & PHY_XGXS_FLAG) {
		if (bp->req_line_speed &&
		    bp->req_line_speed < SPEED_1000) {
			bp->phy_flags |= PHY_SGMII_FLAG;
		} else {
			bp->phy_flags &= ~PHY_SGMII_FLAG;
		}
	}

	if (!(bp->phy_flags & PHY_SGMII_FLAG)) {
		u16 bank, rx_eq;

		/* rx equalizer boost value comes from the serdes_config
		 * nvram word */
		rx_eq = ((bp->serdes_config &
			  PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK) >>
			 PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT);

		DP(NETIF_MSG_LINK, "setting rx eq to %d\n", rx_eq);
		/* program the same equalizer boost into every RX lane bank */
		for (bank = MDIO_REG_BANK_RX0; bank <= MDIO_REG_BANK_RX_ALL;
			    bank += (MDIO_REG_BANK_RX1 - MDIO_REG_BANK_RX0)) {
			MDIO_SET_REG_BANK(bp, bank);
			bnx2x_mdio22_write(bp, MDIO_RX0_RX_EQ_BOOST,
					   ((rx_eq &
				MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK) |
				MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL));
		}

		/* forced speed requested? */
		if (!(bp->req_autoneg & AUTONEG_SPEED)) {
			DP(NETIF_MSG_LINK, "not SGMII, no AN\n");

			/* disable autoneg */
			bnx2x_set_autoneg(bp);

			/* program speed and duplex */
			bnx2x_program_serdes(bp);

		} else { /* AN_mode */
			DP(NETIF_MSG_LINK, "not SGMII, AN\n");

			/* AN enabled */
			bnx2x_set_brcm_cl37_advertisment(bp);

			/* program duplex & pause advertisement (for aneg) */
			bnx2x_set_ieee_aneg_advertisment(bp);

			/* enable autoneg */
			bnx2x_set_autoneg(bp);

			/* enable and restart AN */
			bnx2x_restart_autoneg(bp);
		}

	} else { /* SGMII mode */
		DP(NETIF_MSG_LINK, "SGMII\n");

		bnx2x_initialize_sgmii_process(bp);
	}

	/* init ext phy and enable link state int */
	bnx2x_ext_phy_init(bp);

	/* enable the interrupt */
	bnx2x_link_int_enable(bp);
}
3795
3796 static void bnx2x_phy_deassert(struct bnx2x *bp)
3797 {
3798         int port = bp->port;
3799         u32 val;
3800
3801         if (bp->phy_flags & PHY_XGXS_FLAG) {
3802                 DP(NETIF_MSG_LINK, "XGXS\n");
3803                 val = XGXS_RESET_BITS;
3804
3805         } else { /* SerDes */
3806                 DP(NETIF_MSG_LINK, "SerDes\n");
3807                 val = SERDES_RESET_BITS;
3808         }
3809
3810         val = val << (port*16);
3811
3812         /* reset and unreset the SerDes/XGXS */
3813         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
3814         msleep(5);
3815         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
3816 }
3817
3818 static int bnx2x_phy_init(struct bnx2x *bp)
3819 {
3820         DP(NETIF_MSG_LINK, "started\n");
3821         if (CHIP_REV(bp) == CHIP_REV_FPGA) {
3822                 bp->phy_flags |= PHY_EMAC_FLAG;
3823                 bp->link_up = 1;
3824                 bp->line_speed = SPEED_10000;
3825                 bp->duplex = DUPLEX_FULL;
3826                 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + bp->port*4, 0);
3827                 bnx2x_emac_enable(bp);
3828                 bnx2x_link_report(bp);
3829                 return 0;
3830
3831         } else if (CHIP_REV(bp) == CHIP_REV_EMUL) {
3832                 bp->phy_flags |= PHY_BMAC_FLAG;
3833                 bp->link_up = 1;
3834                 bp->line_speed = SPEED_10000;
3835                 bp->duplex = DUPLEX_FULL;
3836                 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + bp->port*4, 0);
3837                 bnx2x_bmac_enable(bp, 0);
3838                 bnx2x_link_report(bp);
3839                 return 0;
3840
3841         } else {
3842                 bnx2x_phy_deassert(bp);
3843                 bnx2x_link_initialize(bp);
3844         }
3845
3846         return 0;
3847 }
3848
/* Take the link down and put the PHY/MAC path in reset.
 *
 * Reports link-down through shared memory, masks link attentions,
 * activates the NIG drain and closes the NIG egress/ingress interfaces,
 * stops the BigMac RX and the EMAC, holds the external PHY in reset via
 * GPIO 1 (except on the T1002G/T1003G boards), and finally resets the
 * SerDes/XGXS and BigMac blocks.
 */
static void bnx2x_link_reset(struct bnx2x *bp)
{
	int port = bp->port;
	u32 board = (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK);

	/* update shared memory */
	bp->link_status = 0;
	bnx2x_update_mng(bp);

	/* disable attentions */
	bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
		       (NIG_MASK_XGXS0_LINK_STATUS |
			NIG_MASK_XGXS0_LINK10G |
			NIG_MASK_SERDES0_LINK_STATUS |
			NIG_MASK_MI_INT));

	/* activate nig drain */
	NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);

	/* disable nig egress interface */
	NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0);
	NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);

	/* Stop BigMac rx */
	bnx2x_bmac_rx_disable(bp);

	/* disable emac */
	NIG_WR(NIG_REG_NIG_EMAC0_EN + port*4, 0);

	/* let in-flight frames drain out before resetting the blocks */
	msleep(10);

	/* The PHY reset is controlled by GPIO 1
	 * Hold it as output low
	 */
	if ((board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1002G) &&
	    (board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1003G)) {
		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
			       MISC_REGISTERS_GPIO_OUTPUT_LOW);
		DP(NETIF_MSG_LINK, "reset external PHY\n");
	}

	/* reset the SerDes/XGXS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
	       (0x1ff << (port*16)));

	/* reset BigMac */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
	       (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));

	/* disable nig ingress interface */
	NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0);
	NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0);

	/* set link down */
	bp->link_up = 0;
}
3905
3906 #ifdef BNX2X_XGXS_LB
3907 static void bnx2x_set_xgxs_loopback(struct bnx2x *bp, int is_10g)
3908 {
3909         int port = bp->port;
3910
3911         if (is_10g) {
3912                 u32 md_devad;
3913
3914                 DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
3915
3916                 /* change the uni_phy_addr in the nig */
3917                 REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18),
3918                        &md_devad);
3919                 NIG_WR(NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5);
3920
3921                 /* change the aer mmd */
3922                 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_AER_BLOCK);
3923                 bnx2x_mdio22_write(bp, MDIO_AER_BLOCK_AER_REG, 0x2800);
3924
3925                 /* config combo IEEE0 control reg for loopback */
3926                 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
3927                 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
3928                                    0x6041);
3929
3930                 /* set aer mmd back */
3931                 bnx2x_set_aer_mmd(bp);
3932
3933                 /* and md_devad */
3934                 NIG_WR(NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, md_devad);
3935
3936         } else {
3937                 u32 mii_control;
3938
3939                 DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");
3940
3941                 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
3942                 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3943                                   &mii_control);
3944                 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3945                                    (mii_control |
3946                                     MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK));
3947         }
3948 }
3949 #endif
3950
3951 /* end of PHY/MAC */
3952
3953 /* slow path */
3954
3955 /*
3956  * General service functions
3957  */
3958
/* Post a command to the slow path queue (SPQ).
 *
 * The slow path queue is odd since completions arrive on the fastpath ring.
 *
 * Produces one SPQ BD carrying @command for connection @cid with the
 * 64-bit data @data_hi:@data_lo; @common tags the ramrod as a common
 * (not per-connection) one.  The updated producer index is written to
 * XSTORM internal memory so the chip picks the entry up.
 *
 * Returns 0 on success, -EBUSY (after bnx2x_panic) when the ring is
 * full, or -EIO when the driver already paniced (BNX2X_STOP_ON_ERROR).
 */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	int port = bp->port;

	DP(NETIF_MSG_TIMER,
	   "spe (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	/* CID needs port number to be encoded in it */
	bp->spq_prod_bd->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		bp->spq_prod_bd->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	/* wrap the producer back to the ring base after the last BD */
	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");

	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}

	/* tell the chip about the new producer */
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(port),
	       bp->spq_prod_idx);

	spin_unlock(&bp->spq_lock);
	return 0;
}
4015
4016 /* acquire split MCP access lock register */
4017 static int bnx2x_lock_alr(struct bnx2x *bp)
4018 {
4019         int rc = 0;
4020         u32 i, j, val;
4021
4022         might_sleep();
4023         i = 100;
4024         for (j = 0; j < i*10; j++) {
4025                 val = (1UL << 31);
4026                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
4027                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
4028                 if (val & (1L << 31))
4029                         break;
4030
4031                 msleep(5);
4032         }
4033
4034         if (!(val & (1L << 31))) {
4035                 BNX2X_ERR("Cannot acquire nvram interface\n");
4036
4037                 rc = -EBUSY;
4038         }
4039
4040         return rc;
4041 }
4042
4043 /* Release split MCP access lock register */
4044 static void bnx2x_unlock_alr(struct bnx2x *bp)
4045 {
4046         u32 val = 0;
4047
4048         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
4049 }
4050
4051 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
4052 {
4053         struct host_def_status_block *def_sb = bp->def_status_blk;
4054         u16 rc = 0;
4055
4056         barrier(); /* status block is written to by the chip */
4057
4058         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
4059                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
4060                 rc |= 1;
4061         }
4062         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
4063                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
4064                 rc |= 2;
4065         }
4066         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
4067                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
4068                 rc |= 4;
4069         }
4070         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
4071                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
4072                 rc |= 8;
4073         }
4074         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
4075                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
4076                 rc |= 16;
4077         }
4078         return rc;
4079 }
4080
4081 /*
4082  * slow path service functions
4083  */
4084
/* Handle newly-asserted attention bits.
 *
 * Masks the asserted bits in this function's AEU mask register, records
 * them in bp->attn_state, services the hard-wired attentions (NIG/link,
 * SW timer, GPIOs, general attentions), then writes the bits to the IGU
 * attn-bits-set address so they stay masked until deasserted.  For NIG
 * attentions the NIG interrupt mask is saved, the link is updated, and
 * the mask restored at the end.
 */
static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = bp->port;
	u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_PORT_BASE * port) * 8;
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;

	/* a bit can only be newly asserted if it was unmasked and not
	 * already part of our attention state - anything else points at
	 * an IGU/driver bookkeeping inconsistency */
	if (~bp->aeu_mask & (asserted & 0xff))
		BNX2X_ERR("IGU ERROR\n");
	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
	   bp->aeu_mask, asserted);
	bp->aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "after masking: aeu_mask %x\n", bp->aeu_mask);

	REG_WR(bp, aeu_addr, bp->aeu_mask);

	bp->attn_state |= asserted;

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			/* save nig interrupt mask */
			bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_update(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		/* general attentions 1-3 belong to port 0, 4-6 to port 1;
		 * acknowledge them by clearing the AEU general attn regs */
		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n",
	   asserted, BAR_IGU_INTMEM + igu_addr);
	REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC)
		REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
}
4169
/* Handle deasserted attentions routed through AEU register 1 (SPIO5).
 *
 * On the BCM957710A1022G board SPIO5 signals a fan failure: the
 * attention is masked off, the PHY is shut down via GPIOs, the failure
 * is recorded in shared memory and logged.
 */
static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = bp->port;
	int reg_offset;
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		/* mask SPIO5 in this function's AEU enable register so the
		 * attention does not fire again */
		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
		case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
			/* Fan failure attention */

			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW);
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW);
			/* mark the failure */
			bp->ext_phy_config &=
					~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			bp->ext_phy_config |=
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
			SHMEM_WR(bp,
				 dev_info.port_hw_config[port].
							external_phy_config,
				 bp->ext_phy_config);
			/* log the failure */
			printk(KERN_ERR PFX "Fan Failure on Network"
			       " Controller %s has caused the driver to"
			       " shutdown the card to prevent permanent"
			       " damage.  Please contact Dell Support for"
			       " assistance\n", bp->dev->name);
			break;

		default:
			break;
		}
	}
}
4219
4220 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
4221 {
4222         u32 val;
4223
4224         if (attn & BNX2X_DOORQ_ASSERT) {
4225
4226                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
4227                 BNX2X_ERR("DB hw attention 0x%x\n", val);
4228                 /* DORQ discard attention */
4229                 if (val & 0x2)
4230                         BNX2X_ERR("FATAL error from DORQ\n");
4231         }
4232 }
4233
4234 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
4235 {
4236         u32 val;
4237
4238         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
4239
4240                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
4241                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
4242                 /* CFC error attention */
4243                 if (val & 0x2)
4244                         BNX2X_ERR("FATAL error from CFC\n");
4245         }
4246
4247         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
4248
4249                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
4250                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
4251                 /* RQ_USDMDP_FIFO_OVERFLOW */
4252                 if (val & 0x18000)
4253                         BNX2X_ERR("FATAL error from PXP\n");
4254         }
4255 }
4256
/* Handle deasserted general and latched attentions (AEU register 4).
 *
 * MC assert: acknowledge general attentions 7-10 and panic the driver.
 * MCP assert: acknowledge general attention 11 and dump the MC assert
 * state.  Latched attentions are cleared and logged only.
 */
static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			/* ack the four MC assert general attentions */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_mc_assert(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {

		/* clear all latched signals; they are logged but otherwise
		 * left masked */
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
		BNX2X_ERR("LATCHED attention 0x%x (masked)\n", attn);
	}
}
4286
/* Handle newly-deasserted attention bits.
 *
 * Takes the split MCP access lock (the MCP or the other port might also
 * be handling this event), reads the four after-invert AEU registers,
 * and for every deasserted attention group dispatches the masked signals
 * to the per-register handlers.  Afterwards the bits are cleared in the
 * IGU, unmasked again in the AEU mask register and removed from
 * bp->attn_state.
 */
static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = bp->port;
	int index;
	u32 reg_addr;
	u32 val;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_lock_alr(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn %llx\n", (unsigned long long)attn.sig[0]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %llx\n", index,
			   (unsigned long long)group_mask.sig[0]);

			/* dispatch the signals of this group, masked per
			 * AEU register, to the matching handler */
			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			/* HW_INTERRUT spelling comes from the register
			 * definitions - left as-is */
			if ((attn.sig[0] & group_mask.sig[0] &
						HW_INTERRUT_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_INTERRUT_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_INTERRUT_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block attention"
					  "  set0 0x%x  set1 0x%x"
					  "  set2 0x%x\n",
					  (attn.sig[0] & group_mask.sig[0] &
					   HW_INTERRUT_ASSERT_SET_0),
					  (attn.sig[1] & group_mask.sig[1] &
					   HW_INTERRUT_ASSERT_SET_1),
					  (attn.sig[2] & group_mask.sig[2] &
					   HW_INTERRUT_ASSERT_SET_2));

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
			       BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_unlock_alr(bp);

	/* clear the handled bits in the IGU */
	reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_PORT_BASE * port) * 8;

	val = ~deasserted;
/*      DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
   val, BAR_IGU_INTMEM + reg_addr); */
	REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val);

	/* a deasserted bit must have been masked and recorded in
	 * attn_state by the assert path - anything else is a bug */
	if (bp->aeu_mask & (deasserted & 0xff))
		BNX2X_ERR("IGU BUG\n");
	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU BUG\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	/* unmask the bits again in the AEU */
	DP(NETIF_MSG_HW, "aeu_mask %x\n", bp->aeu_mask);
	bp->aeu_mask |= (deasserted & 0xff);

	DP(NETIF_MSG_HW, "new mask %x\n", bp->aeu_mask);
	REG_WR(bp, reg_addr, bp->aeu_mask);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}
4375
4376 static void bnx2x_attn_int(struct bnx2x *bp)
4377 {
4378         /* read local copy of bits */
4379         u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
4380         u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
4381         u32 attn_state = bp->attn_state;
4382
4383         /* look for changed bits */
4384         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
4385         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
4386
4387         DP(NETIF_MSG_HW,
4388            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
4389            attn_bits, attn_ack, asserted, deasserted);
4390
4391         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
4392                 BNX2X_ERR("bad attention state\n");
4393
4394         /* handle bits that were raised */
4395         if (asserted)
4396                 bnx2x_attn_int_asserted(bp, asserted);
4397
4398         if (deasserted)
4399                 bnx2x_attn_int_deasserted(bp, deasserted);
4400 }
4401
/* Slow path work handler (scheduled from bnx2x_msix_sp_int).
 *
 * Refreshes the default status block indices, services HW attentions
 * and CStorm events, then acks every default-SB index back to the IGU,
 * re-enabling the slow path interrupt on the final ack.
 */
static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
	if (status == 0)
		BNX2X_ERR("spurious slowpath interrupt!\n");

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	/* CStorm events: query_stats, port delete ramrod */
	if (status & 0x2)
		bp->stat_pending = 0;

	/* ack all indices; only the last ack re-enables the interrupt.
	 * NOTE(review): def_att_idx is acked without le16_to_cpu unlike
	 * the four storm indices - verify this is intentional */
	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);

}
4439
/* MSI-X slowpath interrupt handler.
 *
 * Acknowledges the default status block with further interrupts disabled
 * and defers the actual slowpath event processing to the sp_task work
 * queue (interrupt context must stay short; the slowpath work may sleep).
 * Always returns IRQ_HANDLED since the vector is dedicated to this device.
 */
static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
        struct net_device *dev = dev_instance;
        struct bnx2x *bp = netdev_priv(dev);

        /* Return here if interrupt is disabled */
        if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
                DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
                return IRQ_HANDLED;
        }

        /* ack the status block, keeping further interrupts masked until
         * the work handler re-enables them */
        bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return IRQ_HANDLED;
#endif

        schedule_work(&bp->sp_task);

        return IRQ_HANDLED;
}
4462
4463 /* end of slow path */
4464
4465 /* Statistics */
4466
4467 /****************************************************************************
4468 * Macros
4469 ****************************************************************************/
4470
/* update a single 32 bit statistic: add the delta since the last
 * snapshot to the accumulated ethtool counter 't' and remember the new
 * hardware value.  Expects 'estats', 'new' and 'old' pointers in the
 * calling scope.
 */
#define UPDATE_STAT(s, t) \
        do { \
                estats->t += new->s - old->s; \
                old->s = new->s; \
        } while (0)
4476
/* sum[hi:lo] += add[hi:lo]
 * 64 bit addition on two hi/lo pairs: add the low words first, then
 * propagate the carry (the low word wrapped iff s_lo < a_lo) into the
 * sum of the high words.
 * Note: the carry term must be fully parenthesized - '?:' binds weaker
 * than '+', so without the inner parentheses the high word would get
 * '(a_hi + carry) ? 1 : 0' instead of 'a_hi + carry'.
 */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
        do { \
                s_lo += a_lo; \
                s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
        } while (0)
4483
/* difference = minuend - subtrahend (64 bit result in d_hi:d_lo)
 * When the low word underflows, a unit is borrowed from the high-word
 * difference if one is available; if the subtrahend is larger than the
 * minuend the result is clamped to 0 instead of wrapping (these are
 * monotonically growing counters, so a "negative" delta means a stale
 * snapshot).
 */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
        do { \
                if (m_lo < s_lo) {      /* underflow */ \
                        d_hi = m_hi - s_hi; \
                        if (d_hi > 0) { /* we can 'loan' 1 */ \
                                d_hi--; \
                                d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
                        } else {        /* m_hi <= s_hi */ \
                                d_hi = 0; \
                                d_lo = 0; \
                        } \
                } else {                /* m_lo >= s_lo */ \
                        if (m_hi < s_hi) { \
                            d_hi = 0; \
                            d_lo = 0; \
                        } else {        /* m_hi >= s_hi */ \
                            d_hi = m_hi - s_hi; \
                            d_lo = m_lo - s_lo; \
                        } \
                } \
        } while (0)
4506
/* minuend -= subtrahend (in-place 64 bit subtraction; clamps at 0 via
 * DIFF_64 when the subtrahend is larger) */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
        do { \
                DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
        } while (0)
4512
/* update a 64 bit statistic kept as a hi/lo pair: compute the delta
 * since the last snapshot, remember the new hardware value and add the
 * delta to the ethtool counter.  Expects 'diff', 'new', 'old' and
 * 'estats' in the calling scope; the delta is left in 'diff' so the
 * caller can reuse it (see bnx2x_update_bmac_stats).
 */
#define UPDATE_STAT64(s_hi, t_hi, s_lo, t_lo) \
        do { \
                DIFF_64(diff.hi, new->s_hi, old->s_hi, \
                        diff.lo, new->s_lo, old->s_lo); \
                old->s_hi = new->s_hi; \
                old->s_lo = new->s_lo; \
                ADD_64(estats->t_hi, diff.hi, \
                       estats->t_lo, diff.lo); \
        } while (0)
4522
/* sum[hi:lo] += add
 * extend-add a plain 32 bit value into a hi/lo pair: the low word
 * takes the addend and the high word absorbs the carry (the low word
 * wrapped iff it ended up smaller than the addend).
 * NOTE(review): 'a' is evaluated twice - do not pass an expression
 * with side effects.
 */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
        do { \
                s_lo += a; \
                if (s_lo < a) \
                        s_hi++; \
        } while (0)
4529
/* extend-add a 32 bit hardware counter into a 64 bit ethtool counter.
 * Expects 'estats' and 'new' in the calling scope.
 * NOTE(review): adds the raw counter value rather than a delta -
 * presumably the hardware counter is clear-on-read; verify against the
 * EMAC documentation.
 */
#define UPDATE_EXTEND_STAT(s, t_hi, t_lo) \
        do { \
                ADD_EXTEND_64(estats->t_hi, estats->t_lo, new->s); \
        } while (0)
4534
/* extend-add the delta of a little-endian tstorm client counter into a
 * 64 bit ethtool counter, updating the saved snapshot on the way.
 * Expects 'diff', 'tclient', 'old_tclient' and 'estats' in the calling
 * scope.
 */
#define UPDATE_EXTEND_TSTAT(s, t_hi, t_lo) \
        do { \
                diff = le32_to_cpu(tclient->s) - old_tclient->s; \
                old_tclient->s = le32_to_cpu(tclient->s); \
                ADD_EXTEND_64(estats->t_hi, estats->t_lo, diff); \
        } while (0)
4541
4542 /*
4543  * General service functions
4544  */
4545
4546 static inline long bnx2x_hilo(u32 *hiref)
4547 {
4548         u32 lo = *(hiref + 1);
4549 #if (BITS_PER_LONG == 64)
4550         u32 hi = *hiref;
4551
4552         return HILO_U64(hi, lo);
4553 #else
4554         return lo;
4555 #endif
4556 }
4557
4558 /*
4559  * Init service functions
4560  */
4561
/* Program the chain of DMAE commands used to gather MAC statistics.
 *
 * Up to three groups of commands are written into the slowpath DMAE
 * executer array (bp->executer_idx is reset and advanced as each
 * command is filled in):
 *   - a PCI->GRC copy of the driver's eth_stats block into the MCP
 *     mailbox (only when bp->fw_mb is set),
 *   - GRC->PCI copies of the BMAC or EMAC hardware counters into
 *     mac_stats (only while the link is up),
 *   - a GRC->PCI copy of the NIG statistics; this final command writes
 *     0xffffffff into nig_stats.done so the driver can tell when the
 *     whole chain has completed.
 * Commands in the middle of the chain complete to GRC
 * (dmae_reg_go_c[loader_idx]) to trigger the next command.
 */
static void bnx2x_init_mac_stats(struct bnx2x *bp)
{
        struct dmae_command *dmae;
        int port = bp->port;
        int loader_idx = port * 8;
        u32 opcode;
        u32 mac_addr;

        bp->executer_idx = 0;
        if (bp->fw_mb) {
                /* MCP */
                opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                          DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                          DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                          DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                          (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));

                /* when the link is up more commands follow, so chain
                 * this one via a GRC completion */
                if (bp->link_up)
                        opcode |= (DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE);

                dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
                dmae->opcode = opcode;
                /* the first u32 of eth_stats is skipped */
                dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, eth_stats) +
                                           sizeof(u32));
                dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, eth_stats) +
                                           sizeof(u32));
                dmae->dst_addr_lo = bp->fw_mb >> 2;
                dmae->dst_addr_hi = 0;
                dmae->len = (offsetof(struct bnx2x_eth_stats, mac_stx_end) -
                             sizeof(u32)) >> 2;
                if (bp->link_up) {
                        dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
                        dmae->comp_addr_hi = 0;
                        dmae->comp_val = 1;
                } else {
                        /* last command in the chain: no completion */
                        dmae->comp_addr_lo = 0;
                        dmae->comp_addr_hi = 0;
                        dmae->comp_val = 0;
                }
        }

        if (!bp->link_up) {
                /* no need to collect statistics in link down */
                return;
        }

        /* common opcode for all the MAC counter copies below */
        opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
                  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));

        if (bp->phy_flags & PHY_BMAC_FLAG) {

                mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
                                   NIG_REG_INGRESS_BMAC0_MEM);

                /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
                   BIGMAC_REGISTER_TX_STAT_GTBYT */
                dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
                dmae->opcode = opcode;
                dmae->src_addr_lo = (mac_addr +
                                     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
                dmae->src_addr_hi = 0;
                dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
                dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
                dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
                             BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
                dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
                dmae->comp_addr_hi = 0;
                dmae->comp_val = 1;

                /* BIGMAC_REGISTER_RX_STAT_GR64 ..
                   BIGMAC_REGISTER_RX_STAT_GRIPJ */
                dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
                dmae->opcode = opcode;
                dmae->src_addr_lo = (mac_addr +
                                     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
                dmae->src_addr_hi = 0;
                dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
                                        offsetof(struct bmac_stats, rx_gr64));
                dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
                                        offsetof(struct bmac_stats, rx_gr64));
                dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
                             BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
                dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
                dmae->comp_addr_hi = 0;
                dmae->comp_val = 1;

        } else if (bp->phy_flags & PHY_EMAC_FLAG) {

                mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

                /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
                dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
                dmae->opcode = opcode;
                dmae->src_addr_lo = (mac_addr +
                                     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
                dmae->src_addr_hi = 0;
                dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
                dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
                dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
                dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
                dmae->comp_addr_hi = 0;
                dmae->comp_val = 1;

                /* EMAC_REG_EMAC_RX_STAT_AC_28 */
                dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
                dmae->opcode = opcode;
                dmae->src_addr_lo = (mac_addr +
                                     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
                dmae->src_addr_hi = 0;
                dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
                                           offsetof(struct emac_stats,
                                                    rx_falsecarriererrors));
                dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
                                           offsetof(struct emac_stats,
                                                    rx_falsecarriererrors));
                dmae->len = 1;
                dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
                dmae->comp_addr_hi = 0;
                dmae->comp_val = 1;

                /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
                dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
                dmae->opcode = opcode;
                dmae->src_addr_lo = (mac_addr +
                                     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
                dmae->src_addr_hi = 0;
                dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
                                           offsetof(struct emac_stats,
                                                    tx_ifhcoutoctets));
                dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
                                           offsetof(struct emac_stats,
                                                    tx_ifhcoutoctets));
                dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
                dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
                dmae->comp_addr_hi = 0;
                dmae->comp_val = 1;
        }

        /* NIG */
        /* last command in the chain: completes to PCI and raises the
         * nig_stats.done fence checked by bnx2x_update_storm_stats() */
        dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
        dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
        dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
                                    NIG_REG_STAT0_BRB_DISCARD) >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig));
        dmae->len = (sizeof(struct nig_stats) - 2*sizeof(u32)) >> 2;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig) +
                                    offsetof(struct nig_stats, done));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig) +
                                    offsetof(struct nig_stats, done));
        dmae->comp_val = 0xffffffff;
}
4733
/* Reset the statistics machinery for this port: disable the state
 * machine, take a baseline BRB discard snapshot, clear the saved MAC
 * and tstorm snapshots, and tell the storms where to DMA their
 * statistics queries.
 */
static void bnx2x_init_stats(struct bnx2x *bp)
{
        int port = bp->port;

        bp->stats_state = STATS_STATE_DISABLE;
        bp->executer_idx = 0;

        /* baseline for the delta computed in bnx2x_update_storm_stats() */
        bp->old_brb_discard = REG_RD(bp,
                                     NIG_REG_STAT0_BRB_DISCARD + port*0x38);

        memset(&bp->old_bmac, 0, sizeof(struct bmac_stats));
        memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
        memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));

        /* stats flags: 1 for X and T storms, 0 for C storm; each flag
         * is a 64 bit field written as two 32 bit halves */
        REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port), 1);
        REG_WR(bp, BAR_XSTRORM_INTMEM +
               XSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);

        REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port), 1);
        REG_WR(bp, BAR_TSTRORM_INTMEM +
               TSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);

        REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port), 0);
        REG_WR(bp, BAR_CSTRORM_INTMEM +
               CSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);

        /* give the X and T storms the DMA address of the fw_stats
         * query buffer (64 bit address, written as lo then hi) */
        REG_WR(bp, BAR_XSTRORM_INTMEM +
               XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port),
               U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
        REG_WR(bp, BAR_XSTRORM_INTMEM +
               XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port) + 4,
               U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

        REG_WR(bp, BAR_TSTRORM_INTMEM +
               TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port),
               U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
        REG_WR(bp, BAR_TSTRORM_INTMEM +
               TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port) + 4,
               U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
}
4774
4775 static void bnx2x_stop_stats(struct bnx2x *bp)
4776 {
4777         might_sleep();
4778         if (bp->stats_state != STATS_STATE_DISABLE) {
4779                 int timeout = 10;
4780
4781                 bp->stats_state = STATS_STATE_STOP;
4782                 DP(BNX2X_MSG_STATS, "stats_state - STOP\n");
4783
4784                 while (bp->stats_state != STATS_STATE_DISABLE) {
4785                         if (!timeout) {
4786                                 BNX2X_ERR("timeout waiting for stats stop\n");
4787                                 break;
4788                         }
4789                         timeout--;
4790                         msleep(100);
4791                 }
4792         }
4793         DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n");
4794 }
4795
4796 /*
4797  * Statistics service functions
4798  */
4799
/* Fold the freshly DMAed BMAC hardware counters into the accumulated
 * ethtool statistics.  The UPDATE_STAT*() macros rely on the local
 * names 'new', 'old', 'estats' and 'diff' - do not rename them.
 */
static void bnx2x_update_bmac_stats(struct bnx2x *bp)
{
        struct regp diff;
        struct regp sum;
        struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac);
        struct bmac_stats *old = &bp->old_bmac;
        struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);

        sum.hi = 0;
        sum.lo = 0;

        UPDATE_STAT64(tx_gtbyt.hi, total_bytes_transmitted_hi,
                      tx_gtbyt.lo, total_bytes_transmitted_lo);

        /* accumulate the multicast and broadcast deltas in 'sum'
         * (UPDATE_STAT64 leaves each delta in 'diff') so they can be
         * subtracted from the gtpkt-based unicast total below -
         * presumably tx_gtpkt counts packets of all types */
        UPDATE_STAT64(tx_gtmca.hi, total_multicast_packets_transmitted_hi,
                      tx_gtmca.lo, total_multicast_packets_transmitted_lo);
        ADD_64(sum.hi, diff.hi, sum.lo, diff.lo);

        UPDATE_STAT64(tx_gtgca.hi, total_broadcast_packets_transmitted_hi,
                      tx_gtgca.lo, total_broadcast_packets_transmitted_lo);
        ADD_64(sum.hi, diff.hi, sum.lo, diff.lo);

        UPDATE_STAT64(tx_gtpkt.hi, total_unicast_packets_transmitted_hi,
                      tx_gtpkt.lo, total_unicast_packets_transmitted_lo);
        SUB_64(estats->total_unicast_packets_transmitted_hi, sum.hi,
               estats->total_unicast_packets_transmitted_lo, sum.lo);

        /* 32 bit counters: plain delta accumulation */
        UPDATE_STAT(tx_gtxpf.lo, pause_xoff_frames_transmitted);
        UPDATE_STAT(tx_gt64.lo, frames_transmitted_64_bytes);
        UPDATE_STAT(tx_gt127.lo, frames_transmitted_65_127_bytes);
        UPDATE_STAT(tx_gt255.lo, frames_transmitted_128_255_bytes);
        UPDATE_STAT(tx_gt511.lo, frames_transmitted_256_511_bytes);
        UPDATE_STAT(tx_gt1023.lo, frames_transmitted_512_1023_bytes);
        UPDATE_STAT(tx_gt1518.lo, frames_transmitted_1024_1522_bytes);
        /* the four upper size buckets all land in the 1523-9022 counter */
        UPDATE_STAT(tx_gt2047.lo, frames_transmitted_1523_9022_bytes);
        UPDATE_STAT(tx_gt4095.lo, frames_transmitted_1523_9022_bytes);
        UPDATE_STAT(tx_gt9216.lo, frames_transmitted_1523_9022_bytes);
        UPDATE_STAT(tx_gt16383.lo, frames_transmitted_1523_9022_bytes);

        UPDATE_STAT(rx_grfcs.lo, crc_receive_errors);
        UPDATE_STAT(rx_grund.lo, runt_packets_received);
        UPDATE_STAT(rx_grovr.lo, stat_Dot3statsFramesTooLong);
        UPDATE_STAT(rx_grxpf.lo, pause_xoff_frames_received);
        UPDATE_STAT(rx_grxcf.lo, control_frames_received);
        /* UPDATE_STAT(rx_grxpf.lo, control_frames_received); */
        UPDATE_STAT(rx_grfrg.lo, error_runt_packets_received);
        UPDATE_STAT(rx_grjbr.lo, error_jabber_packets_received);

        UPDATE_STAT64(rx_grerb.hi, stat_IfHCInBadOctets_hi,
                      rx_grerb.lo, stat_IfHCInBadOctets_lo);
        UPDATE_STAT64(tx_gtufl.hi, stat_IfHCOutBadOctets_hi,
                      tx_gtufl.lo, stat_IfHCOutBadOctets_lo);
        UPDATE_STAT(tx_gterr.lo, stat_Dot3statsInternalMacTransmitErrors);
        /* UPDATE_STAT(rx_grxpf.lo, stat_XoffStateEntered); */
        estats->stat_XoffStateEntered = estats->pause_xoff_frames_received;
}
4856
/* Fold the freshly DMAed EMAC hardware counters into the accumulated
 * ethtool statistics.  Counter values are added directly (no old/new
 * delta) - presumably the EMAC counters are clear-on-read; verify
 * against the EMAC documentation.  UPDATE_EXTEND_STAT() relies on the
 * local names 'new' and 'estats' - do not rename them.
 */
static void bnx2x_update_emac_stats(struct bnx2x *bp)
{
        struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac);
        struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);

        /* 32 bit hardware counters extended into 64 bit accumulators */
        UPDATE_EXTEND_STAT(tx_ifhcoutoctets, total_bytes_transmitted_hi,
                                             total_bytes_transmitted_lo);
        UPDATE_EXTEND_STAT(tx_ifhcoutucastpkts,
                                        total_unicast_packets_transmitted_hi,
                                        total_unicast_packets_transmitted_lo);
        UPDATE_EXTEND_STAT(tx_ifhcoutmulticastpkts,
                                      total_multicast_packets_transmitted_hi,
                                      total_multicast_packets_transmitted_lo);
        UPDATE_EXTEND_STAT(tx_ifhcoutbroadcastpkts,
                                      total_broadcast_packets_transmitted_hi,
                                      total_broadcast_packets_transmitted_lo);

        estats->pause_xon_frames_transmitted += new->tx_outxonsent;
        estats->pause_xoff_frames_transmitted += new->tx_outxoffsent;
        estats->single_collision_transmit_frames +=
                                new->tx_dot3statssinglecollisionframes;
        estats->multiple_collision_transmit_frames +=
                                new->tx_dot3statsmultiplecollisionframes;
        estats->late_collision_frames += new->tx_dot3statslatecollisions;
        estats->excessive_collision_frames +=
                                new->tx_dot3statsexcessivecollisions;
        estats->frames_transmitted_64_bytes += new->tx_etherstatspkts64octets;
        estats->frames_transmitted_65_127_bytes +=
                                new->tx_etherstatspkts65octetsto127octets;
        estats->frames_transmitted_128_255_bytes +=
                                new->tx_etherstatspkts128octetsto255octets;
        estats->frames_transmitted_256_511_bytes +=
                                new->tx_etherstatspkts256octetsto511octets;
        estats->frames_transmitted_512_1023_bytes +=
                                new->tx_etherstatspkts512octetsto1023octets;
        estats->frames_transmitted_1024_1522_bytes +=
                                new->tx_etherstatspkts1024octetsto1522octet;
        estats->frames_transmitted_1523_9022_bytes +=
                                new->tx_etherstatspktsover1522octets;

        estats->crc_receive_errors += new->rx_dot3statsfcserrors;
        estats->alignment_errors += new->rx_dot3statsalignmenterrors;
        estats->false_carrier_detections += new->rx_falsecarriererrors;
        estats->runt_packets_received += new->rx_etherstatsundersizepkts;
        estats->stat_Dot3statsFramesTooLong += new->rx_dot3statsframestoolong;
        estats->pause_xon_frames_received += new->rx_xonpauseframesreceived;
        estats->pause_xoff_frames_received += new->rx_xoffpauseframesreceived;
        estats->control_frames_received += new->rx_maccontrolframesreceived;
        estats->error_runt_packets_received += new->rx_etherstatsfragments;
        estats->error_jabber_packets_received += new->rx_etherstatsjabbers;

        UPDATE_EXTEND_STAT(rx_ifhcinbadoctets, stat_IfHCInBadOctets_hi,
                                               stat_IfHCInBadOctets_lo);
        UPDATE_EXTEND_STAT(tx_ifhcoutbadoctets, stat_IfHCOutBadOctets_hi,
                                                stat_IfHCOutBadOctets_lo);
        estats->stat_Dot3statsInternalMacTransmitErrors +=
                                new->tx_dot3statsinternalmactransmiterrors;
        estats->stat_Dot3StatsCarrierSenseErrors +=
                                new->rx_dot3statscarriersenseerrors;
        estats->stat_Dot3StatsDeferredTransmissions +=
                                new->tx_dot3statsdeferredtransmissions;
        estats->stat_FlowControlDone += new->tx_flowcontroldone;
        estats->stat_XoffStateEntered += new->rx_xoffstateentered;
}
4921
/* Fold the storm (firmware) and NIG statistics into the ethtool
 * counters.
 *
 * Each producer writes 0xffffffff into its 'done' field when its data
 * is valid; if any fence is missing the whole update is skipped and a
 * distinct negative code is returned so the caller can tell which
 * producer lagged.  All fences are cleared at the end to arm the next
 * round.  Storm values are little-endian and converted on read.
 * UPDATE_EXTEND_TSTAT() relies on the local names 'diff', 'tclient',
 * 'old_tclient' and 'estats' - do not rename them.
 *
 * Returns 0 on success, -1/-2/-3 when the DMAE, tstorm or xstorm data
 * (respectively) was not ready.
 */
static int bnx2x_update_storm_stats(struct bnx2x *bp)
{
        struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
        struct tstorm_common_stats *tstats = &stats->tstorm_common;
        struct tstorm_per_client_stats *tclient =
                                                &tstats->client_statistics[0];
        struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
        struct xstorm_common_stats *xstats = &stats->xstorm_common;
        struct nig_stats *nstats = bnx2x_sp(bp, nig);
        struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
        u32 diff;

        /* are DMAE stats valid? */
        if (nstats->done != 0xffffffff) {
                DP(BNX2X_MSG_STATS, "stats not updated by dmae\n");
                return -1;
        }

        /* are storm stats valid? */
        if (tstats->done.hi != 0xffffffff) {
                DP(BNX2X_MSG_STATS, "stats not updated by tstorm\n");
                return -2;
        }
        if (xstats->done.hi != 0xffffffff) {
                DP(BNX2X_MSG_STATS, "stats not updated by xstorm\n");
                return -3;
        }

        /* total = valid + error bytes; both totals start from the same
         * valid-bytes value, then the error bytes are added to 'total' */
        estats->total_bytes_received_hi =
        estats->valid_bytes_received_hi =
                                le32_to_cpu(tclient->total_rcv_bytes.hi);
        estats->total_bytes_received_lo =
        estats->valid_bytes_received_lo =
                                le32_to_cpu(tclient->total_rcv_bytes.lo);
        ADD_64(estats->total_bytes_received_hi,
               le32_to_cpu(tclient->rcv_error_bytes.hi),
               estats->total_bytes_received_lo,
               le32_to_cpu(tclient->rcv_error_bytes.lo));

        UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
                                        total_unicast_packets_received_hi,
                                        total_unicast_packets_received_lo);
        UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
                                        total_multicast_packets_received_hi,
                                        total_multicast_packets_received_lo);
        UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
                                        total_broadcast_packets_received_hi,
                                        total_broadcast_packets_received_lo);

        /* per-size rx buckets are not provided by the storms */
        estats->frames_received_64_bytes = MAC_STX_NA;
        estats->frames_received_65_127_bytes = MAC_STX_NA;
        estats->frames_received_128_255_bytes = MAC_STX_NA;
        estats->frames_received_256_511_bytes = MAC_STX_NA;
        estats->frames_received_512_1023_bytes = MAC_STX_NA;
        estats->frames_received_1024_1522_bytes = MAC_STX_NA;
        estats->frames_received_1523_9022_bytes = MAC_STX_NA;

        estats->x_total_sent_bytes_hi =
                                le32_to_cpu(xstats->total_sent_bytes.hi);
        estats->x_total_sent_bytes_lo =
                                le32_to_cpu(xstats->total_sent_bytes.lo);
        estats->x_total_sent_pkts = le32_to_cpu(xstats->total_sent_pkts);

        estats->t_rcv_unicast_bytes_hi =
                                le32_to_cpu(tclient->rcv_unicast_bytes.hi);
        estats->t_rcv_unicast_bytes_lo =
                                le32_to_cpu(tclient->rcv_unicast_bytes.lo);
        estats->t_rcv_broadcast_bytes_hi =
                                le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
        estats->t_rcv_broadcast_bytes_lo =
                                le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
        estats->t_rcv_multicast_bytes_hi =
                                le32_to_cpu(tclient->rcv_multicast_bytes.hi);
        estats->t_rcv_multicast_bytes_lo =
                                le32_to_cpu(tclient->rcv_multicast_bytes.lo);
        estats->t_total_rcv_pkt = le32_to_cpu(tclient->total_rcv_pkts);

        estats->checksum_discard = le32_to_cpu(tclient->checksum_discard);
        estats->packets_too_big_discard =
                                le32_to_cpu(tclient->packets_too_big_discard);
        estats->jabber_packets_received = estats->packets_too_big_discard +
                                          estats->stat_Dot3statsFramesTooLong;
        estats->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
        estats->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
        estats->mac_discard = le32_to_cpu(tclient->mac_discard);
        estats->mac_filter_discard = le32_to_cpu(tstats->mac_filter_discard);
        estats->xxoverflow_discard = le32_to_cpu(tstats->xxoverflow_discard);
        estats->brb_truncate_discard =
                                le32_to_cpu(tstats->brb_truncate_discard);

        /* BRB discard is a free-running hardware count: accumulate the
         * delta against the snapshot taken in bnx2x_init_stats() */
        estats->brb_discard += nstats->brb_discard - bp->old_brb_discard;
        bp->old_brb_discard = nstats->brb_discard;

        estats->brb_packet = nstats->brb_packet;
        estats->brb_truncate = nstats->brb_truncate;
        estats->flow_ctrl_discard = nstats->flow_ctrl_discard;
        estats->flow_ctrl_octets = nstats->flow_ctrl_octets;
        estats->flow_ctrl_packet = nstats->flow_ctrl_packet;
        estats->mng_discard = nstats->mng_discard;
        estats->mng_octet_inp = nstats->mng_octet_inp;
        estats->mng_octet_out = nstats->mng_octet_out;
        estats->mng_packet_inp = nstats->mng_packet_inp;
        estats->mng_packet_out = nstats->mng_packet_out;
        estats->pbf_octets = nstats->pbf_octets;
        estats->pbf_packet = nstats->pbf_packet;
        estats->safc_inp = nstats->safc_inp;

        /* re-arm the validity fences for the next round */
        xstats->done.hi = 0;
        tstats->done.hi = 0;
        nstats->done = 0;

        return 0;
}
5035
5036 static void bnx2x_update_net_stats(struct bnx2x *bp)
5037 {
5038         struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
5039         struct net_device_stats *nstats = &bp->dev->stats;
5040
5041         nstats->rx_packets =
5042                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
5043                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
5044                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
5045
5046         nstats->tx_packets =
5047                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
5048                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
5049                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
5050
5051         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
5052
5053         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
5054
5055         nstats->rx_dropped = estats->checksum_discard + estats->mac_discard;
5056         nstats->tx_dropped = 0;
5057
5058         nstats->multicast =
5059                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
5060
5061         nstats->collisions = estats->single_collision_transmit_frames +
5062                              estats->multiple_collision_transmit_frames +
5063                              estats->late_collision_frames +
5064                              estats->excessive_collision_frames;
5065
5066         nstats->rx_length_errors = estats->runt_packets_received +
5067                                    estats->jabber_packets_received;
5068         nstats->rx_over_errors = estats->brb_discard +
5069                                  estats->brb_truncate_discard;
5070         nstats->rx_crc_errors = estats->crc_receive_errors;
5071         nstats->rx_frame_errors = estats->alignment_errors;
5072         nstats->rx_fifo_errors = estats->no_buff_discard;
5073         nstats->rx_missed_errors = estats->xxoverflow_discard;
5074
5075         nstats->rx_errors = nstats->rx_length_errors +
5076                             nstats->rx_over_errors +
5077                             nstats->rx_crc_errors +
5078                             nstats->rx_frame_errors +
5079                             nstats->rx_fifo_errors +
5080                             nstats->rx_missed_errors;
5081
5082         nstats->tx_aborted_errors = estats->late_collision_frames +
5083                                     estats->excessive_collision_frames;
5084         nstats->tx_carrier_errors = estats->false_carrier_detections;
5085         nstats->tx_fifo_errors = 0;
5086         nstats->tx_heartbeat_errors = 0;
5087         nstats->tx_window_errors = 0;
5088
5089         nstats->tx_errors = nstats->tx_aborted_errors +
5090                             nstats->tx_carrier_errors;
5091
5092         estats->mac_stx_start = ++estats->mac_stx_end;
5093 }
5094
/* Periodic statistics refresh, called from the driver timer.
 *
 * Order of operations:
 *  1. refresh the storm (firmware) statistics; on success fold in the
 *     counters of whichever MAC block is active (BMAC or EMAC) and
 *     recompute the netdev statistics;
 *  2. optionally dump a debug snapshot when NETIF_MSG_TIMER is set in
 *     bp->msglevel;
 *  3. if DMAE commands were queued (bp->executer_idx != 0), build the
 *     "loader" DMAE command that copies the queued command block into
 *     the chip's DMAE command memory and kick it off;
 *  4. post the statistics-query ramrod so the firmware collects the
 *     next batch.
 */
static void bnx2x_update_stats(struct bnx2x *bp)
{
        int i;

        if (!bnx2x_update_storm_stats(bp)) {
                /* storm stats are fresh - fold in the counters of the
                 * currently active MAC block */
                if (bp->phy_flags & PHY_BMAC_FLAG) {
                        bnx2x_update_bmac_stats(bp);

                } else if (bp->phy_flags & PHY_EMAC_FLAG) {
                        bnx2x_update_emac_stats(bp);

                } else { /* unreached */
                        BNX2X_ERR("no MAC active\n");
                        return;
                }

                /* translate the accumulated estats into netdev stats */
                bnx2x_update_net_stats(bp);
        }

        /* debug snapshot of queue 0 plus per-queue packet counters */
        if (bp->msglevel & NETIF_MSG_TIMER) {
                struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
                struct net_device_stats *nstats = &bp->dev->stats;

                printk(KERN_DEBUG "%s:\n", bp->dev->name);
                printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
                                  "  tx pkt (%lx)\n",
                       bnx2x_tx_avail(bp->fp),
                       *bp->fp->tx_cons_sb, nstats->tx_packets);
                printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
                                  "  rx pkt (%lx)\n",
                       (u16)(*bp->fp->rx_cons_sb - bp->fp->rx_comp_cons),
                       *bp->fp->rx_cons_sb, nstats->rx_packets);
                printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u\n",
                       netif_queue_stopped(bp->dev)? "Xoff" : "Xon",
                       estats->driver_xoff, estats->brb_discard);
                printk(KERN_DEBUG "tstats: checksum_discard %u  "
                        "packets_too_big_discard %u  no_buff_discard %u  "
                        "mac_discard %u  mac_filter_discard %u  "
                        "xxovrflow_discard %u  brb_truncate_discard %u  "
                        "ttl0_discard %u\n",
                       estats->checksum_discard,
                       estats->packets_too_big_discard,
                       estats->no_buff_discard, estats->mac_discard,
                       estats->mac_filter_discard, estats->xxoverflow_discard,
                       estats->brb_truncate_discard, estats->ttl0_discard);

                for_each_queue(bp, i) {
                        printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
                               bnx2x_fp(bp, i, tx_pkt),
                               bnx2x_fp(bp, i, rx_pkt),
                               bnx2x_fp(bp, i, rx_calls));
                }
        }

        /* past this point we re-arm the HW collection machinery; bail
         * out if the device is not fully up */
        if (bp->state != BNX2X_STATE_OPEN) {
                DP(BNX2X_MSG_STATS, "state is %x, returning\n", bp->state);
                return;
        }

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        /* loader: a DMAE command that DMAs the queued command block
         * (bp->dmae[0..executer_idx-1]) into the chip's DMAE command
         * memory and chains execution to the first copied command */
        if (bp->executer_idx) {
                struct dmae_command *dmae = &bp->dmae;
                int port = bp->port;
                int loader_idx = port * 8;

                memset(dmae, 0, sizeof(struct dmae_command));

                dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                                DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
                                DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                                DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                                DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                                (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
                dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
                dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
                /* destination: slot loader_idx + 1 in the chip's DMAE
                 * command memory (addresses are in dwords, hence >> 2) */
                dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
                                     sizeof(struct dmae_command) *
                                     (loader_idx + 1)) >> 2;
                dmae->dst_addr_hi = 0;
                dmae->len = sizeof(struct dmae_command) >> 2;
                dmae->len--;    /* !!! for A0/1 only */
                /* completion: trigger the "go" register of the next
                 * slot, chaining into the copied commands */
                dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
                dmae->comp_addr_hi = 0;
                dmae->comp_val = 1;

                bnx2x_post_dmae(bp, dmae, loader_idx);
        }

        /* a stop request was pending - acknowledge it and quit */
        if (bp->stats_state != STATS_STATE_ENABLE) {
                bp->stats_state = STATS_STATE_DISABLE;
                return;
        }

        if (bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0, 0, 0, 0) == 0) {
                /* stats ramrod has its own slot on the spe */
                bp->spq_left++;
                bp->stat_pending = 1;
        }
}
5203
5204 static void bnx2x_timer(unsigned long data)
5205 {
5206         struct bnx2x *bp = (struct bnx2x *) data;
5207
5208         if (!netif_running(bp->dev))
5209                 return;
5210
5211         if (atomic_read(&bp->intr_sem) != 0)
5212                 goto timer_restart;
5213
5214         if (poll) {
5215                 struct bnx2x_fastpath *fp = &bp->fp[0];
5216                 int rc;
5217
5218                 bnx2x_tx_int(fp, 1000);
5219                 rc = bnx2x_rx_int(fp, 1000);
5220         }
5221
5222         if (!nomcp) {
5223                 int port = bp->port;
5224                 u32 drv_pulse;
5225                 u32 mcp_pulse;
5226
5227                 ++bp->fw_drv_pulse_wr_seq;
5228                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5229                 /* TBD - add SYSTEM_TIME */
5230                 drv_pulse = bp->fw_drv_pulse_wr_seq;
5231                 SHMEM_WR(bp, func_mb[port].drv_pulse_mb, drv_pulse);
5232
5233                 mcp_pulse = (SHMEM_RD(bp, func_mb[port].mcp_pulse_mb) &
5234                              MCP_PULSE_SEQ_MASK);
5235                 /* The delta between driver pulse and mcp response
5236                  * should be 1 (before mcp response) or 0 (after mcp response)
5237                  */
5238                 if ((drv_pulse != mcp_pulse) &&
5239                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
5240                         /* someone lost a heartbeat... */
5241                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5242                                   drv_pulse, mcp_pulse);
5243                 }
5244         }
5245
5246         if (bp->stats_state == STATS_STATE_DISABLE)
5247                 goto timer_restart;
5248
5249         bnx2x_update_stats(bp);
5250
5251 timer_restart:
5252         mod_timer(&bp->timer, jiffies + bp->current_interval);
5253 }
5254
5255 /* end of Statistics */
5256
5257 /* nic init */
5258
5259 /*
5260  * nic init service functions
5261  */
5262
/* Initialize a per-queue host status block (USTORM + CSTORM halves).
 *
 * Publishes the DMA address of each half of @sb into the matching
 * storm's internal memory for this port and @id, disables every HC
 * index in both halves (indices are re-enabled later, e.g. from
 * bnx2x_update_coalesce()), and finally ACKs the status block with
 * interrupts enabled in the IGU.
 *
 * @bp:      driver instance
 * @sb:      host virtual address of the status block
 * @mapping: DMA (bus) address of @sb as seen by the chip
 * @id:      status block id (queue index)
 */
static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
                          dma_addr_t mapping, int id)
{
        int port = bp->port;
        u64 section;
        int index;

        /* USTORM half: publish the u_status_block section address */
        section = ((u64)mapping) + offsetof(struct host_status_block,
                                            u_status_block);
        sb->u_status_block.status_block_id = id;

        REG_WR(bp, BAR_USTRORM_INTMEM +
               USTORM_SB_HOST_SB_ADDR_OFFSET(port, id), U64_LO(section));
        REG_WR(bp, BAR_USTRORM_INTMEM +
               ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, id)) + 4),
               U64_HI(section));

        /* start with all USTORM HC indices disabled */
        for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
                REG_WR16(bp, BAR_USTRORM_INTMEM +
                         USTORM_SB_HC_DISABLE_OFFSET(port, id, index), 0x1);

        /* CSTORM half: publish the c_status_block section address */
        section = ((u64)mapping) + offsetof(struct host_status_block,
                                            c_status_block);
        sb->c_status_block.status_block_id = id;

        REG_WR(bp, BAR_CSTRORM_INTMEM +
               CSTORM_SB_HOST_SB_ADDR_OFFSET(port, id), U64_LO(section));
        REG_WR(bp, BAR_CSTRORM_INTMEM +
               ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, id)) + 4),
               U64_HI(section));

        /* start with all CSTORM HC indices disabled */
        for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
                REG_WR16(bp, BAR_CSTRORM_INTMEM +
                         CSTORM_SB_HC_DISABLE_OFFSET(port, id, index), 0x1);

        /* ACK the (empty) status block, enabling its interrupt */
        bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
5302
/* Initialize the default status block: the attention section plus one
 * default-SB section per storm (U/C/T/X).
 *
 * For the attention section this caches the AEU enable bits per
 * attention group, snapshots the AEU attention mask, and programs the
 * attention message address and number into the HC.  For each storm
 * it publishes the DMA address of that storm's default-SB section,
 * programs the BTR, and disables all HC indices.  Ends by ACKing the
 * block with interrupts enabled.
 *
 * @bp:      driver instance
 * @def_sb:  host virtual address of the default status block
 * @mapping: DMA (bus) address of @def_sb
 * @id:      status block id to program into the HC
 */
static void bnx2x_init_def_sb(struct bnx2x *bp,
                              struct host_def_status_block *def_sb,
                              dma_addr_t mapping, int id)
{
        int port = bp->port;
        int index, val, reg_offset;
        u64 section;

        /* ATTN section */
        section = ((u64)mapping) + offsetof(struct host_def_status_block,
                                            atten_status_block);
        def_sb->atten_status_block.status_block_id = id;

        bp->def_att_idx = 0;
        bp->attn_state = 0;

        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
                             MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

        /* cache the four AEU enable registers of each attention group
         * (NOTE(review): only groups 0..2 are read here - confirm that
         * matches the number of groups this function uses) */
        for (index = 0; index < 3; index++) {
                bp->attn_group[index].sig[0] = REG_RD(bp,
                                                     reg_offset + 0x10*index);
                bp->attn_group[index].sig[1] = REG_RD(bp,
                                               reg_offset + 0x4 + 0x10*index);
                bp->attn_group[index].sig[2] = REG_RD(bp,
                                               reg_offset + 0x8 + 0x10*index);
                bp->attn_group[index].sig[3] = REG_RD(bp,
                                               reg_offset + 0xc + 0x10*index);
        }

        /* snapshot the current AEU attention mask for this function */
        bp->aeu_mask = REG_RD(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
                                          MISC_REG_AEU_MASK_ATTN_FUNC_0));

        /* tell the HC where to DMA attention messages */
        reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
                             HC_REG_ATTN_MSG0_ADDR_L);

        REG_WR(bp, reg_offset, U64_LO(section));
        REG_WR(bp, reg_offset + 4, U64_HI(section));

        reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

        /* OR the status block id into the attention number register */
        val = REG_RD(bp, reg_offset);
        val |= id;
        REG_WR(bp, reg_offset, val);

        /* USTORM section */
        section = ((u64)mapping) + offsetof(struct host_def_status_block,
                                            u_def_status_block);
        def_sb->u_def_status_block.status_block_id = id;

        bp->def_u_idx = 0;

        REG_WR(bp, BAR_USTRORM_INTMEM +
               USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
        REG_WR(bp, BAR_USTRORM_INTMEM +
               ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
               U64_HI(section));
        REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port),
               BNX2X_BTR);

        for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
                REG_WR16(bp, BAR_USTRORM_INTMEM +
                         USTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);

        /* CSTORM section */
        section = ((u64)mapping) + offsetof(struct host_def_status_block,
                                            c_def_status_block);
        def_sb->c_def_status_block.status_block_id = id;

        bp->def_c_idx = 0;

        REG_WR(bp, BAR_CSTRORM_INTMEM +
               CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
        REG_WR(bp, BAR_CSTRORM_INTMEM +
               ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
               U64_HI(section));
        REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port),
               BNX2X_BTR);

        for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
                REG_WR16(bp, BAR_CSTRORM_INTMEM +
                         CSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);

        /* TSTORM section */
        section = ((u64)mapping) + offsetof(struct host_def_status_block,
                                            t_def_status_block);
        def_sb->t_def_status_block.status_block_id = id;

        bp->def_t_idx = 0;

        REG_WR(bp, BAR_TSTRORM_INTMEM +
               TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
        REG_WR(bp, BAR_TSTRORM_INTMEM +
               ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
               U64_HI(section));
        REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port),
               BNX2X_BTR);

        for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
                REG_WR16(bp, BAR_TSTRORM_INTMEM +
                         TSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);

        /* XSTORM section */
        section = ((u64)mapping) + offsetof(struct host_def_status_block,
                                            x_def_status_block);
        def_sb->x_def_status_block.status_block_id = id;

        bp->def_x_idx = 0;

        REG_WR(bp, BAR_XSTRORM_INTMEM +
               XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
        REG_WR(bp, BAR_XSTRORM_INTMEM +
               ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
               U64_HI(section));
        REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port),
               BNX2X_BTR);

        for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
                REG_WR16(bp, BAR_XSTRORM_INTMEM +
                         XSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);

        /* no statistics ramrod in flight yet */
        bp->stat_pending = 0;

        /* ACK the (empty) default SB, enabling its interrupt */
        bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
5428
/* Program per-queue interrupt coalescing into the storms' HC data.
 *
 * For every queue, writes the RX (USTORM, RX_CQ_CONS index) and TX
 * (CSTORM, TX_CQ_CONS index) coalescing timeouts derived from
 * bp->rx_ticks_int / bp->tx_ticks_int, and disables the HC index
 * entirely when the corresponding tick value is 0 (no timer-driven
 * interrupt for that index).
 *
 * NOTE(review): the /12 presumably converts the microsecond tick
 * value into HC timer units - confirm against the HW/firmware spec.
 */
static void bnx2x_update_coalesce(struct bnx2x *bp)
{
        int port = bp->port;
        int i;

        for_each_queue(bp, i) {

                /* HC_INDEX_U_ETH_RX_CQ_CONS */
                REG_WR8(bp, BAR_USTRORM_INTMEM +
                        USTORM_SB_HC_TIMEOUT_OFFSET(port, i,
                                                   HC_INDEX_U_ETH_RX_CQ_CONS),
                        bp->rx_ticks_int/12);
                /* disable the index when RX coalescing is off */
                REG_WR16(bp, BAR_USTRORM_INTMEM +
                         USTORM_SB_HC_DISABLE_OFFSET(port, i,
                                                   HC_INDEX_U_ETH_RX_CQ_CONS),
                         bp->rx_ticks_int ? 0 : 1);

                /* HC_INDEX_C_ETH_TX_CQ_CONS */
                REG_WR8(bp, BAR_CSTRORM_INTMEM +
                        CSTORM_SB_HC_TIMEOUT_OFFSET(port, i,
                                                   HC_INDEX_C_ETH_TX_CQ_CONS),
                        bp->tx_ticks_int/12);
                /* disable the index when TX coalescing is off */
                REG_WR16(bp, BAR_CSTRORM_INTMEM +
                         CSTORM_SB_HC_DISABLE_OFFSET(port, i,
                                                   HC_INDEX_C_ETH_TX_CQ_CONS),
                         bp->tx_ticks_int ? 0 : 1);
        }
}
5457
5458 static void bnx2x_init_rx_rings(struct bnx2x *bp)
5459 {
5460         u16 ring_prod;
5461         int i, j;
5462         int port = bp->port;
5463
5464         bp->rx_buf_use_size = bp->dev->mtu;
5465
5466         bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
5467         bp->rx_buf_size = bp->rx_buf_use_size + 64;
5468
5469         for_each_queue(bp, j) {
5470                 struct bnx2x_fastpath *fp = &bp->fp[j];
5471
5472                 fp->rx_bd_cons = 0;
5473                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;