[BNX2X]: Correct Link management
[linux-2.6.git] / drivers / net / bnx2x.c
1 /* bnx2x.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2008 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Eliezer Tamir <eliezert@broadcom.com>
10  * Based on code from Michael Chan's bnx2 driver
11  * UDP CSUM errata workaround by Arik Gendelman
12  * Slowpath rework by Vladislav Zolotarov
13  * Statistics and Link management by Yitchak Gertner
14  *
15  */
16
17 /* define this to make the driver freeze on error
18  * to allow getting debug info
19  * (you will need to reboot afterwards)
20  */
21 /*#define BNX2X_STOP_ON_ERROR*/
22
23 #include <linux/module.h>
24 #include <linux/moduleparam.h>
25 #include <linux/kernel.h>
26 #include <linux/device.h>  /* for dev_info() */
27 #include <linux/timer.h>
28 #include <linux/errno.h>
29 #include <linux/ioport.h>
30 #include <linux/slab.h>
31 #include <linux/vmalloc.h>
32 #include <linux/interrupt.h>
33 #include <linux/pci.h>
34 #include <linux/init.h>
35 #include <linux/netdevice.h>
36 #include <linux/etherdevice.h>
37 #include <linux/skbuff.h>
38 #include <linux/dma-mapping.h>
39 #include <linux/bitops.h>
40 #include <linux/irq.h>
41 #include <linux/delay.h>
42 #include <asm/byteorder.h>
43 #include <linux/time.h>
44 #include <linux/ethtool.h>
45 #include <linux/mii.h>
46 #ifdef NETIF_F_HW_VLAN_TX
47         #include <linux/if_vlan.h>
48         #define BCM_VLAN 1
49 #endif
50 #include <net/ip.h>
51 #include <net/tcp.h>
52 #include <net/checksum.h>
53 #include <linux/workqueue.h>
54 #include <linux/crc32.h>
55 #include <linux/prefetch.h>
56 #include <linux/zlib.h>
57 #include <linux/version.h>
58 #include <linux/io.h>
59
60 #include "bnx2x_reg.h"
61 #include "bnx2x_fw_defs.h"
62 #include "bnx2x_hsi.h"
63 #include "bnx2x.h"
64 #include "bnx2x_init.h"
65
66 #define DRV_MODULE_VERSION      "0.40.15"
67 #define DRV_MODULE_RELDATE      "$DateTime: 2007/11/15 07:28:37 $"
68 #define BNX2X_BC_VER            0x040200
69
70 /* Time in jiffies before concluding the transmitter is hung. */
71 #define TX_TIMEOUT              (5*HZ)
72
73 static char version[] __devinitdata =
74         "Broadcom NetXtreme II 5771X 10Gigabit Ethernet Driver "
75         DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
76
77 MODULE_AUTHOR("Eliezer Tamir <eliezert@broadcom.com>");
78 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
79 MODULE_LICENSE("GPL");
80 MODULE_VERSION(DRV_MODULE_VERSION);
81 MODULE_INFO(cvs_version, "$Revision: #404 $");
82
83 static int use_inta;
84 static int poll;
85 static int onefunc;
86 static int nomcp;
87 static int debug;
88 static int use_multi;
89
90 module_param(use_inta, int, 0);
91 module_param(poll, int, 0);
92 module_param(onefunc, int, 0);
93 module_param(debug, int, 0);
94 MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
95 MODULE_PARM_DESC(poll, "use polling (for debug)");
96 MODULE_PARM_DESC(onefunc, "enable only first function");
97 MODULE_PARM_DESC(nomcp, "ignore management CPU (Implies onefunc)");
98 MODULE_PARM_DESC(debug, "default debug msglevel");
99
100 #ifdef BNX2X_MULTI
101 module_param(use_multi, int, 0);
102 MODULE_PARM_DESC(use_multi, "use per-CPU queues");
103 #endif
104
/* Supported board types; value is used to index board_info[] below */
enum bnx2x_board_type {
        BCM57710 = 0,
};

/* indexed by board_t, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" }
};

/* PCI IDs this driver binds to; driver_data carries the board type enum */
static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
123
124 /****************************************************************************
125 * General service functions
126 ****************************************************************************/
127
128 /* used only at init
129  * locking is done by mcp
130  */
/* used only at init
 * locking is done by mcp
 *
 * Indirect register write through the PCI config-space GRC window:
 * program the target GRC address, write the data word, then point the
 * window back at the vendor-id offset so subsequent config accesses
 * are harmless.
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}
138
#ifdef BNX2X_IND_RD
/* Indirect register read through the PCI config-space GRC window
 * (counterpart of bnx2x_reg_wr_ind); restores the window afterwards.
 */
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}
#endif
152
/* "go" doorbell registers for the 16 DMAE command channels,
 * indexed by channel number (see bnx2x_post_dmae)
 */
static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
159
160 /* copy command into DMAE command memory and set DMAE command go */
/* copy command into DMAE command memory and set DMAE command go
 *
 * @dmae: fully-prepared command descriptor, copied word-by-word into the
 *        command memory slot for channel @idx
 * @idx:  DMAE channel; the matching "go" register is rung last so the HW
 *        only starts once the whole command is in place
 */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

/*              DP(NETIF_MSG_DMAE, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i)); */
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}
176
/* DMA a block of host memory to device GRC space using the DMAE engine.
 *
 * @dma_addr: DMA-mapped source address in host memory
 * @dst_addr: destination GRC address (byte address; HW takes dwords,
 *            hence the >> 2 below)
 * @len32:    length in 32-bit words
 *
 * Completion is detected by polling a write-back word in the slowpath
 * area (wb_comp) that the engine sets to BNX2X_WB_COMP_VAL when done.
 * On timeout the error is only logged - callers are not informed.
 */
static void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr,
                             u32 dst_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->dmae;
        int port = bp->port;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int timeout = 200;

        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
        dmae->src_addr_lo = U64_LO(dma_addr);
        dmae->src_addr_hi = U64_HI(dma_addr);
        dmae->dst_addr_lo = dst_addr >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = BNX2X_WB_COMP_VAL;

/*
        DP(NETIF_MSG_DMAE, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
*/
/*
        DP(NETIF_MSG_DMAE, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
*/

        /* clear the completion word before ringing the doorbell */
        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, port * 8);

        udelay(5);
        /* adjust timeout for emulation/FPGA */
        if (CHIP_REV_IS_SLOW(bp))
                timeout *= 100;
        while (*wb_comp != BNX2X_WB_COMP_VAL) {
/*              DP(NETIF_MSG_DMAE, "wb_comp 0x%08x\n", *wb_comp); */
                udelay(5);
                if (!timeout) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                timeout--;
        }
}
238
#ifdef BNX2X_DMAE_RD
/* DMA a block of device GRC space into the slowpath wb_data[] area.
 *
 * @src_addr: source GRC address (byte address, converted to dwords)
 * @len32:    length in 32-bit words (result lands in bnx2x_sp wb_data)
 *
 * Mirror image of bnx2x_write_dmae; same wb_comp polling scheme and the
 * same log-only handling of a timeout.
 */
static void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->dmae;
        int port = bp->port;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int timeout = 200;

        /* clear the landing area so stale data is never mistaken for
         * a fresh read */
        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
        dmae->src_addr_lo = src_addr >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = BNX2X_WB_COMP_VAL;

/*
        DP(NETIF_MSG_DMAE, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
*/

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, port * 8);

        udelay(5);
        while (*wb_comp != BNX2X_WB_COMP_VAL) {
                udelay(5);
                if (!timeout) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                timeout--;
        }
/*
        DP(NETIF_MSG_DMAE, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
*/
}
#endif
298
/* Dump any firmware assert entries from the four storm processors
 * (X, T, C, U) and return the number of asserts found.
 *
 * Each storm keeps an assert list in its internal memory; the same
 * XSTORM_* offsets are reused for every storm, only the intmem base
 * differs (presumably the layout is identical across storms - the
 * per-storm offset macros are not used here; verify against the hsi).
 */
static int bnx2x_mc_assert(struct bnx2x *bp)
{
        int i, j;
        int rc = 0;
        char last_idx;
        const char storm[] = {"XTCU"};
        const u32 intmem_base[] = {
                BAR_XSTRORM_INTMEM,
                BAR_TSTRORM_INTMEM,
                BAR_CSTRORM_INTMEM,
                BAR_USTRORM_INTMEM
        };

        /* Go through all instances of all SEMIs */
        for (i = 0; i < 4; i++) {
                last_idx = REG_RD8(bp, XSTORM_ASSERT_LIST_INDEX_OFFSET +
                                   intmem_base[i]);
                BNX2X_ERR("DATA %cSTORM_ASSERT_LIST_INDEX 0x%x\n",
                          storm[i], last_idx);

                /* print the asserts */
                for (j = 0; j < STROM_ASSERT_ARRAY_SIZE; j++) {
                        u32 row0, row1, row2, row3;

                        /* each assert entry is four consecutive dwords */
                        row0 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) +
                                      intmem_base[i]);
                        row1 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 4 +
                                      intmem_base[i]);
                        row2 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 8 +
                                      intmem_base[i]);
                        row3 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 12 +
                                      intmem_base[i]);

                        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                                BNX2X_ERR("DATA %cSTORM_ASSERT_INDEX 0x%x ="
                                          " 0x%08x 0x%08x 0x%08x 0x%08x\n",
                                          storm[i], j, row3, row2, row1, row0);
                                rc++;
                        } else {
                                /* first invalid opcode ends this storm's list */
                                break;
                        }
                }
        }
        return rc;
}
344
345 static void bnx2x_fw_dump(struct bnx2x *bp)
346 {
347         u32 mark, offset;
348         u32 data[9];
349         int word;
350
351         mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
352         printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);
353
354         for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
355                 for (word = 0; word < 8; word++)
356                         data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
357                                                   offset + 4*word));
358                 data[8] = 0x0;
359                 printk(KERN_ERR PFX "%s", (char *)data);
360         }
361         for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
362                 for (word = 0; word < 8; word++)
363                         data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
364                                                   offset + 4*word));
365                 data[8] = 0x0;
366                 printk(KERN_ERR PFX "%s", (char *)data);
367         }
368         printk("\n" KERN_ERR PFX "end of fw dump\n");
369 }
370
/* Dump driver state to the log for post-mortem debugging: per-queue
 * producer/consumer indices, a window of TX/RX/completion ring entries
 * around each consumer, the default status-block indices, and any
 * firmware asserts.  Statistics are disabled afterwards since the
 * device is assumed wedged.
 */
static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        BNX2X_ERR("begin crash dump -----------------\n");

        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

                BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)"
                          "  *rx_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  fp_c_idx(%x)  fp_u_idx(%x)"
                          "  bd data(%x,%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, *fp->tx_cons_sb, *fp->rx_cons_sb,
                          fp->rx_comp_prod, fp->rx_comp_cons, fp->fp_c_idx,
                          fp->fp_u_idx, hw_prods->packets_prod,
                          hw_prods->bds_prod);

                /* TX packet ring: 10 entries before the consumer through
                 * most of a page after it */
                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j < end; j++) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
                                  sw_bd->skb, sw_bd->first_bd);
                }

                /* TX descriptor ring around the BD consumer */
                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j < end; j++) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }

                /* RX descriptor ring around the RX consumer */
                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j < end; j++) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  j, rx_bd[0], rx_bd[1], sw_bd->skb);
                }

                /* RX completion queue around its consumer */
                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j < end; j++) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
                                  j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_t_idx(%u)"
                  "  def_x_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_t_idx, bp->def_x_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);


        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");

        bp->stats_state = STATS_STATE_DISABLE;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n");
}
444
445 static void bnx2x_enable_int(struct bnx2x *bp)
446 {
447         int port = bp->port;
448         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
449         u32 val = REG_RD(bp, addr);
450         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
451
452         if (msix) {
453                 val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
454                 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
455                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
456         } else {
457                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
458                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
459                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
460                 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
461         }
462
463         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  msi %d\n",
464            val, port, addr, msix);
465
466         REG_WR(bp, addr, val);
467 }
468
/* Mask all host-coalescing interrupt sources for this port and read the
 * register back to verify the write actually landed in the HW.
 */
static void bnx2x_disable_int(struct bnx2x *bp)
{
        int port = bp->port;
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        /* clear every interrupt-enable bit regardless of current mode */
        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        REG_WR(bp, addr, val);
        /* read back to confirm - a mismatch means interrupts may still fire */
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
487
/* Mask device interrupts and wait for all in-flight ISRs and the
 * slowpath work item to finish.
 *
 * intr_sem is bumped first so any ISR that slips in after this point
 * sees interrupts as logically disabled.
 */
static void bnx2x_disable_int_sync(struct bnx2x *bp)
{

        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i;

        atomic_inc(&bp->intr_sem);
        /* prevent the HW from sending interrupts */
        bnx2x_disable_int(bp);

        /* make sure all ISRs are done */
        if (msix) {
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i].vector);

                /* one more for the Slow Path IRQ
                 * (deliberately uses 'i' left one past the last queue by
                 * for_each_queue - the SP vector lives at that slot) */
                synchronize_irq(bp->msix_table[i].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_work_sync(&bp->sp_task);

}
512
513 /* fast path code */
514
515 /*
516  * general service functions
517  */
518
519 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 id,
520                                 u8 storm, u16 index, u8 op, u8 update)
521 {
522         u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_PORT_BASE * bp->port) * 8;
523         struct igu_ack_register igu_ack;
524
525         igu_ack.status_block_index = index;
526         igu_ack.sb_id_and_flags =
527                         ((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
528                          (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
529                          (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
530                          (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
531
532 /*      DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
533            (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr); */
534         REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack));
535 }
536
537 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
538 {
539         struct host_status_block *fpsb = fp->status_blk;
540         u16 rc = 0;
541
542         barrier(); /* status block is written to by the chip */
543         if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
544                 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
545                 rc |= 1;
546         }
547         if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
548                 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
549                 rc |= 2;
550         }
551         return rc;
552 }
553
554 static inline int bnx2x_has_work(struct bnx2x_fastpath *fp)
555 {
556         u16 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
557
558         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
559                 rx_cons_sb++;
560
561         if ((rx_cons_sb != fp->rx_comp_cons) ||
562             (le16_to_cpu(*fp->tx_cons_sb) != fp->tx_pkt_cons))
563                 return 1;
564
565         return 0;
566 }
567
568 static u16 bnx2x_ack_int(struct bnx2x *bp)
569 {
570         u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_PORT_BASE * bp->port) * 8;
571         u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr);
572
573 /*      DP(NETIF_MSG_INTR, "read 0x%08x from IGU addr 0x%x\n",
574            result, BAR_IGU_INTMEM + igu_addr); */
575
576 #ifdef IGU_DEBUG
577 #warning IGU_DEBUG active
578         if (result == 0) {
579                 BNX2X_ERR("read %x from IGU\n", result);
580                 REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0);
581         }
582 #endif
583         return result;
584 }
585
586
587 /*
588  * fast path service functions
589  */
590
591 /* free skb in the packet ring at pos idx
592  * return idx of last bd freed
593  */
594 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
595                              u16 idx)
596 {
597         struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
598         struct eth_tx_bd *tx_bd;
599         struct sk_buff *skb = tx_buf->skb;
600         u16 bd_idx = tx_buf->first_bd;
601         int nbd;
602
603         DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
604            idx, tx_buf, skb);
605
606         /* unmap first bd */
607         DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
608         tx_bd = &fp->tx_desc_ring[bd_idx];
609         pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
610                          BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
611
612         nbd = le16_to_cpu(tx_bd->nbd) - 1;
613 #ifdef BNX2X_STOP_ON_ERROR
614         if (nbd > (MAX_SKB_FRAGS + 2)) {
615                 BNX2X_ERR("bad nbd!\n");
616                 bnx2x_panic();
617         }
618 #endif
619
620         /* Skip a parse bd and the TSO split header bd
621            since they have no mapping */
622         if (nbd)
623                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
624
625         if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
626                                            ETH_TX_BD_FLAGS_TCP_CSUM |
627                                            ETH_TX_BD_FLAGS_SW_LSO)) {
628                 if (--nbd)
629                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
630                 tx_bd = &fp->tx_desc_ring[bd_idx];
631                 /* is this a TSO split header bd? */
632                 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
633                         if (--nbd)
634                                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
635                 }
636         }
637
638         /* now free frags */
639         while (nbd > 0) {
640
641                 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
642                 tx_bd = &fp->tx_desc_ring[bd_idx];
643                 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
644                                BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
645                 if (--nbd)
646                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
647         }
648
649         /* release skb */
650         BUG_TRAP(skb);
651         dev_kfree_skb(skb);
652         tx_buf->first_bd = 0;
653         tx_buf->skb = NULL;
654
655         return bd_idx;
656 }
657
/* Return the number of TX BDs still available on this fastpath ring.
 *
 * The arithmetic accounts for the "next page" BDs (one per ring page,
 * NUM_TX_RINGS of them) that are never usable for data, and for
 * producer/consumer wrap-around.
 */
static inline u32 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        u16 used;
        u32 prod;
        u32 cons;

        /* Tell compiler that prod and cons can change */
        barrier();
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* wrapped case: add a full ring's worth of usable BDs, minus the
         * per-page link BDs crossed by each index */
        used = (NUM_TX_BD - NUM_TX_RINGS + prod - cons +
                (cons / TX_DESC_CNT) - (prod / TX_DESC_CNT));

        if (prod >= cons) {
                /* used = prod - cons - prod/size + cons/size */
                used -= NUM_TX_BD - NUM_TX_RINGS;
        }

        BUG_TRAP(used <= fp->bp->tx_ring_size);
        BUG_TRAP((fp->bp->tx_ring_size - used) <= MAX_TX_AVAIL);

        return (fp->bp->tx_ring_size - used);
}
682
/* Reclaim completed TX packets on one fastpath, up to @work packets.
 *
 * Walks the packet consumer up to the HW consumer reported in the
 * status block, freeing each packet's BDs via bnx2x_free_tx_pkt(), then
 * wakes the netdev queue if it was stopped and enough BDs are free for
 * a maximally-fragmented packet.
 */
static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
        struct bnx2x *bp = fp->bp;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
        int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %d\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
                done++;

                if (done == work)
                        break;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* Need to make the tx_cons update visible to start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        /* TBD need a thresh? */
        if (unlikely(netif_queue_stopped(bp->dev))) {

                /* take the tx lock so the re-check and wake are atomic
                 * with respect to start_xmit() stopping the queue */
                netif_tx_lock(bp->dev);

                if (netif_queue_stopped(bp->dev) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_wake_queue(bp->dev);

                netif_tx_unlock(bp->dev);

        }
}
743
/* Handle a slowpath (ramrod completion) CQE on a fastpath ring.
 *
 * Advances the driver's state machine based on (command | state):
 * multi-queue fastpaths (fp->index != 0) track their own FP state,
 * everything else drives the global bp->state.  A slot is returned to
 * the slowpath queue (spq_left++) for every completion seen.
 */
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(NETIF_MSG_RX_STATUS,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           fp->index, cid, command, bp->state, rr_cqe->ramrod_cqe.type);

        bp->spq_left++;

        /* non-leading queues only handle their own setup/halt ramrods */
        if (fp->index) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply(%d)  state is %x\n",
                                  command, fp->state);
                }
                mb(); /* force bnx2x_wait_ramrod to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_PORT_DEL | BNX2X_STATE_CLOSING_WAIT4_DELETE):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_DELETED;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                break;

        default:
                BNX2X_ERR("unexpected ramrod (%d)  state is %x\n",
                          command, bp->state);
        }

        mb(); /* force bnx2x_wait_ramrod to see the change */
}
813
/* Allocate and DMA-map a fresh receive skb for ring slot @index.
 *
 * On success the sw ring entry and the hardware BD are both updated.
 * Returns 0, or -ENOMEM if allocation or DMA mapping fails (the skb is
 * released in the mapping-failure case).
 */
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        /* note: maps rx_buf_use_size bytes, which may differ from the
         * rx_buf_size passed to the allocator above */
        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(mapping))) {

                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        /* publish the DMA address to the hardware BD */
        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}
842
843 /* note that we are not allocating a new skb,
844  * we are just moving one from cons to prod
845  * we are not creating a new mapping,
846  * so there is no need to check for dma_mapping_error().
847  */
848 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
849                                struct sk_buff *skb, u16 cons, u16 prod)
850 {
851         struct bnx2x *bp = fp->bp;
852         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
853         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
854         struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
855         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
856
857         pci_dma_sync_single_for_device(bp->pdev,
858                                        pci_unmap_addr(cons_rx_buf, mapping),
859                                        bp->rx_offset + RX_COPY_THRESH,
860                                        PCI_DMA_FROMDEVICE);
861
862         prod_rx_buf->skb = cons_rx_buf->skb;
863         pci_unmap_addr_set(prod_rx_buf, mapping,
864                            pci_unmap_addr(cons_rx_buf, mapping));
865         *prod_bd = *cons_bd;
866 }
867
868 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
869 {
870         struct bnx2x *bp = fp->bp;
871         u16 bd_cons, bd_prod, comp_ring_cons;
872         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
873         int rx_pkt = 0;
874
875 #ifdef BNX2X_STOP_ON_ERROR
876         if (unlikely(bp->panic))
877                 return 0;
878 #endif
879
880         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
881         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
882                 hw_comp_cons++;
883
884         bd_cons = fp->rx_bd_cons;
885         bd_prod = fp->rx_bd_prod;
886         sw_comp_cons = fp->rx_comp_cons;
887         sw_comp_prod = fp->rx_comp_prod;
888
889         /* Memory barrier necessary as speculative reads of the rx
890          * buffer can be ahead of the index in the status block
891          */
892         rmb();
893
894         DP(NETIF_MSG_RX_STATUS,
895            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
896            fp->index, hw_comp_cons, sw_comp_cons);
897
898         while (sw_comp_cons != hw_comp_cons) {
899                 unsigned int len, pad;
900                 struct sw_rx_bd *rx_buf;
901                 struct sk_buff *skb;
902                 union eth_rx_cqe *cqe;
903
904                 comp_ring_cons = RCQ_BD(sw_comp_cons);
905                 bd_prod = RX_BD(bd_prod);
906                 bd_cons = RX_BD(bd_cons);
907
908                 cqe = &fp->rx_comp_ring[comp_ring_cons];
909
910                 DP(NETIF_MSG_RX_STATUS, "hw_comp_cons %u  sw_comp_cons %u"
911                    "  comp_ring (%u)  bd_ring (%u,%u)\n",
912                    hw_comp_cons, sw_comp_cons,
913                    comp_ring_cons, bd_prod, bd_cons);
914                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
915                    "  queue %x  vlan %x  len %x\n",
916                    cqe->fast_path_cqe.type,
917                    cqe->fast_path_cqe.error_type_flags,
918                    cqe->fast_path_cqe.status_flags,
919                    cqe->fast_path_cqe.rss_hash_result,
920                    cqe->fast_path_cqe.vlan_tag, cqe->fast_path_cqe.pkt_len);
921
922                 /* is this a slowpath msg? */
923                 if (unlikely(cqe->fast_path_cqe.type)) {
924                         bnx2x_sp_event(fp, cqe);
925                         goto next_cqe;
926
927                 /* this is an rx packet */
928                 } else {
929                         rx_buf = &fp->rx_buf_ring[bd_cons];
930                         skb = rx_buf->skb;
931
932                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
933                         pad = cqe->fast_path_cqe.placement_offset;
934
935                         pci_dma_sync_single_for_device(bp->pdev,
936                                         pci_unmap_addr(rx_buf, mapping),
937                                                        pad + RX_COPY_THRESH,
938                                                        PCI_DMA_FROMDEVICE);
939                         prefetch(skb);
940                         prefetch(((char *)(skb)) + 128);
941
942                         /* is this an error packet? */
943                         if (unlikely(cqe->fast_path_cqe.error_type_flags &
944                                                         ETH_RX_ERROR_FALGS)) {
945                         /* do we sometimes forward error packets anyway? */
946                                 DP(NETIF_MSG_RX_ERR,
947                                    "ERROR flags(%u) Rx packet(%u)\n",
948                                    cqe->fast_path_cqe.error_type_flags,
949                                    sw_comp_cons);
950                                 /* TBD make sure MC counts this as a drop */
951                                 goto reuse_rx;
952                         }
953
954                         /* Since we don't have a jumbo ring
955                          * copy small packets if mtu > 1500
956                          */
957                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
958                             (len <= RX_COPY_THRESH)) {
959                                 struct sk_buff *new_skb;
960
961                                 new_skb = netdev_alloc_skb(bp->dev,
962                                                            len + pad);
963                                 if (new_skb == NULL) {
964                                         DP(NETIF_MSG_RX_ERR,
965                                            "ERROR packet dropped "
966                                            "because of alloc failure\n");
967                                         /* TBD count this as a drop? */
968                                         goto reuse_rx;
969                                 }
970
971                                 /* aligned copy */
972                                 skb_copy_from_linear_data_offset(skb, pad,
973                                                     new_skb->data + pad, len);
974                                 skb_reserve(new_skb, pad);
975                                 skb_put(new_skb, len);
976
977                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
978
979                                 skb = new_skb;
980
981                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
982                                 pci_unmap_single(bp->pdev,
983                                         pci_unmap_addr(rx_buf, mapping),
984                                                  bp->rx_buf_use_size,
985                                                  PCI_DMA_FROMDEVICE);
986                                 skb_reserve(skb, pad);
987                                 skb_put(skb, len);
988
989                         } else {
990                                 DP(NETIF_MSG_RX_ERR,
991                                    "ERROR packet dropped because "
992                                    "of alloc failure\n");
993 reuse_rx:
994                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
995                                 goto next_rx;
996                         }
997
998                         skb->protocol = eth_type_trans(skb, bp->dev);
999
1000                         skb->ip_summed = CHECKSUM_NONE;
1001                         if (bp->rx_csum && BNX2X_RX_SUM_OK(cqe))
1002                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1003
1004                         /* TBD do we pass bad csum packets in promisc */
1005                 }
1006
1007 #ifdef BCM_VLAN
1008                 if ((le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags)
1009                                 & PARSING_FLAGS_NUMBER_OF_NESTED_VLANS)
1010                     && (bp->vlgrp != NULL))
1011                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1012                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1013                 else
1014 #endif
1015                 netif_receive_skb(skb);
1016
1017                 bp->dev->last_rx = jiffies;
1018
1019 next_rx:
1020                 rx_buf->skb = NULL;
1021
1022                 bd_cons = NEXT_RX_IDX(bd_cons);
1023                 bd_prod = NEXT_RX_IDX(bd_prod);
1024 next_cqe:
1025                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1026                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1027                 rx_pkt++;
1028
1029                 if ((rx_pkt == budget))
1030                         break;
1031         } /* while */
1032
1033         fp->rx_bd_cons = bd_cons;
1034         fp->rx_bd_prod = bd_prod;
1035         fp->rx_comp_cons = sw_comp_cons;
1036         fp->rx_comp_prod = sw_comp_prod;
1037
1038         REG_WR(bp, BAR_TSTRORM_INTMEM +
1039                TSTORM_RCQ_PROD_OFFSET(bp->port, fp->index), sw_comp_prod);
1040
1041         mmiowb(); /* keep prod updates ordered */
1042
1043         fp->rx_pkt += rx_pkt;
1044         fp->rx_calls++;
1045
1046         return rx_pkt;
1047 }
1048
/* MSI-X fast path interrupt handler for one RX/TX queue.
 * Acks the queue's status block with IGU_INT_DISABLE (masking further
 * interrupts from this SB) and schedules the queue's NAPI poll.
 */
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
        struct bnx2x_fastpath *fp = fp_cookie;
        struct bnx2x *bp = fp->bp;
        struct net_device *dev = bp->dev;
        int index = fp->index;

        DP(NETIF_MSG_INTR, "got an msix interrupt on [%d]\n", index);
        /* mask this SB's interrupt before handing off to NAPI */
        bnx2x_ack_sb(bp, index, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return IRQ_HANDLED;
#endif

        /* warm the cache lines the poll routine will touch first */
        prefetch(fp->rx_cons_sb);
        prefetch(fp->tx_cons_sb);
        prefetch(&fp->status_blk->c_status_block.status_block_index);
        prefetch(&fp->status_blk->u_status_block.status_block_index);

        netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));
        return IRQ_HANDLED;
}
1072
/* INTA (non-MSI-X) interrupt handler.
 * Reads/acks the interrupt status and dispatches the bits that are set:
 * bit 0x2 -> fast path queue 0 NAPI poll, bit 0x1 -> slow path task.
 * Any leftover bits are only logged.
 */
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
        struct net_device *dev = dev_instance;
        struct bnx2x *bp = netdev_priv(dev);
        u16 status = bnx2x_ack_int(bp);

        /* status == 0 means the interrupt was not ours (shared line) */
        if (unlikely(status == 0)) {
                DP(NETIF_MSG_INTR, "not our interrupt!\n");
                return IRQ_NONE;
        }

        DP(NETIF_MSG_INTR, "got an interrupt status is %u\n", status);

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return IRQ_HANDLED;
#endif

        /* Return here if interrupt is shared and is disabled */
        if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
                DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
                return IRQ_HANDLED;
        }

        if (status & 0x2) {
                struct bnx2x_fastpath *fp = &bp->fp[0];

                /* warm the cache lines the poll routine will touch first */
                prefetch(fp->rx_cons_sb);
                prefetch(fp->tx_cons_sb);
                prefetch(&fp->status_blk->c_status_block.status_block_index);
                prefetch(&fp->status_blk->u_status_block.status_block_index);

                netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));

                status &= ~0x2;
                if (!status)
                        return IRQ_HANDLED;
        }

        if (unlikely(status & 0x1)) {

                /* slow path events are handled in process context */
                schedule_work(&bp->sp_task);

                status &= ~0x1;
                if (!status)
                        return IRQ_HANDLED;
        }

        DP(NETIF_MSG_INTR, "got an unknown interrupt! (status is %u)\n",
           status);

        return IRQ_HANDLED;
}
1126
1127 /* end of fast path */
1128
1129 /* PHY/MAC */
1130
1131 /*
1132  * General service functions
1133  */
1134
/* Configure the port LEDs for an active link at @speed.
 * The LED mode comes from the shared HW configuration; traffic
 * override is cleared and the blink rate is programmed.
 */
static void bnx2x_leds_set(struct bnx2x *bp, unsigned int speed)
{
        int port = bp->port;

        NIG_WR(NIG_REG_LED_MODE_P0 + port*4,
               ((bp->hw_config & SHARED_HW_CFG_LED_MODE_MASK) >>
                SHARED_HW_CFG_LED_MODE_SHIFT));
        NIG_WR(NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);

        /* Set blinking rate to ~15.9Hz */
        NIG_WR(NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
               LED_BLINK_RATE_VAL);
        NIG_WR(NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 + port*4, 1);

        /* On Ax chip versions for speeds less than 10G
           LED scheme is different */
        if ((CHIP_REV(bp) == CHIP_REV_Ax) && (speed < SPEED_10000)) {
                NIG_WR(NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 1);
                NIG_WR(NIG_REG_LED_CONTROL_TRAFFIC_P0 + port*4, 0);
                NIG_WR(NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 + port*4, 1);
        }
}
1157
/* Turn the port LEDs back to their default (no link) state. */
static void bnx2x_leds_unset(struct bnx2x *bp)
{
        int port = bp->port;

        NIG_WR(NIG_REG_LED_10G_P0 + port*4, 0);
        NIG_WR(NIG_REG_LED_MODE_P0 + port*4, SHARED_HW_CFG_LED_MAC1);
}
1165
1166 static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
1167 {
1168         u32 val = REG_RD(bp, reg);
1169
1170         val |= bits;
1171         REG_WR(bp, reg, val);
1172         return val;
1173 }
1174
1175 static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits)
1176 {
1177         u32 val = REG_RD(bp, reg);
1178
1179         val &= ~bits;
1180         REG_WR(bp, reg, val);
1181         return val;
1182 }
1183
/* Acquire the HW-arbitrated resource lock @resource (one bit per
 * resource in the per-port MISC driver control register).
 * Polls for up to ~1 second (200 x 5ms).
 * Returns 0 on success, -EINVAL for a bad resource number, -EEXIST if
 * this function already holds the lock, -EAGAIN on timeout.
 */
static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
{
        u32 cnt;
        u32 lock_status;
        u32 resource_bit = (1 << resource);
        u8 func = bp->port;

        /* Validating that the resource is within range */
        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
                DP(NETIF_MSG_HW,
                   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
                   resource, HW_LOCK_MAX_RESOURCE_VALUE);
                return -EINVAL;
        }

        /* Validating that the resource is not already taken */
        lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8);
        if (lock_status & resource_bit) {
                DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
                   lock_status, resource_bit);
                return -EEXIST;
        }

        /* Try for 1 second every 5ms */
        for (cnt = 0; cnt < 200; cnt++) {
                /* Try to acquire the lock */
                REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + func*8 + 4,
                       resource_bit);
                /* re-read: the bit reads back set only when granted */
                lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8);
                if (lock_status & resource_bit)
                        return 0;

                msleep(5);
        }
        DP(NETIF_MSG_HW, "Timeout\n");
        return -EAGAIN;
}
1221
/* Release the HW resource lock @resource previously taken with
 * bnx2x_hw_lock().
 * Returns 0 on success, -EINVAL for a bad resource number, -EFAULT if
 * the lock was not actually held.
 */
static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource)
{
        u32 lock_status;
        u32 resource_bit = (1 << resource);
        u8 func = bp->port;

        /* Validating that the resource is within range */
        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
                DP(NETIF_MSG_HW,
                   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
                   resource, HW_LOCK_MAX_RESOURCE_VALUE);
                return -EINVAL;
        }

        /* Validating that the resource is currently taken */
        lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8);
        if (!(lock_status & resource_bit)) {
                DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
                   lock_status, resource_bit);
                return -EFAULT;
        }

        /* writing the bit to the base register releases the lock */
        REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + func*8, resource_bit);
        return 0;
}
1247
/* Drive or float one of GPIO 0-3.
 * @mode: MISC_REGISTERS_GPIO_OUTPUT_LOW / _OUTPUT_HIGH / _INPUT_HI_Z.
 * The pin is remapped to the other port's bank when both the port-swap
 * and strap-override NIG registers are set.  Access is serialized via
 * the GPIO HW lock.
 * Returns 0 on success, -EINVAL for an invalid GPIO number.
 */
static int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
{
        /* The GPIO should be swapped if swap register is set and active */
        int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
                         REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ bp->port;
        int gpio_shift = gpio_num +
                        (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
        u32 gpio_mask = (1 << gpio_shift);
        u32 gpio_reg;

        if (gpio_num > MISC_REGISTERS_GPIO_3) {
                BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
                return -EINVAL;
        }

        /* NOTE(review): return value of bnx2x_hw_lock() is ignored */
        bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
        /* read GPIO and mask except the float bits */
        gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

        switch (mode) {
        case MISC_REGISTERS_GPIO_OUTPUT_LOW:
                DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
                   gpio_num, gpio_shift);
                /* clear FLOAT and set CLR */
                gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
                gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
                break;

        case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
                DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
                   gpio_num, gpio_shift);
                /* clear FLOAT and set SET */
                gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
                gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
                break;

        case MISC_REGISTERS_GPIO_INPUT_HI_Z :
                DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
                   gpio_num, gpio_shift);
                /* set FLOAT */
                gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
                break;

        default:
                /* unknown mode: write back the unmodified value */
                break;
        }

        REG_WR(bp, MISC_REG_GPIO, gpio_reg);
        bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);

        return 0;
}
1300
/* Drive or float one of SPIO 4-7 (shared, not per-port, pins).
 * @mode: MISC_REGISTERS_SPIO_OUTPUT_LOW / _OUTPUT_HIGH / _INPUT_HI_Z.
 * Access is serialized via the SPIO HW lock.
 * Returns 0 on success, -EINVAL for an invalid SPIO number.
 */
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
        u32 spio_mask = (1 << spio_num);
        u32 spio_reg;

        if ((spio_num < MISC_REGISTERS_SPIO_4) ||
            (spio_num > MISC_REGISTERS_SPIO_7)) {
                BNX2X_ERR("Invalid SPIO %d\n", spio_num);
                return -EINVAL;
        }

        /* NOTE(review): return value of bnx2x_hw_lock() is ignored */
        bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
        /* read SPIO and mask except the float bits */
        spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

        switch (mode) {
        case MISC_REGISTERS_SPIO_OUTPUT_LOW :
                DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
                /* clear FLOAT and set CLR */
                spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
                spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
                break;

        case MISC_REGISTERS_SPIO_OUTPUT_HIGH :
                DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
                /* clear FLOAT and set SET */
                spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
                spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
                break;

        case MISC_REGISTERS_SPIO_INPUT_HI_Z:
                DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
                /* set FLOAT */
                spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
                break;

        default:
                /* unknown mode: write back the unmodified value */
                break;
        }

        REG_WR(bp, MISC_REG_SPIO, spio_reg);
        bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_SPIO);

        return 0;
}
1346
/* Clause 22 MDIO write of @val to PHY register @reg (phy address taken
 * from bp->phy_addr, EMAC block chosen by port).
 * If HW auto-polling is enabled it is suspended for the duration of
 * the access and restored afterwards.  Busy-waits up to ~500us for the
 * transaction to complete.
 * Returns 0 on success, -EBUSY on timeout.
 */
static int bnx2x_mdio22_write(struct bnx2x *bp, u32 reg, u32 val)
{
        int port = bp->port;
        u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
        u32 tmp;
        int i, rc;

/*      DP(NETIF_MSG_HW, "phy_addr 0x%x  reg 0x%x  val 0x%08x\n",
           bp->phy_addr, reg, val); */

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {

                /* suspend auto-polling so it does not race our access */
                tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
                tmp &= ~EMAC_MDIO_MODE_AUTO_POLL;
                EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
                REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
                udelay(40);
        }

        /* start the write transaction */
        tmp = ((bp->phy_addr << 21) | (reg << 16) |
               (val & EMAC_MDIO_COMM_DATA) |
               EMAC_MDIO_COMM_COMMAND_WRITE_22 |
               EMAC_MDIO_COMM_START_BUSY);
        EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, tmp);

        /* poll for completion (BUSY bit clears) */
        for (i = 0; i < 50; i++) {
                udelay(10);

                tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
                if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }

        if (tmp & EMAC_MDIO_COMM_START_BUSY) {
                BNX2X_ERR("write phy register failed\n");

                rc = -EBUSY;
        } else {
                rc = 0;
        }

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {

                /* restore auto-polling */
                tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
                tmp |= EMAC_MDIO_MODE_AUTO_POLL;
                EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
        }

        return rc;
}
1399
/* Clause 22 MDIO read of PHY register @reg into *@ret_val (phy address
 * taken from bp->phy_addr, EMAC block chosen by port).
 * If HW auto-polling is enabled it is suspended for the duration of
 * the access and restored afterwards.  Busy-waits up to ~500us.
 * Returns 0 on success; on timeout *@ret_val is set to 0 and -EBUSY
 * is returned.
 */
static int bnx2x_mdio22_read(struct bnx2x *bp, u32 reg, u32 *ret_val)
{
        int port = bp->port;
        u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
        u32 val;
        int i, rc;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {

                /* suspend auto-polling so it does not race our access */
                val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
                val &= ~EMAC_MDIO_MODE_AUTO_POLL;
                EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
                REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
                udelay(40);
        }

        /* start the read transaction */
        val = ((bp->phy_addr << 21) | (reg << 16) |
               EMAC_MDIO_COMM_COMMAND_READ_22 |
               EMAC_MDIO_COMM_START_BUSY);
        EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, val);

        /* poll for completion; on success keep only the data bits */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
                if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
                        val &= EMAC_MDIO_COMM_DATA;
                        break;
                }
        }

        if (val & EMAC_MDIO_COMM_START_BUSY) {
                BNX2X_ERR("read phy register failed\n");

                *ret_val = 0x0;
                rc = -EBUSY;
        } else {
                *ret_val = val;
                rc = 0;
        }

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {

                /* restore auto-polling */
                val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
                val |= EMAC_MDIO_MODE_AUTO_POLL;
                EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
        }

/*      DP(NETIF_MSG_HW, "phy_addr 0x%x  reg 0x%x  ret_val 0x%08x\n",
           bp->phy_addr, reg, *ret_val); */

        return rc;
}
1453
/* Clause 45 MDIO write of @val to register @addr of device @reg at PHY
 * @phy_addr, using the EMAC MDIO controller whose GRC base is
 * @mdio_ctrl.  Performs the two-cycle clause 45 sequence (ADDRESS
 * cycle, then WRITE cycle), each polled for up to ~500us.
 * Returns 0 on success, -EBUSY if either cycle times out.
 */
static int bnx2x_mdio45_ctrl_write(struct bnx2x *bp, u32 mdio_ctrl,
                                   u32 phy_addr, u32 reg, u32 addr, u32 val)
{
        u32 tmp;
        int i, rc = 0;

        /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
         * (a value of 49==0x31) and make sure that the AUTO poll is off
         */
        tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
        tmp &= ~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT);
        tmp |= (EMAC_MDIO_MODE_CLAUSE_45 |
                (49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
        REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp);
        REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
        udelay(40);

        /* address */
        tmp = ((phy_addr << 21) | (reg << 16) | addr |
               EMAC_MDIO_COMM_COMMAND_ADDRESS |
               EMAC_MDIO_COMM_START_BUSY);
        REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);

        for (i = 0; i < 50; i++) {
                udelay(10);

                tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
                if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }
        if (tmp & EMAC_MDIO_COMM_START_BUSY) {
                BNX2X_ERR("write phy register failed\n");

                rc = -EBUSY;

        } else {
                /* data */
                tmp = ((phy_addr << 21) | (reg << 16) | val |
                       EMAC_MDIO_COMM_COMMAND_WRITE_45 |
                       EMAC_MDIO_COMM_START_BUSY);
                REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);

                for (i = 0; i < 50; i++) {
                        udelay(10);

                        tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
                        if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
                                udelay(5);
                                break;
                        }
                }

                if (tmp & EMAC_MDIO_COMM_START_BUSY) {
                        BNX2X_ERR("write phy register failed\n");

                        rc = -EBUSY;
                }
        }

        /* unset clause 45 mode, set the MDIO clock to a faster value
         * (0x13 => 6.25Mhz) and restore the AUTO poll if needed
         */
        tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
        tmp &= ~(EMAC_MDIO_MODE_CLAUSE_45 | EMAC_MDIO_MODE_CLOCK_CNT);
        tmp |= (0x13 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG)
                tmp |= EMAC_MDIO_MODE_AUTO_POLL;
        REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp);

        return rc;
}
1527
1528 static int bnx2x_mdio45_write(struct bnx2x *bp, u32 phy_addr, u32 reg,
1529                               u32 addr, u32 val)
1530 {
1531         u32 emac_base = bp->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
1532
1533         return bnx2x_mdio45_ctrl_write(bp, emac_base, phy_addr,
1534                                        reg, addr, val);
1535 }
1536
/* Clause 45 MDIO read of register @addr of device @reg at PHY
 * @phy_addr into *@ret_val, using the EMAC MDIO controller whose GRC
 * base is @mdio_ctrl.  Performs the two-cycle clause 45 sequence
 * (ADDRESS cycle, then READ cycle), each polled for up to ~500us.
 * Returns 0 on success; on timeout *@ret_val is set to 0 and -EBUSY
 * is returned.
 */
static int bnx2x_mdio45_ctrl_read(struct bnx2x *bp, u32 mdio_ctrl,
                                  u32 phy_addr, u32 reg, u32 addr,
                                  u32 *ret_val)
{
        u32 val;
        int i, rc = 0;

        /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
         * (a value of 49==0x31) and make sure that the AUTO poll is off
         */
        val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
        val &= ~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT);
        val |= (EMAC_MDIO_MODE_CLAUSE_45 |
                (49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
        REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);
        REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
        udelay(40);

        /* address */
        val = ((phy_addr << 21) | (reg << 16) | addr |
               EMAC_MDIO_COMM_COMMAND_ADDRESS |
               EMAC_MDIO_COMM_START_BUSY);
        REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);

        for (i = 0; i < 50; i++) {
                udelay(10);

                val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
                if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }
        if (val & EMAC_MDIO_COMM_START_BUSY) {
                BNX2X_ERR("read phy register failed\n");

                *ret_val = 0;
                rc = -EBUSY;

        } else {
                /* data */
                val = ((phy_addr << 21) | (reg << 16) |
                       EMAC_MDIO_COMM_COMMAND_READ_45 |
                       EMAC_MDIO_COMM_START_BUSY);
                REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);

                for (i = 0; i < 50; i++) {
                        udelay(10);

                        val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
                        if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
                                val &= EMAC_MDIO_COMM_DATA;
                                break;
                        }
                }

                if (val & EMAC_MDIO_COMM_START_BUSY) {
                        BNX2X_ERR("read phy register failed\n");

                        val = 0;
                        rc = -EBUSY;
                }

                *ret_val = val;
        }

        /* unset clause 45 mode, set the MDIO clock to a faster value
         * (0x13 => 6.25Mhz) and restore the AUTO poll if needed
         */
        val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
        val &= ~(EMAC_MDIO_MODE_CLAUSE_45 | EMAC_MDIO_MODE_CLOCK_CNT);
        val |= (0x13 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG)
                val |= EMAC_MDIO_MODE_AUTO_POLL;
        REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);

        return rc;
}
1615
1616 static int bnx2x_mdio45_read(struct bnx2x *bp, u32 phy_addr, u32 reg,
1617                              u32 addr, u32 *ret_val)
1618 {
1619         u32 emac_base = bp->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
1620
1621         return bnx2x_mdio45_ctrl_read(bp, emac_base, phy_addr,
1622                                       reg, addr, ret_val);
1623 }
1624
/* Verified clause 45 write: write @val, read it back and retry (up to
 * 10 times, 5ms apart) until the read-back matches.
 * Returns 0 on success, -EBUSY if the value never sticks.
 */
static int bnx2x_mdio45_vwrite(struct bnx2x *bp, u32 phy_addr, u32 reg,
                               u32 addr, u32 val)
{
        int i;
        u32 rd_val;

        might_sleep();
        for (i = 0; i < 10; i++) {
                bnx2x_mdio45_write(bp, phy_addr, reg, addr, val);
                msleep(5);
                bnx2x_mdio45_read(bp, phy_addr, reg, addr, &rd_val);
                /* if the read value is not the same as the value we wrote,
                   we should write it again */
                if (rd_val == val)
                        return 0;
        }
        BNX2X_ERR("MDIO write in CL45 failed\n");
        return -EBUSY;
}
1644
1645 /*
1646  * link management
1647  */
1648
1649 static void bnx2x_pause_resolve(struct bnx2x *bp, u32 pause_result)
1650 {
1651         switch (pause_result) {                 /* ASYM P ASYM P */
1652         case 0xb:                               /*   1  0   1  1 */
1653                 bp->flow_ctrl = FLOW_CTRL_TX;
1654                 break;
1655
1656         case 0xe:                               /*   1  1   1  0 */
1657                 bp->flow_ctrl = FLOW_CTRL_RX;
1658                 break;
1659
1660         case 0x5:                               /*   0  1   0  1 */
1661         case 0x7:                               /*   0  1   1  1 */
1662         case 0xd:                               /*   1  1   0  1 */
1663         case 0xf:                               /*   1  1   1  1 */
1664                 bp->flow_ctrl = FLOW_CTRL_BOTH;
1665                 break;
1666
1667         default:
1668                 break;
1669         }
1670 }
1671
/* Resolve flow control from the external (KR) PHY's autoneg registers.
 *
 * Returns 1 if the external PHY reports autoneg complete — in which case
 * the pause advertisement of both sides has been folded into
 * bp->flow_ctrl via bnx2x_pause_resolve() — and 0 otherwise.
 *
 * NOTE(review): "resove" is a typo for "resolve"; the name is kept
 * because the caller in this file uses this exact spelling.
 */
static u8 bnx2x_ext_phy_resove_fc(struct bnx2x *bp)
{
        u32 ext_phy_addr;
        u32 ld_pause;   /* local */
        u32 lp_pause;   /* link partner */
        u32 an_complete; /* AN complete */
        u32 pause_result;
        u8 ret = 0;

        /* external PHY MDIO address is encoded in the HW config word */
        ext_phy_addr = ((bp->ext_phy_config &
                         PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
                                        PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);

        /* read twice — presumably the status register is latched and the
         * first read returns stale state; TODO confirm vs PHY datasheet */
        bnx2x_mdio45_read(bp, ext_phy_addr,
                          EXT_PHY_KR_AUTO_NEG_DEVAD,
                          EXT_PHY_KR_STATUS, &an_complete);
        bnx2x_mdio45_read(bp, ext_phy_addr,
                          EXT_PHY_KR_AUTO_NEG_DEVAD,
                          EXT_PHY_KR_STATUS, &an_complete);

        if (an_complete & EXT_PHY_KR_AUTO_NEG_COMPLETE) {
                ret = 1;
                /* combine local and link-partner pause advertisement bits
                 * into the single nibble bnx2x_pause_resolve() expects */
                bnx2x_mdio45_read(bp, ext_phy_addr,
                                  EXT_PHY_KR_AUTO_NEG_DEVAD,
                                  EXT_PHY_KR_AUTO_NEG_ADVERT, &ld_pause);
                bnx2x_mdio45_read(bp, ext_phy_addr,
                                  EXT_PHY_KR_AUTO_NEG_DEVAD,
                                  EXT_PHY_KR_LP_AUTO_NEG, &lp_pause);
                pause_result = (ld_pause &
                                EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_MASK) >> 8;
                pause_result |= (lp_pause &
                                 EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_MASK) >> 10;
                DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x \n",
                   pause_result);
                bnx2x_pause_resolve(bp, pause_result);
        }
        return ret;
}
1711
/* Determine the effective flow control (bp->flow_ctrl) for the new link.
 *
 * Three paths:
 *  1) flow-control autoneg completed on a direct (no external PHY,
 *     non-SGMII) XGXS link: resolve from the CL37 advertisement
 *     registers read over MDIO;
 *  2) otherwise, if autoneg was not requested, or the external PHY did
 *     not resolve it either: fall back to a policy derived from
 *     bp->req_flow_ctrl — and, when autoneg WAS requested, from the MTU
 *     (RX pause is dropped above a 4500-byte MTU; reason not visible
 *     here — presumably large frames cannot be buffered while paused,
 *     TODO confirm);
 *  3) forced mode: copy the requested flow control verbatim.
 */
static void bnx2x_flow_ctrl_resolve(struct bnx2x *bp, u32 gp_status)
{
        u32 ld_pause;   /* local driver */
        u32 lp_pause;   /* link partner */
        u32 pause_result;

        bp->flow_ctrl = 0;

        /* resolve from gp_status in case of AN complete and not sgmii */
        if ((bp->req_autoneg & AUTONEG_FLOW_CTRL) &&
            (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) &&
            (!(bp->phy_flags & PHY_SGMII_FLAG)) &&
            (XGXS_EXT_PHY_TYPE(bp) == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)) {

                MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
                bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
                                  &ld_pause);
                bnx2x_mdio22_read(bp,
                        MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
                                  &lp_pause);
                /* combine both sides' pause bits into the nibble that
                 * bnx2x_pause_resolve() decodes */
                pause_result = (ld_pause &
                                MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
                pause_result |= (lp_pause &
                                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
                DP(NETIF_MSG_LINK, "pause_result 0x%x\n", pause_result);
                bnx2x_pause_resolve(bp, pause_result);
        } else if (!(bp->req_autoneg & AUTONEG_FLOW_CTRL) ||
                   !(bnx2x_ext_phy_resove_fc(bp))) {
                /* forced speed */
                if (bp->req_autoneg & AUTONEG_FLOW_CTRL) {
                        /* AN was requested but neither gp_status nor the
                         * external PHY resolved it — apply MTU-based policy */
                        switch (bp->req_flow_ctrl) {
                        case FLOW_CTRL_AUTO:
                                if (bp->dev->mtu <= 4500)
                                        bp->flow_ctrl = FLOW_CTRL_BOTH;
                                else
                                        bp->flow_ctrl = FLOW_CTRL_TX;
                                break;

                        case FLOW_CTRL_TX:
                                bp->flow_ctrl = FLOW_CTRL_TX;
                                break;

                        case FLOW_CTRL_RX:
                                /* RX pause only honored for MTU <= 4500;
                                 * otherwise flow_ctrl stays 0 (none) */
                                if (bp->dev->mtu <= 4500)
                                        bp->flow_ctrl = FLOW_CTRL_RX;
                                break;

                        case FLOW_CTRL_BOTH:
                                if (bp->dev->mtu <= 4500)
                                        bp->flow_ctrl = FLOW_CTRL_BOTH;
                                else
                                        bp->flow_ctrl = FLOW_CTRL_TX;
                                break;

                        case FLOW_CTRL_NONE:
                        default:
                                break;
                        }
                } else { /* forced mode */
                        switch (bp->req_flow_ctrl) {
                        case FLOW_CTRL_AUTO:
                                /* AUTO without AUTONEG_FLOW_CTRL is an
                                 * inconsistent request — log and leave
                                 * flow_ctrl at 0 */
                                DP(NETIF_MSG_LINK, "req_flow_ctrl 0x%x while"
                                                   " req_autoneg 0x%x\n",
                                   bp->req_flow_ctrl, bp->req_autoneg);
                                break;

                        case FLOW_CTRL_TX:
                        case FLOW_CTRL_RX:
                        case FLOW_CTRL_BOTH:
                                bp->flow_ctrl = bp->req_flow_ctrl;
                                break;

                        case FLOW_CTRL_NONE:
                        default:
                                break;
                        }
                }
        }
        DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", bp->flow_ctrl);
}
1792
/* Decode the XGXS "general purpose" status word into the driver link
 * state: bp->phy_link_up, bp->line_speed, bp->duplex, bp->flow_ctrl and
 * the bp->link_status bit-field (LINK_* / LINK_STATUS_* flags).
 * On link down everything is reset and duplex defaults to FULL.
 */
static void bnx2x_link_settings_status(struct bnx2x *bp, u32 gp_status)
{
        bp->link_status = 0;

        if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) {
                DP(NETIF_MSG_LINK, "phy link up\n");

                bp->phy_link_up = 1;
                bp->link_status |= LINK_STATUS_LINK_UP;

                if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS)
                        bp->duplex = DUPLEX_FULL;
                else
                        bp->duplex = DUPLEX_HALF;

                /* flow control must be resolved before the speed decode
                 * below reads bp->flow_ctrl into link_status */
                bnx2x_flow_ctrl_resolve(bp, gp_status);

                switch (gp_status & GP_STATUS_SPEED_MASK) {
                case GP_STATUS_10M:
                        bp->line_speed = SPEED_10;
                        if (bp->duplex == DUPLEX_FULL)
                                bp->link_status |= LINK_10TFD;
                        else
                                bp->link_status |= LINK_10THD;
                        break;

                case GP_STATUS_100M:
                        bp->line_speed = SPEED_100;
                        if (bp->duplex == DUPLEX_FULL)
                                bp->link_status |= LINK_100TXFD;
                        else
                                bp->link_status |= LINK_100TXHD;
                        break;

                case GP_STATUS_1G:
                case GP_STATUS_1G_KX:
                        bp->line_speed = SPEED_1000;
                        if (bp->duplex == DUPLEX_FULL)
                                bp->link_status |= LINK_1000TFD;
                        else
                                bp->link_status |= LINK_1000THD;
                        break;

                case GP_STATUS_2_5G:
                        bp->line_speed = SPEED_2500;
                        if (bp->duplex == DUPLEX_FULL)
                                bp->link_status |= LINK_2500TFD;
                        else
                                bp->link_status |= LINK_2500THD;
                        break;

                case GP_STATUS_5G:
                case GP_STATUS_6G:
                        /* hardware can report these but the driver has no
                         * matching LINK_* flag — treated as unsupported */
                        BNX2X_ERR("link speed unsupported  gp_status 0x%x\n",
                                  gp_status);
                        break;

                /* 10G and above: only full-duplex flags exist here */
                case GP_STATUS_10G_KX4:
                case GP_STATUS_10G_HIG:
                case GP_STATUS_10G_CX4:
                        bp->line_speed = SPEED_10000;
                        bp->link_status |= LINK_10GTFD;
                        break;

                case GP_STATUS_12G_HIG:
                        bp->line_speed = SPEED_12000;
                        bp->link_status |= LINK_12GTFD;
                        break;

                case GP_STATUS_12_5G:
                        bp->line_speed = SPEED_12500;
                        bp->link_status |= LINK_12_5GTFD;
                        break;

                case GP_STATUS_13G:
                        bp->line_speed = SPEED_13000;
                        bp->link_status |= LINK_13GTFD;
                        break;

                case GP_STATUS_15G:
                        bp->line_speed = SPEED_15000;
                        bp->link_status |= LINK_15GTFD;
                        break;

                case GP_STATUS_16G:
                        bp->line_speed = SPEED_16000;
                        bp->link_status |= LINK_16GTFD;
                        break;

                default:
                        BNX2X_ERR("link speed unsupported  gp_status 0x%x\n",
                                  gp_status);
                        break;
                }

                bp->link_status |= LINK_STATUS_SERDES_LINK;

                if (bp->req_autoneg & AUTONEG_SPEED) {
                        bp->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;

                        if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE)
                                bp->link_status |=
                                        LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;

                        if (bp->autoneg & AUTONEG_PARALLEL)
                                bp->link_status |=
                                        LINK_STATUS_PARALLEL_DETECTION_USED;
                }

                if (bp->flow_ctrl & FLOW_CTRL_TX)
                       bp->link_status |= LINK_STATUS_TX_FLOW_CONTROL_ENABLED;

                if (bp->flow_ctrl & FLOW_CTRL_RX)
                       bp->link_status |= LINK_STATUS_RX_FLOW_CONTROL_ENABLED;

        } else { /* link_down */
                DP(NETIF_MSG_LINK, "phy link down\n");

                bp->phy_link_up = 0;

                bp->line_speed = 0;
                bp->duplex = DUPLEX_FULL;
                bp->flow_ctrl = 0;
        }

        DP(NETIF_MSG_LINK, "gp_status 0x%x  phy_link_up %d\n"
           DP_LEVEL "  line_speed %d  duplex %d  flow_ctrl 0x%x"
                    "  link_status 0x%x\n",
           gp_status, bp->phy_link_up, bp->line_speed, bp->duplex,
           bp->flow_ctrl, bp->link_status);
}
1924
/* Acknowledge/mask the NIG link-change interrupt for this port.
 * First clears all three per-port link status bits, then, if the link is
 * up, writes 1 back to the single bit matching the active link type
 * (10G XGXS, 1G XGXS lane, or SerDes) — per the in-code comments this
 * disables further interrupts from that source.
 * is_10g: non-zero when the new link is a 10G XGXS link.
 */
static void bnx2x_link_int_ack(struct bnx2x *bp, int is_10g)
{
        int port = bp->port;

        /* first reset all status
         * we assume only one line will be change at a time */
        bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
                       (NIG_STATUS_XGXS0_LINK10G |
                        NIG_STATUS_XGXS0_LINK_STATUS |
                        NIG_STATUS_SERDES0_LINK_STATUS));
        if (bp->phy_link_up) {
                if (is_10g) {
                        /* Disable the 10G link interrupt
                         * by writing 1 to the status register
                         */
                        DP(NETIF_MSG_LINK, "10G XGXS phy link up\n");
                        bnx2x_bits_en(bp,
                                      NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
                                      NIG_STATUS_XGXS0_LINK10G);

                } else if (bp->phy_flags & PHY_XGXS_FLAG) {
                        /* Disable the link interrupt
                         * by writing 1 to the relevant lane
                         * in the status register
                         */
                        DP(NETIF_MSG_LINK, "1G XGXS phy link up\n");
                        bnx2x_bits_en(bp,
                                      NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
                                      ((1 << bp->ser_lane) <<
                                       NIG_STATUS_XGXS0_LINK_STATUS_SIZE));

                } else { /* SerDes */
                        DP(NETIF_MSG_LINK, "SerDes phy link up\n");
                        /* Disable the link interrupt
                         * by writing 1 to the status register
                         */
                        bnx2x_bits_en(bp,
                                      NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
                                      NIG_STATUS_SERDES0_LINK_STATUS);
                }

        } else { /* link_down */
                /* nothing more to do — all status bits were already
                 * cleared above */
        }
}
1969
/* Query the external PHY (if any) for link state.
 * Returns 1 (link up) or 0 (link down / unknown PHY type).
 * For "direct" configurations with no external PHY the link is reported
 * up unconditionally.  Several LASI/status registers are read twice —
 * presumably latched, first read clearing stale state; TODO confirm
 * against the respective PHY datasheets.
 */
static int bnx2x_ext_phy_is_link_up(struct bnx2x *bp)
{
        u32 ext_phy_type;
        u32 ext_phy_addr;
        u32 val1 = 0, val2;
        u32 rx_sd, pcs_status;

        if (bp->phy_flags & PHY_XGXS_FLAG) {
                /* external PHY MDIO address from the HW config word */
                ext_phy_addr = ((bp->ext_phy_config &
                                 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
                                PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);

                ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
                switch (ext_phy_type) {
                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
                        /* no external PHY — link state comes from the
                         * internal XGXS, so report up here */
                        DP(NETIF_MSG_LINK, "XGXS Direct\n");
                        val1 = 1;
                        break;

                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
                        DP(NETIF_MSG_LINK, "XGXS 8705\n");
                        bnx2x_mdio45_read(bp, ext_phy_addr,
                                          EXT_PHY_OPT_WIS_DEVAD,
                                          EXT_PHY_OPT_LASI_STATUS, &val1);
                        DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);

                        bnx2x_mdio45_read(bp, ext_phy_addr,
                                          EXT_PHY_OPT_WIS_DEVAD,
                                          EXT_PHY_OPT_LASI_STATUS, &val1);
                        DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);

                        bnx2x_mdio45_read(bp, ext_phy_addr,
                                          EXT_PHY_OPT_PMA_PMD_DEVAD,
                                          EXT_PHY_OPT_PMD_RX_SD, &rx_sd);
                        DP(NETIF_MSG_LINK, "8705 rx_sd 0x%x\n", rx_sd);
                        /* link up == PMD rx signal detect (bit 0) */
                        val1 = (rx_sd & 0x1);
                        break;

                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
                        DP(NETIF_MSG_LINK, "XGXS 8706\n");
                        bnx2x_mdio45_read(bp, ext_phy_addr,
                                          EXT_PHY_OPT_PMA_PMD_DEVAD,
                                          EXT_PHY_OPT_LASI_STATUS, &val1);
                        DP(NETIF_MSG_LINK, "8706 LASI status 0x%x\n", val1);

                        bnx2x_mdio45_read(bp, ext_phy_addr,
                                          EXT_PHY_OPT_PMA_PMD_DEVAD,
                                          EXT_PHY_OPT_LASI_STATUS, &val1);
                        DP(NETIF_MSG_LINK, "8706 LASI status 0x%x\n", val1);

                        bnx2x_mdio45_read(bp, ext_phy_addr,
                                          EXT_PHY_OPT_PMA_PMD_DEVAD,
                                          EXT_PHY_OPT_PMD_RX_SD, &rx_sd);
                        bnx2x_mdio45_read(bp, ext_phy_addr,
                                          EXT_PHY_OPT_PCS_DEVAD,
                                          EXT_PHY_OPT_PCS_STATUS, &pcs_status);
                        bnx2x_mdio45_read(bp, ext_phy_addr,
                                          EXT_PHY_AUTO_NEG_DEVAD,
                                          EXT_PHY_OPT_AN_LINK_STATUS, &val2);

                        DP(NETIF_MSG_LINK, "8706 rx_sd 0x%x"
                           "  pcs_status 0x%x 1Gbps link_status 0x%x 0x%x\n",
                           rx_sd, pcs_status, val2, (val2 & (1<<1)));
                        /* link is up if both bit 0 of pmd_rx_sd and
                         * bit 0 of pcs_status are set, or if the autoneg bit
                           1 is set
                         */
                        val1 = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1)));
                        break;

                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
                        /* 8072 MDIO is shared — serialize access via the
                         * HW lock for the whole read sequence */
                        bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);

                        /* clear the interrupt LASI status register */
                        bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
                                               ext_phy_addr,
                                               EXT_PHY_KR_PCS_DEVAD,
                                               EXT_PHY_KR_LASI_STATUS, &val2);
                        bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
                                               ext_phy_addr,
                                               EXT_PHY_KR_PCS_DEVAD,
                                               EXT_PHY_KR_LASI_STATUS, &val1);
                        DP(NETIF_MSG_LINK, "KR LASI status 0x%x->0x%x\n",
                           val2, val1);
                        /* Check the LASI */
                        bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
                                               ext_phy_addr,
                                               EXT_PHY_KR_PMA_PMD_DEVAD,
                                               0x9003, &val2);
                        bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
                                               ext_phy_addr,
                                               EXT_PHY_KR_PMA_PMD_DEVAD,
                                               0x9003, &val1);
                        DP(NETIF_MSG_LINK, "KR 0x9003 0x%x->0x%x\n",
                           val2, val1);
                        /* Check the link status */
                        bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
                                               ext_phy_addr,
                                               EXT_PHY_KR_PCS_DEVAD,
                                               EXT_PHY_KR_PCS_STATUS, &val2);
                        DP(NETIF_MSG_LINK, "KR PCS status 0x%x\n", val2);
                        /* Check the link status on 1.1.2 */
                        bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
                                          ext_phy_addr,
                                          EXT_PHY_OPT_PMA_PMD_DEVAD,
                                          EXT_PHY_KR_STATUS, &val2);
                        bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
                                          ext_phy_addr,
                                          EXT_PHY_OPT_PMA_PMD_DEVAD,
                                          EXT_PHY_KR_STATUS, &val1);
                        DP(NETIF_MSG_LINK,
                           "KR PMA status 0x%x->0x%x\n", val2, val1);
                        /* link up == PMA status bit 2 set */
                        val1 = ((val1 & 4) == 4);
                        /* If 1G was requested assume the link is up */
                        if (!(bp->req_autoneg & AUTONEG_SPEED) &&
                            (bp->req_line_speed == SPEED_1000))
                                val1 = 1;
                        bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
                        break;

                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
                        bnx2x_mdio45_read(bp, ext_phy_addr,
                                          EXT_PHY_OPT_PMA_PMD_DEVAD,
                                          EXT_PHY_OPT_LASI_STATUS, &val2);
                        bnx2x_mdio45_read(bp, ext_phy_addr,
                                          EXT_PHY_OPT_PMA_PMD_DEVAD,
                                          EXT_PHY_OPT_LASI_STATUS, &val1);
                        DP(NETIF_MSG_LINK,
                           "10G-base-T LASI status 0x%x->0x%x\n", val2, val1);
                        bnx2x_mdio45_read(bp, ext_phy_addr,
                                          EXT_PHY_OPT_PMA_PMD_DEVAD,
                                          EXT_PHY_KR_STATUS, &val2);
                        bnx2x_mdio45_read(bp, ext_phy_addr,
                                          EXT_PHY_OPT_PMA_PMD_DEVAD,
                                          EXT_PHY_KR_STATUS, &val1);
                        DP(NETIF_MSG_LINK,
                           "10G-base-T PMA status 0x%x->0x%x\n", val2, val1);
                        /* link up == PMA status bit 2 set */
                        val1 = ((val1 & 4) == 4);
                        /* if link is up
                         * print the AN outcome of the SFX7101 PHY
                         */
                        if (val1) {
                                bnx2x_mdio45_read(bp, ext_phy_addr,
                                                  EXT_PHY_KR_AUTO_NEG_DEVAD,
                                                  0x21, &val2);
                                DP(NETIF_MSG_LINK,
                                   "SFX7101 AN status 0x%x->%s\n", val2,
                                   (val2 & (1<<14)) ? "Master" : "Slave");
                        }
                        break;

                default:
                        DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
                           bp->ext_phy_config);
                        val1 = 0;
                        break;
                }

        } else { /* SerDes */
                ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
                switch (ext_phy_type) {
                case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
                        DP(NETIF_MSG_LINK, "SerDes Direct\n");
                        val1 = 1;
                        break;

                case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
                        /* no status readable here — reported always up */
                        DP(NETIF_MSG_LINK, "SerDes 5482\n");
                        val1 = 1;
                        break;

                default:
                        DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
                           bp->ext_phy_config);
                        val1 = 0;
                        break;
                }
        }

        return val1;
}
2151
/* Bring the BigMAC (10G MAC) of this port out of reset and configure it
 * for the current link: XGXS attach, station MAC address, tx/rx MTU,
 * flow control and optional MAC-level loopback (is_lb != 0).
 * Finally steers the NIG from the EMAC path onto the BMAC path and
 * enables statistics collection.
 */
static void bnx2x_bmac_enable(struct bnx2x *bp, int is_lb)
{
        int port = bp->port;
        u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
                               NIG_REG_INGRESS_BMAC0_MEM;
        u32 wb_write[2];        /* 64-bit register value, written via DMAE */
        u32 val;

        DP(NETIF_MSG_LINK, "enabling BigMAC\n");
        /* reset and unreset the BigMac */
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
               (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
        msleep(5);
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
               (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));

        /* enable access for bmac registers */
        NIG_WR(NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);

        /* XGXS control */
        wb_write[0] = 0x3c;
        wb_write[1] = 0;
        REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
                    wb_write, 2);

        /* tx MAC SA: dev_addr bytes packed big-endian into the 64-bit reg */
        wb_write[0] = ((bp->dev->dev_addr[2] << 24) |
                       (bp->dev->dev_addr[3] << 16) |
                       (bp->dev->dev_addr[4] << 8) |
                        bp->dev->dev_addr[5]);
        wb_write[1] = ((bp->dev->dev_addr[0] << 8) |
                        bp->dev->dev_addr[1]);
        REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR,
                    wb_write, 2);

        /* tx control */
        val = 0xc0;
        if (bp->flow_ctrl & FLOW_CTRL_TX)
                val |= 0x800000;        /* enable sending pause frames */
        wb_write[0] = val;
        wb_write[1] = 0;
        REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_CONTROL, wb_write, 2);

        /* set tx mtu */
        wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; /* -CRC */
        wb_write[1] = 0;
        REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_write, 2);

        /* mac control */
        val = 0x3;              /* tx/rx enable */
        if (is_lb) {
                val |= 0x4;     /* MAC loopback */
                DP(NETIF_MSG_LINK, "enable bmac loopback\n");
        }
        wb_write[0] = val;
        wb_write[1] = 0;
        REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
                    wb_write, 2);

        /* rx control set to don't strip crc */
        val = 0x14;
        if (bp->flow_ctrl & FLOW_CTRL_RX)
                val |= 0x20;    /* honor received pause frames */
        wb_write[0] = val;
        wb_write[1] = 0;
        REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_write, 2);

        /* set rx mtu */
        wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
        wb_write[1] = 0;
        REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_write, 2);

        /* set cnt max size */
        wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; /* -VLAN */
        wb_write[1] = 0;
        REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE,
                    wb_write, 2);

        /* configure safc */
        wb_write[0] = 0x1000200;
        wb_write[1] = 0;
        REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
                    wb_write, 2);

        /* fix for emulation */
        if (CHIP_REV(bp) == CHIP_REV_EMUL) {
                wb_write[0] = 0xf000;
                wb_write[1] = 0;
                REG_WR_DMAE(bp,
                            bmac_addr + BIGMAC_REGISTER_TX_PAUSE_THRESHOLD,
                            wb_write, 2);
        }

        /* reset old bmac stats */
        memset(&bp->old_bmac, 0, sizeof(struct bmac_stats));

        NIG_WR(NIG_REG_XCM0_OUT_EN + port*4, 0x0);

        /* select XGXS */
        NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0x1);
        NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0);

        /* disable the NIG in/out to the emac */
        NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0x0);
        NIG_WR(NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, 0x0);
        NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x0);

        /* enable the NIG in/out to the bmac */
        NIG_WR(NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0);

        NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0x1);
        /* pause-out only when tx flow control is active */
        val = 0;
        if (bp->flow_ctrl & FLOW_CTRL_TX)
                val = 1;
        NIG_WR(NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, val);
        NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0x1);

        bp->phy_flags |= PHY_BMAC_FLAG;

        bp->stats_state = STATS_STATE_ENABLE;
}
2273
/* Stop BMAC receive on this port by clearing the RX-enable bit in
 * BMAC_CONTROL (read-modify-write of the 64-bit register).
 * No-op when the BMAC block is still held in reset, since its registers
 * are not accessible then.  The trailing 1 ms sleep presumably lets
 * in-flight rx traffic drain — TODO confirm required delay.
 */
static void bnx2x_bmac_rx_disable(struct bnx2x *bp)
{
        int port = bp->port;
        u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
                               NIG_REG_INGRESS_BMAC0_MEM;
        u32 wb_write[2];

        /* Only if the bmac is out of reset */
        if (REG_RD(bp, MISC_REG_RESET_REG_2) &
                        (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)) {
                /* Clear Rx Enable bit in BMAC_CONTROL register */
#ifdef BNX2X_DMAE_RD
                /* DMAE path: result lands in the slowpath wb_data buffer */
                bnx2x_read_dmae(bp, bmac_addr +
                                BIGMAC_REGISTER_BMAC_CONTROL, 2);
                wb_write[0] = *bnx2x_sp(bp, wb_data[0]);
                wb_write[1] = *bnx2x_sp(bp, wb_data[1]);
#else
                /* PIO path: read the two 32-bit halves directly */
                wb_write[0] = REG_RD(bp,
                                bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL);
                wb_write[1] = REG_RD(bp,
                                bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL + 4);
#endif
                wb_write[0] &= ~BMAC_CONTROL_RX_ENABLE;
                REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
                            wb_write, 2);
                msleep(1);
        }
}
2302
2303 static void bnx2x_emac_enable(struct bnx2x *bp)
2304 {
2305         int port = bp->port;
2306         u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
2307         u32 val;
2308         int timeout;
2309
2310         DP(NETIF_MSG_LINK, "enabling EMAC\n");
2311         /* reset and unreset the emac core */
2312         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
2313                (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
2314         msleep(5);
2315         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
2316                (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
2317
2318         /* enable emac and not bmac */
2319         NIG_WR(NIG_REG_EGRESS_EMAC0_PORT + port*4, 1);
2320
2321         /* for paladium */
2322         if (CHIP_REV(bp) == CHIP_REV_EMUL) {
2323                 /* Use lane 1 (of lanes 0-3) */
2324                 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
2325                 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
2326         }
2327         /* for fpga */
2328         else if (CHIP_REV(bp) == CHIP_REV_FPGA) {
2329                 /* Use lane 1 (of lanes 0-3) */
2330                 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
2331                 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
2332         }
2333         /* ASIC */
2334         else {
2335                 if (bp->phy_flags & PHY_XGXS_FLAG) {
2336                         DP(NETIF_MSG_LINK, "XGXS\n");
2337                         /* select the master lanes (out of 0-3) */
2338                         NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4,
2339                                bp->ser_lane);
2340                         /* select XGXS */
2341                         NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
2342
2343                 } else { /* SerDes */
2344                         DP(NETIF_MSG_LINK, "SerDes\n");
2345                         /* select SerDes */
2346                         NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
2347                 }
2348         }
2349
2350         /* enable emac */
2351         NIG_WR(NIG_REG_NIG_EMAC0_EN + port*4, 1);
2352
2353         /* init emac - use read-modify-write */
2354         /* self clear reset */
2355         val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2356         EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET));
2357
2358         timeout = 200;
2359         while (val & EMAC_MODE_RESET) {
2360                 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2361                 DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
2362                 if (!timeout) {
2363                         BNX2X_ERR("EMAC timeout!\n");
2364                         break;
2365                 }
2366                 timeout--;
2367         }
2368
2369         /* reset tx part */
2370         EMAC_WR(EMAC_REG_EMAC_TX_MODE, EMAC_TX_MODE_RESET);
2371
2372         timeout = 200;
2373         while (val & EMAC_TX_MODE_RESET) {
2374                 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_TX_MODE);
2375                 DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
2376                 if (!timeout) {
2377                         BNX2X_ERR("EMAC timeout!\n");
2378                         break;
2379                 }
2380                 timeout--;
2381         }
2382
2383         if (CHIP_REV_IS_SLOW(bp)) {
2384                 /* config GMII mode */
2385                 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2386                 EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII));
2387
2388         } else { /* ASIC */
2389                 /* pause enable/disable */
2390                 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
2391                                EMAC_RX_MODE_FLOW_EN);
2392                 if (bp->flow_ctrl & FLOW_CTRL_RX)
2393                         bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
2394                                       EMAC_RX_MODE_FLOW_EN);
2395
2396                 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
2397                                EMAC_TX_MODE_EXT_PAUSE_EN);
2398                 if (bp->flow_ctrl & FLOW_CTRL_TX)
2399                         bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
2400                                       EMAC_TX_MODE_EXT_PAUSE_EN);
2401         }
2402
2403         /* KEEP_VLAN_TAG, promiscuous */
2404         val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
2405         val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
2406         EMAC_WR(EMAC_REG_EMAC_RX_MODE, val);
2407
2408         /* identify magic packets */
2409         val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2410         EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_MPKT));
2411
2412         /* enable emac for jumbo packets */
2413         EMAC_WR(EMAC_REG_EMAC_RX_MTU_SIZE,
2414                 (EMAC_RX_MTU_SIZE_JUMBO_ENA |
2415                  (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD))); /* -VLAN */
2416
2417         /* strip CRC */
2418         NIG_WR(NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1);
2419
2420         val = ((bp->dev->dev_addr[0] << 8) |
2421                 bp->dev->dev_addr[1]);
2422         EMAC_WR(EMAC_REG_EMAC_MAC_MATCH, val);
2423
2424         val = ((bp->dev->dev_addr[2] << 24) |
2425                (bp->dev->dev_addr[3] << 16) |
2426                (bp->dev->dev_addr[4] << 8) |
2427                 bp->dev->dev_addr[5]);
2428         EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val);
2429
2430         /* disable the NIG in/out to the bmac */
2431         NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0x0);
2432         NIG_WR(NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, 0x0);
2433         NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0x0);
2434
2435         /* enable the NIG in/out to the emac */
2436         NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0x1);
2437         val = 0;
2438         if (bp->flow_ctrl & FLOW_CTRL_TX)
2439                 val = 1;
2440         NIG_WR(NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val);
2441         NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1);
2442
2443         if (CHIP_REV(bp) == CHIP_REV_FPGA) {
2444                 /* take the BigMac out of reset */
2445                 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
2446                        (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2447
2448                 /* enable access for bmac registers */
2449                 NIG_WR(NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
2450         }
2451
2452         bp->phy_flags |= PHY_EMAC_FLAG;
2453
2454         bp->stats_state = STATS_STATE_ENABLE;
2455 }
2456
2457 static void bnx2x_emac_program(struct bnx2x *bp)
2458 {
2459         u16 mode = 0;
2460         int port = bp->port;
2461
2462         DP(NETIF_MSG_LINK, "setting link speed & duplex\n");
2463         bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
2464                        (EMAC_MODE_25G_MODE |
2465                         EMAC_MODE_PORT_MII_10M |
2466                         EMAC_MODE_HALF_DUPLEX));
2467         switch (bp->line_speed) {
2468         case SPEED_10:
2469                 mode |= EMAC_MODE_PORT_MII_10M;
2470                 break;
2471
2472         case SPEED_100:
2473                 mode |= EMAC_MODE_PORT_MII;
2474                 break;
2475
2476         case SPEED_1000:
2477                 mode |= EMAC_MODE_PORT_GMII;
2478                 break;
2479
2480         case SPEED_2500:
2481                 mode |= (EMAC_MODE_25G_MODE | EMAC_MODE_PORT_GMII);
2482                 break;
2483
2484         default:
2485                 /* 10G not valid for EMAC */
2486                 BNX2X_ERR("Invalid line_speed 0x%x\n", bp->line_speed);
2487                 break;
2488         }
2489
2490         if (bp->duplex == DUPLEX_HALF)
2491                 mode |= EMAC_MODE_HALF_DUPLEX;
2492         bnx2x_bits_en(bp, GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
2493                       mode);
2494
2495         bnx2x_leds_set(bp, bp->line_speed);
2496 }
2497
2498 static void bnx2x_set_sgmii_tx_driver(struct bnx2x *bp)
2499 {
2500         u32 lp_up2;
2501         u32 tx_driver;
2502
2503         /* read precomp */
2504         MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_OVER_1G);
2505         bnx2x_mdio22_read(bp, MDIO_OVER_1G_LP_UP2, &lp_up2);
2506
2507         MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_TX0);
2508         bnx2x_mdio22_read(bp, MDIO_TX0_TX_DRIVER, &tx_driver);
2509
2510         /* bits [10:7] at lp_up2, positioned at [15:12] */
2511         lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
2512                    MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT) <<
2513                   MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT);
2514
2515         if ((lp_up2 != 0) &&
2516             (lp_up2 != (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK))) {
2517                 /* replace tx_driver bits [15:12] */
2518                 tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
2519                 tx_driver |= lp_up2;
2520                 bnx2x_mdio22_write(bp, MDIO_TX0_TX_DRIVER, tx_driver);
2521         }
2522 }
2523
2524 static void bnx2x_pbf_update(struct bnx2x *bp)
2525 {
2526         int port = bp->port;
2527         u32 init_crd, crd;
2528         u32 count = 1000;
2529         u32 pause = 0;
2530
2531         /* disable port */
2532         REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);
2533
2534         /* wait for init credit */
2535         init_crd = REG_RD(bp, PBF_REG_P0_INIT_CRD + port*4);
2536         crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2537         DP(NETIF_MSG_LINK, "init_crd 0x%x  crd 0x%x\n", init_crd, crd);
2538
2539         while ((init_crd != crd) && count) {
2540                 msleep(5);
2541
2542                 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2543                 count--;
2544         }
2545         crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2546         if (init_crd != crd)
2547                 BNX2X_ERR("BUG! init_crd 0x%x != crd 0x%x\n", init_crd, crd);
2548
2549         if (bp->flow_ctrl & FLOW_CTRL_RX)
2550                 pause = 1;
2551         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, pause);
2552         if (pause) {
2553                 /* update threshold */
2554                 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
2555                 /* update init credit */
2556                 init_crd = 778;         /* (800-18-4) */
2557
2558         } else {
2559                 u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)/16;
2560
2561                 /* update threshold */
2562                 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh);
2563                 /* update init credit */
2564                 switch (bp->line_speed) {
2565                 case SPEED_10:
2566                 case SPEED_100:
2567                 case SPEED_1000:
2568                         init_crd = thresh + 55 - 22;
2569                         break;
2570
2571                 case SPEED_2500:
2572                         init_crd = thresh + 138 - 22;
2573                         break;
2574
2575                 case SPEED_10000:
2576                         init_crd = thresh + 553 - 22;
2577                         break;
2578
2579                 default:
2580                         BNX2X_ERR("Invalid line_speed 0x%x\n",
2581                                   bp->line_speed);
2582                         break;
2583                 }
2584         }
2585         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, init_crd);
2586         DP(NETIF_MSG_LINK, "PBF updated to speed %d credit %d\n",
2587            bp->line_speed, init_crd);
2588
2589         /* probe the credit changes */
2590         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x1);
2591         msleep(5);
2592         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x0);
2593
2594         /* enable port */
2595         REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0);
2596 }
2597
2598 static void bnx2x_update_mng(struct bnx2x *bp)
2599 {
2600         if (!nomcp)
2601                 SHMEM_WR(bp, port_mb[bp->port].link_status,
2602                          bp->link_status);
2603 }
2604
2605 static void bnx2x_link_report(struct bnx2x *bp)
2606 {
2607         if (bp->link_up) {
2608                 netif_carrier_on(bp->dev);
2609                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2610
2611                 printk("%d Mbps ", bp->line_speed);
2612
2613                 if (bp->duplex == DUPLEX_FULL)
2614                         printk("full duplex");
2615                 else
2616                         printk("half duplex");
2617
2618                 if (bp->flow_ctrl) {
2619                         if (bp->flow_ctrl & FLOW_CTRL_RX) {
2620                                 printk(", receive ");
2621                                 if (bp->flow_ctrl & FLOW_CTRL_TX)
2622                                         printk("& transmit ");
2623                         } else {
2624                                 printk(", transmit ");
2625                         }
2626                         printk("flow control ON");
2627                 }
2628                 printk("\n");
2629
2630         } else { /* link_down */
2631                 netif_carrier_off(bp->dev);
2632                 printk(KERN_INFO PFX "%s NIC Link is Down\n", bp->dev->name);
2633         }
2634 }
2635
2636 static void bnx2x_link_up(struct bnx2x *bp)
2637 {
2638         int port = bp->port;
2639
2640         /* PBF - link up */
2641         bnx2x_pbf_update(bp);
2642
2643         /* disable drain */
2644         NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
2645
2646         /* update shared memory */
2647         bnx2x_update_mng(bp);
2648
2649         /* indicate link up */
2650         bnx2x_link_report(bp);
2651 }
2652
/* Handle link loss: request statistics stop, mark both MACs inactive,
 * publish the status to the management FW, activate the NIG egress drain,
 * disable BigMac rx and hold the BigMac in reset, then tell the stack the
 * carrier is gone.  The steps are order-sensitive.
 */
static void bnx2x_link_down(struct bnx2x *bp)
{
	int port = bp->port;

	/* notify stats (unless statistics are already fully disabled) */
	if (bp->stats_state != STATS_STATE_DISABLE) {
		bp->stats_state = STATS_STATE_STOP;
		DP(BNX2X_MSG_STATS, "stats_state - STOP\n");
	}

	/* indicate no mac active */
	bp->phy_flags &= ~(PHY_BMAC_FLAG | PHY_EMAC_FLAG);

	/* update shared memory */
	bnx2x_update_mng(bp);

	/* activate nig drain */
	NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);

	/* reset BigMac: stop its rx path first, then assert the reset bit */
	bnx2x_bmac_rx_disable(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
	       (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));

	/* indicate link down (carrier off + log message) */
	bnx2x_link_report(bp);
}
2680
2681 static void bnx2x_init_mac_stats(struct bnx2x *bp);
2682
/* This function is called upon link interrupt.  It re-reads the autoneg
 * result from the GP status register, acknowledges the interrupt, enables
 * the appropriate MAC (BMAC for 10G and above, EMAC otherwise) when the
 * link is up, and reports the new link state.  May sleep.
 */
static void bnx2x_link_update(struct bnx2x *bp)
{
	int port = bp->port;
	int i;
	u32 gp_status;
	int link_10g;

	DP(NETIF_MSG_LINK, "port %x, %s, int_status 0x%x,"
	   " int_mask 0x%x, saved_mask 0x%x, MI_INT %x, SERDES_LINK %x,"
	   " 10G %x, XGXS_LINK %x\n", port,
	   (bp->phy_flags & PHY_XGXS_FLAG)? "XGXS":"SerDes",
	   REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4),
	   REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4), bp->nig_mask,
	   REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
	   REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c),
	   REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
	   REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)
	);

	might_sleep();
	MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_GP_STATUS);
	/* avoid fast toggling: read the status 10 times over ~100ms;
	 * only the last read is used below */
	for (i = 0; i < 10; i++) {
		msleep(10);
		bnx2x_mdio22_read(bp, MDIO_GP_STATUS_TOP_AN_STATUS1,
				  &gp_status);
	}

	/* derive line_speed/duplex/flow-control from the AN result */
	bnx2x_link_settings_status(bp, gp_status);

	/* anything 10 and over uses the bmac */
	link_10g = ((bp->line_speed >= SPEED_10000) &&
		    (bp->line_speed <= SPEED_16000));

	bnx2x_link_int_ack(bp, link_10g);

	/* link is up only if both local phy and external phy are up */
	bp->link_up = (bp->phy_link_up && bnx2x_ext_phy_is_link_up(bp));
	if (bp->link_up) {
		if (link_10g) {
			bnx2x_bmac_enable(bp, 0);
			bnx2x_leds_set(bp, SPEED_10000);

		} else {
			bnx2x_emac_enable(bp);
			bnx2x_emac_program(bp);

			/* AN complete? */
			if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
				if (!(bp->phy_flags & PHY_SGMII_FLAG))
					bnx2x_set_sgmii_tx_driver(bp);
			}
		}
		bnx2x_link_up(bp);

	} else { /* link down */
		bnx2x_leds_unset(bp);
		bnx2x_link_down(bp);
	}

	bnx2x_init_mac_stats(bp);
}
2746
2747 /*
2748  * Init service functions
2749  */
2750
2751 static void bnx2x_set_aer_mmd(struct bnx2x *bp)
2752 {
2753         u16 offset = (bp->phy_flags & PHY_XGXS_FLAG) ?
2754                                         (bp->phy_addr + bp->ser_lane) : 0;
2755
2756         MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_AER_BLOCK);
2757         bnx2x_mdio22_write(bp, MDIO_AER_BLOCK_AER_REG, 0x3800 + offset);
2758 }
2759
2760 static void bnx2x_set_master_ln(struct bnx2x *bp)
2761 {
2762         u32 new_master_ln;
2763
2764         /* set the master_ln for AN */
2765         MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
2766         bnx2x_mdio22_read(bp, MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
2767                           &new_master_ln);
2768         bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
2769                            (new_master_ln | bp->ser_lane));
2770 }
2771
2772 static void bnx2x_reset_unicore(struct bnx2x *bp)
2773 {
2774         u32 mii_control;
2775         int i;
2776
2777         MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2778         bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
2779         /* reset the unicore */
2780         bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
2781                            (mii_control | MDIO_COMBO_IEEO_MII_CONTROL_RESET));
2782
2783         /* wait for the reset to self clear */
2784         for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) {
2785                 udelay(5);
2786
2787                 /* the reset erased the previous bank value */
2788                 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2789                 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
2790                                   &mii_control);
2791
2792                 if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) {
2793                         udelay(5);
2794                         return;
2795                 }
2796         }
2797
2798         BNX2X_ERR("BUG! %s (0x%x) is still in reset!\n",
2799                   (bp->phy_flags & PHY_XGXS_FLAG)? "XGXS":"SerDes",
2800                   bp->phy_addr);
2801 }
2802
2803 static void bnx2x_set_swap_lanes(struct bnx2x *bp)
2804 {
2805         /* Each two bits represents a lane number:
2806            No swap is 0123 => 0x1b no need to enable the swap */
2807
2808         MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
2809         if (bp->rx_lane_swap != 0x1b) {
2810                 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_RX_LN_SWAP,
2811                                    (bp->rx_lane_swap |
2812                                     MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
2813                                    MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
2814         } else {
2815                 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
2816         }
2817
2818         if (bp->tx_lane_swap != 0x1b) {
2819                 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TX_LN_SWAP,
2820                                    (bp->tx_lane_swap |
2821                                     MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
2822         } else {
2823                 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
2824         }
2825 }
2826
2827 static void bnx2x_set_parallel_detection(struct bnx2x *bp)
2828 {
2829         u32 control2;
2830
2831         MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
2832         bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
2833                           &control2);
2834
2835         if (bp->autoneg & AUTONEG_PARALLEL) {
2836                 control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
2837         } else {
2838                 control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
2839         }
2840         bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
2841                            control2);
2842
2843         if (bp->phy_flags & PHY_XGXS_FLAG) {
2844                 DP(NETIF_MSG_LINK, "XGXS\n");
2845                 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_10G_PARALLEL_DETECT);
2846
2847                 bnx2x_mdio22_write(bp,
2848                                 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
2849                                MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
2850
2851                 bnx2x_mdio22_read(bp,
2852                                 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
2853                                 &control2);
2854
2855                 if (bp->autoneg & AUTONEG_PARALLEL) {
2856                         control2 |=
2857                     MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
2858                 } else {
2859                         control2 &=
2860                    ~MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
2861                 }
2862                 bnx2x_mdio22_write(bp,
2863                                 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
2864                                 control2);
2865
2866                 /* Disable parallel detection of HiG */
2867                 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
2868                 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
2869                                 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
2870                                 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
2871         }
2872 }
2873
/* Configure every autoneg-related block according to the requested mode:
 * CL37 autoneg, SGMII/fiber autodetection, BAM/TetonII next-page autoneg
 * and CL73 autoneg.  Each feature is explicitly enabled or disabled based
 * on bp->req_autoneg and the bp->autoneg capability flags, so a previous
 * configuration is always fully overwritten.
 */
static void bnx2x_set_autoneg(struct bnx2x *bp)
{
	u32 reg_val;

	/* CL37 Autoneg */
	MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
	bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
	if ((bp->req_autoneg & AUTONEG_SPEED) &&
	    (bp->autoneg & AUTONEG_CL37)) {
		/* CL37 Autoneg Enabled */
		reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN;
	} else {
		/* CL37 Autoneg Disabled */
		reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
			     MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN);
	}
	bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);

	/* Enable/Disable Autodetection */
	MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
	bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
	reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN;

	if ((bp->req_autoneg & AUTONEG_SPEED) &&
	    (bp->autoneg & AUTONEG_SGMII_FIBER_AUTODET)) {
		reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
	} else {
		reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
	}
	bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);

	/* Enable TetonII and BAM autoneg */
	MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_BAM_NEXT_PAGE);
	bnx2x_mdio22_read(bp, MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
			  &reg_val);
	if ((bp->req_autoneg & AUTONEG_SPEED) &&
	    (bp->autoneg & AUTONEG_CL37) && (bp->autoneg & AUTONEG_BAM)) {
		/* Enable BAM aneg Mode and TetonII aneg Mode */
		reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
			    MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
	} else {
		/* TetonII and BAM Autoneg Disabled */
		reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
			     MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
	}
	bnx2x_mdio22_write(bp, MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
			   reg_val);

	/* Enable Clause 73 Aneg */
	if ((bp->req_autoneg & AUTONEG_SPEED) &&
	    (bp->autoneg & AUTONEG_CL73)) {
		/* Enable BAM Station Manager */
		MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_USERB0);
		bnx2x_mdio22_write(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL1,
				   (MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
			MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN |
			MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN));

		/* Merge CL73 and CL37 aneg resolution */
		bnx2x_mdio22_read(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL3,
				  &reg_val);
		bnx2x_mdio22_write(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL3,
				   (reg_val |
			MDIO_CL73_USERB0_CL73_BAM_CTRL3_USE_CL73_HCD_MR));

		/* Set the CL73 AN speed */
		MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB1);
		bnx2x_mdio22_read(bp, MDIO_CL73_IEEEB1_AN_ADV2, &reg_val);
		/* In the SerDes we support only the 1G.
		   In the XGXS we support the 10G KX4
		   but we currently do not support the KR */
		if (bp->phy_flags & PHY_XGXS_FLAG) {
			DP(NETIF_MSG_LINK, "XGXS\n");
			/* 10G KX4 */
			reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
		} else {
			DP(NETIF_MSG_LINK, "SerDes\n");
			/* 1000M KX */
			reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
		}
		bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB1_AN_ADV2, reg_val);

		/* CL73 Autoneg Enabled */
		reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;
	} else {
		/* CL73 Autoneg Disabled */
		reg_val = 0;
	}
	MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
	bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
}
2965
2966 /* program SerDes, forced speed */
2967 static void bnx2x_program_serdes(struct bnx2x *bp)
2968 {
2969         u32 reg_val;
2970
2971         /* program duplex, disable autoneg */
2972         MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2973         bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
2974         reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX |
2975                      MDIO_COMBO_IEEO_MII_CONTROL_AN_EN);
2976         if (bp->req_duplex == DUPLEX_FULL)
2977                 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
2978         bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
2979
2980         /* program speed
2981            - needed only if the speed is greater than 1G (2.5G or 10G) */
2982         if (bp->req_line_speed > SPEED_1000) {
2983                 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
2984                 bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_MISC1, &reg_val);
2985                 /* clearing the speed value before setting the right speed */
2986                 reg_val &= ~MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK;
2987                 reg_val |= (MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M |
2988                             MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
2989                 if (bp->req_line_speed == SPEED_10000)
2990                         reg_val |=
2991                                 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4;
2992                 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_MISC1, reg_val);
2993         }
2994 }
2995
2996 static void bnx2x_set_brcm_cl37_advertisment(struct bnx2x *bp)
2997 {
2998         u32 val = 0;
2999
3000         /* configure the 48 bits for BAM AN */
3001         MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_OVER_1G);
3002
3003         /* set extended capabilities */
3004         if (bp->advertising & ADVERTISED_2500baseX_Full)
3005                 val |= MDIO_OVER_1G_UP1_2_5G;
3006         if (bp->advertising & ADVERTISED_10000baseT_Full)
3007                 val |= MDIO_OVER_1G_UP1_10G;
3008         bnx2x_mdio22_write(bp, MDIO_OVER_1G_UP1, val);
3009
3010         bnx2x_mdio22_write(bp, MDIO_OVER_1G_UP3, 0);
3011 }
3012
3013 static void bnx2x_set_ieee_aneg_advertisment(struct bnx2x *bp)
3014 {
3015         u32 an_adv;
3016
3017         /* for AN, we are always publishing full duplex */
3018         an_adv = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
3019
3020         /* resolve pause mode and advertisement
3021          * Please refer to Table 28B-3 of the 802.3ab-1999 spec */
3022         if (bp->req_autoneg & AUTONEG_FLOW_CTRL) {
3023                 switch (bp->req_flow_ctrl) {
3024                 case FLOW_CTRL_AUTO:
3025                         if (bp->dev->mtu <= 4500) {
3026                                 an_adv |=
3027                                      MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3028                                 bp->advertising |= (ADVERTISED_Pause |
3029                                                     ADVERTISED_Asym_Pause);
3030                         } else {
3031                                 an_adv |=
3032                                MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3033                                 bp->advertising |= ADVERTISED_Asym_Pause;
3034                         }
3035                         break;
3036
3037                 case FLOW_CTRL_TX:
3038                         an_adv |=
3039                                MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3040                         bp->advertising |= ADVERTISED_Asym_Pause;
3041                         break;
3042
3043                 case FLOW_CTRL_RX:
3044                         if (bp->dev->mtu <= 4500) {
3045                                 an_adv |=
3046                                      MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3047                                 bp->advertising |= (ADVERTISED_Pause |
3048                                                     ADVERTISED_Asym_Pause);
3049                         } else {
3050                                 an_adv |=
3051                                      MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
3052                                 bp->advertising &= ~(ADVERTISED_Pause |
3053                                                      ADVERTISED_Asym_Pause);
3054                         }
3055                         break;
3056
3057                 case FLOW_CTRL_BOTH:
3058                         if (bp->dev->mtu <= 4500) {
3059                                 an_adv |=
3060                                      MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3061                                 bp->advertising |= (ADVERTISED_Pause |
3062                                                     ADVERTISED_Asym_Pause);
3063                         } else {
3064                                 an_adv |=
3065                                MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3066                                 bp->advertising |= ADVERTISED_Asym_Pause;
3067                         }
3068                         break;
3069
3070                 case FLOW_CTRL_NONE:
3071                 default:
3072                         an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
3073                         bp->advertising &= ~(ADVERTISED_Pause |
3074                                              ADVERTISED_Asym_Pause);
3075                         break;
3076                 }
3077         } else { /* forced mode */
3078                 switch (bp->req_flow_ctrl) {
3079                 case FLOW_CTRL_AUTO:
3080                         DP(NETIF_MSG_LINK, "req_flow_ctrl 0x%x while"
3081                                            " req_autoneg 0x%x\n",
3082                            bp->req_flow_ctrl, bp->req_autoneg);
3083                         break;
3084
3085                 case FLOW_CTRL_TX:
3086                         an_adv |=
3087                                MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3088                         bp->advertising |= ADVERTISED_Asym_Pause;
3089                         break;
3090
3091                 case FLOW_CTRL_RX:
3092                 case FLOW_CTRL_BOTH:
3093                         an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3094                         bp->advertising |= (ADVERTISED_Pause |
3095                                             ADVERTISED_Asym_Pause);
3096                         break;
3097
3098                 case FLOW_CTRL_NONE:
3099                 default:
3100                         an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
3101                         bp->advertising &= ~(ADVERTISED_Pause |
3102                                              ADVERTISED_Asym_Pause);
3103                         break;
3104                 }
3105         }
3106
3107         MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
3108         bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_AUTO_NEG_ADV, an_adv);
3109 }
3110
3111 static void bnx2x_restart_autoneg(struct bnx2x *bp)
3112 {
3113         if (bp->autoneg & AUTONEG_CL73) {
3114                 /* enable and restart clause 73 aneg */
3115                 u32 an_ctrl;
3116
3117                 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
3118                 bnx2x_mdio22_read(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
3119                                   &an_ctrl);
3120                 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
3121                                    (an_ctrl |
3122                                     MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
3123                                 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
3124
3125         } else {
3126                 /* Enable and restart BAM/CL37 aneg */
3127                 u32 mii_control;
3128
3129                 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
3130                 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3131                                   &mii_control);
3132                 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3133                                    (mii_control |
3134                                     MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
3135                                     MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
3136         }
3137 }
3138
/* Configure the SerDes unicore for SGMII (copper) operation.
 *
 * First programs the 1000X control register for SGMII: slave role,
 * non-fiber mode, auto-detect off.  Then, if speed auto-negotiation is
 * not requested, forces speed and duplex through the combo MII control
 * register; otherwise enables and restarts auto-negotiation.
 */
static void bnx2x_initialize_sgmii_process(struct bnx2x *bp)
{
	u32 control1;

	/* in SGMII mode, the unicore is always slave */
	MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
	bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
			  &control1);
	control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
	/* set sgmii mode (and not fiber) */
	control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
		      MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
		      MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
	bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
			   control1);

	/* if forced speed */
	if (!(bp->req_autoneg & AUTONEG_SPEED)) {
		/* set speed, disable autoneg */
		u32 mii_control;

		MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
		bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
				  &mii_control);
		/* clear AN enable, the manual SGMII speed bits and the
		 * duplex bit before applying the requested settings
		 */
		mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
			       MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK |
				 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);

		switch (bp->req_line_speed) {
		case SPEED_100:
			mii_control |=
				MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100;
			break;
		case SPEED_1000:
			mii_control |=
				MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000;
			break;
		case SPEED_10:
			/* there is nothing to set for 10M
			 * (manual speed bits cleared above select 10M)
			 */
			break;
		default:
			/* invalid speed for SGMII - log and leave the
			 * speed bits cleared
			 */
			DP(NETIF_MSG_LINK, "Invalid req_line_speed 0x%x\n",
			   bp->req_line_speed);
			break;
		}

		/* setting the full duplex */
		if (bp->req_duplex == DUPLEX_FULL)
			mii_control |=
				MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
		bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
				   mii_control);

	} else { /* AN mode */
		/* enable and restart AN */
		bnx2x_restart_autoneg(bp);
	}
}
3198
/* Enable link-change interrupts in the NIG for this port.
 *
 * Clears the relevant link status bits, then enables either the XGXS
 * (10G + link status) or the SerDes link status interrupt mask.  The
 * external PHY MI interrupt is additionally enabled when a real
 * external PHY is configured (i.e. not direct/failure/not-connected).
 * Finishes by dumping the current NIG status/mask registers for
 * debugging.
 */
static void bnx2x_link_int_enable(struct bnx2x *bp)
{
	int port = bp->port;
	u32 ext_phy_type;
	u32 mask;

	/* setting the status to report on link up
	   for either XGXS or SerDes */
	bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
		       (NIG_STATUS_XGXS0_LINK10G |
			NIG_STATUS_XGXS0_LINK_STATUS |
			NIG_STATUS_SERDES0_LINK_STATUS));

	if (bp->phy_flags & PHY_XGXS_FLAG) {
		mask = (NIG_MASK_XGXS0_LINK10G |
			NIG_MASK_XGXS0_LINK_STATUS);
		DP(NETIF_MSG_LINK, "enabled XGXS interrupt\n");
		ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
		if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
		    (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
		    (ext_phy_type !=
				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) {
			/* real external PHY present - enable its MI int */
			mask |= NIG_MASK_MI_INT;
			DP(NETIF_MSG_LINK, "enabled external phy int\n");
		}

	} else { /* SerDes */
		mask = NIG_MASK_SERDES0_LINK_STATUS;
		DP(NETIF_MSG_LINK, "enabled SerDes interrupt\n");
		ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
		if ((ext_phy_type !=
				PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) &&
		    (ext_phy_type !=
				PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN)) {
			/* real external PHY present - enable its MI int */
			mask |= NIG_MASK_MI_INT;
			DP(NETIF_MSG_LINK, "enabled external phy int\n");
		}
	}
	bnx2x_bits_en(bp,
		      NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
		      mask);
	/* dump the NIG link status/mask state for this port
	 * (per-port register strides: status/mask 4, MI_INT 0x18,
	 * SerDes link 0x3c, XGXS link 0x68)
	 */
	DP(NETIF_MSG_LINK, "port %x, %s, int_status 0x%x,"
	   " int_mask 0x%x, MI_INT %x, SERDES_LINK %x,"
	   " 10G %x, XGXS_LINK %x\n", port,
	   (bp->phy_flags & PHY_XGXS_FLAG)? "XGXS":"SerDes",
	   REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4),
	   REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
	   REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
	   REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c),
	   REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
	   REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)
	);
}
3252
/* Boot the BCM8072 external PHY firmware from its external ROM.
 *
 * Follows the vendor-prescribed sequence: enable serial boot, pulse
 * the internal microprocessor reset around a micro-reset write, wait
 * for the SPI code download, clear serial boot, and finally read and
 * log the firmware version.  The msleep() delays are part of the
 * required sequence timing.
 * NOTE(review): appears to assume the caller holds the 8072 MDIO
 * hardware lock (see callers) - confirm before reuse.
 */
static void bnx2x_bcm8072_external_rom_boot(struct bnx2x *bp)
{
	u32 ext_phy_addr = ((bp->ext_phy_config &
			     PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
			    PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
	u32 fw_ver1, fw_ver2;

	/* Need to wait 200ms after reset */
	msleep(200);
	/* Boot port from external ROM
	 * Set ser_boot_ctl bit in the MISC_CTRL1 register
	 */
	bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
				EXT_PHY_KR_PMA_PMD_DEVAD,
				EXT_PHY_KR_MISC_CTRL1, 0x0001);

	/* Reset internal microprocessor */
	bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
				EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL,
				EXT_PHY_KR_ROM_RESET_INTERNAL_MP);
	/* set micro reset = 0 */
	bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
				EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL,
				EXT_PHY_KR_ROM_MICRO_RESET);
	/* Reset internal microprocessor */
	bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
				EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL,
				EXT_PHY_KR_ROM_RESET_INTERNAL_MP);
	/* wait for 100ms for code download via SPI port */
	msleep(100);

	/* Clear ser_boot_ctl bit */
	bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
				EXT_PHY_KR_PMA_PMD_DEVAD,
				EXT_PHY_KR_MISC_CTRL1, 0x0000);
	/* Wait 100ms */
	msleep(100);

	/* Print the PHY FW version
	 * (0xca19/0xca1a are vendor-specific FW version registers)
	 */
	bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0, ext_phy_addr,
			       EXT_PHY_KR_PMA_PMD_DEVAD,
			       0xca19, &fw_ver1);
	bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0, ext_phy_addr,
			       EXT_PHY_KR_PMA_PMD_DEVAD,
			       0xca1a, &fw_ver2);
	DP(NETIF_MSG_LINK,
	   "8072 FW version 0x%x:0x%x\n", fw_ver1, fw_ver2);
}
3301
3302 static void bnx2x_bcm8072_force_10G(struct bnx2x *bp)
3303 {
3304         u32 ext_phy_addr = ((bp->ext_phy_config &
3305                              PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3306                             PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3307
3308         /* Force KR or KX */
3309         bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3310                                 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_CTRL,
3311                                 0x2040);
3312         bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3313                                 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_CTRL2,
3314                                 0x000b);
3315         bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3316                                 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_PMD_CTRL,
3317                                 0x0000);
3318         bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3319                                 EXT_PHY_KR_AUTO_NEG_DEVAD, EXT_PHY_KR_CTRL,
3320                                 0x0000);
3321 }
3322
3323 static void bnx2x_ext_phy_init(struct bnx2x *bp)
3324 {
3325         u32 ext_phy_type;
3326         u32 ext_phy_addr;
3327         u32 cnt;
3328         u32 ctrl;
3329         u32 val = 0;
3330
3331         if (bp->phy_flags & PHY_XGXS_FLAG) {
3332                 ext_phy_addr = ((bp->ext_phy_config &
3333                                  PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3334                                 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3335
3336                 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
3337                 /* Make sure that the soft reset is off (expect for the 8072:
3338                  * due to the lock, it will be done inside the specific
3339                  * handling)
3340                  */
3341                 if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
3342                     (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
3343                    (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN) &&
3344                     (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072)) {
3345                         /* Wait for soft reset to get cleared upto 1 sec */
3346                         for (cnt = 0; cnt < 1000; cnt++) {
3347                                 bnx2x_mdio45_read(bp, ext_phy_addr,
3348                                                   EXT_PHY_OPT_PMA_PMD_DEVAD,
3349                                                   EXT_PHY_OPT_CNTL, &ctrl);
3350                                 if (!(ctrl & (1<<15)))
3351                                         break;
3352                                 msleep(1);
3353                         }
3354                         DP(NETIF_MSG_LINK,
3355                            "control reg 0x%x (after %d ms)\n", ctrl, cnt);
3356                 }
3357
3358                 switch (ext_phy_type) {
3359                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
3360                         DP(NETIF_MSG_LINK, "XGXS Direct\n");
3361                         break;
3362
3363                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
3364                         DP(NETIF_MSG_LINK, "XGXS 8705\n");
3365
3366                         bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3367                                             EXT_PHY_OPT_PMA_PMD_DEVAD,
3368                                             EXT_PHY_OPT_PMD_MISC_CNTL,
3369                                             0x8288);
3370                         bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3371                                             EXT_PHY_OPT_PMA_PMD_DEVAD,
3372                                             EXT_PHY_OPT_PHY_IDENTIFIER,
3373                                             0x7fbf);
3374                         bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3375                                             EXT_PHY_OPT_PMA_PMD_DEVAD,
3376                                             EXT_PHY_OPT_CMU_PLL_BYPASS,
3377                                             0x0100);
3378                         bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3379                                             EXT_PHY_OPT_WIS_DEVAD,
3380                                             EXT_PHY_OPT_LASI_CNTL, 0x1);
3381                         break;
3382
3383                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
3384                         DP(NETIF_MSG_LINK, "XGXS 8706\n");
3385
3386                         if (!(bp->req_autoneg & AUTONEG_SPEED)) {
3387                                 /* Force speed */
3388                                 if (bp->req_line_speed == SPEED_10000) {
3389                                         DP(NETIF_MSG_LINK,
3390                                            "XGXS 8706 force 10Gbps\n");
3391                                         bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3392                                                 EXT_PHY_OPT_PMA_PMD_DEVAD,
3393                                                 EXT_PHY_OPT_PMD_DIGITAL_CNT,
3394                                                 0x400);
3395                                 } else {
3396                                         /* Force 1Gbps */
3397                                         DP(NETIF_MSG_LINK,
3398                                            "XGXS 8706 force 1Gbps\n");
3399
3400                                         bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3401                                                 EXT_PHY_OPT_PMA_PMD_DEVAD,
3402                                                 EXT_PHY_OPT_CNTL,
3403                                                 0x0040);
3404
3405                                         bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3406                                                 EXT_PHY_OPT_PMA_PMD_DEVAD,
3407                                                 EXT_PHY_OPT_CNTL2,
3408                                                 0x000D);
3409                                 }
3410
3411                                 /* Enable LASI */
3412                                 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3413                                                     EXT_PHY_OPT_PMA_PMD_DEVAD,
3414                                                     EXT_PHY_OPT_LASI_CNTL,
3415                                                     0x1);
3416                         } else {
3417                                 /* AUTONEG */
3418                                 /* Allow CL37 through CL73 */
3419                                 DP(NETIF_MSG_LINK, "XGXS 8706 AutoNeg\n");
3420                                 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3421                                                     EXT_PHY_AUTO_NEG_DEVAD,
3422                                                     EXT_PHY_OPT_AN_CL37_CL73,
3423                                                     0x040c);
3424
3425                                 /* Enable Full-Duplex advertisment on CL37 */
3426                                 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3427                                                     EXT_PHY_AUTO_NEG_DEVAD,
3428                                                     EXT_PHY_OPT_AN_CL37_FD,
3429                                                     0x0020);
3430                                 /* Enable CL37 AN */
3431                                 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3432                                                     EXT_PHY_AUTO_NEG_DEVAD,
3433                                                     EXT_PHY_OPT_AN_CL37_AN,
3434                                                     0x1000);
3435                                 /* Advertise 10G/1G support */
3436                                 if (bp->advertising &
3437                                     ADVERTISED_1000baseT_Full)
3438                                         val = (1<<5);
3439                                 if (bp->advertising &
3440                                     ADVERTISED_10000baseT_Full)
3441                                         val |= (1<<7);
3442
3443                                 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3444                                                     EXT_PHY_AUTO_NEG_DEVAD,
3445                                                     EXT_PHY_OPT_AN_ADV, val);
3446                                 /* Enable LASI */
3447                                 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3448                                                     EXT_PHY_OPT_PMA_PMD_DEVAD,
3449                                                     EXT_PHY_OPT_LASI_CNTL,
3450                                                     0x1);
3451
3452                                 /* Enable clause 73 AN */
3453                                 bnx2x_mdio45_write(bp, ext_phy_addr,
3454                                                    EXT_PHY_AUTO_NEG_DEVAD,
3455                                                    EXT_PHY_OPT_CNTL,
3456                                                    0x1200);
3457                         }
3458                         break;
3459
3460                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
3461                         bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
3462                         /* Wait for soft reset to get cleared upto 1 sec */
3463                         for (cnt = 0; cnt < 1000; cnt++) {
3464                                 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
3465                                                 ext_phy_addr,
3466                                                 EXT_PHY_OPT_PMA_PMD_DEVAD,
3467                                                 EXT_PHY_OPT_CNTL, &ctrl);
3468                                 if (!(ctrl & (1<<15)))
3469                                         break;
3470                                 msleep(1);
3471                         }
3472                         DP(NETIF_MSG_LINK,
3473                            "8072 control reg 0x%x (after %d ms)\n",
3474                            ctrl, cnt);
3475
3476                         bnx2x_bcm8072_external_rom_boot(bp);
3477                         DP(NETIF_MSG_LINK, "Finshed loading 8072 KR ROM\n");
3478
3479                         /* enable LASI */
3480                         bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3481                                                 ext_phy_addr,
3482                                                 EXT_PHY_KR_PMA_PMD_DEVAD,
3483                                                 0x9000, 0x0400);
3484                         bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3485                                                 ext_phy_addr,
3486                                                 EXT_PHY_KR_PMA_PMD_DEVAD,
3487                                                 EXT_PHY_KR_LASI_CNTL, 0x0004);
3488
3489                         /* If this is forced speed, set to KR or KX
3490                          * (all other are not supported)
3491                          */
3492                         if (!(bp->req_autoneg & AUTONEG_SPEED)) {
3493                                 if (bp->req_line_speed == SPEED_10000) {
3494                                         bnx2x_bcm8072_force_10G(bp);
3495                                         DP(NETIF_MSG_LINK,
3496                                            "Forced speed 10G on 8072\n");
3497                                         /* unlock */
3498                                         bnx2x_hw_unlock(bp,
3499                                                 HW_LOCK_RESOURCE_8072_MDIO);
3500                                         break;
3501                                 } else
3502                                         val = (1<<5);
3503                         } else {
3504
3505                                 /* Advertise 10G/1G support */
3506                                 if (bp->advertising &
3507                                                 ADVERTISED_1000baseT_Full)
3508                                         val = (1<<5);
3509                                 if (bp->advertising &
3510                                                 ADVERTISED_10000baseT_Full)
3511                                         val |= (1<<7);
3512                         }
3513                         bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3514                                         ext_phy_addr,
3515                                         EXT_PHY_KR_AUTO_NEG_DEVAD,
3516                                         0x11, val);
3517                         /* Add support for CL37 ( passive mode ) I */
3518                         bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3519                                                 ext_phy_addr,
3520                                                 EXT_PHY_KR_AUTO_NEG_DEVAD,
3521                                                 0x8370, 0x040c);
3522                         /* Add support for CL37 ( passive mode ) II */
3523                         bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3524                                                 ext_phy_addr,
3525                                                 EXT_PHY_KR_AUTO_NEG_DEVAD,
3526                                                 0xffe4, 0x20);
3527                         /* Add support for CL37 ( passive mode ) III */
3528                         bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3529                                                 ext_phy_addr,
3530                                                 EXT_PHY_KR_AUTO_NEG_DEVAD,
3531                                                 0xffe0, 0x1000);
3532                         /* Restart autoneg */
3533                         msleep(500);
3534                         bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3535                                         ext_phy_addr,
3536                                         EXT_PHY_KR_AUTO_NEG_DEVAD,
3537                                         EXT_PHY_KR_CTRL, 0x1200);
3538                         DP(NETIF_MSG_LINK, "8072 Autoneg Restart: "
3539                            "1G %ssupported  10G %ssupported\n",
3540                            (val & (1<<5)) ? "" : "not ",
3541                            (val & (1<<7)) ? "" : "not ");
3542
3543                         /* unlock */
3544                         bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
3545                         break;
3546
3547                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
3548                         DP(NETIF_MSG_LINK,
3549                            "Setting the SFX7101 LASI indication\n");
3550                         bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3551                                             EXT_PHY_OPT_PMA_PMD_DEVAD,
3552                                             EXT_PHY_OPT_LASI_CNTL, 0x1);
3553                         DP(NETIF_MSG_LINK,
3554                            "Setting the SFX7101 LED to blink on traffic\n");
3555                         bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3556                                             EXT_PHY_OPT_PMA_PMD_DEVAD,
3557                                             0xC007, (1<<3));
3558
3559                         /* read modify write pause advertizing */
3560                         bnx2x_mdio45_read(bp, ext_phy_addr,
3561                                           EXT_PHY_KR_AUTO_NEG_DEVAD,
3562                                           EXT_PHY_KR_AUTO_NEG_ADVERT, &val);
3563                         val &= ~EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_BOTH;
3564                         /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
3565                         if (bp->advertising & ADVERTISED_Pause)
3566                                 val |= EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE;
3567
3568                         if (bp->advertising & ADVERTISED_Asym_Pause) {
3569                                 val |=
3570                                  EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_ASYMMETRIC;
3571                         }
3572                         DP(NETIF_MSG_LINK, "SFX7101 AN advertize 0x%x\n", val);
3573                         bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3574                                             EXT_PHY_KR_AUTO_NEG_DEVAD,
3575                                             EXT_PHY_KR_AUTO_NEG_ADVERT, val);
3576                         /* Restart autoneg */
3577                         bnx2x_mdio45_read(bp, ext_phy_addr,
3578                                           EXT_PHY_KR_AUTO_NEG_DEVAD,
3579                                           EXT_PHY_KR_CTRL, &val);
3580                         val |= 0x200;
3581                         bnx2x_mdio45_write(bp, ext_phy_addr,
3582                                             EXT_PHY_KR_AUTO_NEG_DEVAD,
3583                                             EXT_PHY_KR_CTRL, val);
3584                         break;
3585
3586                 default:
3587                         BNX2X_ERR("BAD XGXS ext_phy_config 0x%x\n",
3588                                   bp->ext_phy_config);
3589                         break;
3590                 }
3591
3592         } else { /* SerDes */
3593 /*              ext_phy_addr = ((bp->ext_phy_config &
3594                                  PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK) >>
3595                                 PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT);
3596 */
3597                 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
3598                 switch (ext_phy_type) {
3599                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
3600                         DP(NETIF_MSG_LINK, "SerDes Direct\n");
3601                         break;
3602
3603                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
3604                         DP(NETIF_MSG_LINK, "SerDes 5482\n");
3605                         break;
3606
3607                 default:
3608                         DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
3609                            bp->ext_phy_config);
3610                         break;
3611                 }
3612         }
3613 }
3614
/* Reset the external PHY.
 *
 * On boards where the PHY reset is wired to GPIO 1, pulses that GPIO
 * low for 1ms.  Then issues the type-specific soft reset over MDIO
 * for XGXS PHYs (8705/8706 via the PMA/PMD control register, 8072
 * under its MDIO hardware lock); SerDes PHYs need no MDIO reset here.
 * NOTE(review): ext_phy_addr is extracted with the XGXS mask even in
 * the SerDes case, but it is only used on the XGXS paths.
 */
static void bnx2x_ext_phy_reset(struct bnx2x *bp)
{
	u32 ext_phy_type;
	u32 ext_phy_addr = ((bp->ext_phy_config &
			     PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
			    PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
	u32 board = (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK);

	/* The PHY reset is controled by GPIO 1
	 * Give it 1ms of reset pulse
	 */
	if ((board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1002G) &&
	    (board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1003G)) {
		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
			       MISC_REGISTERS_GPIO_OUTPUT_LOW);
		msleep(1);
		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
			       MISC_REGISTERS_GPIO_OUTPUT_HIGH);
	}

	if (bp->phy_flags & PHY_XGXS_FLAG) {
		ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			DP(NETIF_MSG_LINK, "XGXS Direct\n");
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			DP(NETIF_MSG_LINK, "XGXS 8705/8706\n");
			/* soft reset + low power mode via PMA/PMD control */
			bnx2x_mdio45_write(bp, ext_phy_addr,
					   EXT_PHY_OPT_PMA_PMD_DEVAD,
					   EXT_PHY_OPT_CNTL, 0xa040);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			DP(NETIF_MSG_LINK, "XGXS 8072\n");
			/* 8072 MDIO access must be done under the HW lock;
			 * bit 15 of register 0 is the soft reset bit
			 */
			bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
			bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
						ext_phy_addr,
						EXT_PHY_KR_PMA_PMD_DEVAD,
						0, 1<<15);
			bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			DP(NETIF_MSG_LINK, "XGXS SFX7101\n");
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->ext_phy_config);
			break;
		}

	} else { /* SerDes */
		ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			DP(NETIF_MSG_LINK, "SerDes Direct\n");
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			DP(NETIF_MSG_LINK, "SerDes 5482\n");
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
			   bp->ext_phy_config);
			break;
		}
	}
}
3688
/* Bring up the link on the SerDes/XGXS unicore.
 *
 * Order matters: attention sources are masked first so no link event is
 * delivered while the unicore is reset and reprogrammed; the external PHY
 * is initialized and the interrupt re-enabled only at the very end.
 */
static void bnx2x_link_initialize(struct bnx2x *bp)
{
	int port = bp->port;

	/* disable attentions */
	bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
		       (NIG_MASK_XGXS0_LINK_STATUS |
			NIG_MASK_XGXS0_LINK10G |
			NIG_MASK_SERDES0_LINK_STATUS |
			NIG_MASK_MI_INT));

	/* Activate the external PHY */
	bnx2x_ext_phy_reset(bp);

	bnx2x_set_aer_mmd(bp);

	if (bp->phy_flags & PHY_XGXS_FLAG)
		bnx2x_set_master_ln(bp);

	/* reset the SerDes and wait for reset bit return low */
	bnx2x_reset_unicore(bp);

	/* the reset clears the AER mmd selection - set it again */
	bnx2x_set_aer_mmd(bp);

	/* setting the masterLn_def again after the reset */
	if (bp->phy_flags & PHY_XGXS_FLAG) {
		bnx2x_set_master_ln(bp);
		bnx2x_set_swap_lanes(bp);
	}

	/* Set Parallel Detect */
	if (bp->req_autoneg & AUTONEG_SPEED)
		bnx2x_set_parallel_detection(bp);

	if (bp->phy_flags & PHY_XGXS_FLAG) {
		/* a forced speed below 1G runs the XGXS core in SGMII mode */
		if (bp->req_line_speed &&
		    bp->req_line_speed < SPEED_1000) {
			bp->phy_flags |= PHY_SGMII_FLAG;
		} else {
			bp->phy_flags &= ~PHY_SGMII_FLAG;
		}
	}

	if (!(bp->phy_flags & PHY_SGMII_FLAG)) {
		u16 bank, rx_eq;

		/* rx equalizer boost value comes from the board config */
		rx_eq = ((bp->serdes_config &
			  PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK) >>
			 PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT);

		DP(NETIF_MSG_LINK, "setting rx eq to %d\n", rx_eq);
		/* program the same boost into every rx lane bank */
		for (bank = MDIO_REG_BANK_RX0; bank <= MDIO_REG_BANK_RX_ALL;
			    bank += (MDIO_REG_BANK_RX1 - MDIO_REG_BANK_RX0)) {
			MDIO_SET_REG_BANK(bp, bank);
			bnx2x_mdio22_write(bp, MDIO_RX0_RX_EQ_BOOST,
					   ((rx_eq &
				MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK) |
				MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL));
		}

		/* forced speed requested? */
		if (!(bp->req_autoneg & AUTONEG_SPEED)) {
			DP(NETIF_MSG_LINK, "not SGMII, no AN\n");

			/* disable autoneg */
			bnx2x_set_autoneg(bp);

			/* program speed and duplex */
			bnx2x_program_serdes(bp);

		} else { /* AN_mode */
			DP(NETIF_MSG_LINK, "not SGMII, AN\n");

			/* AN enabled */
			bnx2x_set_brcm_cl37_advertisment(bp);

			/* program duplex & pause advertisement (for aneg) */
			bnx2x_set_ieee_aneg_advertisment(bp);

			/* enable autoneg */
			bnx2x_set_autoneg(bp);

			/* enable and restart AN */
			bnx2x_restart_autoneg(bp);
		}

	} else { /* SGMII mode */
		DP(NETIF_MSG_LINK, "SGMII\n");

		bnx2x_initialize_sgmii_process(bp);
	}

	/* init ext phy and enable link state int */
	bnx2x_ext_phy_init(bp);

	/* enable the interrupt */
	bnx2x_link_int_enable(bp);
}
3787
3788 static void bnx2x_phy_deassert(struct bnx2x *bp)
3789 {
3790         int port = bp->port;
3791         u32 val;
3792
3793         if (bp->phy_flags & PHY_XGXS_FLAG) {
3794                 DP(NETIF_MSG_LINK, "XGXS\n");
3795                 val = XGXS_RESET_BITS;
3796
3797         } else { /* SerDes */
3798                 DP(NETIF_MSG_LINK, "SerDes\n");
3799                 val = SERDES_RESET_BITS;
3800         }
3801
3802         val = val << (port*16);
3803
3804         /* reset and unreset the SerDes/XGXS */
3805         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
3806         msleep(5);
3807         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
3808 }
3809
3810 static int bnx2x_phy_init(struct bnx2x *bp)
3811 {
3812         DP(NETIF_MSG_LINK, "started\n");
3813         if (CHIP_REV(bp) == CHIP_REV_FPGA) {
3814                 bp->phy_flags |= PHY_EMAC_FLAG;
3815                 bp->link_up = 1;
3816                 bp->line_speed = SPEED_10000;
3817                 bp->duplex = DUPLEX_FULL;
3818                 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + bp->port*4, 0);
3819                 bnx2x_emac_enable(bp);
3820                 bnx2x_link_report(bp);
3821                 return 0;
3822
3823         } else if (CHIP_REV(bp) == CHIP_REV_EMUL) {
3824                 bp->phy_flags |= PHY_BMAC_FLAG;
3825                 bp->link_up = 1;
3826                 bp->line_speed = SPEED_10000;
3827                 bp->duplex = DUPLEX_FULL;
3828                 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + bp->port*4, 0);
3829                 bnx2x_bmac_enable(bp, 0);
3830                 bnx2x_link_report(bp);
3831                 return 0;
3832
3833         } else {
3834                 bnx2x_phy_deassert(bp);
3835                 bnx2x_link_initialize(bp);
3836         }
3837
3838         return 0;
3839 }
3840
/* Take the link fully down: report link-down to management firmware,
 * mask link attentions, drain and disable the NIG/MAC data paths, reset
 * the external PHY (where the board wiring allows it), and finally put
 * the SerDes/XGXS unicore and BigMac back into reset.
 */
static void bnx2x_link_reset(struct bnx2x *bp)
{
	int port = bp->port;
	u32 board = (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK);

	/* update shared memory */
	bp->link_status = 0;
	bnx2x_update_mng(bp);

	/* disable attentions */
	bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
		       (NIG_MASK_XGXS0_LINK_STATUS |
			NIG_MASK_XGXS0_LINK10G |
			NIG_MASK_SERDES0_LINK_STATUS |
			NIG_MASK_MI_INT));

	/* activate nig drain */
	NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);

	/* disable nig egress interface */
	NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0);
	NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);

	/* Stop BigMac rx */
	bnx2x_bmac_rx_disable(bp);

	/* disable emac */
	NIG_WR(NIG_REG_NIG_EMAC0_EN + port*4, 0);

	/* let in-flight frames drain out */
	msleep(10);

	/* The PHY reset is controlled by GPIO 1
	 * Hold it as output low
	 * (the T1002G/T1003G boards do not wire GPIO 1 to the PHY reset)
	 */
	if ((board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1002G) &&
	    (board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1003G)) {
		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
			       MISC_REGISTERS_GPIO_OUTPUT_LOW);
		DP(NETIF_MSG_LINK, "reset external PHY\n");
	}

	/* reset the SerDes/XGXS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
	       (0x1ff << (port*16)));

	/* reset BigMac */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
	       (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));

	/* disable nig ingress interface */
	NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0);
	NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0);

	/* set link down */
	bp->link_up = 0;
}
3897
#ifdef BNX2X_XGXS_LB
/* Put the XGXS core of this port into loopback mode.
 *
 * For 10G, loopback is programmed through the CL73 IEEEB0 bank: the NIG
 * MDIO devad selection and the AER mmd are temporarily re-pointed, the
 * loopback control value is written, and both are restored afterwards.
 * For 1G, the plain MII-control loopback bit of the combo IEEE0 bank is
 * set.
 *
 * Fix: REG_RD was being called with an out-parameter (3 arguments) even
 * though REG_RD everywhere else in this file is the value-returning
 * 2-argument form; this stale call would break the build whenever
 * BNX2X_XGXS_LB is defined.
 */
static void bnx2x_set_xgxs_loopback(struct bnx2x *bp, int is_10g)
{
	int port = bp->port;

	if (is_10g) {
		u32 md_devad;

		DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");

		/* save and change the uni_phy_addr in the nig */
		md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD +
				       port*0x18));
		NIG_WR(NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5);

		/* change the aer mmd */
		MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_AER_BLOCK);
		bnx2x_mdio22_write(bp, MDIO_AER_BLOCK_AER_REG, 0x2800);

		/* config combo IEEE0 control reg for loopback */
		MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
		bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
				   0x6041);

		/* set aer mmd back */
		bnx2x_set_aer_mmd(bp);

		/* and md_devad */
		NIG_WR(NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, md_devad);

	} else {
		u32 mii_control;

		DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");

		MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
		bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
				  &mii_control);
		bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
				   (mii_control |
				    MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK));
	}
}
#endif
3942
3943 /* end of PHY/MAC */
3944
3945 /* slow path */
3946
3947 /*
3948  * General service functions
3949  */
3950
/* the slow path queue is odd since completions arrive on the fastpath ring.
 *
 * Post one slowpath element (ramrod) on the SPQ: builds the SPE header,
 * fills the data words, advances the producer (wrapping at the ring end)
 * and kicks the XSTORM producer doorbell.  Returns 0 on success, -EBUSY
 * (after a driver panic) when the ring is full, -EIO when the driver is
 * already in the panic state.
 */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	int port = bp->port;

	DP(NETIF_MSG_TIMER,
	   "spe (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	/* serialize producer updates against other posters */
	spin_lock(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	/* CID needs port number to be encoded in it */
	bp->spq_prod_bd->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		/* common ramrods are broadcast to all function clients */
		bp->spq_prod_bd->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	/* advance the producer, wrapping at the last BD */
	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");

	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}

	/* ring the doorbell: tell XSTORM about the new producer */
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(port),
	       bp->spq_prod_idx);

	spin_unlock(&bp->spq_lock);
	return 0;
}
4007
4008 /* acquire split MCP access lock register */
4009 static int bnx2x_lock_alr(struct bnx2x *bp)
4010 {
4011         int rc = 0;
4012         u32 i, j, val;
4013
4014         might_sleep();
4015         i = 100;
4016         for (j = 0; j < i*10; j++) {
4017                 val = (1UL << 31);
4018                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
4019                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
4020                 if (val & (1L << 31))
4021                         break;
4022
4023                 msleep(5);
4024         }
4025
4026         if (!(val & (1L << 31))) {
4027                 BNX2X_ERR("Cannot acquire nvram interface\n");
4028
4029                 rc = -EBUSY;
4030         }
4031
4032         return rc;
4033 }
4034
4035 /* Release split MCP access lock register */
4036 static void bnx2x_unlock_alr(struct bnx2x *bp)
4037 {
4038         u32 val = 0;
4039
4040         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
4041 }
4042
4043 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
4044 {
4045         struct host_def_status_block *def_sb = bp->def_status_blk;
4046         u16 rc = 0;
4047
4048         barrier(); /* status block is written to by the chip */
4049
4050         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
4051                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
4052                 rc |= 1;
4053         }
4054         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
4055                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
4056                 rc |= 2;
4057         }
4058         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
4059                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
4060                 rc |= 4;
4061         }
4062         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
4063                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
4064                 rc |= 8;
4065         }
4066         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
4067                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
4068                 rc |= 16;
4069         }
4070         return rc;
4071 }
4072
4073 /*
4074  * slow path service functions
4075  */
4076
/* Handle newly-asserted attention bits: mask them in the AEU, record them
 * in attn_state, service the hard-wired sources (NIG link event, software
 * timer, GPIOs, general attentions) and finally set the bits in the IGU.
 * The NIG interrupt mask is saved and cleared around bnx2x_link_update()
 * and restored at the end so no nested link interrupt fires meanwhile.
 */
static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = bp->port;
	u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_PORT_BASE * port) * 8;
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				   NIG_REG_MASK_INTERRUPT_PORT0;

	/* an asserted bit must have been unmasked and not already recorded */
	if (~bp->aeu_mask & (asserted & 0xff))
		BNX2X_ERR("IGU ERROR\n");
	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
	   bp->aeu_mask, asserted);
	/* mask the newly asserted bits in the AEU */
	bp->aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "after masking: aeu_mask %x\n", bp->aeu_mask);

	REG_WR(bp, aeu_addr, bp->aeu_mask);

	bp->attn_state |= asserted;

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {
			u32 nig_status_port;
			u32 nig_int_addr = port ?
					NIG_REG_STATUS_INTERRUPT_PORT1 :
					NIG_REG_STATUS_INTERRUPT_PORT0;

			/* save the NIG mask and silence it while the
			   link update runs */
			bp->nig_mask = REG_RD(bp, nig_mask_addr);
			REG_WR(bp, nig_mask_addr, 0);

			nig_status_port = REG_RD(bp, nig_int_addr);
			bnx2x_link_update(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		/* general attentions 1-3 belong to port 0, 4-6 to port 1;
		   clear the corresponding AEU latch */
		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n",
	   asserted, BAR_IGU_INTMEM + igu_addr);
	REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC)
		REG_WR(bp, nig_mask_addr, bp->nig_mask);
}
4165
4166 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
4167 {
4168         int port = bp->port;
4169         int index;
4170         struct attn_route attn;
4171         struct attn_route group_mask;
4172         u32 reg_addr;
4173         u32 val;
4174
4175         /* need to take HW lock because MCP or other port might also
4176            try to handle this event */
4177         bnx2x_lock_alr(bp);
4178
4179         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
4180         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
4181         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
4182         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
4183         DP(NETIF_MSG_HW, "attn %llx\n", (unsigned long long)attn.sig[0]);
4184
4185         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4186                 if (deasserted & (1 << index)) {
4187                         group_mask = bp->attn_group[index];
4188
4189                         DP(NETIF_MSG_HW, "group[%d]: %llx\n", index,
4190                            (unsigned long long)group_mask.sig[0]);
4191
4192                         if (attn.sig[3] & group_mask.sig[3] &
4193                             EVEREST_GEN_ATTN_IN_USE_MASK) {
4194
4195                                 if (attn.sig[3] & BNX2X_MC_ASSERT_BITS) {
4196
4197                                         BNX2X_ERR("MC assert!\n");
4198                                         bnx2x_panic();
4199
4200                                 } else if (attn.sig[3] & BNX2X_MCP_ASSERT) {
4201
4202                                         BNX2X_ERR("MCP assert!\n");
4203                                         REG_WR(bp,
4204                                              MISC_REG_AEU_GENERAL_ATTN_11, 0);
4205                                         bnx2x_mc_assert(bp);
4206
4207                                 } else {
4208                                         BNX2X_ERR("UNKOWEN HW ASSERT!\n");
4209                                 }
4210                         }
4211
4212                         if (attn.sig[1] & group_mask.sig[1] &
4213                             BNX2X_DOORQ_ASSERT) {
4214
4215                                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
4216                                 BNX2X_ERR("DB hw attention 0x%x\n", val);
4217                                 /* DORQ discard attention */
4218                                 if (val & 0x2)
4219                                         BNX2X_ERR("FATAL error from DORQ\n");
4220                         }
4221
4222                         if (attn.sig[2] & group_mask.sig[2] &
4223                             AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
4224
4225                                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
4226                                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
4227                                 /* CFC error attention */
4228                                 if (val & 0x2)
4229                                         BNX2X_ERR("FATAL error from CFC\n");
4230                         }
4231
4232                         if (attn.sig[2] & group_mask.sig[2] &
4233                             AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
4234
4235                                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
4236                                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
4237                                 /* RQ_USDMDP_FIFO_OVERFLOW */
4238                                 if (val & 0x18000)
4239                                         BNX2X_ERR("FATAL error from PXP\n");
4240                         }
4241
4242                         if (attn.sig[3] & group_mask.sig[3] &
4243                             EVEREST_LATCHED_ATTN_IN_USE_MASK) {
4244
4245                                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
4246                                        0x7ff);
4247                                 DP(NETIF_MSG_HW, "got latched bits 0x%x\n",
4248                                    attn.sig[3]);
4249                         }
4250
4251                         if ((attn.sig[0] & group_mask.sig[0] &
4252                                                 HW_INTERRUT_ASSERT_SET_0) ||
4253                             (attn.sig[1] & group_mask.sig[1] &
4254                                                 HW_INTERRUT_ASSERT_SET_1) ||
4255                             (attn.sig[2] & group_mask.sig[2] &
4256                                                 HW_INTERRUT_ASSERT_SET_2))
4257                                 BNX2X_ERR("FATAL HW block attention\n");
4258
4259                         if ((attn.sig[0] & group_mask.sig[0] &
4260                                                 HW_PRTY_ASSERT_SET_0) ||
4261                             (attn.sig[1] & group_mask.sig[1] &
4262                                                 HW_PRTY_ASSERT_SET_1) ||
4263                             (attn.sig[2] & group_mask.sig[2] &
4264                                                 HW_PRTY_ASSERT_SET_2))
4265                                 BNX2X_ERR("FATAL HW block parity attention\n");
4266                 }
4267         }
4268
4269         bnx2x_unlock_alr(bp);
4270
4271         reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_PORT_BASE * port) * 8;
4272
4273         val = ~deasserted;
4274 /*      DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
4275            val, BAR_IGU_INTMEM + reg_addr); */
4276         REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val);
4277
4278         if (bp->aeu_mask & (deasserted & 0xff))
4279                 BNX2X_ERR("IGU BUG\n");
4280         if (~bp->attn_state & deasserted)
4281                 BNX2X_ERR("IGU BUG\n");
4282
4283         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4284                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
4285
4286         DP(NETIF_MSG_HW, "aeu_mask %x\n", bp->aeu_mask);
4287         bp->aeu_mask |= (deasserted & 0xff);
4288
4289         DP(NETIF_MSG_HW, "new mask %x\n", bp->aeu_mask);
4290         REG_WR(bp, reg_addr, bp->aeu_mask);
4291
4292         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
4293         bp->attn_state &= ~deasserted;
4294         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
4295 }
4296
4297 static void bnx2x_attn_int(struct bnx2x *bp)
4298 {
4299         /* read local copy of bits */
4300         u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
4301         u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
4302         u32 attn_state = bp->attn_state;
4303
4304         /* look for changed bits */
4305         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
4306         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
4307
4308         DP(NETIF_MSG_HW,
4309            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
4310            attn_bits, attn_ack, asserted, deasserted);
4311
4312         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
4313                 BNX2X_ERR("bad attention state\n");
4314
4315         /* handle bits that were raised */
4316         if (asserted)
4317                 bnx2x_attn_int_asserted(bp, asserted);
4318
4319         if (deasserted)
4320                 bnx2x_attn_int_deasserted(bp, deasserted);
4321 }
4322
/* Slowpath work handler (scheduled from the slowpath ISR): figures out
 * which default status block sections changed, services HW attentions
 * and CStorm events, then acks all sections - re-enabling the IGU
 * interrupt only on the final ack.
 */
static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	/* bitmask of changed sections - see bnx2x_update_dsb_idx() */
	status = bnx2x_update_dsb_idx(bp);
	if (status == 0)
		BNX2X_ERR("spurious slowpath interrupt!\n");

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	if (status & 0x1) {
		/* HW attentions */
		bnx2x_attn_int(bp);
	}

	/* CStorm events: query_stats, cfc delete ramrods */
	if (status & 0x2)
		bp->stat_pending = 0;

	/* NOTE(review): def_att_idx is not passed through le16_to_cpu like
	 * the four storm indices below - confirm attn_bits_index is kept
	 * in CPU byte order */
	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	/* last ack re-enables the interrupt */
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
}
4360
4361 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
4362 {
4363         struct net_device *dev = dev_instance;
4364         struct bnx2x *bp = netdev_priv(dev);
4365
4366         /* Return here if interrupt is disabled */
4367         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
4368                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
4369                 return IRQ_HANDLED;
4370         }
4371
4372         bnx2x_ack_sb(bp, 16, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
4373
4374 #ifdef BNX2X_STOP_ON_ERROR
4375         if (unlikely(bp->panic))
4376                 return IRQ_HANDLED;
4377 #endif
4378
4379         schedule_work(&bp->sp_task);
4380
4381         return IRQ_HANDLED;
4382 }
4383
4384 /* end of slow path */
4385
4386 /* Statistics */
4387
4388 /****************************************************************************
4389 * Macros
4390 ****************************************************************************/
4391
/* update a 32-bit statistic: add the delta since the previous HW snapshot
 * to the accumulated ethtool counter and remember the new snapshot */
#define UPDATE_STAT(s, t) \
	do { \
		estats->t += new->s - old->s; \
		old->s = new->s; \
	} while (0)

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		/* carry needs its own parentheses: ?: binds looser than +, \
		   so without them a_hi would be consumed by the test */ \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

/* difference = minuend - subtrahend (clamped to 0 on underflow) */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) {	/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) {	/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else {	/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else {		/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
			    d_hi = 0; \
			    d_lo = 0; \
			} else {	/* m_hi >= s_hi */ \
			    d_hi = m_hi - s_hi; \
			    d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)

/* minuend -= subtrahend */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
	do { \
		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
	} while (0)

/* update a 64-bit statistic kept as a hi/lo pair of 32-bit words */
#define UPDATE_STAT64(s_hi, t_hi, s_lo, t_lo) \
	do { \
		DIFF_64(diff.hi, new->s_hi, old->s_hi, \
			diff.lo, new->s_lo, old->s_lo); \
		old->s_hi = new->s_hi; \
		old->s_lo = new->s_lo; \
		ADD_64(estats->t_hi, diff.hi, \
		       estats->t_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

/* fold a 32-bit HW counter into a 64-bit accumulated statistic */
#define UPDATE_EXTEND_STAT(s, t_hi, t_lo) \
	do { \
		ADD_EXTEND_64(estats->t_hi, estats->t_lo, new->s); \
	} while (0)

/* same, for the little-endian tstorm per-client statistics */
#define UPDATE_EXTEND_TSTAT(s, t_hi, t_lo) \
	do { \
		diff = le32_to_cpu(tclient->s) - old_tclient->s; \
		old_tclient->s = le32_to_cpu(tclient->s); \
		ADD_EXTEND_64(estats->t_hi, estats->t_lo, diff); \
	} while (0)
4462
4463 /*
4464  * General service functions
4465  */
4466
4467 static inline long bnx2x_hilo(u32 *hiref)
4468 {
4469         u32 lo = *(hiref + 1);
4470 #if (BITS_PER_LONG == 64)
4471         u32 hi = *hiref;
4472
4473         return HILO_U64(hi, lo);
4474 #else
4475         return lo;
4476 #endif
4477 }
4478
4479 /*
4480  * Init service functions
4481  */
4482
/* Assemble (but do not execute) the per-port chain of DMAE commands, in
 * the slowpath dmae[] array, that gathers hardware statistics:
 *  - optionally copy the driver's eth_stats block up to the management
 *    firmware mailbox (only when bp->fw_mb is set),
 *  - copy the BMAC or EMAC counter windows into mac_stats (only when the
 *    link is up),
 *  - copy the NIG statistics block; its completion write of 0xffffffff
 *    into nig_stats.done marks the whole chain as finished.
 * bnx2x_update_stats() later posts the loader command that runs the
 * chain built here.
 */
static void bnx2x_init_mac_stats(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = bp->port;
	int loader_idx = port * 8;	/* 8 DMAE loader slots per port */
	u32 opcode;
	u32 mac_addr;

	bp->executer_idx = 0;
	if (bp->fw_mb) {
		/* MCP: push eth_stats (skipping its first dword) to the
		   management firmware mailbox */
		opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));

		/* with link up this command chains to the MAC/NIG commands
		   via a GRC completion write; with link down it is the only
		   command and completes silently (comp fields zeroed) */
		if (bp->link_up)
			opcode |= (DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE);

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, eth_stats) +
					   sizeof(u32));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, eth_stats) +
					   sizeof(u32));
		dmae->dst_addr_lo = bp->fw_mb >> 2;	/* GRC address in dwords */
		dmae->dst_addr_hi = 0;
		/* transfer length in dwords, up to (not including)
		   mac_stx_end */
		dmae->len = (offsetof(struct bnx2x_eth_stats, mac_stx_end) -
			     sizeof(u32)) >> 2;
		if (bp->link_up) {
			/* completion kicks the next loader slot */
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo = 0;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 0;
		}
	}

	if (!bp->link_up) {
		/* no need to collect statistics in link down */
		return;
	}

	/* common opcode for the MAC counter copies below: GRC -> PCI with
	   chained completion into the next loader slot */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));

	if (bp->phy_flags & PHY_BMAC_FLAG) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* TX counters: BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		/* +8 covers the final 64-bit counter; length in dwords */
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* RX counters: BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
					offsetof(struct bmac_stats, rx_gr64));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
					offsetof(struct bmac_stats, rx_gr64));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->phy_flags & PHY_EMAC_FLAG) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* RX counter window 1:
		   EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* single extra RX counter: EMAC_REG_EMAC_RX_STAT_AC_28
		   (lands in rx_falsecarriererrors) */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
					   offsetof(struct emac_stats,
						    rx_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
					   offsetof(struct emac_stats,
						    rx_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* TX counter window:
		   EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
					   offsetof(struct emac_stats,
						    tx_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
					   offsetof(struct emac_stats,
						    tx_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG: last command in the chain; completion writes 0xffffffff into
	   nig_stats.done, which bnx2x_update_storm_stats() checks to decide
	   whether the DMAE results are valid */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig));
	/* skip the trailing two dwords of struct nig_stats */
	dmae->len = (sizeof(struct nig_stats) - 2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig) +
				    offsetof(struct nig_stats, done));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig) +
				    offsetof(struct nig_stats, done));
	dmae->comp_val = 0xffffffff;
}
4654
/* Reset statistics bookkeeping for this port: disable the stats state
 * machine, snapshot the NIG BRB discard counter as the delta baseline,
 * clear all cached counters, then program the X/T storm firmware with
 * the DMA address of the fw_stats buffer and raise their stats flags
 * (the CSTORM flag is explicitly left cleared).
 */
static void bnx2x_init_stats(struct bnx2x *bp)
{
	int port = bp->port;

	bp->stats_state = STATS_STATE_DISABLE;
	bp->executer_idx = 0;

	/* baseline for the brb_discard delta computed in
	   bnx2x_update_storm_stats() */
	bp->old_brb_discard = REG_RD(bp,
				     NIG_REG_STAT0_BRB_DISCARD + port*0x38);

	memset(&bp->old_bmac, 0, sizeof(struct bmac_stats));
	memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));

	/* XSTORM stats flag: 64-bit value, low dword = 1, high dword = 0 */
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port), 1);
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);

	/* TSTORM stats flag */
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port), 1);
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);

	/* CSTORM stats flag stays 0 (disabled) */
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port), 0);
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);

	/* where the storms should DMA their statistics (fw_stats buffer) */
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
}
4695
4696 static void bnx2x_stop_stats(struct bnx2x *bp)
4697 {
4698         might_sleep();
4699         if (bp->stats_state != STATS_STATE_DISABLE) {
4700                 int timeout = 10;
4701
4702                 bp->stats_state = STATS_STATE_STOP;
4703                 DP(BNX2X_MSG_STATS, "stats_state - STOP\n");
4704
4705                 while (bp->stats_state != STATS_STATE_DISABLE) {
4706                         if (!timeout) {
4707                                 BNX2X_ERR("timeout waiting for stats stop\n");
4708                                 break;
4709                         }
4710                         timeout--;
4711                         msleep(100);
4712                 }
4713         }
4714         DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n");
4715 }
4716
4717 /*
4718  * Statistics service functions
4719  */
4720
/* Fold the freshly DMAE'd BMAC hardware counters into the driver's
 * eth_stats block.  64-bit counters are accumulated as deltas against
 * the cached bp->old_bmac copy (UPDATE_STAT64, which uses the locals
 * 'diff', 'new', 'old' and 'estats'); 32-bit ones go through
 * UPDATE_STAT.  The BMAC only reports a total TX packet count, so
 * unicast is derived as total minus (multicast + broadcast), with the
 * latter tracked in 'sum'.
 */
static void bnx2x_update_bmac_stats(struct bnx2x *bp)
{
	struct regp diff;	/* scratch delta used by UPDATE_STAT64 */
	struct regp sum;	/* multicast + broadcast TX delta */
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac);
	struct bmac_stats *old = &bp->old_bmac;
	struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);

	sum.hi = 0;
	sum.lo = 0;

	UPDATE_STAT64(tx_gtbyt.hi, total_bytes_transmitted_hi,
		      tx_gtbyt.lo, total_bytes_transmitted_lo);

	UPDATE_STAT64(tx_gtmca.hi, total_multicast_packets_transmitted_hi,
		      tx_gtmca.lo, total_multicast_packets_transmitted_lo);
	ADD_64(sum.hi, diff.hi, sum.lo, diff.lo);

	UPDATE_STAT64(tx_gtgca.hi, total_broadcast_packets_transmitted_hi,
		      tx_gtgca.lo, total_broadcast_packets_transmitted_lo);
	ADD_64(sum.hi, diff.hi, sum.lo, diff.lo);

	/* gtpkt counts all TX packets: subtract mcast + bcast to leave
	   unicast only */
	UPDATE_STAT64(tx_gtpkt.hi, total_unicast_packets_transmitted_hi,
		      tx_gtpkt.lo, total_unicast_packets_transmitted_lo);
	SUB_64(estats->total_unicast_packets_transmitted_hi, sum.hi,
	       estats->total_unicast_packets_transmitted_lo, sum.lo);

	UPDATE_STAT(tx_gtxpf.lo, pause_xoff_frames_transmitted);
	UPDATE_STAT(tx_gt64.lo, frames_transmitted_64_bytes);
	UPDATE_STAT(tx_gt127.lo, frames_transmitted_65_127_bytes);
	UPDATE_STAT(tx_gt255.lo, frames_transmitted_128_255_bytes);
	UPDATE_STAT(tx_gt511.lo, frames_transmitted_256_511_bytes);
	UPDATE_STAT(tx_gt1023.lo, frames_transmitted_512_1023_bytes);
	UPDATE_STAT(tx_gt1518.lo, frames_transmitted_1024_1522_bytes);
	/* the four largest BMAC size buckets all fold into the single
	   1523-9022 driver bucket */
	UPDATE_STAT(tx_gt2047.lo, frames_transmitted_1523_9022_bytes);
	UPDATE_STAT(tx_gt4095.lo, frames_transmitted_1523_9022_bytes);
	UPDATE_STAT(tx_gt9216.lo, frames_transmitted_1523_9022_bytes);
	UPDATE_STAT(tx_gt16383.lo, frames_transmitted_1523_9022_bytes);

	UPDATE_STAT(rx_grfcs.lo, crc_receive_errors);
	UPDATE_STAT(rx_grund.lo, runt_packets_received);
	UPDATE_STAT(rx_grovr.lo, stat_Dot3statsFramesTooLong);
	UPDATE_STAT(rx_grxpf.lo, pause_xoff_frames_received);
	UPDATE_STAT(rx_grxcf.lo, control_frames_received);
	/* UPDATE_STAT(rx_grxpf.lo, control_frames_received); */
	UPDATE_STAT(rx_grfrg.lo, error_runt_packets_received);
	UPDATE_STAT(rx_grjbr.lo, error_jabber_packets_received);

	UPDATE_STAT64(rx_grerb.hi, stat_IfHCInBadOctets_hi,
		      rx_grerb.lo, stat_IfHCInBadOctets_lo);
	UPDATE_STAT64(tx_gtufl.hi, stat_IfHCOutBadOctets_hi,
		      tx_gtufl.lo, stat_IfHCOutBadOctets_lo);
	UPDATE_STAT(tx_gterr.lo, stat_Dot3statsInternalMacTransmitErrors);
	/* UPDATE_STAT(rx_grxpf.lo, stat_XoffStateEntered); */
	/* no dedicated BMAC counter: mirror the received-XOFF count */
	estats->stat_XoffStateEntered = estats->pause_xoff_frames_received;
}
4777
/* Fold the freshly DMAE'd EMAC hardware counters into the driver's
 * eth_stats block.  64-bit totals go through UPDATE_EXTEND_STAT (which
 * uses the locals 'new' and 'estats'); the remaining 32-bit counters
 * are accumulated directly.
 */
static void bnx2x_update_emac_stats(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac);
	struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);

	/* 64-bit TX totals */
	UPDATE_EXTEND_STAT(tx_ifhcoutoctets, total_bytes_transmitted_hi,
					     total_bytes_transmitted_lo);
	UPDATE_EXTEND_STAT(tx_ifhcoutucastpkts,
					total_unicast_packets_transmitted_hi,
					total_unicast_packets_transmitted_lo);
	UPDATE_EXTEND_STAT(tx_ifhcoutmulticastpkts,
				      total_multicast_packets_transmitted_hi,
				      total_multicast_packets_transmitted_lo);
	UPDATE_EXTEND_STAT(tx_ifhcoutbroadcastpkts,
				      total_broadcast_packets_transmitted_hi,
				      total_broadcast_packets_transmitted_lo);

	/* TX pause/collision counters */
	estats->pause_xon_frames_transmitted += new->tx_outxonsent;
	estats->pause_xoff_frames_transmitted += new->tx_outxoffsent;
	estats->single_collision_transmit_frames +=
				new->tx_dot3statssinglecollisionframes;
	estats->multiple_collision_transmit_frames +=
				new->tx_dot3statsmultiplecollisionframes;
	estats->late_collision_frames += new->tx_dot3statslatecollisions;
	estats->excessive_collision_frames +=
				new->tx_dot3statsexcessivecollisions;
	/* TX size-bucket counters */
	estats->frames_transmitted_64_bytes += new->tx_etherstatspkts64octets;
	estats->frames_transmitted_65_127_bytes +=
				new->tx_etherstatspkts65octetsto127octets;
	estats->frames_transmitted_128_255_bytes +=
				new->tx_etherstatspkts128octetsto255octets;
	estats->frames_transmitted_256_511_bytes +=
				new->tx_etherstatspkts256octetsto511octets;
	estats->frames_transmitted_512_1023_bytes +=
				new->tx_etherstatspkts512octetsto1023octets;
	estats->frames_transmitted_1024_1522_bytes +=
				new->tx_etherstatspkts1024octetsto1522octet;
	estats->frames_transmitted_1523_9022_bytes +=
				new->tx_etherstatspktsover1522octets;

	/* RX error and pause counters */
	estats->crc_receive_errors += new->rx_dot3statsfcserrors;
	estats->alignment_errors += new->rx_dot3statsalignmenterrors;
	estats->false_carrier_detections += new->rx_falsecarriererrors;
	estats->runt_packets_received += new->rx_etherstatsundersizepkts;
	estats->stat_Dot3statsFramesTooLong += new->rx_dot3statsframestoolong;
	estats->pause_xon_frames_received += new->rx_xonpauseframesreceived;
	estats->pause_xoff_frames_received += new->rx_xoffpauseframesreceived;
	estats->control_frames_received += new->rx_maccontrolframesreceived;
	estats->error_runt_packets_received += new->rx_etherstatsfragments;
	estats->error_jabber_packets_received += new->rx_etherstatsjabbers;

	/* 64-bit bad-octet totals and remaining MIB counters */
	UPDATE_EXTEND_STAT(rx_ifhcinbadoctets, stat_IfHCInBadOctets_hi,
					       stat_IfHCInBadOctets_lo);
	UPDATE_EXTEND_STAT(tx_ifhcoutbadoctets, stat_IfHCOutBadOctets_hi,
						stat_IfHCOutBadOctets_lo);
	estats->stat_Dot3statsInternalMacTransmitErrors +=
				new->tx_dot3statsinternalmactransmiterrors;
	estats->stat_Dot3StatsCarrierSenseErrors +=
				new->rx_dot3statscarriersenseerrors;
	estats->stat_Dot3StatsDeferredTransmissions +=
				new->tx_dot3statsdeferredtransmissions;
	estats->stat_FlowControlDone += new->tx_flowcontroldone;
	estats->stat_XoffStateEntered += new->rx_xoffstateentered;
}
4842
/* Copy firmware (storm) and NIG statistics from the shared fw_stats/nig
 * buffers into the driver's eth_stats block.  The DMAE chain and each
 * storm signal completion by writing 0xffffffff into their 'done' words;
 * if any sentinel is missing the buffers are stale and a negative code
 * is returned without touching estats.  On success the sentinels are
 * cleared so the next round can be detected.
 *
 * Returns 0 on success, -1/-2/-3 when the DMAE/tstorm/xstorm results
 * (respectively) are not ready.
 */
static int bnx2x_update_storm_stats(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_common_stats *tstats = &stats->tstorm_common;
	struct tstorm_per_client_stats *tclient =
						&tstats->client_statistics[0];
	struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
	struct xstorm_common_stats *xstats = &stats->xstorm_common;
	struct nig_stats *nstats = bnx2x_sp(bp, nig);
	struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
	u32 diff;	/* scratch used by UPDATE_EXTEND_TSTAT */

	/* are DMAE stats valid? (written by the NIG DMAE completion) */
	if (nstats->done != 0xffffffff) {
		DP(BNX2X_MSG_STATS, "stats not updated by dmae\n");
		return -1;
	}

	/* are storm stats valid? */
	if (tstats->done.hi != 0xffffffff) {
		DP(BNX2X_MSG_STATS, "stats not updated by tstorm\n");
		return -2;
	}
	if (xstats->done.hi != 0xffffffff) {
		DP(BNX2X_MSG_STATS, "stats not updated by xstorm\n");
		return -3;
	}

	/* total = valid + error bytes; both start from the same base */
	estats->total_bytes_received_hi =
	estats->valid_bytes_received_hi =
				le32_to_cpu(tclient->total_rcv_bytes.hi);
	estats->total_bytes_received_lo =
	estats->valid_bytes_received_lo =
				le32_to_cpu(tclient->total_rcv_bytes.lo);
	ADD_64(estats->total_bytes_received_hi,
	       le32_to_cpu(tclient->rcv_error_bytes.hi),
	       estats->total_bytes_received_lo,
	       le32_to_cpu(tclient->rcv_error_bytes.lo));

	/* RX packet counts, accumulated as deltas vs. old_tclient */
	UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received_hi,
					total_unicast_packets_received_lo);
	UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received_hi,
					total_multicast_packets_received_lo);
	UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received_hi,
					total_broadcast_packets_received_lo);

	/* RX size buckets are not provided by the storms */
	estats->frames_received_64_bytes = MAC_STX_NA;
	estats->frames_received_65_127_bytes = MAC_STX_NA;
	estats->frames_received_128_255_bytes = MAC_STX_NA;
	estats->frames_received_256_511_bytes = MAC_STX_NA;
	estats->frames_received_512_1023_bytes = MAC_STX_NA;
	estats->frames_received_1024_1522_bytes = MAC_STX_NA;
	estats->frames_received_1523_9022_bytes = MAC_STX_NA;

	/* raw xstorm TX totals */
	estats->x_total_sent_bytes_hi =
				le32_to_cpu(xstats->total_sent_bytes.hi);
	estats->x_total_sent_bytes_lo =
				le32_to_cpu(xstats->total_sent_bytes.lo);
	estats->x_total_sent_pkts = le32_to_cpu(xstats->total_sent_pkts);

	/* raw tstorm per-client RX totals */
	estats->t_rcv_unicast_bytes_hi =
				le32_to_cpu(tclient->rcv_unicast_bytes.hi);
	estats->t_rcv_unicast_bytes_lo =
				le32_to_cpu(tclient->rcv_unicast_bytes.lo);
	estats->t_rcv_broadcast_bytes_hi =
				le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
	estats->t_rcv_broadcast_bytes_lo =
				le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
	estats->t_rcv_multicast_bytes_hi =
				le32_to_cpu(tclient->rcv_multicast_bytes.hi);
	estats->t_rcv_multicast_bytes_lo =
				le32_to_cpu(tclient->rcv_multicast_bytes.lo);
	estats->t_total_rcv_pkt = le32_to_cpu(tclient->total_rcv_pkts);

	/* discard counters */
	estats->checksum_discard = le32_to_cpu(tclient->checksum_discard);
	estats->packets_too_big_discard =
				le32_to_cpu(tclient->packets_too_big_discard);
	estats->jabber_packets_received = estats->packets_too_big_discard +
					  estats->stat_Dot3statsFramesTooLong;
	estats->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
	estats->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
	estats->mac_discard = le32_to_cpu(tclient->mac_discard);
	estats->mac_filter_discard = le32_to_cpu(tstats->mac_filter_discard);
	estats->xxoverflow_discard = le32_to_cpu(tstats->xxoverflow_discard);
	estats->brb_truncate_discard =
				le32_to_cpu(tstats->brb_truncate_discard);

	/* NIG BRB discard is a free-running HW counter: accumulate the
	   delta against the cached baseline */
	estats->brb_discard += nstats->brb_discard - bp->old_brb_discard;
	bp->old_brb_discard = nstats->brb_discard;

	/* remaining NIG counters are copied verbatim */
	estats->brb_packet = nstats->brb_packet;
	estats->brb_truncate = nstats->brb_truncate;
	estats->flow_ctrl_discard = nstats->flow_ctrl_discard;
	estats->flow_ctrl_octets = nstats->flow_ctrl_octets;
	estats->flow_ctrl_packet = nstats->flow_ctrl_packet;
	estats->mng_discard = nstats->mng_discard;
	estats->mng_octet_inp = nstats->mng_octet_inp;
	estats->mng_octet_out = nstats->mng_octet_out;
	estats->mng_packet_inp = nstats->mng_packet_inp;
	estats->mng_packet_out = nstats->mng_packet_out;
	estats->pbf_octets = nstats->pbf_octets;
	estats->pbf_packet = nstats->pbf_packet;
	estats->safc_inp = nstats->safc_inp;

	/* re-arm the completion sentinels for the next round */
	xstats->done.hi = 0;
	tstats->done.hi = 0;
	nstats->done = 0;

	return 0;
}
4956
/* Derive the standard netdev counters (bp->dev->stats) from the driver's
 * extended eth_stats block.  64-bit {hi, lo} pairs are collapsed with
 * bnx2x_hilo(); on 32-bit kernels only the low words survive.
 */
static void bnx2x_update_net_stats(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
	struct net_device_stats *nstats = &bp->dev->stats;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes =
		bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	nstats->rx_dropped = estats->checksum_discard +
				   estats->mac_discard;
	nstats->tx_dropped = 0;

	/* NOTE(review): this reports *transmitted* multicast, while
	   net_device_stats.multicast is conventionally the count of
	   multicast packets received — verify intent */
	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);

	nstats->collisions =
		estats->single_collision_transmit_frames +
		estats->multiple_collision_transmit_frames +
		estats->late_collision_frames +
		estats->excessive_collision_frames;

	nstats->rx_length_errors = estats->runt_packets_received +
				   estats->jabber_packets_received;
	nstats->rx_over_errors = estats->no_buff_discard;
	nstats->rx_crc_errors = estats->crc_receive_errors;
	nstats->rx_frame_errors = estats->alignment_errors;
	nstats->rx_fifo_errors = estats->brb_discard +
				       estats->brb_truncate_discard;
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors;

	nstats->tx_aborted_errors = estats->late_collision_frames +
					  estats->excessive_collision_frames;
	nstats->tx_carrier_errors = estats->false_carrier_detections;
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors;

	/* bump the start/end sequence markers that bracket the MAC stats
	   region (kept equal after a consistent update) */
	estats->mac_stx_start = ++estats->mac_stx_end;
}
5017
/* Periodic statistics worker: pull storm/NIG stats into estats, fold in
 * the active MAC's counters, refresh the netdev counters, optionally
 * dump debug state, then kick the next collection round by posting the
 * DMAE loader command (for the chain built in bnx2x_init_mac_stats())
 * and a STAT_QUERY ramrod to the firmware.
 */
static void bnx2x_update_stats(struct bnx2x *bp)
{
	int i;

	if (!bnx2x_update_storm_stats(bp)) {

		/* storm stats were fresh: fold in the MAC counters too */
		if (bp->phy_flags & PHY_BMAC_FLAG) {
			bnx2x_update_bmac_stats(bp);

		} else if (bp->phy_flags & PHY_EMAC_FLAG) {
			bnx2x_update_emac_stats(bp);

		} else { /* unreached */
			BNX2X_ERR("no MAC active\n");
			return;
		}

		bnx2x_update_net_stats(bp);
	}

	/* verbose per-queue debug dump, gated on NETIF_MSG_TIMER */
	if (bp->msglevel & NETIF_MSG_TIMER) {
		struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
		struct net_device_stats *nstats = &bp->dev->stats;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
				  "  tx pkt (%lx)\n",
		       bnx2x_tx_avail(bp->fp),
		       *bp->fp->tx_cons_sb, nstats->tx_packets);
		printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
				  "  rx pkt (%lx)\n",
		       (u16)(*bp->fp->rx_cons_sb - bp->fp->rx_comp_cons),
		       *bp->fp->rx_cons_sb, nstats->rx_packets);
		printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u\n",
		       netif_queue_stopped(bp->dev)? "Xoff" : "Xon",
		       estats->driver_xoff, estats->brb_discard);
		printk(KERN_DEBUG "tstats: checksum_discard %u  "
			"packets_too_big_discard %u  no_buff_discard %u  "
			"mac_discard %u  mac_filter_discard %u  "
			"xxovrflow_discard %u  brb_truncate_discard %u  "
			"ttl0_discard %u\n",
		       estats->checksum_discard,
		       estats->packets_too_big_discard,
		       estats->no_buff_discard, estats->mac_discard,
		       estats->mac_filter_discard, estats->xxoverflow_discard,
		       estats->brb_truncate_discard, estats->ttl0_discard);

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(BNX2X_MSG_STATS, "state is %x, returning\n", bp->state);
		return;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	/* loader: one DMAE command (bp->dmae) that copies the first command
	   of the pre-built chain into loader slot (loader_idx + 1) and
	   chains completions, restarting the whole stats DMAE sequence */
	if (bp->executer_idx) {
		struct dmae_command *dmae = &bp->dmae;
		int port = bp->port;
		int loader_idx = port * 8;

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		dmae->len--;	/* !!! for A0/1 only */
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		bnx2x_post_dmae(bp, dmae, loader_idx);
	}

	if (bp->stats_state != STATS_STATE_ENABLE) {
		/* a stop was requested: acknowledge it (bnx2x_stop_stats()
		   polls for STATS_STATE_DISABLE) and do not re-arm */
		bp->stats_state = STATS_STATE_DISABLE;
		return;
	}

	/* ask the firmware for the next storm statistics snapshot */
	if (bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0, 0, 0, 0) == 0) {
		/* stats ramrod has its own slot on the spe */
		bp->spq_left++;
		bp->stat_pending = 1;
	}
}
5126
5127 static void bnx2x_timer(unsigned long data)
5128 {
5129         struct bnx2x *bp = (struct bnx2x *) data;
5130
5131         if (!netif_running(bp->dev))
5132                 return;
5133
5134         if (atomic_read(&bp->intr_sem) != 0)
5135                 goto timer_restart;
5136
5137         if (poll) {
5138                 struct bnx2x_fastpath *fp = &bp->fp[0];
5139                 int rc;
5140
5141                 bnx2x_tx_int(fp, 1000);
5142                 rc = bnx2x_rx_int(fp, 1000);
5143         }
5144
5145         if (!nomcp) {
5146                 int port = bp->port;
5147                 u32 drv_pulse;
5148                 u32 mcp_pulse;
5149
5150                 ++bp->fw_drv_pulse_wr_seq;
5151                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5152                 /* TBD - add SYSTEM_TIME */
5153                 drv_pulse = bp->fw_drv_pulse_wr_seq;
5154                 SHMEM_WR(bp, func_mb[port].drv_pulse_mb, drv_pulse);
5155
5156                 mcp_pulse = (SHMEM_RD(bp, func_mb[port].mcp_pulse_mb) &
5157                              MCP_PULSE_SEQ_MASK);
5158                 /* The delta between driver pulse and mcp response
5159                  * should be 1 (before mcp response) or 0 (after mcp response)
5160                  */
5161                 if ((drv_pulse != mcp_pulse) &&
5162                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
5163                         /* someone lost a heartbeat... */
5164                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5165                                   drv_pulse, mcp_pulse);
5166                 }
5167         }
5168
5169         if (bp->stats_state == STATS_STATE_DISABLE)
5170                 goto timer_restart;
5171
5172         bnx2x_update_stats(bp);
5173
5174 timer_restart:
5175         mod_timer(&bp->timer, jiffies + bp->current_interval);
5176 }
5177
5178 /* end of Statistics */
5179
5180 /* nic init */
5181
5182 /*
5183  * nic init service functions
5184  */
5185
5186 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
5187                           dma_addr_t mapping, int id)
5188 {
5189         int port = bp->port;
5190         u64 section;
5191         int index;
5192
5193         /* USTORM */
5194         section = ((u64)mapping) + offsetof(struct host_status_block,
5195                                             u_status_block);
5196         sb->u_status_block.status_block_id = id;
5197
5198         REG_WR(bp, BAR_USTRORM_INTMEM +
5199                USTORM_SB_HOST_SB_ADDR_OFFSET(port, id), U64_LO(section));
5200         REG_WR(bp, BAR_USTRORM_INTMEM +
5201                ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, id)) + 4),
5202                U64_HI(section));
5203
5204         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
5205                 REG_WR16(bp, BAR_USTRORM_INTMEM +
5206                          USTORM_SB_HC_DISABLE_OFFSET(port, id, index), 0x1);
5207
5208         /* CSTORM */
5209         section = ((u64)mapping) + offsetof(struct host_status_block,
5210                                             c_status_block);
5211         sb->c_status_block.status_block_id = id;
5212
5213         REG_WR(bp, BAR_CSTRORM_INTMEM +
5214                CSTORM_SB_HOST_SB_ADDR_OFFSET(port, id), U64_LO(section));
5215         REG_WR(bp, BAR_CSTRORM_INTMEM +
5216                ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, id)) + 4),
5217                U64_HI(section));
5218
5219         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
5220                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5221                          CSTORM_SB_HC_DISABLE_OFFSET(port, id, index), 0x1);
5222
5223         bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5224 }
5225
/* Program the default (slowpath) status block into the chip.
 *
 * Wires up the attention (AEU) signal groups and mask for this port,
 * publishes the DMA address of each storm's section of @def_sb in the
 * U/C/T/X storm internal memories, disables host coalescing on every
 * index, and enables the IGU interrupt line for status block @id.
 *
 * NOTE(review): the per-storm sequences below are intentionally parallel;
 * the differing BAR_*/offset macros prevent factoring them into a helper.
 */
static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int id)
{
	int port = bp->port;
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = id;

	/* AEU enable registers for this port's function; groups are laid
	 * out 0x10 apart, four 32-bit signal words per group
	 */
	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	/* cache the enabled-attention signal words for groups 0..2 */
	for (index = 0; index < 3; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						     reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	/* cache the current attention mask for this port's function */
	bp->aeu_mask = REG_RD(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					  MISC_REG_AEU_MASK_ATTN_FUNC_0));

	/* point the HC at the attention section's DMA address */
	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	/* OR the status block id into the attention number register */
	val = REG_RD(bp, reg_offset);
	val |= id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
	       U64_HI(section));
	/* base timer resolution for this storm's host coalescing */
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port),
	       BNX2X_BTR);

	/* start with host coalescing disabled on every index */
	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
	       U64_HI(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port),
	       BNX2X_BTR);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
	       U64_HI(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port),
	       BNX2X_BTR);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
	       U64_HI(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port),
	       BNX2X_BTR);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);

	/* ack and enable interrupts for the default status block */
	bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
5338
5339 static void bnx2x_update_coalesce(struct bnx2x *bp)
5340 {
5341         int port = bp->port;
5342         int i;
5343
5344         for_each_queue(bp, i) {
5345
5346                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
5347                 REG_WR8(bp, BAR_USTRORM_INTMEM +
5348                         USTORM_SB_HC_TIMEOUT_OFFSET(port, i,
5349                                                    HC_INDEX_U_ETH_RX_CQ_CONS),
5350                         bp->rx_ticks_int/12);
5351                 REG_WR16(bp, BAR_USTRORM_INTMEM +
5352                          USTORM_SB_HC_DISABLE_OFFSET(port, i,
5353                                                    HC_INDEX_U_ETH_RX_CQ_CONS),
5354                          bp->rx_ticks_int ? 0 : 1);
5355
5356                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
5357                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
5358                         CSTORM_SB_HC_TIMEOUT_OFFSET(port, i,
5359                                                    HC_INDEX_C_ETH_TX_CQ_CONS),
5360                         bp->tx_ticks_int/12);
5361                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5362                          CSTORM_SB_HC_DISABLE_OFFSET(port, i,
5363                                                    HC_INDEX_C_ETH_TX_CQ_CONS),
5364                          bp->tx_ticks_int ? 0 : 1);
5365         }
5366 }
5367
/* Initialize the RX descriptor and completion rings of every queue.
 *
 * Computes the RX buffer size from the MTU, links the pages of each ring
 * together via their last ("next page") entries, pre-allocates rx skbs up
 * to bp->rx_ring_size, and finally publishes the producer index to the
 * TSTORM (which may raise an interrupt - the chip must already be
 * initialized at this point).
 */
static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	u16 ring_prod;
	int i, j;
	int port = bp->port;

	/* usable buffer = MTU + rx offset + L2 overhead; the allocated
	 * size adds 64 bytes of slack (presumably for alignment -
	 * TODO confirm)
	 */
	bp->rx_buf_use_size = bp->dev->mtu;

	bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
	bp->rx_buf_size = bp->rx_buf_use_size + 64;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;

		/* chain the descriptor ring pages: the last two BDs of each
		 * page point at the next page (wrapping to page 0)
		 */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));

		}

		/* chain the completion queue pages via their last entry */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					  BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					  BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* rx completion queue */
		fp->rx_comp_cons = ring_prod = 0;

		/* pre-fill the ring with skbs; a partial fill is tolerated
		 * (logged, then the loop stops at the shorter ring)
		 */
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs\n", i);
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			BUG_TRAP(ring_prod > i);
		}

		fp->rx_bd_prod = fp->rx_comp_prod = ring_prod;
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning! this will generate an interrupt (to the TSTORM) */
		/* must only be done when chip is initialized */
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_RCQ_PROD_OFFSET(port, j), ring_prod);
		if (j != 0)
			continue;

		/* queue 0 only: program the USTORM memory workaround with
		 * the completion ring's DMA address
		 */
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(port),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(port) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}
5442
5443 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5444 {
5445         int i, j;
5446
5447         for_each_queue(bp, j) {
5448                 struct bnx2x_fastpath *fp = &bp->fp[j];
5449
5450                 for (i = 1; i <= NUM_TX_RINGS; i++) {
5451                         struct eth_tx_bd *tx_bd =
5452              &