diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 83481e20f144f2f3ad0801f78d649a8e909cbe70..e11485ca037dc223fd6f9e7420387c16c4251a50 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -1,6 +1,6 @@
 /* bnx2x_main.c: Broadcom Everest network driver.
  *
- * Copyright (c) 2007-2011 Broadcom Corporation
+ * Copyright (c) 2007-2012 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -39,7 +39,6 @@
 #include <linux/time.h>
 #include <linux/ethtool.h>
 #include <linux/mii.h>
-#include <linux/if.h>
 #include <linux/if_vlan.h>
 #include <net/ip.h>
 #include <net/ipv6.h>
@@ -52,6 +51,7 @@
 #include <linux/prefetch.h>
 #include <linux/zlib.h>
 #include <linux/io.h>
+#include <linux/semaphore.h>
 #include <linux/stringify.h>
 #include <linux/vmalloc.h>
 
@@ -74,6 +74,8 @@
 #define FW_FILE_NAME_E1H       "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
 #define FW_FILE_NAME_E2                "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
 
+#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
+
 /* Time in jiffies before concluding the transmitter is hung */
 #define TX_TIMEOUT             (5*HZ)
 
@@ -92,15 +94,11 @@ MODULE_FIRMWARE(FW_FILE_NAME_E1);
 MODULE_FIRMWARE(FW_FILE_NAME_E1H);
 MODULE_FIRMWARE(FW_FILE_NAME_E2);
 
-static int multi_mode = 1;
-module_param(multi_mode, int, 0);
-MODULE_PARM_DESC(multi_mode, " Multi queue mode "
-                            "(0 Disable; 1 Enable (default))");
 
 int num_queues;
 module_param(num_queues, int, 0);
-MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
-                               " (default is as a number of CPUs)");
+MODULE_PARM_DESC(num_queues,
+                " Set number of queues (default is as a number of CPUs)");
 
 static int disable_tpa;
 module_param(disable_tpa, int, 0);
@@ -108,7 +106,7 @@ MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
 
 #define INT_MODE_INTx                  1
 #define INT_MODE_MSI                   2
-static int int_mode;
+int int_mode;
 module_param(int_mode, int, 0);
 MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
                                "(1 INT#x; 2 MSI)");
@@ -117,10 +115,6 @@ static int dropless_fc;
 module_param(dropless_fc, int, 0);
 MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
 
-static int poll;
-module_param(poll, int, 0);
-MODULE_PARM_DESC(poll, " Use polling (for debug)");
-
 static int mrrs = -1;
 module_param(mrrs, int, 0);
 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
@@ -143,8 +137,13 @@ enum bnx2x_board_type {
        BCM57800_MF,
        BCM57810,
        BCM57810_MF,
-       BCM57840,
-       BCM57840_MF
+       BCM57840_O,
+       BCM57840_4_10,
+       BCM57840_2_20,
+       BCM57840_MFO,
+       BCM57840_MF,
+       BCM57811,
+       BCM57811_MF
 };
 
 /* indexed by board_type, above */
@@ -161,8 +160,12 @@ static struct {
        { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
        { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
        { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
-       { "Broadcom NetXtreme II BCM57840 10/20 Gigabit "
-                                               "Ethernet Multi Function"}
+       { "Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" },
+       { "Broadcom NetXtreme II BCM57840 20 Gigabit Ethernet" },
+       { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function"},
+       { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function"},
+       { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet"},
+       { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function"},
 };
 
 #ifndef PCI_DEVICE_ID_NX2_57710
@@ -192,12 +195,27 @@ static struct {
 #ifndef PCI_DEVICE_ID_NX2_57810_MF
 #define PCI_DEVICE_ID_NX2_57810_MF     CHIP_NUM_57810_MF
 #endif
-#ifndef PCI_DEVICE_ID_NX2_57840
-#define PCI_DEVICE_ID_NX2_57840                CHIP_NUM_57840
+#ifndef PCI_DEVICE_ID_NX2_57840_O
+#define PCI_DEVICE_ID_NX2_57840_O      CHIP_NUM_57840_OBSOLETE
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57840_4_10
+#define PCI_DEVICE_ID_NX2_57840_4_10   CHIP_NUM_57840_4_10
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57840_2_20
+#define PCI_DEVICE_ID_NX2_57840_2_20   CHIP_NUM_57840_2_20
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57840_MFO
+#define PCI_DEVICE_ID_NX2_57840_MFO    CHIP_NUM_57840_MF_OBSOLETE
 #endif
 #ifndef PCI_DEVICE_ID_NX2_57840_MF
 #define PCI_DEVICE_ID_NX2_57840_MF     CHIP_NUM_57840_MF
 #endif
+#ifndef PCI_DEVICE_ID_NX2_57811
+#define PCI_DEVICE_ID_NX2_57811                CHIP_NUM_57811
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57811_MF
+#define PCI_DEVICE_ID_NX2_57811_MF     CHIP_NUM_57811_MF
+#endif
 static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
@@ -208,26 +226,35 @@ static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
-       { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840), BCM57840 },
+       { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
+       { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
+       { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
+       { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
+       { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
+       { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
        { 0 }
 };
 
 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
 
+/* Global resources for unloading a previously loaded device */
+#define BNX2X_PREV_WAIT_NEEDED 1
+static DEFINE_SEMAPHORE(bnx2x_prev_sem);
+static LIST_HEAD(bnx2x_prev_list);
 /****************************************************************************
 * General service functions
 ****************************************************************************/
 
-static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
+static void __storm_memset_dma_mapping(struct bnx2x *bp,
                                       u32 addr, dma_addr_t mapping)
 {
        REG_WR(bp,  addr, U64_LO(mapping));
        REG_WR(bp,  addr + 4, U64_HI(mapping));
 }
 
-static inline void storm_memset_spq_addr(struct bnx2x *bp,
-                                        dma_addr_t mapping, u16 abs_fid)
+static void storm_memset_spq_addr(struct bnx2x *bp,
+                                 dma_addr_t mapping, u16 abs_fid)
 {
        u32 addr = XSEM_REG_FAST_MEMORY +
                        XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
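The bnx2x_prev_sem semaphore and bnx2x_prev_list list head introduced above are module-global state for tracking functions left over from a previous driver load; every walk or update of the list is done with the semaphore held. A hedged sketch of that pattern in kernel style (struct prev_entry and prev_mark_path() are illustrative names, not the driver's real types):

    #include <linux/semaphore.h>
    #include <linux/list.h>
    #include <linux/slab.h>

    /* illustrative element - the real driver records bus/slot/path details */
    struct prev_entry {
        struct list_head link;
        int path;
    };

    static DEFINE_SEMAPHORE(prev_sem);      /* semaphore initialized to 1 (unlocked) */
    static LIST_HEAD(prev_list);

    static int prev_mark_path(int path)
    {
        struct prev_entry *e = kzalloc(sizeof(*e), GFP_KERNEL);

        if (!e)
            return -ENOMEM;
        e->path = path;

        down(&prev_sem);                    /* serialize all list access */
        list_add(&e->link, &prev_list);
        up(&prev_sem);
        return 0;
    }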
@@ -235,8 +262,8 @@ static inline void storm_memset_spq_addr(struct bnx2x *bp,
        __storm_memset_dma_mapping(bp, addr, mapping);
 }
 
-static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
-                                        u16 pf_id)
+static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
+                                 u16 pf_id)
 {
        REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
@@ -248,8 +275,8 @@ static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
                pf_id);
 }
 
-static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
-                                       u8 enable)
+static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
+                                u8 enable)
 {
        REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
@@ -261,8 +288,8 @@ static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
                enable);
 }
 
-static inline void storm_memset_eq_data(struct bnx2x *bp,
-                               struct event_ring_data *eq_data,
+static void storm_memset_eq_data(struct bnx2x *bp,
+                                struct event_ring_data *eq_data,
                                u16 pfid)
 {
        size_t size = sizeof(struct event_ring_data);
@@ -272,8 +299,8 @@ static inline void storm_memset_eq_data(struct bnx2x *bp,
        __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
 }
 
-static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
-                                       u16 pfid)
+static void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
+                                u16 pfid)
 {
        u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
        REG_WR16(bp, addr, eq_prod);
@@ -308,67 +335,6 @@ static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
 #define DMAE_DP_DST_PCI                "pci dst_addr [%x:%08x]"
 #define DMAE_DP_DST_NONE       "dst_addr [none]"
 
-static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
-                         int msglvl)
-{
-       u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
-
-       switch (dmae->opcode & DMAE_COMMAND_DST) {
-       case DMAE_CMD_DST_PCI:
-               if (src_type == DMAE_CMD_SRC_PCI)
-                       DP(msglvl, "DMAE: opcode 0x%08x\n"
-                          "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
-                          "comp_addr [%x:%08x], comp_val 0x%08x\n",
-                          dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
-                          dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
-                          dmae->comp_addr_hi, dmae->comp_addr_lo,
-                          dmae->comp_val);
-               else
-                       DP(msglvl, "DMAE: opcode 0x%08x\n"
-                          "src [%08x], len [%d*4], dst [%x:%08x]\n"
-                          "comp_addr [%x:%08x], comp_val 0x%08x\n",
-                          dmae->opcode, dmae->src_addr_lo >> 2,
-                          dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
-                          dmae->comp_addr_hi, dmae->comp_addr_lo,
-                          dmae->comp_val);
-               break;
-       case DMAE_CMD_DST_GRC:
-               if (src_type == DMAE_CMD_SRC_PCI)
-                       DP(msglvl, "DMAE: opcode 0x%08x\n"
-                          "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
-                          "comp_addr [%x:%08x], comp_val 0x%08x\n",
-                          dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
-                          dmae->len, dmae->dst_addr_lo >> 2,
-                          dmae->comp_addr_hi, dmae->comp_addr_lo,
-                          dmae->comp_val);
-               else
-                       DP(msglvl, "DMAE: opcode 0x%08x\n"
-                          "src [%08x], len [%d*4], dst [%08x]\n"
-                          "comp_addr [%x:%08x], comp_val 0x%08x\n",
-                          dmae->opcode, dmae->src_addr_lo >> 2,
-                          dmae->len, dmae->dst_addr_lo >> 2,
-                          dmae->comp_addr_hi, dmae->comp_addr_lo,
-                          dmae->comp_val);
-               break;
-       default:
-               if (src_type == DMAE_CMD_SRC_PCI)
-                       DP(msglvl, "DMAE: opcode 0x%08x\n"
-                          "src_addr [%x:%08x]  len [%d * 4]  dst_addr [none]\n"
-                          "comp_addr [%x:%08x]  comp_val 0x%08x\n",
-                          dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
-                          dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
-                          dmae->comp_val);
-               else
-                       DP(msglvl, "DMAE: opcode 0x%08x\n"
-                          "src_addr [%08x]  len [%d * 4]  dst_addr [none]\n"
-                          "comp_addr [%x:%08x]  comp_val 0x%08x\n",
-                          dmae->opcode, dmae->src_addr_lo >> 2,
-                          dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
-                          dmae->comp_val);
-               break;
-       }
-
-}
 
 /* copy command into DMAE command memory and set DMAE command go */
 void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
@@ -379,9 +345,6 @@ void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
-
-               DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
-                  idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
 }
@@ -446,10 +409,6 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
        int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
        int rc = 0;
 
-       DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
-          bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
-          bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
-
        /*
         * Lock the dmae channel. Disable BHs to prevent a dead-lock
         * as long as this code is called both from syscall context and
@@ -466,9 +425,10 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
        /* wait for completion */
        udelay(5);
        while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
-               DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
 
-               if (!cnt) {
+               if (!cnt ||
+                   (bp->recovery_state != BNX2X_RECOVERY_DONE &&
+                    bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
                        BNX2X_ERR("DMAE timeout!\n");
                        rc = DMAE_TIMEOUT;
                        goto unlock;
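The extra condition added to the wait loop above lets the DMAE completion poll bail out as soon as the driver enters error recovery, instead of burning the full timeout against hardware that will not respond. The same bounded-poll-with-abort-flag shape in a standalone sketch (the names and completion value are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_POLLS 4000

    /* 0 on completion, -1 on timeout or when recovery aborts the wait early */
    static int poll_completion(volatile unsigned *comp, unsigned done_val,
                               bool (*in_recovery)(void))
    {
        int cnt = MAX_POLLS;

        while (*comp != done_val) {
            if (!cnt || in_recovery())
                return -1;              /* stop poking hardware that cannot answer */
            cnt--;
            /* a real poll loop delays between reads (the driver uses udelay()) */
        }
        return 0;
    }

    static bool no_recovery(void) { return false; }

    int main(void)
    {
        volatile unsigned comp = 0xd0ae;

        printf("%d\n", poll_completion(&comp, 0xd0ae, no_recovery));
        return 0;
    }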
@@ -481,10 +441,6 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
                rc = DMAE_PCI_ERROR;
        }
 
-       DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
-          bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
-          bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
-
 unlock:
        spin_unlock_bh(&bp->dmae_lock);
        return rc;
@@ -498,9 +454,10 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
 
-               DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
-                  "  using indirect\n", dst_addr, len32);
-               bnx2x_init_ind_wr(bp, dst_addr, data, len32);
+               if (CHIP_IS_E1(bp))
+                       bnx2x_init_ind_wr(bp, dst_addr, data, len32);
+               else
+                       bnx2x_init_str_wr(bp, dst_addr, data, len32);
                return;
        }
 
@@ -514,8 +471,6 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
        dmae.dst_addr_hi = 0;
        dmae.len = len32;
 
-       bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
-
        /* issue the command and wait for completion */
        bnx2x_issue_dmae_with_comp(bp, &dmae);
 }
@@ -528,10 +483,13 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;
 
-               DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
-                  "  using indirect\n", src_addr, len32);
-               for (i = 0; i < len32; i++)
-                       data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
+               if (CHIP_IS_E1(bp))
+                       for (i = 0; i < len32; i++)
+                               data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
+               else
+                       for (i = 0; i < len32; i++)
+                               data[i] = REG_RD(bp, src_addr + i*4);
+
                return;
        }
 
@@ -545,8 +503,6 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
        dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae.len = len32;
 
-       bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
-
        /* issue the command and wait for completion */
        bnx2x_issue_dmae_with_comp(bp, &dmae);
 }
@@ -567,27 +523,6 @@ static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
        bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
 }
 
-/* used only for slowpath so not inlined */
-static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
-{
-       u32 wb_write[2];
-
-       wb_write[0] = val_hi;
-       wb_write[1] = val_lo;
-       REG_WR_DMAE(bp, reg, wb_write, 2);
-}
-
-#ifdef USE_WB_RD
-static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
-{
-       u32 wb_data[2];
-
-       REG_RD_DMAE(bp, reg, wb_data, 2);
-
-       return HILO_U64(wb_data[0], wb_data[1]);
-}
-#endif
-
 static int bnx2x_mc_assert(struct bnx2x *bp)
 {
        char last_idx;
@@ -613,8 +548,7 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);
 
                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
-                       BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
-                                 " 0x%08x 0x%08x 0x%08x\n",
+                       BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
@@ -641,8 +575,7 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);
 
                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
-                       BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
-                                 " 0x%08x 0x%08x 0x%08x\n",
+                       BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
@@ -669,8 +602,7 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);
 
                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
-                       BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
-                                 " 0x%08x 0x%08x 0x%08x\n",
+                       BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
@@ -697,8 +629,7 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);
 
                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
-                       BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
-                                 " 0x%08x 0x%08x 0x%08x\n",
+                       BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
@@ -727,13 +658,23 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
 
        val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
        if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
-               printk("%s" "MCP PC at 0x%x\n", lvl, val);
+               BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);
 
        if (BP_PATH(bp) == 0)
                trace_shmem_base = bp->common.shmem_base;
        else
                trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
-       addr = trace_shmem_base - 0x0800 + 4;
+       addr = trace_shmem_base - 0x800;
+
+       /* validate TRCB signature */
+       mark = REG_RD(bp, addr);
+       if (mark != MFW_TRACE_SIGNATURE) {
+               BNX2X_ERR("Trace buffer signature is missing.");
+               return ;
+       }
+
+       /* read cyclic buffer pointer */
+       addr += 4;
        mark = REG_RD(bp, addr);
        mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
                        + ((mark + 0x3) & ~0x3) - 0x08000000;
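The added check refuses to dump the management-firmware trace unless the word at trace_shmem_base - 0x800 carries MFW_TRACE_SIGNATURE; only then is the cyclic write pointer read from the next word. A standalone sketch of that check-the-magic-before-trusting-the-header layout (the magic value and struct are illustrative, not the firmware's actual format):

    #include <stdint.h>
    #include <stdio.h>

    #define TRACE_SIGNATURE 0x42435254u     /* illustrative magic, not the real MFW value */

    struct trace_hdr {
        uint32_t signature;     /* must match before anything else is trusted */
        uint32_t write_ptr;     /* cyclic buffer pointer, valid only after the match */
    };

    static int trace_write_ptr(const struct trace_hdr *hdr, uint32_t *ptr)
    {
        if (hdr->signature != TRACE_SIGNATURE)
            return -1;          /* buffer missing or corrupted - do not parse it */
        *ptr = hdr->write_ptr;
        return 0;
    }

    int main(void)
    {
        struct trace_hdr hdr = { TRACE_SIGNATURE, 0x100 };
        uint32_t ptr;

        if (!trace_write_ptr(&hdr, &ptr))
            printf("write pointer: 0x%x\n", ptr);
        return 0;
    }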
@@ -755,7 +696,7 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
        printk("%s" "end of fw dump\n", lvl);
 }
 
-static inline void bnx2x_fw_dump(struct bnx2x *bp)
+static void bnx2x_fw_dump(struct bnx2x *bp)
 {
        bnx2x_fw_dump_lvl(bp, KERN_ERR);
 }
@@ -772,14 +713,14 @@ void bnx2x_panic_dump(struct bnx2x *bp)
 #endif
 
        bp->stats_state = STATS_STATE_DISABLED;
+       bp->eth_stats.unrecoverable_error++;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
 
        BNX2X_ERR("begin crash dump -----------------\n");
 
        /* Indices */
        /* Common */
-       BNX2X_ERR("def_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
-                 "  spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
+       BNX2X_ERR("def_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)  spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
                  bp->def_idx, bp->def_att_idx, bp->attn_state,
                  bp->spq_prod_idx, bp->stats_counter);
        BNX2X_ERR("DSB: attn bits(0x%x)  ack(0x%x)  id(0x%x)  idx(0x%x)\n",
@@ -826,24 +767,19 @@ void bnx2x_panic_dump(struct bnx2x *bp)
                struct bnx2x_fp_txdata txdata;
 
                /* Rx */
-               BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
-                         "  rx_comp_prod(0x%x)"
-                         "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
+               BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)  rx_comp_prod(0x%x)  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
                          i, fp->rx_bd_prod, fp->rx_bd_cons,
                          fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
-               BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
-                         "  fp_hc_idx(0x%x)\n",
+               BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)  fp_hc_idx(0x%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge,
                          le16_to_cpu(fp->fp_hc_idx));
 
                /* Tx */
                for_each_cos_in_tx_queue(fp, cos)
                {
-                       txdata = fp->txdata[cos];
-                       BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
-                                 "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
-                                 "  *tx_cons_sb(0x%x)\n",
+                       txdata = *fp->txdata_ptr[cos];
+                       BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)  *tx_cons_sb(0x%x)\n",
                                  i, txdata.tx_pkt_prod,
                                  txdata.tx_pkt_cons, txdata.tx_bd_prod,
                                  txdata.tx_bd_cons,
@@ -885,9 +821,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
                                j * sizeof(u32));
 
                if (!CHIP_IS_E1x(bp)) {
-                       pr_cont("pf_id(0x%x)  vf_id(0x%x)  vf_valid(0x%x) "
-                               "vnic_id(0x%x)  same_igu_sb_1b(0x%x) "
-                               "state(0x%x)\n",
+                       pr_cont("pf_id(0x%x)  vf_id(0x%x)  vf_valid(0x%x) vnic_id(0x%x)  same_igu_sb_1b(0x%x) state(0x%x)\n",
                                sb_data_e2.common.p_func.pf_id,
                                sb_data_e2.common.p_func.vf_id,
                                sb_data_e2.common.p_func.vf_valid,
@@ -895,9 +829,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
                                sb_data_e2.common.same_igu_sb_1b,
                                sb_data_e2.common.state);
                } else {
-                       pr_cont("pf_id(0x%x)  vf_id(0x%x)  vf_valid(0x%x) "
-                               "vnic_id(0x%x)  same_igu_sb_1b(0x%x) "
-                               "state(0x%x)\n",
+                       pr_cont("pf_id(0x%x)  vf_id(0x%x)  vf_valid(0x%x) vnic_id(0x%x)  same_igu_sb_1b(0x%x) state(0x%x)\n",
                                sb_data_e1x.common.p_func.pf_id,
                                sb_data_e1x.common.p_func.vf_id,
                                sb_data_e1x.common.p_func.vf_valid,
@@ -908,21 +840,17 @@ void bnx2x_panic_dump(struct bnx2x *bp)
 
                /* SB_SMs data */
                for (j = 0; j < HC_SB_MAX_SM; j++) {
-                       pr_cont("SM[%d] __flags (0x%x) "
-                              "igu_sb_id (0x%x)  igu_seg_id(0x%x) "
-                              "time_to_expire (0x%x) "
-                              "timer_value(0x%x)\n", j,
-                              hc_sm_p[j].__flags,
-                              hc_sm_p[j].igu_sb_id,
-                              hc_sm_p[j].igu_seg_id,
-                              hc_sm_p[j].time_to_expire,
-                              hc_sm_p[j].timer_value);
+                       pr_cont("SM[%d] __flags (0x%x) igu_sb_id (0x%x)  igu_seg_id(0x%x) time_to_expire (0x%x) timer_value(0x%x)\n",
+                               j, hc_sm_p[j].__flags,
+                               hc_sm_p[j].igu_sb_id,
+                               hc_sm_p[j].igu_seg_id,
+                               hc_sm_p[j].time_to_expire,
+                               hc_sm_p[j].timer_value);
                }
 
                /* Indecies data */
                for (j = 0; j < loop; j++) {
-                       pr_cont("INDEX[%d] flags (0x%x) "
-                                        "timeout (0x%x)\n", j,
+                       pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j,
                               hc_index_p[j].flags,
                               hc_index_p[j].timeout);
                }
@@ -941,7 +869,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
 
                        BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
-                                 i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
+                                 i, j, rx_bd[1], rx_bd[0], sw_bd->data);
                }
 
                start = RX_SGE(fp->rx_sge_prod);
@@ -968,7 +896,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
        for_each_tx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                for_each_cos_in_tx_queue(fp, cos) {
-                       struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+                       struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
 
                        start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
                        end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
@@ -976,8 +904,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
                                struct sw_tx_bd *sw_bd =
                                        &txdata->tx_buf_ring[j];
 
-                               BNX2X_ERR("fp%d: txdata %d, "
-                                         "packet[%x]=[%p,%x]\n",
+                               BNX2X_ERR("fp%d: txdata %d, packet[%x]=[%p,%x]\n",
                                          i, cos, j, sw_bd->skb,
                                          sw_bd->first_bd);
                        }
@@ -987,8 +914,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
                        for (j = start; j != end; j = TX_BD(j + 1)) {
                                u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j];
 
-                               BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]="
-                                         "[%x:%x:%x:%x]\n",
+                               BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=[%x:%x:%x:%x]\n",
                                          i, cos, j, tx_bd[0], tx_bd[1],
                                          tx_bd[2], tx_bd[3]);
                        }
@@ -1007,8 +933,8 @@ void bnx2x_panic_dump(struct bnx2x *bp)
  * initialization.
  */
 #define FLR_WAIT_USEC          10000   /* 10 miliseconds */
-#define FLR_WAIT_INTERAVAL     50      /* usec */
-#define        FLR_POLL_CNT            (FLR_WAIT_USEC/FLR_WAIT_INTERAVAL) /* 200 */
+#define FLR_WAIT_INTERVAL      50      /* usec */
+#define        FLR_POLL_CNT            (FLR_WAIT_USEC/FLR_WAIT_INTERVAL) /* 200 */
 
 struct pbf_pN_buf_regs {
        int pN;
@@ -1041,7 +967,7 @@ static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
        while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) <
               (init_crd - crd_start))) {
                if (cur_cnt--) {
-                       udelay(FLR_WAIT_INTERAVAL);
+                       udelay(FLR_WAIT_INTERVAL);
                        crd = REG_RD(bp, regs->crd);
                        crd_freed = REG_RD(bp, regs->crd_freed);
                } else {
@@ -1055,7 +981,7 @@ static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
                }
        }
        DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
-          poll_count-cur_cnt, FLR_WAIT_INTERAVAL, regs->pN);
+          poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
 }
 
 static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
@@ -1073,7 +999,7 @@ static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
 
        while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) {
                if (cur_cnt--) {
-                       udelay(FLR_WAIT_INTERAVAL);
+                       udelay(FLR_WAIT_INTERVAL);
                        occup = REG_RD(bp, regs->lines_occup);
                        freed = REG_RD(bp, regs->lines_freed);
                } else {
@@ -1087,23 +1013,23 @@ static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
                }
        }
        DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
-          poll_count-cur_cnt, FLR_WAIT_INTERAVAL, regs->pN);
+          poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
 }
 
-static inline u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
-                                    u32 expected, u32 poll_count)
+static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
+                                   u32 expected, u32 poll_count)
 {
        u32 cur_cnt = poll_count;
        u32 val;
 
        while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
-               udelay(FLR_WAIT_INTERAVAL);
+               udelay(FLR_WAIT_INTERVAL);
 
        return val;
 }
 
-static inline int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
-                                                 char *msg, u32 poll_cnt)
+static int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
+                                          char *msg, u32 poll_cnt)
 {
        u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
        if (val != 0) {
@@ -1200,7 +1126,7 @@ static void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
        (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
 
 
-static inline int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
+static int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
                                         u32 poll_cnt)
 {
        struct sdm_op_gen op_gen = {0};
@@ -1210,7 +1136,7 @@ static inline int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
        int ret = 0;
 
        if (REG_RD(bp, comp_addr)) {
-               BNX2X_ERR("Cleanup complete is not 0\n");
+               BNX2X_ERR("Cleanup complete was not 0 before sending\n");
                return 1;
        }
 
@@ -1219,11 +1145,13 @@ static inline int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
        op_gen.command |= OP_GEN_AGG_VECT(clnup_func);
        op_gen.command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
 
-       DP(BNX2X_MSG_SP, "FW Final cleanup\n");
+       DP(BNX2X_MSG_SP, "sending FW Final cleanup\n");
        REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen.command);
 
        if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
                BNX2X_ERR("FW final cleanup did not succeed\n");
+               DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n",
+                  (REG_RD(bp, comp_addr)));
                ret = 1;
        }
        /* Zero completion for nxt FLR */
@@ -1232,16 +1160,11 @@ static inline int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
        return ret;
 }
 
-static inline u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
+static u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
 {
-       int pos;
        u16 status;
 
-       pos = pci_pcie_cap(dev);
-       if (!pos)
-               return false;
-
-       pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
+       pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
        return status & PCI_EXP_DEVSTA_TRPND;
 }
 
@@ -1334,6 +1257,7 @@ static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
        REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
 
        /* Poll HW usage counters */
+       DP(BNX2X_MSG_SP, "Polling usage counters\n");
        if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
                return -EBUSY;
 
@@ -1372,14 +1296,17 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp)
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
-       int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
-       int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
+       bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
+       bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
+       bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
 
        if (msix) {
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0);
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+               if (single_msix)
+                       val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
        } else if (msi) {
                val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
@@ -1392,8 +1319,8 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp)
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
 
                if (!CHIP_IS_E1(bp)) {
-                       DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
-                          val, port, addr);
+                       DP(NETIF_MSG_IFUP,
+                          "write %x to HC %d (addr 0x%x)\n", val, port, addr);
 
                        REG_WR(bp, addr, val);
 
@@ -1404,8 +1331,9 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp)
        if (CHIP_IS_E1(bp))
                REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);
 
-       DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
-          val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
+       DP(NETIF_MSG_IFUP,
+          "write %x to HC %d (addr 0x%x) mode %s\n", val, port, addr,
+          (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
 
        REG_WR(bp, addr, val);
        /*
@@ -1435,8 +1363,9 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp)
 static void bnx2x_igu_int_enable(struct bnx2x *bp)
 {
        u32 val;
-       int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
-       int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
+       bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
+       bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
+       bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
 
        val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
 
@@ -1446,6 +1375,9 @@ static void bnx2x_igu_int_enable(struct bnx2x *bp)
                val |= (IGU_PF_CONF_FUNC_EN |
                        IGU_PF_CONF_MSI_MSIX_EN |
                        IGU_PF_CONF_ATTN_BIT_EN);
+
+               if (single_msix)
+                       val |= IGU_PF_CONF_SINGLE_ISR_EN;
        } else if (msi) {
                val &= ~IGU_PF_CONF_INT_LINE_EN;
                val |= (IGU_PF_CONF_FUNC_EN |
@@ -1460,11 +1392,14 @@ static void bnx2x_igu_int_enable(struct bnx2x *bp)
                        IGU_PF_CONF_SINGLE_ISR_EN);
        }
 
-       DP(NETIF_MSG_INTR, "write 0x%x to IGU  mode %s\n",
+       DP(NETIF_MSG_IFUP, "write 0x%x to IGU  mode %s\n",
           val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
 
        REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
 
+       if (val & IGU_PF_CONF_INT_LINE_EN)
+               pci_intx(bp->pdev, true);
+
        barrier();
 
        /* init leading/trailing edge */
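The new pci_intx() call makes sure the legacy interrupt line is actually unmasked at the PCI command register whenever the IGU is programmed for INTx delivery. Roughly what pci_intx(pdev, true) does, sketched by hand for illustration (intx_unmask_by_hand() is not a real kernel helper):

    #include <linux/pci.h>

    /* clear the INTx-disable bit in PCI_COMMAND, as pci_intx(pdev, true) would */
    static void intx_unmask_by_hand(struct pci_dev *pdev)
    {
        u16 cmd;

        pci_read_config_word(pdev, PCI_COMMAND, &cmd);
        if (cmd & PCI_COMMAND_INTX_DISABLE)
            pci_write_config_word(pdev, PCI_COMMAND,
                                  cmd & ~PCI_COMMAND_INTX_DISABLE);
    }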
@@ -1518,7 +1453,8 @@ static void bnx2x_hc_int_disable(struct bnx2x *bp)
                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
 
-       DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
+       DP(NETIF_MSG_IFDOWN,
+          "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);
 
        /* flush all outstanding writes */
@@ -1537,7 +1473,7 @@ static void bnx2x_igu_int_disable(struct bnx2x *bp)
                 IGU_PF_CONF_INT_LINE_EN |
                 IGU_PF_CONF_ATTN_BIT_EN);
 
-       DP(NETIF_MSG_INTR, "write %x to IGU\n", val);
+       DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);
 
        /* flush all outstanding writes */
        mmiowb();
@@ -1596,11 +1532,12 @@ static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
        int func = BP_FUNC(bp);
        u32 hw_lock_control_reg;
 
-       DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
+       DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
+          "Trying to take a lock on resource %d\n", resource);
 
        /* Validating that the resource is within range */
        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
-               DP(NETIF_MSG_HW,
+               DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
                   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
                   resource, HW_LOCK_MAX_RESOURCE_VALUE);
                return false;
@@ -1618,7 +1555,8 @@ static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
        if (lock_status & resource_bit)
                return true;
 
-       DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
+       DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
+          "Failed to get a lock on resource %d\n", resource);
        return false;
 }
 
@@ -1630,7 +1568,7 @@ static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
  * Returns the recovery leader resource id according to the engine this function
  * belongs to. Currently only only 2 engines is supported.
  */
-static inline int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
+static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
 {
        if (BP_PATH(bp))
                return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
@@ -1643,9 +1581,9 @@ static inline int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
  *
  * @bp: driver handle
  *
- * Tries to aquire a leader lock for cuurent engine.
+ * Tries to aquire a leader lock for current engine.
  */
-static inline bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
+static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
 {
        return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
 }
@@ -1660,7 +1598,7 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
-       struct bnx2x_queue_sp_obj *q_obj = &fp->q_obj;
+       struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
 
        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
@@ -1679,7 +1617,7 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
                break;
 
        case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
-               DP(NETIF_MSG_IFUP, "got MULTI[%d] tx-only setup ramrod\n", cid);
+               DP(BNX2X_MSG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
                drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
                break;
 
@@ -1726,6 +1664,27 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
 
        DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));
 
+       if ((drv_cmd == BNX2X_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) &&
+           (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) {
+               /* if Q update ramrod is completed for last Q in AFEX vif set
+                * flow, then ACK MCP at the end
+                *
+                * mark pending ACK to MCP bit.
+                * prevent case that both bits are cleared.
+                * At the end of load/unload driver checks that
+                * sp_state is cleaerd, and this order prevents
+                * races
+                */
+               smp_mb__before_clear_bit();
+               set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
+               wmb();
+               clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
+               smp_mb__after_clear_bit();
+
+               /* schedule workqueue to send ack to MCP */
+               queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
+       }
+
        return;
 }
 
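The AFEX branch above sets the "ACK owed to MCP" bit before clearing the "queue update pending" bit, with barriers around the transition, so the load/unload code that checks sp_state never sees both bits clear while an ACK is still outstanding. A hedged sketch of that hand-off ordering (the bit names and state word are illustrative stand-ins for sp_state):

    #include <linux/bitops.h>

    #define PENDING_BIT 0       /* work still outstanding */
    #define ACK_BIT     1       /* ACK owed to the management FW */

    static unsigned long state;

    /* move from "pending" to "ack owed" without both bits ever being clear */
    static void complete_and_request_ack(void)
    {
        smp_mb__before_clear_bit();
        set_bit(ACK_BIT, &state);           /* raise the new bit first */
        wmb();
        clear_bit(PENDING_BIT, &state);     /* only then drop the old one */
        smp_mb__after_clear_bit();
    }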
@@ -1766,7 +1725,7 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
                        /* Handle Rx or Tx according to SB id */
                        prefetch(fp->rx_cons_sb);
                        for_each_cos_in_tx_queue(fp, cos)
-                               prefetch(fp->txdata[cos].tx_cons_sb);
+                               prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
                        prefetch(&fp->sb_running_index[SM_RX_ID]);
                        napi_schedule(&bnx2x_fp(bp, fp->index, napi));
                        status &= ~mask;
@@ -1821,8 +1780,7 @@ int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
 
        /* Validating that the resource is within range */
        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
-               DP(NETIF_MSG_HW,
-                  "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
+               BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
                   resource, HW_LOCK_MAX_RESOURCE_VALUE);
                return -EINVAL;
        }
@@ -1837,7 +1795,7 @@ int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
        /* Validating that the resource is not already taken */
        lock_status = REG_RD(bp, hw_lock_control_reg);
        if (lock_status & resource_bit) {
-               DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
+               BNX2X_ERR("lock_status 0x%x  resource_bit 0x%x\n",
                   lock_status, resource_bit);
                return -EEXIST;
        }
@@ -1852,7 +1810,7 @@ int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
 
                msleep(5);
        }
-       DP(NETIF_MSG_HW, "Timeout\n");
+       BNX2X_ERR("Timeout\n");
        return -EAGAIN;
 }
 
@@ -1868,12 +1826,9 @@ int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
        int func = BP_FUNC(bp);
        u32 hw_lock_control_reg;
 
-       DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
-
        /* Validating that the resource is within range */
        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
-               DP(NETIF_MSG_HW,
-                  "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
+               BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
                   resource, HW_LOCK_MAX_RESOURCE_VALUE);
                return -EINVAL;
        }
@@ -1888,7 +1843,7 @@ int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
        /* Validating that the resource is currently taken */
        lock_status = REG_RD(bp, hw_lock_control_reg);
        if (!(lock_status & resource_bit)) {
-               DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
+               BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. unlock was called but lock wasn't taken!\n",
                   lock_status, resource_bit);
                return -EFAULT;
        }
@@ -1949,7 +1904,8 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
 
        switch (mode) {
        case MISC_REGISTERS_GPIO_OUTPUT_LOW:
-               DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
+               DP(NETIF_MSG_LINK,
+                  "Set GPIO %d (shift %d) -> output low\n",
                   gpio_num, gpio_shift);
                /* clear FLOAT and set CLR */
                gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
@@ -1957,7 +1913,8 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
                break;
 
        case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
-               DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
+               DP(NETIF_MSG_LINK,
+                  "Set GPIO %d (shift %d) -> output high\n",
                   gpio_num, gpio_shift);
                /* clear FLOAT and set SET */
                gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
@@ -1965,7 +1922,8 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
                break;
 
        case MISC_REGISTERS_GPIO_INPUT_HI_Z:
-               DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
+               DP(NETIF_MSG_LINK,
+                  "Set GPIO %d (shift %d) -> input\n",
                   gpio_num, gpio_shift);
                /* set FLOAT */
                gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
@@ -2049,16 +2007,18 @@ int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
 
        switch (mode) {
        case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
-               DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
-                                  "output low\n", gpio_num, gpio_shift);
+               DP(NETIF_MSG_LINK,
+                  "Clear GPIO INT %d (shift %d) -> output low\n",
+                  gpio_num, gpio_shift);
                /* clear SET and set CLR */
                gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
                gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
                break;
 
        case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
-               DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
-                                  "output high\n", gpio_num, gpio_shift);
+               DP(NETIF_MSG_LINK,
+                  "Set GPIO INT %d (shift %d) -> output high\n",
+                  gpio_num, gpio_shift);
                /* clear CLR and set SET */
                gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
                gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
@@ -2091,21 +2051,21 @@ static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
 
        switch (mode) {
        case MISC_REGISTERS_SPIO_OUTPUT_LOW:
-               DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
+               DP(NETIF_MSG_HW, "Set SPIO %d -> output low\n", spio_num);
                /* clear FLOAT and set CLR */
                spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
                spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
                break;
 
        case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
-               DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
+               DP(NETIF_MSG_HW, "Set SPIO %d -> output high\n", spio_num);
                /* clear FLOAT and set SET */
                spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
                spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
                break;
 
        case MISC_REGISTERS_SPIO_INPUT_HI_Z:
-               DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
+               DP(NETIF_MSG_HW, "Set SPIO %d -> input\n", spio_num);
                /* set FLOAT */
                spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
                break;
@@ -2179,6 +2139,11 @@ u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
                        }
                }
 
+               if (load_mode == LOAD_LOOPBACK_EXT) {
+                       struct link_params *lp = &bp->link_params;
+                       lp->loopback_mode = LOOPBACK_EXT;
+               }
+
                rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
 
                bnx2x_release_phy_lock(bp);
@@ -2235,40 +2200,6 @@ u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
        return rc;
 }
 
-static void bnx2x_init_port_minmax(struct bnx2x *bp)
-{
-       u32 r_param = bp->link_vars.line_speed / 8;
-       u32 fair_periodic_timeout_usec;
-       u32 t_fair;
-
-       memset(&(bp->cmng.rs_vars), 0,
-              sizeof(struct rate_shaping_vars_per_port));
-       memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
-
-       /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
-       bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
-
-       /* this is the threshold below which no timer arming will occur
-          1.25 coefficient is for the threshold to be a little bigger
-          than the real time, to compensate for timer in-accuracy */
-       bp->cmng.rs_vars.rs_threshold =
-                               (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
-
-       /* resolution of fairness timer */
-       fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
-       /* for 10G it is 1000usec. for 1G it is 10000usec. */
-       t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
-
-       /* this is the threshold below which we won't arm the timer anymore */
-       bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
-
-       /* we multiply by 1e3/8 to get bytes/msec.
-          We don't want the credits to pass a credit
-          of the t_fair*FAIR_MEM (algorithm resolution) */
-       bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
-       /* since each tick is 4 usec */
-       bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
-}
 
 /* Calculates the sum of vn_min_rates.
    It's needed for further normalizing of the min_rates.
@@ -2279,12 +2210,12 @@ static void bnx2x_init_port_minmax(struct bnx2x *bp)
      In the later case fainess algorithm should be deactivated.
      If not all min_rates are zero then those that are zeroes will be set to 1.
  */
-static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
+static void bnx2x_calc_vn_min(struct bnx2x *bp,
+                                     struct cmng_init_input *input)
 {
        int all_zero = 1;
        int vn;
 
-       bp->vn_weight_sum = 0;
        for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
                u32 vn_cfg = bp->mf_config[vn];
                u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
@@ -2292,106 +2223,56 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
 
                /* Skip hidden vns */
                if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
-                       continue;
-
+                       vn_min_rate = 0;
                /* If min rate is zero - set it to 1 */
-               if (!vn_min_rate)
+               else if (!vn_min_rate)
                        vn_min_rate = DEF_MIN_RATE;
                else
                        all_zero = 0;
 
-               bp->vn_weight_sum += vn_min_rate;
+               input->vnic_min_rate[vn] = vn_min_rate;
        }
 
        /* if ETS or all min rates are zeros - disable fairness */
        if (BNX2X_IS_ETS_ENABLED(bp)) {
-               bp->cmng.flags.cmng_enables &=
+               input->flags.cmng_enables &=
                                        ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
                DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
        } else if (all_zero) {
-               bp->cmng.flags.cmng_enables &=
+               input->flags.cmng_enables &=
                                        ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
-               DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
-                  "  fairness will be disabled\n");
+               DP(NETIF_MSG_IFUP,
+                  "All MIN values are zeroes fairness will be disabled\n");
        } else
-               bp->cmng.flags.cmng_enables |=
+               input->flags.cmng_enables |=
                                        CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
 }
 
-static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
+static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
+                                   struct cmng_init_input *input)
 {
-       struct rate_shaping_vars_per_vn m_rs_vn;
-       struct fairness_vars_per_vn m_fair_vn;
+       u16 vn_max_rate;
        u32 vn_cfg = bp->mf_config[vn];
-       int func = func_by_vn(bp, vn);
-       u16 vn_min_rate, vn_max_rate;
-       int i;
 
-       /* If function is hidden - set min and max to zeroes */
-       if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
-               vn_min_rate = 0;
+       if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
                vn_max_rate = 0;
-
-       } else {
+       else {
                u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
 
-               vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
-                               FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
-               /* If fairness is enabled (not all min rates are zeroes) and
-                  if current min rate is zero - set it to 1.
-                  This is a requirement of the algorithm. */
-               if (bp->vn_weight_sum && (vn_min_rate == 0))
-                       vn_min_rate = DEF_MIN_RATE;
-
-               if (IS_MF_SI(bp))
+               if (IS_MF_SI(bp)) {
                        /* maxCfg in percents of linkspeed */
                        vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
-               else
+               } else /* SD modes */
                        /* maxCfg is absolute in 100Mb units */
                        vn_max_rate = maxCfg * 100;
        }
 
-       DP(NETIF_MSG_IFUP,
-          "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
-          func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
-
-       memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
-       memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
-
-       /* global vn counter - maximal Mbps for this vn */
-       m_rs_vn.vn_counter.rate = vn_max_rate;
-
-       /* quota - number of bytes transmitted in this period */
-       m_rs_vn.vn_counter.quota =
-                               (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
-
-       if (bp->vn_weight_sum) {
-               /* credit for each period of the fairness algorithm:
-                  number of bytes in T_FAIR (the vn share the port rate).
-                  vn_weight_sum should not be larger than 10000, thus
-                  T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
-                  than zero */
-               m_fair_vn.vn_credit_delta =
-                       max_t(u32, (vn_min_rate * (T_FAIR_COEF /
-                                                  (8 * bp->vn_weight_sum))),
-                             (bp->cmng.fair_vars.fair_threshold +
-                                                       MIN_ABOVE_THRESH));
-               DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
-                  m_fair_vn.vn_credit_delta);
-       }
-
-       /* Store it to internal memory */
-       for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
-               REG_WR(bp, BAR_XSTRORM_INTMEM +
-                      XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
-                      ((u32 *)(&m_rs_vn))[i]);
-
-       for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
-               REG_WR(bp, BAR_XSTRORM_INTMEM +
-                      XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
-                      ((u32 *)(&m_fair_vn))[i]);
+       DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
+
+       input->vnic_max_rate[vn] = vn_max_rate;
 }
 
+
 static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
 {
        if (CHIP_REV_IS_SLOW(bp))
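bnx2x_calc_vn_max() above interprets the same maxCfg field two ways: in switch-independent (SI) mode it is a percentage of the current link speed, while in the SD modes it is an absolute bandwidth in 100 Mb/s units. A standalone arithmetic check of both conversions (the sample numbers are arbitrary):

    #include <stdio.h>

    static unsigned int vn_max_rate(unsigned int line_speed_mbps,
                                    unsigned int max_cfg, int is_si)
    {
        if (is_si)
            return line_speed_mbps * max_cfg / 100;  /* percent of link speed */
        return max_cfg * 100;                        /* absolute, 100 Mb units */
    }

    int main(void)
    {
        /* SI: 25% of a 10G link -> 2500 Mb/s; SD: maxCfg 25 -> 2500 Mb/s */
        printf("SI: %u Mb/s\n", vn_max_rate(10000, 25, 1));
        printf("SD: %u Mb/s\n", vn_max_rate(10000, 25, 0));
        return 0;
    }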
@@ -2429,38 +2310,42 @@ void bnx2x_read_mf_cfg(struct bnx2x *bp)
                bp->mf_config[vn] =
                        MF_CFG_RD(bp, func_mf_config[func].config);
        }
+       if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
+               DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
+               bp->flags |= MF_FUNC_DIS;
+       } else {
+               DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
+               bp->flags &= ~MF_FUNC_DIS;
+       }
 }
 
 static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
 {
+       struct cmng_init_input input;
+       memset(&input, 0, sizeof(struct cmng_init_input));
+
+       input.port_rate = bp->link_vars.line_speed;
 
        if (cmng_type == CMNG_FNS_MINMAX) {
                int vn;
 
-               /* clear cmng_enables */
-               bp->cmng.flags.cmng_enables = 0;
-
                /* read mf conf from shmem */
                if (read_cfg)
                        bnx2x_read_mf_cfg(bp);
 
-               /* Init rate shaping and fairness contexts */
-               bnx2x_init_port_minmax(bp);
-
                /* vn_weight_sum and enable fairness if not 0 */
-               bnx2x_calc_vn_weight_sum(bp);
+               bnx2x_calc_vn_min(bp, &input);
 
                /* calculate and set min-max rate for each vn */
                if (bp->port.pmf)
                        for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
-                               bnx2x_init_vn_minmax(bp, vn);
+                               bnx2x_calc_vn_max(bp, vn, &input);
 
                /* always enable rate shaping and fairness */
-               bp->cmng.flags.cmng_enables |=
+               input.flags.cmng_enables |=
                                        CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
-               if (!bp->vn_weight_sum)
-                       DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
-                                  "  fairness will be disabled\n");
+
+               bnx2x_init_cmng(&input, &bp->cmng);
                return;
        }
 
@@ -2469,6 +2354,35 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
           "rate shaping and fairness are disabled\n");
 }
 
+static void storm_memset_cmng(struct bnx2x *bp,
+                             struct cmng_init *cmng,
+                             u8 port)
+{
+       int vn;
+       size_t size = sizeof(struct cmng_struct_per_port);
+
+       u32 addr = BAR_XSTRORM_INTMEM +
+                       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
+
+       __storm_memset_struct(bp, addr, size, (u32 *)&cmng->port);
+
+       for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
+               int func = func_by_vn(bp, vn);
+
+               addr = BAR_XSTRORM_INTMEM +
+                      XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func);
+               size = sizeof(struct rate_shaping_vars_per_vn);
+               __storm_memset_struct(bp, addr, size,
+                                     (u32 *)&cmng->vnic.vnic_max_rate[vn]);
+
+               addr = BAR_XSTRORM_INTMEM +
+                      XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func);
+               size = sizeof(struct fairness_vars_per_vn);
+               __storm_memset_struct(bp, addr, size,
+                                     (u32 *)&cmng->vnic.vnic_min_rate[vn]);
+       }
+}
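For reference, storm_memset_cmng() replaces the removed open-coded REG_WR loops with __storm_memset_struct(), which copies a structure into internal memory one 32-bit word at a time, roughly as in this sketch (hypothetical helper, assumes the struct size is a multiple of 4):

	static void example_storm_memset_struct(struct bnx2x *bp, u32 addr,
						size_t size, const u32 *data)
	{
		size_t i;

		for (i = 0; i < size / 4; i++)
			REG_WR(bp, addr + i * 4, data[i]);
	}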
+
 /* This function is called upon link interrupt */
 static void bnx2x_link_attn(struct bnx2x *bp)
 {
@@ -2541,13 +2455,197 @@ void bnx2x__link_status_update(struct bnx2x *bp)
        bnx2x_link_report(bp);
 }
 
+static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
+                                 u16 vlan_val, u8 allowed_prio)
+{
+       struct bnx2x_func_state_params func_params = {0};
+       struct bnx2x_func_afex_update_params *f_update_params =
+               &func_params.params.afex_update;
+
+       func_params.f_obj = &bp->func_obj;
+       func_params.cmd = BNX2X_F_CMD_AFEX_UPDATE;
+
+       /* no need to wait for RAMROD completion, so don't
+        * set RAMROD_COMP_WAIT flag
+        */
+
+       f_update_params->vif_id = vifid;
+       f_update_params->afex_default_vlan = vlan_val;
+       f_update_params->allowed_priorities = allowed_prio;
+
+       /* if ramrod can not be sent, respond to MCP immediately */
+       if (bnx2x_func_state_change(bp, &func_params) < 0)
+               bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
+
+       return 0;
+}
+
+static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
+                                         u16 vif_index, u8 func_bit_map)
+{
+       struct bnx2x_func_state_params func_params = {0};
+       struct bnx2x_func_afex_viflists_params *update_params =
+               &func_params.params.afex_viflists;
+       int rc;
+       u32 drv_msg_code;
+
+       /* validate only LIST_SET and LIST_GET are received from switch */
+       if ((cmd_type != VIF_LIST_RULE_GET) && (cmd_type != VIF_LIST_RULE_SET))
+               BNX2X_ERR("BUG! afex_handle_vif_list_cmd invalid type 0x%x\n",
+                         cmd_type);
+
+       func_params.f_obj = &bp->func_obj;
+       func_params.cmd = BNX2X_F_CMD_AFEX_VIFLISTS;
+
+       /* set parameters according to cmd_type */
+       update_params->afex_vif_list_command = cmd_type;
+       update_params->vif_list_index = cpu_to_le16(vif_index);
+       update_params->func_bit_map =
+               (cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map;
+       update_params->func_to_clear = 0;
+       drv_msg_code =
+               (cmd_type == VIF_LIST_RULE_GET) ?
+               DRV_MSG_CODE_AFEX_LISTGET_ACK :
+               DRV_MSG_CODE_AFEX_LISTSET_ACK;
+
+       /* if ramrod can not be sent, respond to MCP immediately for
+        * SET and GET requests (others are not triggered from MCP)
+        */
+       rc = bnx2x_func_state_change(bp, &func_params);
+       if (rc < 0)
+               bnx2x_fw_command(bp, drv_msg_code, 0);
+
+       return 0;
+}
+
+static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd)
+{
+       struct afex_stats afex_stats;
+       u32 func = BP_ABS_FUNC(bp);
+       u32 mf_config;
+       u16 vlan_val;
+       u32 vlan_prio;
+       u16 vif_id;
+       u8 allowed_prio;
+       u8 vlan_mode;
+       u32 addr_to_write, vifid, addrs, stats_type, i;
+
+       if (cmd & DRV_STATUS_AFEX_LISTGET_REQ) {
+               vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
+               DP(BNX2X_MSG_MCP,
+                  "afex: got MCP req LISTGET_REQ for vifid 0x%x\n", vifid);
+               bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0);
+       }
+
+       if (cmd & DRV_STATUS_AFEX_LISTSET_REQ) {
+               vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
+               addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]);
+               DP(BNX2X_MSG_MCP,
+                  "afex: got MCP req LISTSET_REQ for vifid 0x%x addrs 0x%x\n",
+                  vifid, addrs);
+               bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid,
+                                              addrs);
+       }
+
+       if (cmd & DRV_STATUS_AFEX_STATSGET_REQ) {
+               addr_to_write = SHMEM2_RD(bp,
+                       afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]);
+               stats_type = SHMEM2_RD(bp,
+                       afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
+
+               DP(BNX2X_MSG_MCP,
+                  "afex: got MCP req STATSGET_REQ, write to addr 0x%x\n",
+                  addr_to_write);
+
+               bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type);
+
+               /* write response to scratchpad, for MCP */
+               for (i = 0; i < (sizeof(struct afex_stats)/sizeof(u32)); i++)
+                       REG_WR(bp, addr_to_write + i*sizeof(u32),
+                              *(((u32 *)(&afex_stats))+i));
+
+               /* send ack message to MCP */
+               bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0);
+       }
+
+       if (cmd & DRV_STATUS_AFEX_VIFSET_REQ) {
+               mf_config = MF_CFG_RD(bp, func_mf_config[func].config);
+               bp->mf_config[BP_VN(bp)] = mf_config;
+               DP(BNX2X_MSG_MCP,
+                  "afex: got MCP req VIFSET_REQ, mf_config 0x%x\n",
+                  mf_config);
+
+               /* if VIF_SET is "enabled" */
+               if (!(mf_config & FUNC_MF_CFG_FUNC_DISABLED)) {
+                       /* set rate limit directly to internal RAM */
+                       struct cmng_init_input cmng_input;
+                       struct rate_shaping_vars_per_vn m_rs_vn;
+                       size_t size = sizeof(struct rate_shaping_vars_per_vn);
+                       u32 addr = BAR_XSTRORM_INTMEM +
+                           XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp));
+
+                       bp->mf_config[BP_VN(bp)] = mf_config;
+
+                       bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input);
+                       m_rs_vn.vn_counter.rate =
+                               cmng_input.vnic_max_rate[BP_VN(bp)];
+                       m_rs_vn.vn_counter.quota =
+                               (m_rs_vn.vn_counter.rate *
+                                RS_PERIODIC_TIMEOUT_USEC) / 8;
+
+                       __storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn);
+
+                       /* read relevant values from mf_cfg struct in shmem */
+                       vif_id =
+                               (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
+                                FUNC_MF_CFG_E1HOV_TAG_MASK) >>
+                               FUNC_MF_CFG_E1HOV_TAG_SHIFT;
+                       vlan_val =
+                               (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
+                                FUNC_MF_CFG_AFEX_VLAN_MASK) >>
+                               FUNC_MF_CFG_AFEX_VLAN_SHIFT;
+                       vlan_prio = (mf_config &
+                                    FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
+                                   FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT;
+                       vlan_val |= (vlan_prio << VLAN_PRIO_SHIFT);
+                       vlan_mode =
+                               (MF_CFG_RD(bp,
+                                          func_mf_config[func].afex_config) &
+                                FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
+                               FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT;
+                       allowed_prio =
+                               (MF_CFG_RD(bp,
+                                          func_mf_config[func].afex_config) &
+                                FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
+                               FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT;
+
+                       /* send ramrod to FW, return in case of failure */
+                       if (bnx2x_afex_func_update(bp, vif_id, vlan_val,
+                                                  allowed_prio))
+                               return;
+
+                       bp->afex_def_vlan_tag = vlan_val;
+                       bp->afex_vlan_mode = vlan_mode;
+               } else {
+                       /* notify link down because the function has been disabled */
+                       bnx2x_link_report(bp);
+
+                       /* send INVALID VIF ramrod to FW */
+                       bnx2x_afex_func_update(bp, 0xFFFF, 0, 0);
+
+                       /* Reset the default afex VLAN */
+                       bp->afex_def_vlan_tag = -1;
+               }
+       }
+}
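Two computations in the VIFSET path above, condensed into hypothetical helpers for reference (not part of the patch):

	/* rate-shaping quota: bytes a vnic may transmit per RS period */
	static u32 example_quota(u32 rate_mbps)
	{
		return (rate_mbps * RS_PERIODIC_TIMEOUT_USEC) / 8;
	}

	/* default afex vlan tag: VID from mf_cfg plus the transmit priority */
	static u16 example_afex_vlan_tag(u16 vid, u32 prio)
	{
		return vid | (prio << VLAN_PRIO_SHIFT);
	}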
+
 static void bnx2x_pmf_update(struct bnx2x *bp)
 {
        int port = BP_PORT(bp);
        u32 val;
 
        bp->port.pmf = 1;
-       DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
+       DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf);
 
        /*
         * We need the mb() to ensure the ordering between the writing to
@@ -2624,14 +2722,17 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
        return rc;
 }
 
-static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp)
+
+static void storm_memset_func_cfg(struct bnx2x *bp,
+                                struct tstorm_eth_function_common_config *tcfg,
+                                u16 abs_fid)
 {
-#ifdef BCM_CNIC
-       /* Statistics are not supported for CNIC Clients at the moment */
-       if (IS_FCOE_FP(fp))
-               return false;
-#endif
-       return true;
+       size_t size = sizeof(struct tstorm_eth_function_common_config);
+
+       u32 addr = BAR_TSTRORM_INTMEM +
+                       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
+
+       __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
 }
 
 void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
@@ -2663,9 +2764,9 @@ void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
  *
  * Return the flags that are common for the Tx-only and not normal connections.
  */
-static inline unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
-                                                  struct bnx2x_fastpath *fp,
-                                                  bool zero_stats)
+static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
+                                           struct bnx2x_fastpath *fp,
+                                           bool zero_stats)
 {
        unsigned long flags = 0;
 
@@ -2676,18 +2777,18 @@ static inline unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
         *  parent connection). The statistics are zeroed when the parent
         *  connection is initialized.
         */
-       if (stat_counter_valid(bp, fp)) {
-               __set_bit(BNX2X_Q_FLG_STATS, &flags);
-               if (zero_stats)
-                       __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
-       }
+
+       __set_bit(BNX2X_Q_FLG_STATS, &flags);
+       if (zero_stats)
+               __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
+
 
        return flags;
 }
 
-static inline unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
-                                             struct bnx2x_fastpath *fp,
-                                             bool leading)
+static unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
+                                      struct bnx2x_fastpath *fp,
+                                      bool leading)
 {
        unsigned long flags = 0;
 
@@ -2695,12 +2796,17 @@ static inline unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
        if (IS_MF_SD(bp))
                __set_bit(BNX2X_Q_FLG_OV, &flags);
 
-       if (IS_FCOE_FP(fp))
+       if (IS_FCOE_FP(fp)) {
                __set_bit(BNX2X_Q_FLG_FCOE, &flags);
+               /* For FCoE - force usage of default priority (for afex) */
+               __set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags);
+       }
 
        if (!fp->disable_tpa) {
                __set_bit(BNX2X_Q_FLG_TPA, &flags);
                __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
+               if (fp->mode == TPA_MODE_GRO)
+                       __set_bit(BNX2X_Q_FLG_TPA_GRO, &flags);
        }
 
        if (leading) {
@@ -2711,6 +2817,10 @@ static inline unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
        /* Always set HW VLAN stripping */
        __set_bit(BNX2X_Q_FLG_VLAN, &flags);
 
+       /* configure silent vlan removal */
+       if (IS_MF_AFEX(bp))
+               __set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags);
+
 
        return flags | bnx2x_get_common_flags(bp, fp, true);
 }
@@ -2797,6 +2907,7 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
        rxq_init->sge_buf_sz = sge_sz;
        rxq_init->max_sges_pkt = max_sge;
        rxq_init->rss_engine_id = BP_FUNC(bp);
+       rxq_init->mcast_engine_id = BP_FUNC(bp);
 
        /* Maximum number of simultaneous TPA aggregation for this Queue.
         *
@@ -2812,13 +2923,20 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
                rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
        else
                rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
+       /* configure silent vlan removal
+        * if multi function mode is afex, then mask default vlan
+        */
+       if (IS_MF_AFEX(bp)) {
+               rxq_init->silent_removal_value = bp->afex_def_vlan_tag;
+               rxq_init->silent_removal_mask = VLAN_VID_MASK;
+       }
 }
 
 static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
        struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
        u8 cos)
 {
-       txq_init->dscr_map = fp->txdata[cos].tx_desc_mapping;
+       txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping;
        txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
        txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
        txq_init->fw_sb_id = fp->fw_sb_id;
@@ -2921,12 +3039,151 @@ static void bnx2x_e1h_enable(struct bnx2x *bp)
         */
 }
 
+#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
+
+static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
+{
+       struct eth_stats_info *ether_stat =
+               &bp->slowpath->drv_info_to_mcp.ether_stat;
+
+       /* leave last char as NULL */
+       memcpy(ether_stat->version, DRV_MODULE_VERSION,
+              ETH_STAT_INFO_VERSION_LEN - 1);
+
+       bp->sp_objs[0].mac_obj.get_n_elements(bp, &bp->sp_objs[0].mac_obj,
+                                       DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
+                                       ether_stat->mac_local);
+
+       ether_stat->mtu_size = bp->dev->mtu;
+
+       if (bp->dev->features & NETIF_F_RXCSUM)
+               ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
+       if (bp->dev->features & NETIF_F_TSO)
+               ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
+       ether_stat->feature_flags |= bp->common.boot_mode;
+
+       ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0;
+
+       ether_stat->txq_size = bp->tx_ring_size;
+       ether_stat->rxq_size = bp->rx_ring_size;
+}
+
+static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
+{
+#ifdef BCM_CNIC
+       struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
+       struct fcoe_stats_info *fcoe_stat =
+               &bp->slowpath->drv_info_to_mcp.fcoe_stat;
+
+       memcpy(fcoe_stat->mac_local + MAC_LEADING_ZERO_CNT,
+              bp->fip_mac, ETH_ALEN);
+
+       fcoe_stat->qos_priority =
+               app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
+
+       /* insert FCoE stats from ramrod response */
+       if (!NO_FCOE(bp)) {
+               struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
+                       &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
+                       tstorm_queue_statistics;
+
+               struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
+                       &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
+                       xstorm_queue_statistics;
+
+               struct fcoe_statistics_params *fw_fcoe_stat =
+                       &bp->fw_stats_data->fcoe;
+
+               ADD_64(fcoe_stat->rx_bytes_hi, 0, fcoe_stat->rx_bytes_lo,
+                      fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
+
+               ADD_64(fcoe_stat->rx_bytes_hi,
+                      fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
+                      fcoe_stat->rx_bytes_lo,
+                      fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
+
+               ADD_64(fcoe_stat->rx_bytes_hi,
+                      fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
+                      fcoe_stat->rx_bytes_lo,
+                      fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
+
+               ADD_64(fcoe_stat->rx_bytes_hi,
+                      fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
+                      fcoe_stat->rx_bytes_lo,
+                      fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
+
+               ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
+                      fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
+
+               ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
+                      fcoe_q_tstorm_stats->rcv_ucast_pkts);
+
+               ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
+                      fcoe_q_tstorm_stats->rcv_bcast_pkts);
+
+               ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
+                      fcoe_q_tstorm_stats->rcv_mcast_pkts);
+
+               ADD_64(fcoe_stat->tx_bytes_hi, 0, fcoe_stat->tx_bytes_lo,
+                      fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
+
+               ADD_64(fcoe_stat->tx_bytes_hi,
+                      fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
+                      fcoe_stat->tx_bytes_lo,
+                      fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
+
+               ADD_64(fcoe_stat->tx_bytes_hi,
+                      fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
+                      fcoe_stat->tx_bytes_lo,
+                      fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
+
+               ADD_64(fcoe_stat->tx_bytes_hi,
+                      fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
+                      fcoe_stat->tx_bytes_lo,
+                      fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
+
+               ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
+                      fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
+
+               ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
+                      fcoe_q_xstorm_stats->ucast_pkts_sent);
+
+               ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
+                      fcoe_q_xstorm_stats->bcast_pkts_sent);
+
+               ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
+                      fcoe_q_xstorm_stats->mcast_pkts_sent);
+       }
+
+       /* ask L5 driver to add data to the struct */
+       bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
+#endif
+}
+
+static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
+{
+#ifdef BCM_CNIC
+       struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
+       struct iscsi_stats_info *iscsi_stat =
+               &bp->slowpath->drv_info_to_mcp.iscsi_stat;
+
+       memcpy(iscsi_stat->mac_local + MAC_LEADING_ZERO_CNT,
+              bp->cnic_eth_dev.iscsi_mac, ETH_ALEN);
+
+       iscsi_stat->qos_priority =
+               app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
+
+       /* ask L5 driver to add data to the struct */
+       bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
+#endif
+}
+
 /* called due to MCP event (on pmf):
  *     reread new bandwidth configuration
  *     configure FW
 *     notify other functions about the change
  */
-static inline void bnx2x_config_mf_bw(struct bnx2x *bp)
+static void bnx2x_config_mf_bw(struct bnx2x *bp)
 {
        if (bp->link_vars.link_up) {
                bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
@@ -2935,12 +3192,62 @@ static inline void bnx2x_config_mf_bw(struct bnx2x *bp)
        storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
 }
 
-static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
+static void bnx2x_set_mf_bw(struct bnx2x *bp)
 {
        bnx2x_config_mf_bw(bp);
        bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
 }
 
+static void bnx2x_handle_eee_event(struct bnx2x *bp)
+{
+       DP(BNX2X_MSG_MCP, "EEE - LLDP event\n");
+       bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
+}
+
+static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
+{
+       enum drv_info_opcode op_code;
+       u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control);
+
+       /* if drv_info version supported by MFW doesn't match - send NACK */
+       if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
+               bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
+               return;
+       }
+
+       op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
+                 DRV_INFO_CONTROL_OP_CODE_SHIFT;
+
+       memset(&bp->slowpath->drv_info_to_mcp, 0,
+              sizeof(union drv_info_to_mcp));
+
+       switch (op_code) {
+       case ETH_STATS_OPCODE:
+               bnx2x_drv_info_ether_stat(bp);
+               break;
+       case FCOE_STATS_OPCODE:
+               bnx2x_drv_info_fcoe_stat(bp);
+               break;
+       case ISCSI_STATS_OPCODE:
+               bnx2x_drv_info_iscsi_stat(bp);
+               break;
+       default:
+               /* if op code isn't supported - send NACK */
+               bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
+               return;
+       }
+
+       /* if we got drv_info attn from MFW then these fields are defined in
+        * shmem2 for sure
+        */
+       SHMEM2_WR(bp, drv_info_host_addr_lo,
+               U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
+       SHMEM2_WR(bp, drv_info_host_addr_hi,
+               U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
+
+       bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);
+}
+
 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
 {
        DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
@@ -2953,12 +3260,12 @@ static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
                 * locks
                 */
                if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
-                       DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
+                       DP(BNX2X_MSG_MCP, "mf_cfg function disabled\n");
                        bp->flags |= MF_FUNC_DIS;
 
                        bnx2x_e1h_disable(bp);
                } else {
-                       DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
+                       DP(BNX2X_MSG_MCP, "mf_cfg function enabled\n");
                        bp->flags &= ~MF_FUNC_DIS;
 
                        bnx2x_e1h_enable(bp);
@@ -2978,14 +3285,14 @@ static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
 }
 
 /* must be called under the spq lock */
-static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
+static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
 {
        struct eth_spe *next_spe = bp->spq_prod_bd;
 
        if (bp->spq_prod_bd == bp->spq_last_bd) {
                bp->spq_prod_bd = bp->spq;
                bp->spq_prod_idx = 0;
-               DP(NETIF_MSG_TIMER, "end of spq\n");
+               DP(BNX2X_MSG_SP, "end of spq\n");
        } else {
                bp->spq_prod_bd++;
                bp->spq_prod_idx++;
@@ -2994,7 +3301,7 @@ static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
 }
 
 /* must be called under the spq lock */
-static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
+static void bnx2x_sp_prod_update(struct bnx2x *bp)
 {
        int func = BP_FUNC(bp);
 
@@ -3016,7 +3323,7 @@ static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
  * @cmd:       command to check
  * @cmd_type:  command type
  */
-static inline bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
+static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
 {
        if ((cmd_type == NONE_CONNECTION_TYPE) ||
            (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
@@ -3054,8 +3361,10 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
        bool common = bnx2x_is_contextless_ramrod(command, cmd_type);
 
 #ifdef BNX2X_STOP_ON_ERROR
-       if (unlikely(bp->panic))
+       if (unlikely(bp->panic)) {
+               BNX2X_ERR("Can't post SP when there is panic\n");
                return -EIO;
+       }
 #endif
 
        spin_lock_bh(&bp->spq_lock);
@@ -3102,9 +3411,8 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
                atomic_dec(&bp->cq_spq_left);
 
 
-       DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
-          "SPQE[%x] (%x:%x)  (cmd, common?) (%d,%d)  hw_cid %x  data (%x:%x) "
-          "type(0x%x) left (CQ, EQ) (%x,%x)\n",
+       DP(BNX2X_MSG_SP,
+          "SPQE[%x] (%x:%x)  (cmd, common?) (%d,%d)  hw_cid %x  data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n",
           bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
           (u32)(U64_LO(bp->spq_mapping) +
           (void *)bp->spq_prod_bd - (void *)bp->spq), command, common,
@@ -3149,7 +3457,7 @@ static void bnx2x_release_alr(struct bnx2x *bp)
 #define BNX2X_DEF_SB_ATT_IDX   0x0001
 #define BNX2X_DEF_SB_IDX       0x0002
 
-static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
+static u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
 {
        struct host_sp_status_block *def_sb = bp->def_status_blk;
        u16 rc = 0;
@@ -3281,7 +3589,7 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
        }
 }
 
-static inline void bnx2x_fan_failure(struct bnx2x *bp)
+static void bnx2x_fan_failure(struct bnx2x *bp)
 {
        int port = BP_PORT(bp);
        u32 ext_phy_config;
@@ -3296,9 +3604,8 @@ static inline void bnx2x_fan_failure(struct bnx2x *bp)
                 ext_phy_config);
 
        /* log the failure */
-       netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
-              " the driver to shutdown the card to prevent permanent"
-              " damage.  Please contact OEM Support for assistance\n");
+       netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
+                           "Please contact OEM Support for assistance\n");
 
        /*
         * Schedule device reset (unload)
@@ -3312,7 +3619,7 @@ static inline void bnx2x_fan_failure(struct bnx2x *bp)
 
 }
 
-static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
+static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
 {
        int port = BP_PORT(bp);
        int reg_offset;
@@ -3352,7 +3659,7 @@ static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
        }
 }
 
-static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
+static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
 {
        u32 val;
 
@@ -3383,7 +3690,7 @@ static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
        }
 }
 
-static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
+static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
 {
        u32 val;
 
@@ -3427,7 +3734,7 @@ static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
        }
 }
 
-static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
+static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
 {
        u32 val;
 
@@ -3437,6 +3744,7 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
                        int func = BP_FUNC(bp);
 
                        REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
+                       bnx2x_read_mf_cfg(bp);
                        bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
                                        func_mf_config[BP_ABS_FUNC(bp)].config);
                        val = SHMEM_RD(bp,
@@ -3448,6 +3756,8 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
                        if (val & DRV_STATUS_SET_MF_BW)
                                bnx2x_set_mf_bw(bp);
 
+                       if (val & DRV_STATUS_DRV_INFO_REQ)
+                               bnx2x_handle_drv_info_req(bp);
                        if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
                                bnx2x_pmf_update(bp);
 
@@ -3457,6 +3767,11 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
                                /* start dcbx state machine */
                                bnx2x_dcbx_set_params(bp,
                                        BNX2X_DCBX_STATE_NEG_RECEIVED);
+                       if (val & DRV_STATUS_AFEX_EVENT_MASK)
+                               bnx2x_handle_afex_cmd(bp,
+                                       val & DRV_STATUS_AFEX_EVENT_MASK);
+                       if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
+                               bnx2x_handle_eee_event(bp);
                        if (bp->link_vars.periodic_flags &
                            PERIODIC_FLAGS_LINK_EVENT) {
                                /*  sync with link */
@@ -3539,11 +3854,11 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
  */
 void bnx2x_set_reset_global(struct bnx2x *bp)
 {
-       u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
-
+       u32 val;
+       bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+       val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
        REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
-       barrier();
-       mmiowb();
+       bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
 }
 
 /*
@@ -3551,13 +3866,13 @@ void bnx2x_set_reset_global(struct bnx2x *bp)
  *
  * Should be run under rtnl lock
  */
-static inline void bnx2x_clear_reset_global(struct bnx2x *bp)
+static void bnx2x_clear_reset_global(struct bnx2x *bp)
 {
-       u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
-
+       u32 val;
+       bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+       val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
        REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
-       barrier();
-       mmiowb();
+       bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
 }
 
 /*
@@ -3565,7 +3880,7 @@ static inline void bnx2x_clear_reset_global(struct bnx2x *bp)
  *
  * should be run under rtnl lock
  */
-static inline bool bnx2x_reset_is_global(struct bnx2x *bp)
+static bool bnx2x_reset_is_global(struct bnx2x *bp)
 {
        u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
 
@@ -3578,17 +3893,19 @@ static inline bool bnx2x_reset_is_global(struct bnx2x *bp)
  *
  * Should be run under rtnl lock
  */
-static inline void bnx2x_set_reset_done(struct bnx2x *bp)
+static void bnx2x_set_reset_done(struct bnx2x *bp)
 {
-       u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+       u32 val;
        u32 bit = BP_PATH(bp) ?
                BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
+       bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+       val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
 
        /* Clear the bit */
        val &= ~bit;
        REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
-       barrier();
-       mmiowb();
+
+       bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
 }
 
 /*
@@ -3598,15 +3915,16 @@ static inline void bnx2x_set_reset_done(struct bnx2x *bp)
  */
 void bnx2x_set_reset_in_progress(struct bnx2x *bp)
 {
-       u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+       u32 val;
        u32 bit = BP_PATH(bp) ?
                BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
+       bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+       val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
 
        /* Set the bit */
        val |= bit;
        REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
-       barrier();
-       mmiowb();
+       bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
 }
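For reference, every read-modify-write of BNX2X_RECOVERY_GLOB_REG is now serialized with the dedicated HW lock instead of barrier()/mmiowb(); the common pattern, shown as a sketch with a hypothetical helper:

	static void example_recovery_reg_rmw(struct bnx2x *bp, u32 set, u32 clear)
	{
		u32 val;

		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
		val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
		val |= set;
		val &= ~clear;
		REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
	}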
 
 /*
@@ -3624,25 +3942,28 @@ bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
 }
 
 /*
- * Increment the load counter for the current engine.
+ * set pf load for the current pf.
  *
  * should be run under rtnl lock
  */
-void bnx2x_inc_load_cnt(struct bnx2x *bp)
+void bnx2x_set_pf_load(struct bnx2x *bp)
 {
-       u32 val1, val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+       u32 val1, val;
        u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
                             BNX2X_PATH0_LOAD_CNT_MASK;
        u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
                             BNX2X_PATH0_LOAD_CNT_SHIFT;
 
-       DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
+       bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+       val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+
+       DP(NETIF_MSG_IFUP, "Old GEN_REG_VAL=0x%08x\n", val);
 
        /* get the current counter value */
        val1 = (val & mask) >> shift;
 
-       /* increment... */
-       val1++;
+       /* set bit of that PF */
+       val1 |= (1 << bp->pf_num);
 
        /* clear the old value */
        val &= ~mask;
@@ -3651,34 +3972,35 @@ void bnx2x_inc_load_cnt(struct bnx2x *bp)
        val |= ((val1 << shift) & mask);
 
        REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
-       barrier();
-       mmiowb();
+       bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
 }
 
 /**
- * bnx2x_dec_load_cnt - decrement the load counter
+ * bnx2x_clear_pf_load - clear pf load mark
  *
  * @bp:                driver handle
  *
  * Should be run under rtnl lock.
  * Decrements the load counter for the current engine. Returns
- * the new counter value.
+ * whether other functions are still loaded
  */
-u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
+bool bnx2x_clear_pf_load(struct bnx2x *bp)
 {
-       u32 val1, val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+       u32 val1, val;
        u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
                             BNX2X_PATH0_LOAD_CNT_MASK;
        u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
                             BNX2X_PATH0_LOAD_CNT_SHIFT;
 
-       DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
+       bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+       val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+       DP(NETIF_MSG_IFDOWN, "Old GEN_REG_VAL=0x%08x\n", val);
 
        /* get the current counter value */
        val1 = (val & mask) >> shift;
 
-       /* decrement... */
-       val1--;
+       /* clear bit of that PF */
+       val1 &= ~(1 << bp->pf_num);
 
        /* clear the old value */
        val &= ~mask;
@@ -3687,18 +4009,16 @@ u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
        val |= ((val1 << shift) & mask);
 
        REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
-       barrier();
-       mmiowb();
-
-       return val1;
+       bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+       return val1 != 0;
 }
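For reference, the per-path load field becomes a bitmask of loaded PFs instead of a counter; the update performed by bnx2x_set_pf_load()/bnx2x_clear_pf_load() boils down to this sketch (hypothetical helper):

	static u32 example_update_pf_load(u32 reg, u32 mask, u32 shift,
					  int pf_num, bool load)
	{
		u32 field = (reg & mask) >> shift;

		if (load)
			field |= 1U << pf_num;		/* mark this PF loaded */
		else
			field &= ~(1U << pf_num);	/* clear this PF's bit */

		return (reg & ~mask) | ((field << shift) & mask);
	}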
 
 /*
- * Read the load counter for the current engine.
+ * Read the load status for the current engine.
  *
  * should be run under rtnl lock
  */
-static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp, int engine)
+static bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
 {
        u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK :
                             BNX2X_PATH0_LOAD_CNT_MASK);
@@ -3706,36 +4026,23 @@ static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp, int engine)
                             BNX2X_PATH0_LOAD_CNT_SHIFT);
        u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
 
-       DP(NETIF_MSG_HW, "GLOB_REG=0x%08x\n", val);
+       DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "GLOB_REG=0x%08x\n", val);
 
        val = (val & mask) >> shift;
 
-       DP(NETIF_MSG_HW, "load_cnt for engine %d = %d\n", engine, val);
-
-       return val;
-}
-
-/*
- * Reset the load counter for the current engine.
- *
- * should be run under rtnl lock
- */
-static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
-{
-       u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
-       u32 mask = (BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
-                            BNX2X_PATH0_LOAD_CNT_MASK);
+       DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "load mask for engine %d = 0x%x\n",
+          engine, val);
 
-       REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~mask));
+       return val != 0;
 }
 
-static inline void _print_next_block(int idx, const char *blk)
+static void _print_next_block(int idx, const char *blk)
 {
        pr_cont("%s%s", idx ? ", " : "", blk);
 }
 
-static inline int bnx2x_check_blocks_with_parity0(u32 sig, int par_num,
-                                                 bool print)
+static int bnx2x_check_blocks_with_parity0(u32 sig, int par_num,
+                                          bool print)
 {
        int i = 0;
        u32 cur_bit = 0;
@@ -3782,8 +4089,8 @@ static inline int bnx2x_check_blocks_with_parity0(u32 sig, int par_num,
        return par_num;
 }
 
-static inline int bnx2x_check_blocks_with_parity1(u32 sig, int par_num,
-                                                 bool *global, bool print)
+static int bnx2x_check_blocks_with_parity1(u32 sig, int par_num,
+                                          bool *global, bool print)
 {
        int i = 0;
        u32 cur_bit = 0;
@@ -3868,8 +4175,8 @@ static inline int bnx2x_check_blocks_with_parity1(u32 sig, int par_num,
        return par_num;
 }
 
-static inline int bnx2x_check_blocks_with_parity2(u32 sig, int par_num,
-                                                 bool print)
+static int bnx2x_check_blocks_with_parity2(u32 sig, int par_num,
+                                          bool print)
 {
        int i = 0;
        u32 cur_bit = 0;
@@ -3920,8 +4227,8 @@ static inline int bnx2x_check_blocks_with_parity2(u32 sig, int par_num,
        return par_num;
 }
 
-static inline int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
-                                                 bool *global, bool print)
+static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
+                                          bool *global, bool print)
 {
        int i = 0;
        u32 cur_bit = 0;
@@ -3962,8 +4269,8 @@ static inline int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
        return par_num;
 }
 
-static inline int bnx2x_check_blocks_with_parity4(u32 sig, int par_num,
-                                                 bool print)
+static int bnx2x_check_blocks_with_parity4(u32 sig, int par_num,
+                                          bool print)
 {
        int i = 0;
        u32 cur_bit = 0;
@@ -3989,8 +4296,8 @@ static inline int bnx2x_check_blocks_with_parity4(u32 sig, int par_num,
        return par_num;
 }
 
-static inline bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
-                                    u32 *sig)
+static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
+                             u32 *sig)
 {
        if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
            (sig[1] & HW_PRTY_ASSERT_SET_1) ||
@@ -3998,9 +4305,8 @@ static inline bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
            (sig[3] & HW_PRTY_ASSERT_SET_3) ||
            (sig[4] & HW_PRTY_ASSERT_SET_4)) {
                int par_num = 0;
-               DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
-                       "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x "
-                       "[4]:0x%08x\n",
+               DP(NETIF_MSG_HW, "Was parity error: HW block parity attention:\n"
+                                "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
                          sig[0] & HW_PRTY_ASSERT_SET_0,
                          sig[1] & HW_PRTY_ASSERT_SET_1,
                          sig[2] & HW_PRTY_ASSERT_SET_2,
@@ -4062,7 +4368,7 @@ bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
 }
 
 
-static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
+static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
 {
        u32 val;
        if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
@@ -4070,34 +4376,25 @@ static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
                val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
                BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
                if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
-                       BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
-                                 "ADDRESS_ERROR\n");
+                       BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
                if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
-                       BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
-                                 "INCORRECT_RCV_BEHAVIOR\n");
+                       BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
                if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
-                       BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
-                                 "WAS_ERROR_ATTN\n");
+                       BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
                if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
-                       BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
-                                 "VF_LENGTH_VIOLATION_ATTN\n");
+                       BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
                if (val &
                    PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
-                       BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
-                                 "VF_GRC_SPACE_VIOLATION_ATTN\n");
+                       BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
                if (val &
                    PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
-                       BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
-                                 "VF_MSIX_BAR_VIOLATION_ATTN\n");
+                       BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
                if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
-                       BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
-                                 "TCPL_ERROR_ATTN\n");
+                       BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
                if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
-                       BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
-                                 "TCPL_IN_TWO_RCBS_ATTN\n");
+                       BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
                if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
-                       BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
-                                 "CSSNOOP_FIFO_OVERFLOW\n");
+                       BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
        }
        if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
                val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
@@ -4105,19 +4402,15 @@ static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
                if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
                        BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
                if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
-                       BNX2X_ERR("ATC_ATC_INT_STS_REG"
-                                 "_ATC_TCPL_TO_NOT_PEND\n");
+                       BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
                if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
-                       BNX2X_ERR("ATC_ATC_INT_STS_REG_"
-                                 "ATC_GPA_MULTIPLE_HITS\n");
+                       BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
                if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
-                       BNX2X_ERR("ATC_ATC_INT_STS_REG_"
-                                 "ATC_RCPL_TO_EMPTY_CNT\n");
+                       BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
                if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
                        BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
                if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
-                       BNX2X_ERR("ATC_ATC_INT_STS_REG_"
-                                 "ATC_IREQ_LESS_THAN_STU\n");
+                       BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
        }
 
        if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
@@ -4176,8 +4469,7 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
                if (deasserted & (1 << index)) {
                        group_mask = &bp->attn_group[index];
 
-                       DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
-                                        "%08x %08x %08x\n",
+                       DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x %08x\n",
                           index,
                           group_mask->sig[0], group_mask->sig[1],
                           group_mask->sig[2], group_mask->sig[3],
@@ -4268,7 +4560,7 @@ void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
                             igu_addr);
 }
 
-static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
+static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
 {
        /* No memory barriers */
        storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
@@ -4299,7 +4591,7 @@ static int  bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
 }
 #endif
 
-static inline void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
+static void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
 {
        struct bnx2x_mcast_ramrod_params rparam;
        int rc;
@@ -4324,8 +4616,8 @@ static inline void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
        netif_addr_unlock_bh(bp->dev);
 }
 
-static inline void bnx2x_handle_classification_eqe(struct bnx2x *bp,
-                                                  union event_ring_elem *elem)
+static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
+                                           union event_ring_elem *elem)
 {
        unsigned long ramrod_flags = 0;
        int rc = 0;
@@ -4337,15 +4629,17 @@ static inline void bnx2x_handle_classification_eqe(struct bnx2x *bp,
 
        switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
        case BNX2X_FILTER_MAC_PENDING:
+               DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
 #ifdef BCM_CNIC
-               if (cid == BNX2X_ISCSI_ETH_CID)
+               if (cid == BNX2X_ISCSI_ETH_CID(bp))
                        vlan_mac_obj = &bp->iscsi_l2_mac_obj;
                else
 #endif
-                       vlan_mac_obj = &bp->fp[cid].mac_obj;
+                       vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
 
                break;
        case BNX2X_FILTER_MCAST_PENDING:
+               DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n");
                /* This is only relevant for 57710 where multicast MACs are
                 * configured as unicast MACs using the same ramrod.
                 */
@@ -4370,7 +4664,7 @@ static inline void bnx2x_handle_classification_eqe(struct bnx2x *bp,
 static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
 #endif
 
-static inline void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
+static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
 {
        netif_addr_lock_bh(bp->dev);
 
@@ -4391,16 +4685,103 @@ static inline void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
        netif_addr_unlock_bh(bp->dev);
 }
 
-static inline struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
+static void bnx2x_after_afex_vif_lists(struct bnx2x *bp,
+                                             union event_ring_elem *elem)
+{
+       if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) {
+               DP(BNX2X_MSG_SP,
+                  "afex: ramrod completed VIF LIST_GET, addrs 0x%x\n",
+                  elem->message.data.vif_list_event.func_bit_map);
+               bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK,
+                       elem->message.data.vif_list_event.func_bit_map);
+       } else if (elem->message.data.vif_list_event.echo ==
+                  VIF_LIST_RULE_SET) {
+               DP(BNX2X_MSG_SP, "afex: ramrod completed VIF LIST_SET\n");
+               bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0);
+       }
+}
+
+/* called with rtnl_lock */
+static void bnx2x_after_function_update(struct bnx2x *bp)
+{
+       int q, rc;
+       struct bnx2x_fastpath *fp;
+       struct bnx2x_queue_state_params queue_params = {NULL};
+       struct bnx2x_queue_update_params *q_update_params =
+               &queue_params.params.update;
+
+       /* Send Q update command with afex vlan removal values for all Qs */
+       queue_params.cmd = BNX2X_Q_CMD_UPDATE;
+
+       /* set silent vlan removal values according to vlan mode */
+       __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+                 &q_update_params->update_flags);
+       __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+                 &q_update_params->update_flags);
+       __set_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
+
+       /* in access mode, mark mask and value as 0 to strip all vlans */
+       if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) {
+               q_update_params->silent_removal_value = 0;
+               q_update_params->silent_removal_mask = 0;
+       } else {
+               q_update_params->silent_removal_value =
+                       (bp->afex_def_vlan_tag & VLAN_VID_MASK);
+               q_update_params->silent_removal_mask = VLAN_VID_MASK;
+       }
+
+       for_each_eth_queue(bp, q) {
+               /* Set the appropriate Queue object */
+               fp = &bp->fp[q];
+               queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
+
+               /* send the ramrod */
+               rc = bnx2x_queue_state_change(bp, &queue_params);
+               if (rc < 0)
+                       BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
+                                 q);
+       }
+
+#ifdef BCM_CNIC
+       if (!NO_FCOE(bp)) {
+               fp = &bp->fp[FCOE_IDX(bp)];
+               queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
+
+               /* clear pending completion bit */
+               __clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
+
+               /* mark latest Q bit */
+               smp_mb__before_clear_bit();
+               set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
+               smp_mb__after_clear_bit();
+
+               /* send Q update ramrod for FCoE Q */
+               rc = bnx2x_queue_state_change(bp, &queue_params);
+               if (rc < 0)
+                       BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
+                                 q);
+       } else {
+               /* If no FCoE ring - ACK MCP now */
+               bnx2x_link_report(bp);
+               bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
+       }
+#else
+       /* If no FCoE ring - ACK MCP now */
+       bnx2x_link_report(bp);
+       bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
+#endif /* BCM_CNIC */
+}
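For reference, the silent-vlan-removal parameters in the Q update depend on the afex vlan mode: in access mode both mask and value are 0 (strip every vlan), otherwise only the default afex vlan is stripped. A sketch with a hypothetical helper:

	static void example_fill_silent_vlan(const struct bnx2x *bp,
					     struct bnx2x_queue_update_params *p)
	{
		if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) {
			p->silent_removal_value = 0;
			p->silent_removal_mask = 0;
		} else {
			p->silent_removal_value =
				bp->afex_def_vlan_tag & VLAN_VID_MASK;
			p->silent_removal_mask = VLAN_VID_MASK;
		}
	}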
+
+static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
        struct bnx2x *bp, u32 cid)
 {
        DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
 #ifdef BCM_CNIC
-       if (cid == BNX2X_FCOE_ETH_CID)
-               return &bnx2x_fcoe(bp, q_obj);
+       if (cid == BNX2X_FCOE_ETH_CID(bp))
+               return &bnx2x_fcoe_sp_obj(bp, q_obj);
        else
 #endif
-               return &bnx2x_fp(bp, CID_TO_FP(cid), q_obj);
+               return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj;
 }
 
 static void bnx2x_eq_int(struct bnx2x *bp)
@@ -4447,7 +4828,8 @@ static void bnx2x_eq_int(struct bnx2x *bp)
                /* handle eq element */
                switch (opcode) {
                case EVENT_RING_OPCODE_STAT_QUERY:
-                       DP(NETIF_MSG_TIMER, "got statistics comp event %d\n",
+                       DP(BNX2X_MSG_SP | BNX2X_MSG_STATS,
+                          "got statistics comp event %d\n",
                           bp->stats_comp++);
                        /* nothing to do with stats comp */
                        goto next_spqe;
@@ -4474,7 +4856,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)
                        goto next_spqe;
 
                case EVENT_RING_OPCODE_STOP_TRAFFIC:
-                       DP(BNX2X_MSG_SP, "got STOP TRAFFIC\n");
+                       DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n");
                        if (f_obj->complete_cmd(bp, f_obj,
                                                BNX2X_F_CMD_TX_STOP))
                                break;
@@ -4482,21 +4864,45 @@ static void bnx2x_eq_int(struct bnx2x *bp)
                        goto next_spqe;
 
                case EVENT_RING_OPCODE_START_TRAFFIC:
-                       DP(BNX2X_MSG_SP, "got START TRAFFIC\n");
+                       DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n");
                        if (f_obj->complete_cmd(bp, f_obj,
                                                BNX2X_F_CMD_TX_START))
                                break;
                        bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
                        goto next_spqe;
+               case EVENT_RING_OPCODE_FUNCTION_UPDATE:
+                       DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
+                          "AFEX: ramrod completed FUNCTION_UPDATE\n");
+                       f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_AFEX_UPDATE);
+
+                       /* We will perform the Queues update from sp_rtnl task
+                        * as all Queue SP operations should run under
+                        * rtnl_lock.
+                        */
+                       smp_mb__before_clear_bit();
+                       set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE,
+                               &bp->sp_rtnl_state);
+                       smp_mb__after_clear_bit();
+
+                       schedule_delayed_work(&bp->sp_rtnl_task, 0);
+                       goto next_spqe;
+
+               case EVENT_RING_OPCODE_AFEX_VIF_LISTS:
+                       f_obj->complete_cmd(bp, f_obj,
+                                           BNX2X_F_CMD_AFEX_VIFLISTS);
+                       bnx2x_after_afex_vif_lists(bp, elem);
+                       goto next_spqe;
                case EVENT_RING_OPCODE_FUNCTION_START:
-                       DP(BNX2X_MSG_SP, "got FUNC_START ramrod\n");
+                       DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
+                          "got FUNC_START ramrod\n");
                        if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START))
                                break;
 
                        goto next_spqe;
 
                case EVENT_RING_OPCODE_FUNCTION_STOP:
-                       DP(BNX2X_MSG_SP, "got FUNC_STOP ramrod\n");
+                       DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
+                          "got FUNC_STOP ramrod\n");
                        if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP))
                                break;
 
@@ -4578,7 +4984,7 @@ static void bnx2x_sp_task(struct work_struct *work)
 /*     if (status == 0)                                     */
 /*             BNX2X_ERR("spurious slowpath interrupt!\n"); */
 
-       DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
+       DP(BNX2X_MSG_SP, "got a slowpath interrupt (status 0x%x)\n", status);
 
        /* HW attentions */
        if (status & BNX2X_DEF_SB_ATT_IDX) {
@@ -4612,11 +5018,18 @@ static void bnx2x_sp_task(struct work_struct *work)
        }
 
        if (unlikely(status))
-               DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
+               DP(BNX2X_MSG_SP, "got an unknown interrupt! (status 0x%x)\n",
                   status);
 
        bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
             le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
+
+       /* afex - poll to check if VIFSET_ACK should be sent to MFW */
+       if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
+                              &bp->sp_state)) {
+               bnx2x_link_report(bp);
+               bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
+       }
 }
 
 irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
@@ -4660,20 +5073,11 @@ void bnx2x_drv_pulse(struct bnx2x *bp)
 
 static void bnx2x_timer(unsigned long data)
 {
-       u8 cos;
        struct bnx2x *bp = (struct bnx2x *) data;
 
        if (!netif_running(bp->dev))
                return;
 
-       if (poll) {
-               struct bnx2x_fastpath *fp = &bp->fp[0];
-
-               for_each_cos_in_tx_queue(fp, cos)
-                       bnx2x_tx_int(bp, &fp->txdata[cos]);
-               bnx2x_rx_int(fp, 1000);
-       }
-
        if (!BP_NOMCP(bp)) {
                int mb_idx = BP_FW_MB_IDX(bp);
                u32 drv_pulse;
@@ -4712,7 +5116,7 @@ static void bnx2x_timer(unsigned long data)
  * nic init service functions
  */
 
-static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
+static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
 {
        u32 i;
        if (!(len%4) && !(addr%4))
@@ -4725,10 +5129,10 @@ static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
 }
 
 /* helper: writes FP SP data to FW - data_size in dwords */
-static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
-                                      int fw_sb_id,
-                                      u32 *sb_data_p,
-                                      u32 data_size)
+static void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
+                               int fw_sb_id,
+                               u32 *sb_data_p,
+                               u32 data_size)
 {
        int index;
        for (index = 0; index < data_size; index++)
@@ -4738,7 +5142,7 @@ static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
                        *(sb_data_p + index));
 }
 
-static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
+static void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
 {
        u32 *sb_data_p;
        u32 data_size = 0;
@@ -4771,7 +5175,7 @@ static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
 }
 
 /* helper:  writes SP SB data to FW */
-static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
+static void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
                struct hc_sp_status_block_data *sp_sb_data)
 {
        int func = BP_FUNC(bp);
@@ -4783,7 +5187,7 @@ static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
                        *((u32 *)sp_sb_data + i));
 }
 
-static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
+static void bnx2x_zero_sp_sb(struct bnx2x *bp)
 {
        int func = BP_FUNC(bp);
        struct hc_sp_status_block_data sp_sb_data;
@@ -4804,8 +5208,7 @@ static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
 }
 
 
-static inline
-void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
+static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
                                           int igu_sb_id, int igu_seg_id)
 {
        hc_sm->igu_sb_id = igu_sb_id;
@@ -4816,8 +5219,7 @@ void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
 
 
 /* allocates state machine ids. */
-static inline
-void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
+static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
 {
        /* zero out state machine indices */
        /* rx indices */
@@ -4899,7 +5301,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
        bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
                                       igu_sb_id, igu_seg_id);
 
-       DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);
+       DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id);
 
        /* write indices to HW */
        bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
@@ -5225,7 +5627,7 @@ static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
        return fp->bp->base_fw_ndsb + fp->index + CNIC_PRESENT;
 }
 
-static inline u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
+static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
 {
        if (CHIP_IS_E1x(fp->bp))
                return BP_L_ID(fp->bp) + fp->index;
@@ -5249,6 +5651,7 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
 
        /* init shortcut */
        fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);
+
        /* Set up SB indices */
        fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
 
@@ -5260,15 +5663,15 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
 
        /* init tx data */
        for_each_cos_in_tx_queue(fp, cos) {
-               bnx2x_init_txdata(bp, &fp->txdata[cos],
-                                 CID_COS_TO_TX_ONLY_CID(fp->cid, cos),
-                                 FP_COS_TO_TXQ(fp, cos),
-                                 BNX2X_TX_SB_INDEX_BASE + cos);
-               cids[cos] = fp->txdata[cos].cid;
+               bnx2x_init_txdata(bp, fp->txdata_ptr[cos],
+                                 CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp),
+                                 FP_COS_TO_TXQ(fp, cos, bp),
+                                 BNX2X_TX_SB_INDEX_BASE + cos, fp);
+               cids[cos] = fp->txdata_ptr[cos]->cid;
        }
 
-       bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, cids, fp->max_cos,
-                            BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
+       bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids,
+                            fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
                             bnx2x_sp_mapping(bp, q_rdata), q_type);
 
        /**
@@ -5276,8 +5679,7 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
         */
        bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);
 
-       DP(NETIF_MSG_IFUP, "queue[%d]:  bnx2x_init_sb(%p,%p)  "
-                                  "cl_id %d  fw_sb %d  igu_sb %d\n",
+       DP(NETIF_MSG_IFUP, "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  fw_sb %d  igu_sb %d\n",
                   fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
                   fp->igu_sb_id);
        bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
@@ -5286,6 +5688,43 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
        bnx2x_update_fpsb_idx(fp);
 }
 
+static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
+{
+       int i;
+
+       for (i = 1; i <= NUM_TX_RINGS; i++) {
+               struct eth_tx_next_bd *tx_next_bd =
+                       &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
+
+               tx_next_bd->addr_hi =
+                       cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
+                                   BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
+               tx_next_bd->addr_lo =
+                       cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
+                                   BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
+       }
+
+       SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
+       txdata->tx_db.data.zero_fill1 = 0;
+       txdata->tx_db.data.prod = 0;
+
+       txdata->tx_pkt_prod = 0;
+       txdata->tx_pkt_cons = 0;
+       txdata->tx_bd_prod = 0;
+       txdata->tx_bd_cons = 0;
+       txdata->tx_pkt = 0;
+}
+
+static void bnx2x_init_tx_rings(struct bnx2x *bp)
+{
+       int i;
+       u8 cos;
+
+       for_each_tx_queue(bp, i)
+               for_each_cos_in_tx_queue(&bp->fp[i], cos)
+                       bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
+}
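bnx2x_init_tx_ring_one() above links the NUM_TX_RINGS descriptor pages into a circle: the next_bd slot at the end of page i-1 receives the address of page (i % NUM_TX_RINGS), so the last page wraps back to the first. A minimal stand-alone sketch of the same chaining pattern, with hypothetical types and sizes rather than the driver's structures:

#include <stdio.h>

#define NUM_PAGES      4	/* stands in for NUM_TX_RINGS (hypothetical value) */
#define DESC_PER_PAGE  8	/* stands in for TX_DESC_CNT (hypothetical value)  */

struct desc { struct desc *next; };	/* last slot of each page acts as a next_bd */

static struct desc ring[NUM_PAGES * DESC_PER_PAGE];

int main(void)
{
	int i;

	/* last descriptor of page i-1 points at the base of page (i % NUM_PAGES),
	 * so the final page wraps back to page 0 and the pages form a circle */
	for (i = 1; i <= NUM_PAGES; i++)
		ring[DESC_PER_PAGE * i - 1].next =
			&ring[DESC_PER_PAGE * (i % NUM_PAGES)];

	for (i = 0; i < NUM_PAGES; i++) {
		int next_page = (int)((ring[DESC_PER_PAGE * (i + 1) - 1].next - ring)
				      / DESC_PER_PAGE);

		printf("page %d -> page %d\n", i, next_page);
	}
	return 0;
}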
+
 void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
 {
        int i;
@@ -5364,8 +5803,7 @@ gunzip_nomem2:
        bp->gunzip_buf = NULL;
 
 gunzip_nomem1:
-       netdev_err(bp->dev, "Cannot allocate firmware buffer for"
-              " un-compression\n");
+       BNX2X_ERR("Cannot allocate firmware buffer for un-compression\n");
        return -ENOMEM;
 }
 
@@ -5417,8 +5855,8 @@ static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
 
        bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
        if (bp->gunzip_outlen & 0x3)
-               netdev_err(bp->dev, "Firmware decompression error:"
-                                   " gunzip_outlen (%d) not aligned\n",
+               netdev_err(bp->dev,
+                          "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
                                bp->gunzip_outlen);
        bp->gunzip_outlen >>= 2;
 
@@ -5692,8 +6130,7 @@ static void bnx2x_init_pxp(struct bnx2x *bp)
        u16 devctl;
        int r_order, w_order;
 
-       pci_read_config_word(bp->pdev,
-                            pci_pcie_cap(bp->pdev) + PCI_EXP_DEVCTL, &devctl);
+       pcie_capability_read_word(bp->pdev, PCI_EXP_DEVCTL, &devctl);
        DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
        w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
        if (bp->mrrs == -1)
@@ -5811,7 +6248,7 @@ void bnx2x_pf_disable(struct bnx2x *bp)
        REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
 }
 
-static inline void bnx2x__common_init_phy(struct bnx2x *bp)
+static void bnx2x__common_init_phy(struct bnx2x *bp)
 {
        u32 shmem_base[2], shmem2_base[2];
        shmem_base[0] =  bp->common.shmem_base;
@@ -5837,7 +6274,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
 {
        u32 val;
 
-       DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_ABS_FUNC(bp));
+       DP(NETIF_MSG_HW, "starting common init  func %d\n", BP_ABS_FUNC(bp));
 
        /*
         * take the UNDI lock to protect undi_unload flow from accessing
@@ -6098,12 +6535,24 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
        if (!CHIP_IS_E1(bp))
                REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan);
 
-       if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp))
-               /* Bit-map indicating which L2 hdrs may appear
-                * after the basic Ethernet header
-                */
-               REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
-                      bp->path_has_ovlan ? 7 : 6);
+       if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) {
+               if (IS_MF_AFEX(bp)) {
+                       /* configure that VNTag and VLAN headers must be
+                        * received in afex mode
+                        */
+                       REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 0xE);
+                       REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, 0xA);
+                       REG_WR(bp, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
+                       REG_WR(bp, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
+                       REG_WR(bp, PRS_REG_TAG_LEN_0, 0x4);
+               } else {
+                       /* Bit-map indicating which L2 hdrs may appear
+                        * after the basic Ethernet header
+                        */
+                       REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
+                              bp->path_has_ovlan ? 7 : 6);
+               }
+       }
 
        bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON);
        bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON);
@@ -6137,9 +6586,21 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
        bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON);
        bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON);
 
-       if (!CHIP_IS_E1x(bp))
-               REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
-                      bp->path_has_ovlan ? 7 : 6);
+       if (!CHIP_IS_E1x(bp)) {
+               if (IS_MF_AFEX(bp)) {
+                       /* configure that VNTag and VLAN headers must be
+                        * sent in afex mode
+                        */
+                       REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 0xE);
+                       REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, 0xA);
+                       REG_WR(bp, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
+                       REG_WR(bp, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
+                       REG_WR(bp, PBF_REG_TAG_LEN_0, 0x4);
+               } else {
+                       REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
+                              bp->path_has_ovlan ? 7 : 6);
+               }
+       }
 
        REG_WR(bp, SRC_REG_SOFT_RST, 1);
 
@@ -6161,9 +6622,9 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
 
        if (sizeof(union cdu_context) != 1024)
                /* we currently assume that a context is 1024 bytes */
-               dev_alert(&bp->pdev->dev, "please adjust the size "
-                                         "of cdu_context(%ld)\n",
-                        (long)sizeof(union cdu_context));
+               dev_alert(&bp->pdev->dev,
+                         "please adjust the size of cdu_context(%ld)\n",
+                         (long)sizeof(union cdu_context));
 
        bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON);
        val = (4 << 24) + (0 << 12) + 1024;
@@ -6292,7 +6753,7 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
 
        bnx2x__link_reset(bp);
 
-       DP(BNX2X_MSG_MCP, "starting port init  port %d\n", port);
+       DP(NETIF_MSG_HW, "starting port init  port %d\n", port);
 
        REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
 
@@ -6357,15 +6818,29 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
 
 
        bnx2x_init_block(bp, BLOCK_PRS, init_phase);
-       if (CHIP_IS_E3B0(bp))
-               /* Ovlan exists only if we are in multi-function +
-                * switch-dependent mode, in switch-independent there
-                * is no ovlan headers
-                */
-               REG_WR(bp, BP_PORT(bp) ?
-                      PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
-                      PRS_REG_HDRS_AFTER_BASIC_PORT_0,
-                      (bp->path_has_ovlan ? 7 : 6));
+       if (CHIP_IS_E3B0(bp)) {
+               if (IS_MF_AFEX(bp)) {
+                       /* configure headers for AFEX mode */
+                       REG_WR(bp, BP_PORT(bp) ?
+                              PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
+                              PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
+                       REG_WR(bp, BP_PORT(bp) ?
+                              PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
+                              PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
+                       REG_WR(bp, BP_PORT(bp) ?
+                              PRS_REG_MUST_HAVE_HDRS_PORT_1 :
+                              PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
+               } else {
+                       /* Ovlan exists only if we are in multi-function +
+                        * switch-dependent mode; in switch-independent mode there
+                        * are no ovlan headers
+                        */
+                       REG_WR(bp, BP_PORT(bp) ?
+                              PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
+                              PRS_REG_HDRS_AFTER_BASIC_PORT_0,
+                              (bp->path_has_ovlan ? 7 : 6));
+               }
+       }
 
        bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
        bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
@@ -6427,10 +6902,15 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
                /* Bit-map indicating which L2 hdrs may appear after the
                 * basic Ethernet header
                 */
-               REG_WR(bp, BP_PORT(bp) ?
-                          NIG_REG_P1_HDRS_AFTER_BASIC :
-                          NIG_REG_P0_HDRS_AFTER_BASIC,
-                          IS_MF_SD(bp) ? 7 : 6);
+               if (IS_MF_AFEX(bp))
+                       REG_WR(bp, BP_PORT(bp) ?
+                              NIG_REG_P1_HDRS_AFTER_BASIC :
+                              NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
+               else
+                       REG_WR(bp, BP_PORT(bp) ?
+                              NIG_REG_P1_HDRS_AFTER_BASIC :
+                              NIG_REG_P0_HDRS_AFTER_BASIC,
+                              IS_MF_SD(bp) ? 7 : 6);
 
                if (CHIP_IS_E3(bp))
                        REG_WR(bp, BP_PORT(bp) ?
@@ -6452,6 +6932,7 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
                                val = 1;
                                break;
                        case MULTI_FUNCTION_SI:
+                       case MULTI_FUNCTION_AFEX:
                                val = 2;
                                break;
                        }
@@ -6483,21 +6964,71 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
 {
        int reg;
+       u32 wb_write[2];
 
        if (CHIP_IS_E1(bp))
                reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
        else
                reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
 
-       bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
+       wb_write[0] = ONCHIP_ADDR1(addr);
+       wb_write[1] = ONCHIP_ADDR2(addr);
+       REG_WR_DMAE(bp, reg, wb_write, 2);
 }
 
-static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
+static void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func,
+                                  u8 idu_sb_id, bool is_Pf)
+{
+       u32 data, ctl, cnt = 100;
+       u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
+       u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
+       u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
+       u32 sb_bit =  1 << (idu_sb_id%32);
+       u32 func_encode = func | (is_Pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
+       u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
+
+       /* Not supported in BC mode */
+       if (CHIP_INT_MODE_IS_BC(bp))
+               return;
+
+       data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
+                       << IGU_REGULAR_CLEANUP_TYPE_SHIFT)      |
+               IGU_REGULAR_CLEANUP_SET                         |
+               IGU_REGULAR_BCLEANUP;
+
+       ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT         |
+             func_encode << IGU_CTRL_REG_FID_SHIFT             |
+             IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
+
+       DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
+                        data, igu_addr_data);
+       REG_WR(bp, igu_addr_data, data);
+       mmiowb();
+       barrier();
+       DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
+                         ctl, igu_addr_ctl);
+       REG_WR(bp, igu_addr_ctl, ctl);
+       mmiowb();
+       barrier();
+
+       /* wait for clean up to finish */
+       while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
+               msleep(20);
+
+
+       if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
+               DP(NETIF_MSG_HW,
+                  "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n",
+                         idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
+       }
+}
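bnx2x_igu_clear_sb_gen() above issues the cleanup command through GRC and then polls the ack register for this SB's bit, giving up after 100 polls of 20 ms each. A minimal sketch of that bounded-poll pattern; read_ack_reg() is a hypothetical stand-in, not a driver API:

#include <stdio.h>

/* Hypothetical stand-in for REG_RD(bp, igu_addr_ack): the cleanup bit
 * becomes visible on the third poll. */
static unsigned int read_ack_reg(void)
{
	static int calls;

	return (++calls >= 3) ? (1u << 4) : 0;
}

int main(void)
{
	unsigned int sb_bit = 1u << 4;	/* bit for this status block */
	int cnt = 100;			/* poll budget, as in the driver */

	while (!(read_ack_reg() & sb_bit) && --cnt)
		;	/* the driver sleeps here (msleep(20)) */

	if (cnt)
		printf("cleanup acked, %d polls left\n", cnt);
	else
		printf("cleanup timed out\n");
	return 0;
}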
+
+static void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
 {
        bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/);
 }
 
-static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
+static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
 {
        u32 i, base = FUNC_ILT_BASE(func);
        for (i = base; i < base + ILT_PER_FUNC; i++)
@@ -6513,13 +7044,16 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
        u16 cdu_ilt_start;
        u32 addr, val;
        u32 main_mem_base, main_mem_size, main_mem_prty_clr;
-       int i, main_mem_width;
+       int i, main_mem_width, rc;
 
-       DP(BNX2X_MSG_MCP, "starting func init  func %d\n", func);
+       DP(NETIF_MSG_HW, "starting func init  func %d\n", func);
 
        /* FLR cleanup - hmmm */
-       if (!CHIP_IS_E1x(bp))
-               bnx2x_pf_flr_clnup(bp);
+       if (!CHIP_IS_E1x(bp)) {
+               rc = bnx2x_pf_flr_clnup(bp);
+               if (rc)
+                       return rc;
+       }
 
        /* set MSI reconfigure capability */
        if (bp->common.int_block == INT_BLOCK_HC) {
@@ -6536,12 +7070,10 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
        cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
 
        for (i = 0; i < L2_ILT_LINES(bp); i++) {
-               ilt->lines[cdu_ilt_start + i].page =
-                       bp->context.vcxt + (ILT_PAGE_CIDS * i);
+               ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt;
                ilt->lines[cdu_ilt_start + i].page_mapping =
-                       bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
-               /* cdu ilt pages are allocated manually so there's no need to
-               set the size */
+                       bp->context[i].cxt_mapping;
+               ilt->lines[cdu_ilt_start + i].size = bp->context[i].size;
        }
        bnx2x_ilt_init_op(bp, INITOP_SET);
 
@@ -6772,9 +7304,9 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
 
                val = REG_RD(bp, main_mem_prty_clr);
                if (val)
-                       DP(BNX2X_MSG_MCP, "Hmmm... Parity errors in HC "
-                                         "block during "
-                                         "function init (0x%x)!\n", val);
+                       DP(NETIF_MSG_HW,
+                          "Hmmm... Parity errors in HC block during function init (0x%x)!\n",
+                          val);
 
                /* Clear "false" parity errors in MSI-X table */
                for (i = main_mem_base;
@@ -6808,6 +7340,8 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
 
 void bnx2x_free_mem(struct bnx2x *bp)
 {
+       int i;
+
        /* fastpath */
        bnx2x_free_fp_mem(bp);
        /* end of fastpath */
@@ -6821,9 +7355,9 @@ void bnx2x_free_mem(struct bnx2x *bp)
        BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
                       sizeof(struct bnx2x_slowpath));
 
-       BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
-                      bp->context.size);
-
+       for (i = 0; i < L2_ILT_LINES(bp); i++)
+               BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping,
+                              bp->context[i].size);
        bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
 
        BNX2X_FREE(bp->ilt->lines);
@@ -6845,16 +7379,19 @@ void bnx2x_free_mem(struct bnx2x *bp)
                       BCM_PAGE_SIZE * NUM_EQ_PAGES);
 }
 
-static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
+static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
 {
        int num_groups;
+       int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
 
-       /* number of eth_queues */
-       u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp);
+       /* number of queues for statistics is number of eth queues + FCoE */
+       u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
 
        /* Total number of FW statistics requests =
-        * 1 for port stats + 1 for PF stats + num_eth_queues */
-       bp->fw_stats_num = 2 + num_queue_stats;
+        * 1 for port stats + 1 for PF stats + potentially 1 for FCoE stats +
+        * num of queues
+        */
+       bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
 
 
        /* Request is built from stats_query_header and an array of
@@ -6862,8 +7399,8 @@ static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
         * STATS_QUERY_CMD_COUNT rules. The real number of requests is
         * configured in the stats_query_header.
         */
-       num_groups = (2 + num_queue_stats) / STATS_QUERY_CMD_COUNT +
-               (((2 + num_queue_stats) % STATS_QUERY_CMD_COUNT) ? 1 : 0);
+       num_groups = ((bp->fw_stats_num) / STATS_QUERY_CMD_COUNT) +
+                    (((bp->fw_stats_num) % STATS_QUERY_CMD_COUNT) ? 1 : 0);
 
        bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
                        num_groups * sizeof(struct stats_query_cmd_group);
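The num_groups expression above is an open-coded ceiling division: enough stats_query_cmd_group entries to hold fw_stats_num requests at STATS_QUERY_CMD_COUNT rules per group. A small sketch of the equivalence with DIV_ROUND_UP() from <linux/kernel.h>, using illustrative values (19 requests and 16 rules per group are assumptions, not the driver's constants):

#include <stdio.h>

/* Same ceiling division as the open-coded form above
 * (DIV_ROUND_UP in <linux/kernel.h> expands to this). */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int fw_stats_num = 19;	/* illustrative request count              */
	int cmd_count = 16;	/* stands in for STATS_QUERY_CMD_COUNT     */
	int open_coded = fw_stats_num / cmd_count +
			 ((fw_stats_num % cmd_count) ? 1 : 0);

	/* both forms yield 2 groups for 19 requests of 16 rules each */
	printf("%d == %d\n", open_coded, DIV_ROUND_UP(fw_stats_num, cmd_count));
	return 0;
}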
@@ -6872,9 +7409,13 @@ static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
         *
         * stats_counter holds per-STORM counters that are incremented
         * when STORM has finished with the current request.
+        *
+        * memory for FCoE offloaded statistics is counted anyway,
+        * even if it will not be sent.
         */
        bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
                sizeof(struct per_pf_stats) +
+               sizeof(struct fcoe_statistics_params) +
                sizeof(struct per_queue_stats) * num_queue_stats +
                sizeof(struct stats_counter);
 
@@ -6895,12 +7436,15 @@ static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
 alloc_mem_err:
        BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
                       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+       BNX2X_ERR("Can't allocate memory\n");
        return -ENOMEM;
 }
 
 
 int bnx2x_alloc_mem(struct bnx2x *bp)
 {
+       int i, allocated, context_size;
+
 #ifdef BCM_CNIC
        if (!CHIP_IS_E1x(bp))
                /* size = the status block + ramrod buffers */
@@ -6921,15 +7465,38 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
        BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
                        sizeof(struct bnx2x_slowpath));
 
+#ifdef BCM_CNIC
+       /* write address to which L5 should insert its values */
+       bp->cnic_eth_dev.addr_drv_info_to_mcp = &bp->slowpath->drv_info_to_mcp;
+#endif
+
        /* Allocate memory for FW statistics */
        if (bnx2x_alloc_fw_stats_mem(bp))
                goto alloc_mem_err;
 
-       bp->context.size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);
-
-       BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
-                       bp->context.size);
+       /* Allocate memory for CDU context:
+        * This memory is allocated separately and not in the generic ILT
+        * functions because CDU differs in a few aspects:
+        * 1. There are multiple entities allocating memory for context -
+        * 'regular' driver, CNIC and SRIOV driver. Each separately controls
+        * its own ILT lines.
+        * 2. Since CDU page-size is not a single 4KB page (which is the case
+        * for the other ILT clients), to be efficient we want to support
+        * allocation of sub-page-size in the last entry.
+        * 3. Context pointers are used by the driver to pass to FW / update
+        * the context (for the other ILT clients the pointers are used just to
+        * free the memory during unload).
+        */
+       context_size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);
 
+       for (i = 0, allocated = 0; allocated < context_size; i++) {
+               bp->context[i].size = min(CDU_ILT_PAGE_SZ,
+                                         (context_size - allocated));
+               BNX2X_PCI_ALLOC(bp->context[i].vcxt,
+                               &bp->context[i].cxt_mapping,
+                               bp->context[i].size);
+               allocated += bp->context[i].size;
+       }
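The allocation loop above walks context_size in CDU_ILT_PAGE_SZ chunks, and min() trims the final chunk to the remaining sub-page size (point 2 of the comment). A stand-alone sketch of the same chunking arithmetic, with illustrative sizes rather than the driver's constants:

#include <stdio.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))

int main(void)
{
	int page_sz = 4096;		/* stands in for CDU_ILT_PAGE_SZ   */
	int context_size = 10000;	/* stands in for the computed size */
	int allocated, i;

	for (i = 0, allocated = 0; allocated < context_size; i++) {
		int chunk = MIN(page_sz, context_size - allocated);

		printf("chunk %d: %d bytes\n", i, chunk);
		allocated += chunk;
	}
	/* prints 4096, 4096, 1808 - the last entry is sub-page-size */
	return 0;
}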
        BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
 
        if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
@@ -6953,6 +7520,7 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
 
 alloc_mem_err:
        bnx2x_free_mem(bp);
+       BNX2X_ERR("Can't allocate memory\n");
        return -ENOMEM;
 }
 
@@ -6987,8 +7555,14 @@ int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
        }
 
        rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
-       if (rc < 0)
+
+       if (rc == -EEXIST) {
+               DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
+               /* do not treat adding same MAC as error */
+               rc = 0;
+       } else if (rc < 0)
                BNX2X_ERR("%s MAC failed\n", (set ? "Set" : "Del"));
+
        return rc;
 }
 
@@ -7017,12 +7591,21 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
 {
        unsigned long ramrod_flags = 0;
 
+#ifdef BCM_CNIC
+       if (is_zero_ether_addr(bp->dev->dev_addr) &&
+           (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
+               DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN,
+                  "Ignoring Zero MAC for STORAGE SD mode\n");
+               return 0;
+       }
+#endif
+
        DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
 
        __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
        /* Eth MAC is set on RSS leading client (fp[0]) */
-       return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->fp->mac_obj, set,
-                                BNX2X_ETH_MAC, &ramrod_flags);
+       return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->sp_objs->mac_obj,
+                                set, BNX2X_ETH_MAC, &ramrod_flags);
 }
 
 int bnx2x_setup_leading(struct bnx2x *bp)
@@ -7037,7 +7620,7 @@ int bnx2x_setup_leading(struct bnx2x *bp)
  *
  * In case of MSI-X it will also try to enable MSI-X.
  */
-static void __devinit bnx2x_set_int_mode(struct bnx2x *bp)
+void bnx2x_set_int_mode(struct bnx2x *bp)
 {
        switch (int_mode) {
        case INT_MODE_MSI:
@@ -7045,32 +7628,24 @@ static void __devinit bnx2x_set_int_mode(struct bnx2x *bp)
                /* falling through... */
        case INT_MODE_INTx:
                bp->num_queues = 1 + NON_ETH_CONTEXT_USE;
-               DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
+               BNX2X_DEV_INFO("set number of queues to 1\n");
                break;
        default:
-               /* Set number of queues according to bp->multi_mode value */
-               bnx2x_set_num_queues(bp);
-
-               DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
-                  bp->num_queues);
-
                /* if we can't use MSI-X we only need one fp,
                 * so try to enable MSI-X with the requested number of fp's
                 * and fall back to MSI or legacy INTx with one fp
                 */
-               if (bnx2x_enable_msix(bp)) {
-                       /* failed to enable MSI-X */
-                       if (bp->multi_mode)
-                               DP(NETIF_MSG_IFUP,
-                                         "Multi requested but failed to "
-                                         "enable MSI-X (%d), "
-                                         "set number of queues to %d\n",
-                                  bp->num_queues,
-                                  1 + NON_ETH_CONTEXT_USE);
+               if (bnx2x_enable_msix(bp) ||
+                   bp->flags & USING_SINGLE_MSIX_FLAG) {
+                       /* failed to enable multiple MSI-X */
+                       BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
+                                      bp->num_queues, 1 + NON_ETH_CONTEXT_USE);
+
                        bp->num_queues = 1 + NON_ETH_CONTEXT_USE;
 
                        /* Try to enable MSI */
-                       if (!(bp->flags & DISABLE_MSI_FLAG))
+                       if (!(bp->flags & USING_SINGLE_MSIX_FLAG) &&
+                           !(bp->flags & DISABLE_MSI_FLAG))
                                bnx2x_enable_msi(bp);
                }
                break;
@@ -7104,8 +7679,7 @@ void bnx2x_ilt_set_info(struct bnx2x *bp)
 #endif
        ilt_client->end = line - 1;
 
-       DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
-                                        "flags 0x%x, hw psz %d\n",
+       DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
           ilt_client->start,
           ilt_client->end,
           ilt_client->page_size,
@@ -7126,8 +7700,8 @@ void bnx2x_ilt_set_info(struct bnx2x *bp)
 
                ilt_client->end = line - 1;
 
-               DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
-                                                "flags 0x%x, hw psz %d\n",
+               DP(NETIF_MSG_IFUP,
+                  "ilt client[QM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
                   ilt_client->start,
                   ilt_client->end,
                   ilt_client->page_size,
@@ -7145,8 +7719,8 @@ void bnx2x_ilt_set_info(struct bnx2x *bp)
        line += SRC_ILT_LINES;
        ilt_client->end = line - 1;
 
-       DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
-                                        "flags 0x%x, hw psz %d\n",
+       DP(NETIF_MSG_IFUP,
+          "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
           ilt_client->start,
           ilt_client->end,
           ilt_client->page_size,
@@ -7167,8 +7741,8 @@ void bnx2x_ilt_set_info(struct bnx2x *bp)
        line += TM_ILT_LINES;
        ilt_client->end = line - 1;
 
-       DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
-                                        "flags 0x%x, hw psz %d\n",
+       DP(NETIF_MSG_IFUP,
+          "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
           ilt_client->start,
           ilt_client->end,
           ilt_client->page_size,
@@ -7192,11 +7766,13 @@ void bnx2x_ilt_set_info(struct bnx2x *bp)
  *      - HC configuration
  *      - Queue's CDU context
  */
-static inline void bnx2x_pf_q_prep_init(struct bnx2x *bp,
+static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
        struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params)
 {
 
        u8 cos;
+       int cxt_index, cxt_offset;
+
        /* FCoE Queue uses Default SB, thus has no HC capabilities */
        if (!IS_FCOE_FP(fp)) {
                __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
@@ -7229,13 +7805,17 @@ static inline void bnx2x_pf_q_prep_init(struct bnx2x *bp,
        /* set maximum number of COSs supported by this queue */
        init_params->max_cos = fp->max_cos;
 
-       DP(BNX2X_MSG_SP, "fp: %d setting queue params max cos to: %d\n",
+       DP(NETIF_MSG_IFUP, "fp: %d setting queue params max cos to: %d\n",
            fp->index, init_params->max_cos);
 
        /* set the context pointers queue object */
-       for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++)
+       for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
+               cxt_index = fp->txdata_ptr[cos]->cid / ILT_PAGE_CIDS;
+               cxt_offset = fp->txdata_ptr[cos]->cid - (cxt_index *
+                               ILT_PAGE_CIDS);
                init_params->cxts[cos] =
-                       &bp->context.vcxt[fp->txdata[cos].cid].eth;
+                       &bp->context[cxt_index].vcxt[cxt_offset].eth;
+       }
 }
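The cxt_index/cxt_offset pair above splits a CID into an ILT page number and an offset inside that page; the open-coded subtraction is simply the remainder of dividing by ILT_PAGE_CIDS. A small sketch of that equivalence, with illustrative numbers rather than the real ILT_PAGE_CIDS:

#include <stdio.h>

int main(void)
{
	int page_cids = 64;	/* stands in for ILT_PAGE_CIDS */
	int cid = 150;		/* illustrative connection id  */

	int cxt_index  = cid / page_cids;		/* 2                           */
	int cxt_offset = cid - cxt_index * page_cids;	/* 22, same as cid % page_cids */

	printf("page %d, offset %d (mod form: %d)\n",
	       cxt_index, cxt_offset, cid % page_cids);
	return 0;
}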
 
 int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
@@ -7260,9 +7840,8 @@ int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
        /* Set Tx TX_ONLY_SETUP parameters */
        bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index);
 
-       DP(BNX2X_MSG_SP, "preparing to send tx-only ramrod for connection:"
-                        "cos %d, primary cid %d, cid %d, "
-                        "client id %d, sp-client id %d, flags %lx\n",
+       DP(NETIF_MSG_IFUP,
+          "preparing to send tx-only ramrod for connection: cos %d, primary cid %d, cid %d, client id %d, sp-client id %d, flags %lx\n",
           tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX],
           q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id,
           tx_only_params->gen_params.spcl_id, tx_only_params->flags);
@@ -7286,7 +7865,7 @@ int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                       bool leading)
 {
-       struct bnx2x_queue_state_params q_params = {0};
+       struct bnx2x_queue_state_params q_params = {NULL};
        struct bnx2x_queue_setup_params *setup_params =
                                                &q_params.params.setup;
        struct bnx2x_queue_setup_tx_only_params *tx_only_params =
@@ -7294,14 +7873,14 @@ int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
        int rc;
        u8 tx_index;
 
-       DP(BNX2X_MSG_SP, "setting up queue %d\n", fp->index);
+       DP(NETIF_MSG_IFUP, "setting up queue %d\n", fp->index);
 
        /* reset IGU state; skip FCoE L2 queue */
        if (!IS_FCOE_FP(fp))
                bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
                             IGU_INT_ENABLE, 0);
 
-       q_params.q_obj = &fp->q_obj;
+       q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
        /* We want to wait for completion in this context */
        __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
 
@@ -7318,7 +7897,7 @@ int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                return rc;
        }
 
-       DP(BNX2X_MSG_SP, "init complete\n");
+       DP(NETIF_MSG_IFUP, "init complete\n");
 
 
        /* Now move the Queue to the SETUP state... */
@@ -7369,12 +7948,12 @@ static int bnx2x_stop_queue(struct bnx2x *bp, int index)
 {
        struct bnx2x_fastpath *fp = &bp->fp[index];
        struct bnx2x_fp_txdata *txdata;
-       struct bnx2x_queue_state_params q_params = {0};
+       struct bnx2x_queue_state_params q_params = {NULL};
        int rc, tx_index;
 
-       DP(BNX2X_MSG_SP, "stopping queue %d cid %d\n", index, fp->cid);
+       DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid);
 
-       q_params.q_obj = &fp->q_obj;
+       q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
        /* We want to wait for completion in this context */
        __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
 
@@ -7385,9 +7964,9 @@ static int bnx2x_stop_queue(struct bnx2x *bp, int index)
             tx_index++){
 
                /* ascertain this is a normal queue */
-               txdata = &fp->txdata[tx_index];
+               txdata = fp->txdata_ptr[tx_index];
 
-               DP(BNX2X_MSG_SP, "stopping tx-only queue %d\n",
+               DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n",
                                                        txdata->txq_index);
 
                /* send halt terminate on tx-only connection */
@@ -7543,9 +8122,9 @@ static void bnx2x_reset_port(struct bnx2x *bp)
        /* TODO: Close Doorbell port? */
 }
 
-static inline int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
+static int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
 {
-       struct bnx2x_func_state_params func_params = {0};
+       struct bnx2x_func_state_params func_params = {NULL};
 
        /* Prepare parameters for function state transitions */
        __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
@@ -7558,9 +8137,9 @@ static inline int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
        return bnx2x_func_state_change(bp, &func_params);
 }
 
-static inline int bnx2x_func_stop(struct bnx2x *bp)
+static int bnx2x_func_stop(struct bnx2x *bp)
 {
-       struct bnx2x_func_state_params func_params = {0};
+       struct bnx2x_func_state_params func_params = {NULL};
        int rc;
 
        /* Prepare parameters for function state transitions */
@@ -7579,8 +8158,7 @@ static inline int bnx2x_func_stop(struct bnx2x *bp)
 #ifdef BNX2X_STOP_ON_ERROR
                return rc;
 #else
-               BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry "
-                         "transaction\n");
+               BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry transaction\n");
                __set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
                return bnx2x_func_state_change(bp, &func_params);
 #endif
@@ -7643,14 +8221,12 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
        else {
                int path = BP_PATH(bp);
 
-               DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d]      "
-                                    "%d, %d, %d\n",
+               DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d]      %d, %d, %d\n",
                   path, load_count[path][0], load_count[path][1],
                   load_count[path][2]);
                load_count[path][0]--;
                load_count[path][1 + port]--;
-               DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d]  "
-                                    "%d, %d, %d\n",
+               DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d]  %d, %d, %d\n",
                   path, load_count[path][0], load_count[path][1],
                   load_count[path][2]);
                if (load_count[path][0] == 0)
@@ -7676,7 +8252,7 @@ void bnx2x_send_unload_done(struct bnx2x *bp)
                bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
 }
 
-static inline int bnx2x_func_wait_started(struct bnx2x *bp)
+static int bnx2x_func_wait_started(struct bnx2x *bp)
 {
        int tout = 50;
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
@@ -7713,16 +8289,17 @@ static inline int bnx2x_func_wait_started(struct bnx2x *bp)
        if (bnx2x_func_get_state(bp, &bp->func_obj) !=
                                                BNX2X_F_STATE_STARTED) {
 #ifdef BNX2X_STOP_ON_ERROR
+               BNX2X_ERR("Wrong function state\n");
                return -EBUSY;
 #else
                /*
                 * Failed to complete the transaction in a "good way"
                 * Force both transactions with CLR bit
                 */
-               struct bnx2x_func_state_params func_params = {0};
+               struct bnx2x_func_state_params func_params = {NULL};
 
-               DP(BNX2X_MSG_SP, "Hmmm... unexpected function state! "
-                         "Forcing STARTED-->TX_ST0PPED-->STARTED\n");
+               DP(NETIF_MSG_IFDOWN,
+                  "Hmmm... unexpected function state! Forcing STARTED-->TX_STOPPED-->STARTED\n");
 
                func_params.f_obj = &bp->func_obj;
                __set_bit(RAMROD_DRV_CLR_ONLY,
@@ -7746,7 +8323,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
        int port = BP_PORT(bp);
        int i, rc = 0;
        u8 cos;
-       struct bnx2x_mcast_ramrod_params rparam = {0};
+       struct bnx2x_mcast_ramrod_params rparam = {NULL};
        u32 reset_code;
 
        /* Wait until tx fastpath tasks complete */
@@ -7754,7 +8331,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
                struct bnx2x_fastpath *fp = &bp->fp[i];
 
                for_each_cos_in_tx_queue(fp, cos)
-                       rc = bnx2x_clean_tx_queue(bp, &fp->txdata[cos]);
+                       rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
 #ifdef BNX2X_STOP_ON_ERROR
                if (rc)
                        return;
@@ -7765,16 +8342,17 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
        usleep_range(1000, 1000);
 
        /* Clean all ETH MACs */
-       rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_ETH_MAC, false);
+       rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC,
+                               false);
        if (rc < 0)
                BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc);
 
        /* Clean up UC list  */
-       rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_UC_LIST_MAC,
+       rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC,
                                true);
        if (rc < 0)
-               BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: "
-                         "%d\n", rc);
+               BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
+                         rc);
 
        /* Disable LLH */
        if (!CHIP_IS_E1(bp))
@@ -7849,6 +8427,8 @@ unload_error:
 
        /* Disable HW interrupts, NAPI */
        bnx2x_netif_stop(bp, 1);
+       /* Delete all NAPI objects */
+       bnx2x_del_all_napi(bp);
 
        /* Release IRQs */
        bnx2x_free_irq(bp);
@@ -7867,7 +8447,7 @@ void bnx2x_disable_close_the_gate(struct bnx2x *bp)
 {
        u32 val;
 
-       DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
+       DP(NETIF_MSG_IFDOWN, "Disabling \"close the gates\"\n");
 
        if (CHIP_IS_E1(bp)) {
                int port = BP_PORT(bp);
@@ -7920,7 +8500,7 @@ static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
                       (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
        }
 
-       DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
+       DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n",
                close ? "closing" : "opening");
        mmiowb();
 }
@@ -7962,7 +8542,7 @@ static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
        u32 shmem;
        u32 validity_offset;
 
-       DP(NETIF_MSG_HW, "Starting\n");
+       DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "Starting\n");
 
        /* Set `magic' bit in order to save MF config */
        if (!CHIP_IS_E1(bp))
@@ -7985,7 +8565,7 @@ static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
  *
  * @bp:        driver handle
  */
-static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
+static void bnx2x_mcp_wait_one(struct bnx2x *bp)
 {
        /* special handling for emulation and FPGA,
           wait 10 times longer */
@@ -8199,12 +8779,8 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global)
        } while (cnt-- > 0);
 
        if (cnt <= 0) {
-               DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
-                         " are still"
-                         " outstanding read requests after 1s!\n");
-               DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
-                         " port_is_idle_0=0x%08x,"
-                         " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
+               BNX2X_ERR("Tetris buffer didn't get empty or there are still outstanding read requests after 1s!\n");
+               BNX2X_ERR("sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
                          sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
                          pgl_exp_rom2);
                return -EAGAIN;
@@ -8270,13 +8846,38 @@ int bnx2x_leader_reset(struct bnx2x *bp)
 {
        int rc = 0;
        bool global = bnx2x_reset_is_global(bp);
+       u32 load_code;
+
+       /* if not going to reset MCP - load "fake" driver to reset HW while
+        * the driver is the owner of the HW
+        */
+       if (!global && !BP_NOMCP(bp)) {
+               load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
+               if (!load_code) {
+                       BNX2X_ERR("MCP response failure, aborting\n");
+                       rc = -EAGAIN;
+                       goto exit_leader_reset;
+               }
+               if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
+                   (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
+                       BNX2X_ERR("MCP unexpected resp, aborting\n");
+                       rc = -EAGAIN;
+                       goto exit_leader_reset2;
+               }
+               load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
+               if (!load_code) {
+                       BNX2X_ERR("MCP response failure, aborting\n");
+                       rc = -EAGAIN;
+                       goto exit_leader_reset2;
+               }
+       }
 
        /* Try to recover after the failure */
        if (bnx2x_process_kill(bp, global)) {
-               netdev_err(bp->dev, "Something bad had happen on engine %d! "
-                                   "Aii!\n", BP_PATH(bp));
+               BNX2X_ERR("Something bad happened on engine %d! Aii!\n",
+                         BP_PATH(bp));
                rc = -EAGAIN;
-               goto exit_leader_reset;
+               goto exit_leader_reset2;
        }
 
        /*
@@ -8287,6 +8888,12 @@ int bnx2x_leader_reset(struct bnx2x *bp)
        if (global)
                bnx2x_clear_reset_global(bp);
 
+exit_leader_reset2:
+       /* unload "fake driver" if it was loaded */
+       if (!global && !BP_NOMCP(bp)) {
+               bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
+               bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
+       }
 exit_leader_reset:
        bp->is_leader = 0;
        bnx2x_release_leader_lock(bp);
@@ -8294,7 +8901,7 @@ exit_leader_reset:
        return rc;
 }
 
-static inline void bnx2x_recovery_failed(struct bnx2x *bp)
+static void bnx2x_recovery_failed(struct bnx2x *bp)
 {
        netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n");
 
@@ -8323,13 +8930,16 @@ static inline void bnx2x_recovery_failed(struct bnx2x *bp)
 static void bnx2x_parity_recover(struct bnx2x *bp)
 {
        bool global = false;
+       u32 error_recovered, error_unrecovered;
+       bool is_parity;
 
        DP(NETIF_MSG_HW, "Handling parity\n");
        while (1) {
                switch (bp->recovery_state) {
                case BNX2X_RECOVERY_INIT:
                        DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
-                       bnx2x_chk_parity_attn(bp, &global, false);
+                       is_parity = bnx2x_chk_parity_attn(bp, &global, false);
+                       WARN_ON(!is_parity);
 
                        /* Try to get a LEADER_LOCK HW lock */
                        if (bnx2x_trylock_leader_lock(bp)) {
@@ -8353,15 +8963,6 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
 
                        bp->recovery_state = BNX2X_RECOVERY_WAIT;
 
-                       /*
-                        * Reset MCP command sequence number and MCP mail box
-                        * sequence as we are going to reset the MCP.
-                        */
-                       if (global) {
-                               bp->fw_seq = 0;
-                               bp->fw_drv_pulse_wr_seq = 0;
-                       }
-
                        /* Ensure "is_leader", MCP command sequence and
                         * "recovery_state" update values are seen on other
                         * CPUs.
@@ -8373,10 +8974,10 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
                        DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
                        if (bp->is_leader) {
                                int other_engine = BP_PATH(bp) ? 0 : 1;
-                               u32 other_load_counter =
-                                       bnx2x_get_load_cnt(bp, other_engine);
-                               u32 load_counter =
-                                       bnx2x_get_load_cnt(bp, BP_PATH(bp));
+                               bool other_load_status =
+                                       bnx2x_get_load_status(bp, other_engine);
+                               bool load_status =
+                                       bnx2x_get_load_status(bp, BP_PATH(bp));
                                global = bnx2x_reset_is_global(bp);
 
                                /*
@@ -8387,8 +8988,8 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
                                 * the gates will remain closed for that
                                 * engine.
                                 */
-                               if (load_counter ||
-                                   (global && other_load_counter)) {
+                               if (load_status ||
+                                   (global && other_load_status)) {
                                        /* Wait until all other functions go
                                         * down.
                                         */
@@ -8445,13 +9046,32 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
                                                return;
                                        }
 
-                                       if (bnx2x_nic_load(bp, LOAD_NORMAL))
-                                               bnx2x_recovery_failed(bp);
-                                       else {
+                                       error_recovered =
+                                         bp->eth_stats.recoverable_error;
+                                       error_unrecovered =
+                                         bp->eth_stats.unrecoverable_error;
+                                       bp->recovery_state =
+                                               BNX2X_RECOVERY_NIC_LOADING;
+                                       if (bnx2x_nic_load(bp, LOAD_NORMAL)) {
+                                               error_unrecovered++;
+                                               netdev_err(bp->dev,
+                                                          "Recovery failed. Power cycle needed\n");
+                                               /* Disconnect this device */
+                                               netif_device_detach(bp->dev);
+                                               /* Shut down the power */
+                                               bnx2x_set_power_state(
+                                                       bp, PCI_D3hot);
+                                               smp_mb();
+                                       } else {
                                                bp->recovery_state =
                                                        BNX2X_RECOVERY_DONE;
+                                               error_recovered++;
                                                smp_mb();
                                        }
+                                       bp->eth_stats.recoverable_error =
+                                               error_recovered;
+                                       bp->eth_stats.unrecoverable_error =
+                                               error_unrecovered;
 
                                        return;
                                }
@@ -8462,6 +9082,8 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
        }
 }
 
+static int bnx2x_close(struct net_device *dev);
+
 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
  * scheduled on a general queue in order to prevent a deadlock.
  */
@@ -8476,8 +9098,7 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
 
        /* if stop on error is defined no recovery flows should be executed */
 #ifdef BNX2X_STOP_ON_ERROR
-       BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined "
-                 "so reset not done to allow debug dump,\n"
+       BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
                  "you will need to reboot when done\n");
        goto sp_rtnl_not_reset;
 #endif
@@ -8513,14 +9134,15 @@ sp_rtnl_not_reset:
 #endif
        if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
                bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
-
+       if (test_and_clear_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, &bp->sp_rtnl_state))
+               bnx2x_after_function_update(bp);
        /*
         * in case of fan failure we need to reset it if the "stop on error"
         * debug flag is set, since we are trying to prevent permanent
         * overheating damage
         */
        if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) {
-               DP(BNX2X_MSG_SP, "fan failure detected. Unloading driver\n");
+               DP(NETIF_MSG_HW, "fan failure detected. Unloading driver\n");
                netif_device_detach(bp->dev);
                bnx2x_close(bp->dev);
        }
@@ -8603,115 +9225,393 @@ static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
                bnx2x_undi_int_disable_e1h(bp);
 }
 
-static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
+static void __devinit bnx2x_prev_unload_close_mac(struct bnx2x *bp)
 {
-       u32 val;
+       u32 val, base_addr, offset, mask, reset_reg;
+       bool mac_stopped = false;
+       u8 port = BP_PORT(bp);
 
-       /* Check if there is any driver already loaded */
-       val = REG_RD(bp, MISC_REG_UNPREPARED);
-       if (val == 0x1) {
+       reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);
 
-               bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
-               /*
-                * Check if it is the UNDI driver
+       if (!CHIP_IS_E3(bp)) {
+               val = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
+               mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
+               if ((mask & reset_reg) && val) {
+                       u32 wb_data[2];
+                       BNX2X_DEV_INFO("Disable bmac Rx\n");
+                       base_addr = BP_PORT(bp) ? NIG_REG_INGRESS_BMAC1_MEM
+                                               : NIG_REG_INGRESS_BMAC0_MEM;
+                       offset = CHIP_IS_E2(bp) ? BIGMAC2_REGISTER_BMAC_CONTROL
+                                               : BIGMAC_REGISTER_BMAC_CONTROL;
+
+                       /*
+                        * use rd/wr since we cannot use dmae. This is safe
+                        * since MCP won't access the bus due to the request
+                        * to unload, and no function on the path can be
+                        * loaded at this time.
+                        */
+                       wb_data[0] = REG_RD(bp, base_addr + offset);
+                       wb_data[1] = REG_RD(bp, base_addr + offset + 0x4);
+                       wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
+                       REG_WR(bp, base_addr + offset, wb_data[0]);
+                       REG_WR(bp, base_addr + offset + 0x4, wb_data[1]);
+               }
+               BNX2X_DEV_INFO("Disable emac Rx\n");
+               REG_WR(bp, NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4, 0);
+
+               mac_stopped = true;
+       } else {
+               if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
+                       BNX2X_DEV_INFO("Disable xmac Rx\n");
+                       base_addr = BP_PORT(bp) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+                       val = REG_RD(bp, base_addr + XMAC_REG_PFC_CTRL_HI);
+                       REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
+                              val & ~(1 << 1));
+                       REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
+                              val | (1 << 1));
+                       REG_WR(bp, base_addr + XMAC_REG_CTRL, 0);
+                       mac_stopped = true;
+               }
+               mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
+               if (mask & reset_reg) {
+                       BNX2X_DEV_INFO("Disable umac Rx\n");
+                       base_addr = BP_PORT(bp) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
+                       REG_WR(bp, base_addr + UMAC_REG_COMMAND_CONFIG, 0);
+                       mac_stopped = true;
+               }
+       }
+
+       if (mac_stopped)
+               msleep(20);
+}
+
+#define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
+#define BNX2X_PREV_UNDI_RCQ(val)       ((val) & 0xffff)
+#define BNX2X_PREV_UNDI_BD(val)                ((val) >> 16 & 0xffff)
+#define BNX2X_PREV_UNDI_PROD(rcq, bd)  ((bd) << 16 | (rcq))
+
+static void __devinit bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port,
+                                                u8 inc)
+{
+       u16 rcq, bd;
+       u32 tmp_reg = REG_RD(bp, BNX2X_PREV_UNDI_PROD_ADDR(port));
+
+       rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc;
+       bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc;
+
+       tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd);
+       REG_WR(bp, BNX2X_PREV_UNDI_PROD_ADDR(port), tmp_reg);
+
+       BNX2X_DEV_INFO("UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n",
+                      port, bd, rcq);
+}
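
For reference, the BNX2X_PREV_UNDI_* macros above pack the two UNDI ring producers into one 32-bit register word: the RCQ producer in the low 16 bits and the BD producer in the high 16 bits. A minimal userspace sketch of that packing and of the increment done by bnx2x_prev_unload_undi_inc() (illustrative only, not driver code):

/* Standalone sketch: pack/unpack the UNDI BD and RCQ producers the same way
 * as the BNX2X_PREV_UNDI_* macros (BD in the high half, RCQ in the low half)
 * and bump both by 'inc', letting each half wrap at 16 bits. */
#include <stdint.h>
#include <stdio.h>

static uint32_t undi_prod_inc(uint32_t reg, uint8_t inc)
{
	uint16_t rcq = (uint16_t)(reg & 0xffff) + inc;          /* low half  */
	uint16_t bd  = (uint16_t)((reg >> 16) & 0xffff) + inc;  /* high half */

	return ((uint32_t)bd << 16) | rcq;
}

int main(void)
{
	uint32_t reg = 0x00100008;	/* bd = 0x0010, rcq = 0x0008 */

	reg = undi_prod_inc(reg, 1);
	printf("bd -> 0x%04x, rcq -> 0x%04x\n",
	       (unsigned)(reg >> 16), (unsigned)(reg & 0xffff));
	return 0;
}
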
+
+static int __devinit bnx2x_prev_mcp_done(struct bnx2x *bp)
+{
+       u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
+       if (!rc) {
+               BNX2X_ERR("MCP response failure, aborting\n");
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
+static bool __devinit bnx2x_prev_is_path_marked(struct bnx2x *bp)
+{
+       struct bnx2x_prev_path_list *tmp_list;
+       int rc = false;
+
+       if (down_trylock(&bnx2x_prev_sem))
+               return false;
+
+       list_for_each_entry(tmp_list, &bnx2x_prev_list, list) {
+               if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot &&
+                   bp->pdev->bus->number == tmp_list->bus &&
+                   BP_PATH(bp) == tmp_list->path) {
+                       rc = true;
+                       BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n",
+                                      BP_PATH(bp));
+                       break;
+               }
+       }
+
+       up(&bnx2x_prev_sem);
+
+       return rc;
+}
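
Note that bnx2x_prev_is_path_marked() uses down_trylock() so the probe path never sleeps on the shared list lock; when the semaphore is contended it simply reports the path as not marked. A rough userspace sketch of the same trylock-and-scan pattern, using a pthread mutex and a static array in place of bnx2x_prev_sem and bnx2x_prev_list (assumed names, illustrative only):

/* Standalone sketch: non-blocking lookup in a list of {bus, slot, path}
 * tuples that were already cleaned by a previous unload. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct prev_path {
	int bus, slot, path;
};

static pthread_mutex_t prev_lock = PTHREAD_MUTEX_INITIALIZER;
static struct prev_path prev_list[] = { { 1, 0, 0 }, { 2, 3, 1 } };

static bool path_is_marked(int bus, int slot, int path)
{
	bool found = false;
	size_t i;

	if (pthread_mutex_trylock(&prev_lock))
		return false;		/* never block the caller */

	for (i = 0; i < sizeof(prev_list) / sizeof(prev_list[0]); i++) {
		if (prev_list[i].bus == bus && prev_list[i].slot == slot &&
		    prev_list[i].path == path) {
			found = true;
			break;
		}
	}

	pthread_mutex_unlock(&prev_lock);
	return found;
}

int main(void)
{
	printf("path marked: %d\n", path_is_marked(2, 3, 1));
	return 0;
}
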
+
+static int __devinit bnx2x_prev_mark_path(struct bnx2x *bp)
+{
+       struct bnx2x_prev_path_list *tmp_list;
+       int rc;
+
+       tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL);
+       if (!tmp_list) {
+               BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n");
+               return -ENOMEM;
+       }
+
+       tmp_list->bus = bp->pdev->bus->number;
+       tmp_list->slot = PCI_SLOT(bp->pdev->devfn);
+       tmp_list->path = BP_PATH(bp);
+
+       rc = down_interruptible(&bnx2x_prev_sem);
+       if (rc) {
+               BNX2X_ERR("Received %d when trying to take the lock\n", rc);
+               kfree(tmp_list);
+       } else {
+               BNX2X_DEV_INFO("Marked path [%d] - finished previous unload\n",
+                               BP_PATH(bp));
+               list_add(&tmp_list->list, &bnx2x_prev_list);
+               up(&bnx2x_prev_sem);
+       }
+
+       return rc;
+}
+
+static int __devinit bnx2x_do_flr(struct bnx2x *bp)
+{
+       int i;
+       u16 status;
+       struct pci_dev *dev = bp->pdev;
+
+       if (CHIP_IS_E1x(bp)) {
+               BNX2X_DEV_INFO("FLR not supported in E1/E1H\n");
+               return -EINVAL;
+       }
+
+       /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */
+       if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
+               BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n",
+                         bp->common.bc_ver);
+               return -EINVAL;
+       }
+
+       /* Wait for the Transaction Pending bit to clear */
+       for (i = 0; i < 4; i++) {
+               if (i)
+                       msleep((1 << (i - 1)) * 100);
+
+               pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
+               if (!(status & PCI_EXP_DEVSTA_TRPND))
+                       goto clear;
+       }
+
+       dev_err(&dev->dev,
+               "transaction is not cleared; proceeding with reset anyway\n");
+
+clear:
+
+       BNX2X_DEV_INFO("Initiating FLR\n");
+       bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0);
+
+       return 0;
+}
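
The wait loop in bnx2x_do_flr() polls the Transaction Pending status up to four times, sleeping 100, 200 and then 400 ms between attempts, and falls through to the FLR even if the bit never clears. A small standalone sketch of that backoff pattern, where transaction_pending() is a stand-in for the real PCI_EXP_DEVSTA_TRPND read (illustrative only, not driver code):

/* Standalone sketch of the exponential-backoff poll used before the FLR. */
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool transaction_pending(int attempt)
{
	return attempt < 2;	/* stand-in for the device status read */
}

int main(void)
{
	int i;

	for (i = 0; i < 4; i++) {
		if (i)	/* 100 ms, 200 ms, 400 ms between retries */
			usleep((1 << (i - 1)) * 100 * 1000);

		if (!transaction_pending(i)) {
			printf("pending bit cleared after %d attempt(s)\n",
			       i + 1);
			return 0;
		}
	}
	printf("pending bit never cleared; would proceed with reset anyway\n");
	return 0;
}
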
+
+static int __devinit bnx2x_prev_unload_uncommon(struct bnx2x *bp)
+{
+       int rc;
+
+       BNX2X_DEV_INFO("Uncommon unload Flow\n");
+
+       /* Test if previous unload process was already finished for this path */
+       if (bnx2x_prev_is_path_marked(bp))
+               return bnx2x_prev_mcp_done(bp);
+
+       /* If function has FLR capabilities, and existing FW version matches
+        * the one required, then FLR will be sufficient to clean any residue
+        * left by previous driver
+        */
+       rc = bnx2x_test_firmware_version(bp, false);
+
+       if (!rc) {
+               /* fw version is good */
+               BNX2X_DEV_INFO("FW version matches our own. Attempting FLR\n");
+               rc = bnx2x_do_flr(bp);
+       }
+
+       if (!rc) {
+               /* FLR was performed */
+               BNX2X_DEV_INFO("FLR successful\n");
+               return 0;
+       }
+
+       BNX2X_DEV_INFO("Could not FLR\n");
+
+       /* Close the MCP request, return failure */
+       rc = bnx2x_prev_mcp_done(bp);
+       if (!rc)
+               rc = BNX2X_PREV_WAIT_NEEDED;
+
+       return rc;
+}
+
+static int __devinit bnx2x_prev_unload_common(struct bnx2x *bp)
+{
+       u32 reset_reg, tmp_reg = 0, rc;
+       /* It is possible a previous function received 'common' answer,
+        * but hasn't loaded yet, therefore creating a scenario of
+        * multiple functions receiving 'common' on the same path.
+        */
+       BNX2X_DEV_INFO("Common unload Flow\n");
+
+       if (bnx2x_prev_is_path_marked(bp))
+               return bnx2x_prev_mcp_done(bp);
+
+       reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1);
+
+       /* Reset should be performed after BRB is emptied */
+       if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
+               u32 timer_count = 1000;
+               bool prev_undi = false;
+
+               /* Close the MAC Rx to prevent BRB from filling up */
+               bnx2x_prev_unload_close_mac(bp);
+
+               /* Check if the UNDI driver was previously loaded
                 * UNDI driver initializes CID offset for normal bell to 0x7
                 */
-               val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
-               if (val == 0x7) {
-                       u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
-                       /* save our pf_num */
-                       int orig_pf_num = bp->pf_num;
-                       int port;
-                       u32 swap_en, swap_val, value;
-
-                       /* clear the UNDI indication */
-                       REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
-
-                       BNX2X_DEV_INFO("UNDI is active! reset device\n");
-
-                       /* try unload UNDI on port 0 */
-                       bp->pf_num = 0;
-                       bp->fw_seq =
-                             (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
-                               DRV_MSG_SEQ_NUMBER_MASK);
-                       reset_code = bnx2x_fw_command(bp, reset_code, 0);
-
-                       /* if UNDI is loaded on the other port */
-                       if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
-
-                               /* send "DONE" for previous unload */
-                               bnx2x_fw_command(bp,
-                                                DRV_MSG_CODE_UNLOAD_DONE, 0);
-
-                               /* unload UNDI on port 1 */
-                               bp->pf_num = 1;
-                               bp->fw_seq =
-                             (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
-                                       DRV_MSG_SEQ_NUMBER_MASK);
-                               reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
-
-                               bnx2x_fw_command(bp, reset_code, 0);
+               reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1);
+               if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
+                       tmp_reg = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
+                       if (tmp_reg == 0x7) {
+                               BNX2X_DEV_INFO("UNDI previously loaded\n");
+                               prev_undi = true;
+                               /* clear the UNDI indication */
+                               REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
                        }
+               }
+               /* wait until BRB is empty */
+               tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
+               while (timer_count) {
+                       u32 prev_brb = tmp_reg;
 
-                       bnx2x_undi_int_disable(bp);
-                       port = BP_PORT(bp);
-
-                       /* close input traffic and wait for it */
-                       /* Do not rcv packets to BRB */
-                       REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_DRV_MASK :
-                                          NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
-                       /* Do not direct rcv packets that are not for MCP to
-                        * the BRB */
-                       REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
-                                          NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
-                       /* clear AEU */
-                       REG_WR(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
-                                          MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
-                       msleep(10);
-
-                       /* save NIG port swap info */
-                       swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
-                       swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
-                       /* reset device */
-                       REG_WR(bp,
-                              GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
-                              0xd3ffffff);
-
-                       value = 0x1400;
-                       if (CHIP_IS_E3(bp)) {
-                               value |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
-                               value |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
-                       }
+                       tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
+                       if (!tmp_reg)
+                               break;
+
+                       BNX2X_DEV_INFO("BRB still has 0x%08x\n", tmp_reg);
+
+                       /* reset timer as long as BRB actually gets emptied */
+                       if (prev_brb > tmp_reg)
+                               timer_count = 1000;
+                       else
+                               timer_count--;
 
-                       REG_WR(bp,
-                              GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
-                              value);
-
-                       /* take the NIG out of reset and restore swap values */
-                       REG_WR(bp,
-                              GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
-                              MISC_REGISTERS_RESET_REG_1_RST_NIG);
-                       REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
-                       REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
-
-                       /* send unload done to the MCP */
-                       bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
-
-                       /* restore our func and fw_seq */
-                       bp->pf_num = orig_pf_num;
-                       bp->fw_seq =
-                             (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
-                               DRV_MSG_SEQ_NUMBER_MASK);
+                       /* If UNDI resides in memory, manually increment it */
+                       if (prev_undi)
+                               bnx2x_prev_unload_undi_inc(bp, BP_PORT(bp), 1);
+
+                       udelay(10);
                }
 
-               /* now it's safe to release the lock */
-               bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+               if (!timer_count)
+                       BNX2X_ERR("Failed to empty BRB, hope for the best\n");
+
+       }
+
+       /* No packets are in the pipeline, path is ready for reset */
+       bnx2x_reset_common(bp);
+
+       rc = bnx2x_prev_mark_path(bp);
+       if (rc) {
+               bnx2x_prev_mcp_done(bp);
+               return rc;
+       }
+
+       return bnx2x_prev_mcp_done(bp);
+}
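
The BRB drain loop above behaves like a watchdog rather than a fixed timeout: the countdown is re-armed to 1000 whenever the number of full blocks actually drops, and only decremented when no progress is seen (with the UNDI producers nudged each pass when a legacy driver was detected). A standalone sketch of that re-arming logic, where read_full_blocks() stands in for the BRB1_REG_NUM_OF_FULL_BLOCKS read (illustrative only, not driver code):

/* Standalone sketch of the "re-arm the timer only while draining" loop. */
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_blocks = 5;

static uint32_t read_full_blocks(void)
{
	if (fake_blocks)
		fake_blocks--;		/* pretend the BRB slowly drains */
	return fake_blocks;
}

int main(void)
{
	uint32_t timer_count = 1000;
	uint32_t cur = read_full_blocks();

	while (timer_count) {
		uint32_t prev = cur;

		cur = read_full_blocks();
		if (!cur)
			break;

		if (prev > cur)
			timer_count = 1000;	/* progress: re-arm */
		else
			timer_count--;
	}

	puts(timer_count ? "BRB empty" : "failed to empty BRB");
	return 0;
}
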
+
+/* A previous driver DMAE transaction may have occurred when the pre-boot stage
+ * ended and boot began, or when a kdump kernel was loaded. Either case would
+ * invalidate the addresses of the transaction, setting the was-error bit in
+ * the PCI glue and causing all hw-to-host PCIe transactions to time out. If
+ * this happened, we want to clear from the pglueb both the interrupt which
+ * detected this and the was-done bit.
+ */
+static void __devinit bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
+{
+       u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
+       if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
+               BNX2X_ERR("was error bit was found to be set in pglueb upon startup - clearing\n");
+               REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << BP_FUNC(bp));
+       }
+}
+
+static int __devinit bnx2x_prev_unload(struct bnx2x *bp)
+{
+       int time_counter = 10;
+       u32 rc, fw, hw_lock_reg, hw_lock_val;
+       BNX2X_DEV_INFO("Entering Previous Unload Flow\n");
+
+       /* clear hw from errors which may have resulted from an interrupted
+        * dmae transaction.
+        */
+       bnx2x_prev_interrupted_dmae(bp);
+
+       /* Release previously held locks */
+       hw_lock_reg = (BP_FUNC(bp) <= 5) ?
+                     (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) :
+                     (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8);
+
+       hw_lock_val = (REG_RD(bp, hw_lock_reg));
+       if (hw_lock_val) {
+               if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
+                       BNX2X_DEV_INFO("Release Previously held NVRAM lock\n");
+                       REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
+                              (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << BP_PORT(bp)));
+               }
+
+               BNX2X_DEV_INFO("Release Previously held hw lock\n");
+               REG_WR(bp, hw_lock_reg, 0xffffffff);
+       } else
+               BNX2X_DEV_INFO("No need to release hw/nvram locks\n");
+
+       if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) {
+               BNX2X_DEV_INFO("Release previously held alr\n");
+               REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0);
+       }
+
+       do {
+               /* Lock MCP using an unload request */
+               fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
+               if (!fw) {
+                       BNX2X_ERR("MCP response failure, aborting\n");
+                       rc = -EBUSY;
+                       break;
+               }
+
+               if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
+                       rc = bnx2x_prev_unload_common(bp);
+                       break;
+               }
+
+               /* non-common reply from MCP might require looping */
+               rc = bnx2x_prev_unload_uncommon(bp);
+               if (rc != BNX2X_PREV_WAIT_NEEDED)
+                       break;
+
+               msleep(20);
+       } while (--time_counter);
+
+       if (!time_counter || rc) {
+               BNX2X_ERR("Failed unloading previous driver, aborting\n");
+               rc = -EBUSY;
        }
+
+       BNX2X_DEV_INFO("Finished Previous Unload Flow [%d]\n", rc);
+
+       return rc;
 }
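
bnx2x_prev_unload() retries the MCP unload handshake at most ten times, and only keeps looping while the uncommon path reports BNX2X_PREV_WAIT_NEEDED; any other result (success or a hard error) ends the loop. A minimal sketch of that bounded retry pattern, where try_unload() and WAIT_NEEDED are stand-ins rather than driver symbols:

/* Standalone sketch: retry only while the helper asks us to wait. */
#include <stdio.h>
#include <unistd.h>

#define WAIT_NEEDED	1

static int try_unload(int attempt)
{
	return attempt < 3 ? WAIT_NEEDED : 0;	/* succeeds on the 4th try */
}

int main(void)
{
	int time_counter = 10, attempt = 0, rc;

	do {
		rc = try_unload(attempt++);
		if (rc != WAIT_NEEDED)
			break;
		usleep(20 * 1000);		/* mirrors the msleep(20) */
	} while (--time_counter);

	if (!time_counter || rc)
		printf("failed to unload previous driver\n");
	else
		printf("previous unload finished after %d attempt(s)\n",
		       attempt);
	return 0;
}
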
 
 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
 {
-       u32 val, val2, val3, val4, id;
+       u32 val, val2, val3, val4, id, boot_mode;
        u16 pmc;
 
        /* Get the chip revision id and number. */
@@ -8726,6 +9626,17 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
        id |= (val & 0xf);
        bp->common.chip_id = id;
 
+       /* force 57811 according to MISC register */
+       if (REG_RD(bp, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
+               if (CHIP_IS_57810(bp))
+                       bp->common.chip_id = (CHIP_NUM_57811 << 16) |
+                               (bp->common.chip_id & 0x0000FFFF);
+               else if (CHIP_IS_57810_MF(bp))
+                       bp->common.chip_id = (CHIP_NUM_57811_MF << 16) |
+                               (bp->common.chip_id & 0x0000FFFF);
+               bp->common.chip_id |= 0x1;
+       }
+
        /* Set doorbell size */
        bp->db_size = (1 << BNX2X_DB_SHIFT);
 
@@ -8749,6 +9660,8 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
                bp->pfid = bp->pf_num;                  /* 0..7 */
        }
 
+       BNX2X_DEV_INFO("pf_id: %x\n", bp->pfid);
+
        bp->link_params.chip_id = bp->common.chip_id;
        BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
 
@@ -8806,8 +9719,8 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
        if (val < BNX2X_BC_VER) {
                /* for now only warn
                 * later we might need to enforce this */
-               BNX2X_ERR("This driver needs bc_ver %X but found %X, "
-                         "please upgrade BC\n", BNX2X_BC_VER, val);
+               BNX2X_ERR("This driver needs bc_ver %X but found %X, please upgrade BC\n",
+                         BNX2X_BC_VER, val);
        }
        bp->link_params.feature_config_flags |=
                                (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
@@ -8816,10 +9729,37 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
        bp->link_params.feature_config_flags |=
                (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
                FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
-
+       bp->link_params.feature_config_flags |=
+               (val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ?
+               FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0;
        bp->link_params.feature_config_flags |=
                (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
                FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
+       bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
+                       BC_SUPPORTS_PFC_STATS : 0;
+
+       bp->flags |= (val >= REQ_BC_VER_4_FCOE_FEATURES) ?
+                       BC_SUPPORTS_FCOE_FEATURES : 0;
+
+       bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ?
+                       BC_SUPPORTS_DCBX_MSG_NON_PMF : 0;
+       boot_mode = SHMEM_RD(bp,
+                       dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
+                       PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
+       switch (boot_mode) {
+       case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE:
+               bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE;
+               break;
+       case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB:
+               bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI;
+               break;
+       case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT:
+               bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE;
+               break;
+       case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE:
+               bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE;
+               break;
+       }
 
        pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
        bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
@@ -8881,12 +9821,13 @@ static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
        }
 
 #ifdef CONFIG_PCI_MSI
-       /*
-        * It's expected that number of CAM entries for this functions is equal
-        * to the number evaluated based on the MSI-X table size. We want a
-        * harsh warning if these values are different!
+       /* Due to new PF resource allocation by MFW T7.4 and above, it is
+        * possible that the number of CAM entries will not be equal to the
+        * value advertised in PCI.
+        * The driver should use the minimum of the two as the actual status
+        * block count.
+        */
-       WARN_ON(bp->igu_sb_cnt != igu_sb_cnt);
+       bp->igu_sb_cnt = min_t(int, bp->igu_sb_cnt, igu_sb_cnt);
 #endif
 
        if (igu_sb_cnt == 0)
@@ -8928,8 +9869,7 @@ static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
        }
 
        if (!(bp->port.supported[0] || bp->port.supported[1])) {
-               BNX2X_ERR("NVRAM config error. BAD phy config."
-                         "PHY1 config 0x%x, PHY2 config 0x%x\n",
+               BNX2X_ERR("NVRAM config error. BAD phy config. PHY1 config 0x%x, PHY2 config 0x%x\n",
                           SHMEM_RD(bp,
                           dev_info.port_hw_config[port].external_phy_config),
                           SHMEM_RD(bp,
@@ -9017,6 +9957,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
                                        SPEED_AUTO_NEG;
                                bp->port.advertising[idx] |=
                                        bp->port.supported[idx];
+                               if (bp->link_params.phy[EXT_PHY1].type ==
+                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+                                       bp->port.advertising[idx] |=
+                                       (SUPPORTED_100baseT_Half |
+                                        SUPPORTED_100baseT_Full);
                        } else {
                                /* force 10G, no AN */
                                bp->link_params.req_line_speed[idx] =
@@ -9036,9 +9981,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
                                        (ADVERTISED_10baseT_Full |
                                         ADVERTISED_TP);
                        } else {
-                               BNX2X_ERR("NVRAM config error. "
-                                           "Invalid link_config 0x%x"
-                                           "  speed_cap_mask 0x%x\n",
+                               BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
                                            link_config,
                                    bp->link_params.speed_cap_mask[idx]);
                                return;
@@ -9055,9 +9998,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
                                        (ADVERTISED_10baseT_Half |
                                         ADVERTISED_TP);
                        } else {
-                               BNX2X_ERR("NVRAM config error. "
-                                           "Invalid link_config 0x%x"
-                                           "  speed_cap_mask 0x%x\n",
+                               BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
                                            link_config,
                                          bp->link_params.speed_cap_mask[idx]);
                                return;
@@ -9073,9 +10014,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
                                        (ADVERTISED_100baseT_Full |
                                         ADVERTISED_TP);
                        } else {
-                               BNX2X_ERR("NVRAM config error. "
-                                           "Invalid link_config 0x%x"
-                                           "  speed_cap_mask 0x%x\n",
+                               BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
                                            link_config,
                                          bp->link_params.speed_cap_mask[idx]);
                                return;
@@ -9093,9 +10032,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
                                        (ADVERTISED_100baseT_Half |
                                         ADVERTISED_TP);
                        } else {
-                               BNX2X_ERR("NVRAM config error. "
-                                   "Invalid link_config 0x%x"
-                                   "  speed_cap_mask 0x%x\n",
+                               BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
                                    link_config,
                                    bp->link_params.speed_cap_mask[idx]);
                                return;
@@ -9111,9 +10048,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
                                        (ADVERTISED_1000baseT_Full |
                                         ADVERTISED_TP);
                        } else {
-                               BNX2X_ERR("NVRAM config error. "
-                                   "Invalid link_config 0x%x"
-                                   "  speed_cap_mask 0x%x\n",
+                               BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
                                    link_config,
                                    bp->link_params.speed_cap_mask[idx]);
                                return;
@@ -9129,9 +10064,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
                                        (ADVERTISED_2500baseX_Full |
                                                ADVERTISED_TP);
                        } else {
-                               BNX2X_ERR("NVRAM config error. "
-                                   "Invalid link_config 0x%x"
-                                   "  speed_cap_mask 0x%x\n",
+                               BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
                                    link_config,
                                    bp->link_params.speed_cap_mask[idx]);
                                return;
@@ -9147,9 +10080,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
                                        (ADVERTISED_10000baseT_Full |
                                                ADVERTISED_FIBRE);
                        } else {
-                               BNX2X_ERR("NVRAM config error. "
-                                   "Invalid link_config 0x%x"
-                                   "  speed_cap_mask 0x%x\n",
+                               BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
                                    link_config,
                                    bp->link_params.speed_cap_mask[idx]);
                                return;
@@ -9160,8 +10091,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
 
                        break;
                default:
-                       BNX2X_ERR("NVRAM config error. "
-                                 "BAD link speed link_config 0x%x\n",
+                       BNX2X_ERR("NVRAM config error. BAD link speed link_config 0x%x\n",
                                  link_config);
                                bp->link_params.req_line_speed[idx] =
                                                        SPEED_AUTO_NEG;
@@ -9179,8 +10109,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
                                BNX2X_FLOW_CTRL_NONE;
                }
 
-               BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d req_flow_ctrl"
-                              " 0x%x advertising 0x%x\n",
+               BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d req_flow_ctrl 0x%x advertising 0x%x\n",
                               bp->link_params.req_line_speed[idx],
                               bp->link_params.req_duplex[idx],
                               bp->link_params.req_flow_ctrl[idx],
@@ -9200,7 +10129,7 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
 {
        int port = BP_PORT(bp);
        u32 config;
-       u32 ext_phy_type, ext_phy_config;
+       u32 ext_phy_type, ext_phy_config, eee_mode;
 
        bp->link_params.bp = bp;
        bp->link_params.port = port;
@@ -9229,8 +10158,7 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
        bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
                   (config & PORT_FEATURE_WOL_ENABLED));
 
-       BNX2X_DEV_INFO("lane_config 0x%08x  "
-                      "speed_cap_mask0 0x%08x  link_config0 0x%08x\n",
+       BNX2X_DEV_INFO("lane_config 0x%08x  speed_cap_mask0 0x%08x  link_config0 0x%08x\n",
                       bp->link_params.lane_config,
                       bp->link_params.speed_cap_mask[0],
                       bp->port.link_config[0]);
@@ -9268,11 +10196,25 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
                bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
                                                        bp->common.shmem_base,
                                                        bp->common.shmem2_base);
+
+       /* Configure link feature according to nvram value */
+       eee_mode = (((SHMEM_RD(bp, dev_info.
+                     port_feature_config[port].eee_power_mode)) &
+                    PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
+                   PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
+       if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
+               bp->link_params.eee_mode = EEE_MODE_ADV_LPI |
+                                          EEE_MODE_ENABLE_LPI |
+                                          EEE_MODE_OUTPUT_TIME;
+       } else {
+               bp->link_params.eee_mode = 0;
+       }
 }
 
-#ifdef BCM_CNIC
 void bnx2x_get_iscsi_info(struct bnx2x *bp)
 {
+       u32 no_flags = NO_ISCSI_FLAG;
+#ifdef BCM_CNIC
        int port = BP_PORT(bp);
 
        u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
@@ -9291,11 +10233,31 @@ void bnx2x_get_iscsi_info(struct bnx2x *bp)
         * disable the feature.
         */
        if (!bp->cnic_eth_dev.max_iscsi_conn)
-               bp->flags |= NO_ISCSI_FLAG;
+               bp->flags |= no_flags;
+#else
+       bp->flags |= no_flags;
+#endif
 }
 
+#ifdef BCM_CNIC
+static void __devinit bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
+{
+       /* Port info */
+       bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
+               MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_upper);
+       bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
+               MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_lower);
+
+       /* Node info */
+       bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
+               MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_upper);
+       bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
+               MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower);
+}
+#endif
 static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp)
 {
+#ifdef BCM_CNIC
        int port = BP_PORT(bp);
        int func = BP_ABS_FUNC(bp);
 
@@ -9329,30 +10291,15 @@ static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp)
                                dev_info.port_hw_config[port].
                                 fcoe_wwn_node_name_lower);
        } else if (!IS_MF_SD(bp)) {
-               u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
-
                /*
                 * Read the WWN info only if the FCoE feature is enabled for
                 * this function.
                 */
-               if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
-                       /* Port info */
-                       bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
-                               MF_CFG_RD(bp, func_ext_config[func].
-                                               fcoe_wwn_port_name_upper);
-                       bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
-                               MF_CFG_RD(bp, func_ext_config[func].
-                                               fcoe_wwn_port_name_lower);
-
-                       /* Node info */
-                       bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
-                               MF_CFG_RD(bp, func_ext_config[func].
-                                               fcoe_wwn_node_name_upper);
-                       bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
-                               MF_CFG_RD(bp, func_ext_config[func].
-                                               fcoe_wwn_node_name_lower);
-               }
-       }
+               if (BNX2X_MF_EXT_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp))
+                       bnx2x_get_ext_wwn_info(bp, func);
+
+       } else if (IS_MF_FCOE_SD(bp))
+               bnx2x_get_ext_wwn_info(bp, func);
 
        BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn);
 
@@ -9362,6 +10309,9 @@ static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp)
         */
        if (!bp->cnic_eth_dev.max_fcoe_conn)
                bp->flags |= NO_FCOE_FLAG;
+#else
+       bp->flags |= NO_FCOE_FLAG;
+#endif
 }
 
 static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
@@ -9374,7 +10324,6 @@ static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
        bnx2x_get_iscsi_info(bp);
        bnx2x_get_fcoe_info(bp);
 }
-#endif
 
 static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
 {
@@ -9391,7 +10340,7 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
 
        if (BP_NOMCP(bp)) {
                BNX2X_ERROR("warning: random MAC workaround active\n");
-               random_ether_addr(bp->dev->dev_addr);
+               eth_hw_addr_random(bp->dev);
        } else if (IS_MF(bp)) {
                val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
                val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
@@ -9400,10 +10349,14 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
                        bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
 
 #ifdef BCM_CNIC
-               /* iSCSI and FCoE NPAR MACs: if there is no either iSCSI or
+               /*
+                * iSCSI and FCoE NPAR MACs: if there is no either iSCSI or
                 * FCoE MAC then the appropriate feature should be disabled.
+                *
+                * In non SD mode features configuration comes from
+                * struct func_ext_config.
                 */
-               if (IS_MF_SI(bp)) {
+               if (!IS_MF_SD(bp)) {
                        u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
                        if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
                                val2 = MF_CFG_RD(bp, func_ext_config[func].
@@ -9422,12 +10375,40 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
                                val = MF_CFG_RD(bp, func_ext_config[func].
                                                    fcoe_mac_addr_lower);
                                bnx2x_set_mac_buf(fip_mac, val, val2);
-                               BNX2X_DEV_INFO("Read FCoE L2 MAC to %pM\n",
+                               BNX2X_DEV_INFO("Read FCoE L2 MAC: %pM\n",
                                               fip_mac);
 
                        } else
                                bp->flags |= NO_FCOE_FLAG;
+
+                       bp->mf_ext_config = cfg;
+
+               } else { /* SD MODE */
+                       if (IS_MF_STORAGE_SD(bp)) {
+                               if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) {
+                                       /* use primary mac as iscsi mac */
+                                       memcpy(iscsi_mac, bp->dev->dev_addr,
+                                              ETH_ALEN);
+
+                                       BNX2X_DEV_INFO("SD ISCSI MODE\n");
+                                       BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n",
+                                                      iscsi_mac);
+                               } else { /* FCoE */
+                                       memcpy(fip_mac, bp->dev->dev_addr,
+                                              ETH_ALEN);
+                                       BNX2X_DEV_INFO("SD FCoE MODE\n");
+                                       BNX2X_DEV_INFO("Read FIP MAC: %pM\n",
+                                                      fip_mac);
+                               }
+                               /* Zero primary MAC configuration */
+                               memset(bp->dev->dev_addr, 0, ETH_ALEN);
+                       }
                }
+
+               if (IS_MF_FCOE_AFEX(bp))
+                       /* use FIP MAC as primary MAC */
+                       memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
+
 #endif
        } else {
                /* in SF read MACs from port configuration */
@@ -9454,10 +10435,6 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
        memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
 
 #ifdef BCM_CNIC
-       /* Set the FCoE MAC in MF_SD mode */
-       if (!CHIP_IS_E1x(bp) && IS_MF_SD(bp))
-               memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
-
        /* Disable iSCSI if MAC configuration is
         * invalid.
         */
@@ -9475,12 +10452,13 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
        }
 #endif
 
-       if (!is_valid_ether_addr(bp->dev->dev_addr))
+       if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr))
                dev_err(&bp->pdev->dev,
-                       "bad Ethernet MAC address configuration: "
-                       "%pM, change it manually before bringing up "
-                       "the appropriate network interface\n",
+                       "bad Ethernet MAC address configuration: %pM\n"
+                       "change it manually before bringing up the appropriate network interface\n",
                        bp->dev->dev_addr);
 }
 
 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
@@ -9601,8 +10579,20 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
                                        bp->mf_config[vn] = MF_CFG_RD(bp,
                                                   func_mf_config[func].config);
                                } else
-                                       BNX2X_DEV_INFO("illegal MAC address "
-                                                      "for SI\n");
+                                       BNX2X_DEV_INFO("illegal MAC address for SI\n");
+                               break;
+                       case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
+                               if ((!CHIP_IS_E1x(bp)) &&
+                                   (MF_CFG_RD(bp, func_mf_config[func].
+                                              mac_upper) != 0xffff) &&
+                                   (SHMEM2_HAS(bp,
+                                               afex_driver_support))) {
+                                       bp->mf_mode = MULTI_FUNCTION_AFEX;
+                                       bp->mf_config[vn] = MF_CFG_RD(bp,
+                                               func_mf_config[func].config);
+                               } else {
+                                       BNX2X_DEV_INFO("cannot configure afex mode\n");
+                               }
                                break;
                        case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
                                /* get OV configuration */
@@ -9620,7 +10610,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
                        default:
                                /* Unknown configuration: reset mf_config */
                                bp->mf_config[vn] = 0;
-                               BNX2X_DEV_INFO("unkown MF mode 0x%x\n", val);
+                               BNX2X_DEV_INFO("unknown MF mode 0x%x\n", val);
                        }
                }
 
@@ -9635,25 +10625,27 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
                                bp->mf_ov = val;
                                bp->path_has_ovlan = true;
 
-                               BNX2X_DEV_INFO("MF OV for func %d is %d "
-                                              "(0x%04x)\n", func, bp->mf_ov,
-                                              bp->mf_ov);
+                               BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n",
+                                              func, bp->mf_ov, bp->mf_ov);
                        } else {
                                dev_err(&bp->pdev->dev,
-                                       "No valid MF OV for func %d, "
-                                       "aborting\n", func);
+                                       "No valid MF OV for func %d, aborting\n",
+                                       func);
                                return -EPERM;
                        }
                        break;
+               case MULTI_FUNCTION_AFEX:
+                       BNX2X_DEV_INFO("func %d is in MF afex mode\n", func);
+                       break;
                case MULTI_FUNCTION_SI:
-                       BNX2X_DEV_INFO("func %d is in MF "
-                                      "switch-independent mode\n", func);
+                       BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n",
+                                      func);
                        break;
                default:
                        if (vn) {
                                dev_err(&bp->pdev->dev,
-                                       "VN %d is in a single function mode, "
-                                       "aborting\n", vn);
+                                       "VN %d is in a single function mode, aborting\n",
+                                       vn);
                                return -EPERM;
                        }
                        break;
@@ -9687,19 +10679,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
        /* Get MAC addresses */
        bnx2x_get_mac_hwinfo(bp);
 
-#ifdef BCM_CNIC
        bnx2x_get_cnic_info(bp);
-#endif
-
-       /* Get current FW pulse sequence */
-       if (!BP_NOMCP(bp)) {
-               int mb_idx = BP_FW_MB_IDX(bp);
-
-               bp->fw_drv_pulse_wr_seq =
-                               (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
-                                DRV_PULSE_SEQ_MASK);
-               BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
-       }
 
        return rc;
 }
@@ -9707,30 +10687,49 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
 static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
 {
        int cnt, i, block_end, rodi;
-       char vpd_data[BNX2X_VPD_LEN+1];
+       char vpd_start[BNX2X_VPD_LEN+1];
        char str_id_reg[VENDOR_ID_LEN+1];
        char str_id_cap[VENDOR_ID_LEN+1];
+       char *vpd_data;
+       char *vpd_extended_data = NULL;
        u8 len;
 
-       cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
+       cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start);
        memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
 
        if (cnt < BNX2X_VPD_LEN)
                goto out_not_found;
 
-       i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
+       /* VPD RO tag should be first tag after identifier string, hence
+        * we should be able to find it in first BNX2X_VPD_LEN chars
+        */
+       i = pci_vpd_find_tag(vpd_start, 0, BNX2X_VPD_LEN,
                             PCI_VPD_LRDT_RO_DATA);
        if (i < 0)
                goto out_not_found;
 
-
        block_end = i + PCI_VPD_LRDT_TAG_SIZE +
-                   pci_vpd_lrdt_size(&vpd_data[i]);
+                   pci_vpd_lrdt_size(&vpd_start[i]);
 
        i += PCI_VPD_LRDT_TAG_SIZE;
 
-       if (block_end > BNX2X_VPD_LEN)
-               goto out_not_found;
+       if (block_end > BNX2X_VPD_LEN) {
+               vpd_extended_data = kmalloc(block_end, GFP_KERNEL);
+               if (vpd_extended_data == NULL)
+                       goto out_not_found;
+
+               /* read rest of vpd image into vpd_extended_data */
+               memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN);
+               cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN,
+                                  block_end - BNX2X_VPD_LEN,
+                                  vpd_extended_data + BNX2X_VPD_LEN);
+               if (cnt < (block_end - BNX2X_VPD_LEN))
+                       goto out_not_found;
+               vpd_data = vpd_extended_data;
+       } else
+               vpd_data = vpd_start;
+
+       /* now vpd_data holds full vpd content in both cases */
 
        rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
                                   PCI_VPD_RO_KEYWORD_MFR_ID);
@@ -9762,9 +10761,11 @@ static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
                                bp->fw_ver[len] = ' ';
                        }
                }
+               kfree(vpd_extended_data);
                return;
        }
 out_not_found:
+       kfree(vpd_extended_data);
        return;
 }
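
bnx2x_read_fwinfo() now reads the VPD in two stages: a fixed-size prefix first and, only when the RO block extends past it, a larger buffer into which the remainder is appended, so vpd_data always covers the whole block. A userspace sketch of that two-stage read, where vpd_read() stands in for pci_read_vpd() and PREFIX_LEN plays the role of BNX2X_VPD_LEN (illustrative only, not driver code):

/* Standalone sketch: grow the VPD buffer only if the RO block is larger
 * than the prefix that was read up front. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PREFIX_LEN	128	/* plays the role of BNX2X_VPD_LEN */

static int vpd_read(unsigned int off, unsigned int len, char *buf)
{
	(void)off;
	memset(buf, 'x', len);	/* pretend these are VPD bytes */
	return (int)len;
}

int main(void)
{
	char start[PREFIX_LEN], *data = start, *extended = NULL;
	unsigned int block_end = 200;	/* e.g. derived from the RO tag size */

	vpd_read(0, PREFIX_LEN, start);

	if (block_end > PREFIX_LEN) {
		extended = malloc(block_end);
		if (!extended)
			return 1;
		memcpy(extended, start, PREFIX_LEN);
		vpd_read(PREFIX_LEN, block_end - PREFIX_LEN,
			 extended + PREFIX_LEN);
		data = extended;	/* full image in one buffer */
	}

	printf("VPD image spans %u bytes (%s buffer)\n",
	       block_end, data == start ? "prefix" : "extended");
	free(extended);
	return 0;
}
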
 
@@ -9803,6 +10804,9 @@ static void __devinit bnx2x_set_modes_bitmap(struct bnx2x *bp)
                case MULTI_FUNCTION_SI:
                        SET_FLAGS(flags, MODE_MF_SI);
                        break;
+               case MULTI_FUNCTION_AFEX:
+                       SET_FLAGS(flags, MODE_MF_AFEX);
+                       break;
                }
        } else
                SET_FLAGS(flags, MODE_SF);
@@ -9818,7 +10822,6 @@ static void __devinit bnx2x_set_modes_bitmap(struct bnx2x *bp)
 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
 {
        int func;
-       int timer_interval;
        int rc;
 
        mutex_init(&bp->port.phy_mutex);
@@ -9846,35 +10849,37 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
        func = BP_FUNC(bp);
 
        /* need to reset chip if undi was active */
-       if (!BP_NOMCP(bp))
-               bnx2x_undi_unload(bp);
-
-       /* init fw_seq after undi_unload! */
        if (!BP_NOMCP(bp)) {
+               /* init fw_seq */
                bp->fw_seq =
-                       (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
-                        DRV_MSG_SEQ_NUMBER_MASK);
+                       SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
+                                                       DRV_MSG_SEQ_NUMBER_MASK;
                BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
+
+               bnx2x_prev_unload(bp);
        }
 
        if (CHIP_REV_IS_FPGA(bp))
                dev_err(&bp->pdev->dev, "FPGA detected\n");
 
        if (BP_NOMCP(bp) && (func == 0))
-               dev_err(&bp->pdev->dev, "MCP disabled, "
-                                       "must load devices in order!\n");
+               dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n");
+
+       bp->disable_tpa = disable_tpa;
 
-       bp->multi_mode = multi_mode;
+#ifdef BCM_CNIC
+       bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp);
+#endif
 
        /* Set TPA flags */
-       if (disable_tpa) {
-               bp->flags &= ~TPA_ENABLE_FLAG;
+       if (bp->disable_tpa) {
+               bp->flags &= ~(TPA_ENABLE_FLAG | GRO_ENABLE_FLAG);
                bp->dev->features &= ~NETIF_F_LRO;
        } else {
-               bp->flags |= TPA_ENABLE_FLAG;
+               bp->flags |= (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG);
                bp->dev->features |= NETIF_F_LRO;
        }
-       bp->disable_tpa = disable_tpa;
 
        if (CHIP_IS_E1(bp))
                bp->dropless_fc = 0;
@@ -9883,14 +10888,13 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
 
        bp->mrrs = mrrs;
 
-       bp->tx_ring_size = MAX_TX_AVAIL;
+       bp->tx_ring_size = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL;
 
        /* make sure that the numbers are in the right granularity */
        bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
        bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
 
-       timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
-       bp->current_interval = (poll ? poll : timer_interval);
+       bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ;
 
        init_timer(&bp->timer);
        bp->timer.expires = jiffies + bp->current_interval;
@@ -9933,14 +10937,16 @@ static int bnx2x_open(struct net_device *dev)
        struct bnx2x *bp = netdev_priv(dev);
        bool global = false;
        int other_engine = BP_PATH(bp) ? 0 : 1;
-       u32 other_load_counter, load_counter;
+       bool other_load_status, load_status;
+
+       bp->stats_init = true;
 
        netif_carrier_off(dev);
 
        bnx2x_set_power_state(bp, PCI_D0);
 
-       other_load_counter = bnx2x_get_load_cnt(bp, other_engine);
-       load_counter = bnx2x_get_load_cnt(bp, BP_PATH(bp));
+       other_load_status = bnx2x_get_load_status(bp, other_engine);
+       load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
 
        /*
         * If parity had happen during the unload, then attentions
@@ -9966,8 +10972,8 @@ static int bnx2x_open(struct net_device *dev)
                         * global blocks only the first in the chip should try
                         * to recover.
                         */
-                       if ((!load_counter &&
-                            (!global || !other_load_counter)) &&
+                       if ((!load_status &&
+                            (!global || !other_load_status)) &&
                            bnx2x_trylock_leader_lock(bp) &&
                            !bnx2x_leader_reset(bp)) {
                                netdev_info(bp->dev, "Recovered in open\n");
@@ -9978,10 +10984,8 @@ static int bnx2x_open(struct net_device *dev)
                        bnx2x_set_power_state(bp, PCI_D3hot);
                        bp->recovery_state = BNX2X_RECOVERY_FAILED;
 
-                       netdev_err(bp->dev, "Recovery flow hasn't been properly"
-                       " completed yet. Try again later. If u still see this"
-                       " message after a few retries then power cycle is"
-                       " required.\n");
+                       BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n"
+                                 "If you still see this message after a few retries then power cycle is required.\n");
 
                        return -EAGAIN;
                } while (0);
@@ -9991,7 +10995,7 @@ static int bnx2x_open(struct net_device *dev)
 }
 
 /* called with rtnl_lock */
-int bnx2x_close(struct net_device *dev)
+static int bnx2x_close(struct net_device *dev)
 {
        struct bnx2x *bp = netdev_priv(dev);
 
@@ -10004,8 +11008,8 @@ int bnx2x_close(struct net_device *dev)
        return 0;
 }
 
-static inline int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
-                                        struct bnx2x_mcast_ramrod_params *p)
+static int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
+                                     struct bnx2x_mcast_ramrod_params *p)
 {
        int mc_count = netdev_mc_count(bp->dev);
        struct bnx2x_mcast_list_elem *mc_mac =
@@ -10028,7 +11032,7 @@ static inline int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
        return 0;
 }
 
-static inline void bnx2x_free_mcast_macs_list(
+static void bnx2x_free_mcast_macs_list(
        struct bnx2x_mcast_ramrod_params *p)
 {
        struct bnx2x_mcast_list_elem *mc_mac =
@@ -10046,12 +11050,12 @@ static inline void bnx2x_free_mcast_macs_list(
  *
  * We will use zero (0) as a MAC type for these MACs.
  */
-static inline int bnx2x_set_uc_list(struct bnx2x *bp)
+static int bnx2x_set_uc_list(struct bnx2x *bp)
 {
        int rc;
        struct net_device *dev = bp->dev;
        struct netdev_hw_addr *ha;
-       struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
+       struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
        unsigned long ramrod_flags = 0;
 
        /* First schedule a cleanup up of old configuration */
@@ -10064,7 +11068,14 @@ static inline int bnx2x_set_uc_list(struct bnx2x *bp)
        netdev_for_each_uc_addr(ha, dev) {
                rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true,
                                       BNX2X_UC_LIST_MAC, &ramrod_flags);
-               if (rc < 0) {
+               if (rc == -EEXIST) {
+                       DP(BNX2X_MSG_SP,
+                          "Failed to schedule ADD operations: %d\n", rc);
+                       /* do not treat adding same MAC as error */
+                       rc = 0;
+
+               } else if (rc < 0) {
+
                        BNX2X_ERR("Failed to schedule ADD operations: %d\n",
                                  rc);
                        return rc;
@@ -10077,10 +11088,10 @@ static inline int bnx2x_set_uc_list(struct bnx2x *bp)
                                 BNX2X_UC_LIST_MAC, &ramrod_flags);
 }
 
-static inline int bnx2x_set_mc_list(struct bnx2x *bp)
+static int bnx2x_set_mc_list(struct bnx2x *bp)
 {
        struct net_device *dev = bp->dev;
-       struct bnx2x_mcast_ramrod_params rparam = {0};
+       struct bnx2x_mcast_ramrod_params rparam = {NULL};
        int rc = 0;
 
        rparam.mcast_obj = &bp->mcast_obj;
@@ -10088,8 +11099,7 @@ static inline int bnx2x_set_mc_list(struct bnx2x *bp)
        /* first, clear all configured multicast MACs */
        rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
        if (rc < 0) {
-               BNX2X_ERR("Failed to clear multicast "
-                         "configuration: %d\n", rc);
+               BNX2X_ERR("Failed to clear multicast configuration: %d\n", rc);
                return rc;
        }
 
@@ -10097,8 +11107,8 @@ static inline int bnx2x_set_mc_list(struct bnx2x *bp)
        if (netdev_mc_count(dev)) {
                rc = bnx2x_init_mcast_macs_list(bp, &rparam);
                if (rc) {
-                       BNX2X_ERR("Failed to create multicast MACs "
-                                 "list: %d\n", rc);
+                       BNX2X_ERR("Failed to create multicast MACs list: %d\n",
+                                 rc);
                        return rc;
                }
 
@@ -10106,8 +11116,8 @@ static inline int bnx2x_set_mc_list(struct bnx2x *bp)
                rc = bnx2x_config_mcast(bp, &rparam,
                                        BNX2X_MCAST_CMD_ADD);
                if (rc < 0)
-                       BNX2X_ERR("Failed to set a new multicast "
-                                 "configuration: %d\n", rc);
+                       BNX2X_ERR("Failed to set a new multicast configuration: %d\n",
+                                 rc);
 
                bnx2x_free_mcast_macs_list(&rparam);
        }
@@ -10145,6 +11155,11 @@ void bnx2x_set_rx_mode(struct net_device *dev)
        }
 
        bp->rx_mode = rx_mode;
+#ifdef BCM_CNIC
+       /* handle ISCSI SD mode */
+       if (IS_MF_ISCSI_SD(bp))
+               bp->rx_mode = BNX2X_RX_MODE_NONE;
+#endif
 
        /* Schedule the rx_mode command */
        if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
@@ -10186,8 +11201,9 @@ static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
        struct bnx2x *bp = netdev_priv(netdev);
        int rc;
 
-       DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
-                          " value 0x%x\n", prtad, devad, addr, value);
+       DP(NETIF_MSG_LINK,
+          "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x, value 0x%x\n",
+          prtad, devad, addr, value);
 
        /* The HW expects different devad if CL22 is used */
        devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
@@ -10217,13 +11233,26 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 static void poll_bnx2x(struct net_device *dev)
 {
        struct bnx2x *bp = netdev_priv(dev);
+       int i;
 
-       disable_irq(bp->pdev->irq);
-       bnx2x_interrupt(bp->pdev->irq, dev);
-       enable_irq(bp->pdev->irq);
+       for_each_eth_queue(bp, i) {
+               struct bnx2x_fastpath *fp = &bp->fp[i];
+               napi_schedule(&bnx2x_fp(bp, fp->index, napi));
+       }
 }
 #endif
 
+static int bnx2x_validate_addr(struct net_device *dev)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+
+       if (!bnx2x_is_valid_ether_addr(bp, dev->dev_addr)) {
+               BNX2X_ERR("Non-valid Ethernet address\n");
+               return -EADDRNOTAVAIL;
+       }
+       return 0;
+}
+
 static const struct net_device_ops bnx2x_netdev_ops = {
        .ndo_open               = bnx2x_open,
        .ndo_stop               = bnx2x_close,
@@ -10231,7 +11260,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
        .ndo_select_queue       = bnx2x_select_queue,
        .ndo_set_rx_mode        = bnx2x_set_rx_mode,
        .ndo_set_mac_address    = bnx2x_change_mac_addr,
-       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_validate_addr      = bnx2x_validate_addr,
        .ndo_do_ioctl           = bnx2x_ioctl,
        .ndo_change_mtu         = bnx2x_change_mtu,
        .ndo_fix_features       = bnx2x_fix_features,
@@ -10247,15 +11276,14 @@ static const struct net_device_ops bnx2x_netdev_ops = {
 #endif
 };
 
-static inline int bnx2x_set_coherency_mask(struct bnx2x *bp)
+static int bnx2x_set_coherency_mask(struct bnx2x *bp)
 {
        struct device *dev = &bp->pdev->dev;
 
        if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
                bp->flags |= USING_DAC_FLAG;
                if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
-                       dev_err(dev, "dma_set_coherent_mask failed, "
-                                    "aborting\n");
+                       dev_err(dev, "dma_set_coherent_mask failed, aborting\n");
                        return -EIO;
                }
        } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
@@ -10272,6 +11300,10 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
 {
        struct bnx2x *bp;
        int rc;
+       u32 pci_cfg_dword;
+       bool chip_is_e1x = (board_type == BCM57710 ||
+                           board_type == BCM57711 ||
+                           board_type == BCM57711E);
 
        SET_NETDEV_DEV(dev, &pdev->dev);
        bp = netdev_priv(dev);
@@ -10279,7 +11311,6 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
        bp->dev = dev;
        bp->pdev = pdev;
        bp->flags = 0;
-       bp->pf_num = PCI_FUNC(pdev->devfn);
 
        rc = pci_enable_device(pdev);
        if (rc) {
@@ -10323,7 +11354,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
        }
 
        if (!pci_is_pcie(pdev)) {
-               dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n");
+               dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n");
                rc = -EIO;
                goto err_out_release;
        }
@@ -10346,6 +11377,21 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
                goto err_out_release;
        }
 
+       /* In E1/E1H use pci device function given by kernel.
+        * In E2/E3 read physical function from ME register since these chips
+        * support Physical Device Assignment where kernel BDF may be arbitrary
+        * (depending on hypervisor).
+        */
+       if (chip_is_e1x)
+               bp->pf_num = PCI_FUNC(pdev->devfn);
+       else { /* chip is E2/3 */
+               pci_read_config_dword(bp->pdev,
+                                     PCICFG_ME_REGISTER, &pci_cfg_dword);
+               bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >>
+                   ME_REG_ABS_PF_NUM_SHIFT);
+       }
+       BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num);
+
        bnx2x_set_power_state(bp, PCI_D0);
 
        /* clean indirect addresses */
@@ -10360,7 +11406,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
        REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
        REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
 
-       if (CHIP_IS_E1x(bp)) {
+       if (chip_is_e1x) {
                REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
                REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
                REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
@@ -10371,14 +11417,9 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
         * Enable internal target-read (in case we are probed after PF FLR).
         * Must be done prior to any BAR read access. Only for 57712 and up
         */
-       if (board_type != BCM57710 &&
-           board_type != BCM57711 &&
-           board_type != BCM57711E)
+       if (!chip_is_e1x)
                REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
 
-       /* Reset the load counter */
-       bnx2x_clear_load_cnt(bp);
-
        dev->watchdog_timeo = TX_TIMEOUT;
 
        dev->netdev_ops = &bnx2x_netdev_ops;
@@ -10387,8 +11428,9 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
        dev->priv_flags |= IFF_UNICAST_FLT;
 
        dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-               NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_LRO |
-               NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_HW_VLAN_TX;
+               NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
+               NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
+               NETIF_F_RXHASH | NETIF_F_HW_VLAN_TX;
 
        dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
@@ -10447,8 +11489,10 @@ static int bnx2x_check_firmware(struct bnx2x *bp)
        int i;
        const u8 *fw_ver;
 
-       if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
+       if (firmware->size < sizeof(struct bnx2x_fw_file_hdr)) {
+               BNX2X_ERR("Wrong FW size\n");
                return -EINVAL;
+       }
 
        fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
        sections = (struct bnx2x_fw_file_section *)fw_hdr;
@@ -10459,8 +11503,7 @@ static int bnx2x_check_firmware(struct bnx2x *bp)
                offset = be32_to_cpu(sections[i].offset);
                len = be32_to_cpu(sections[i].len);
                if (offset + len > firmware->size) {
-                       dev_err(&bp->pdev->dev,
-                               "Section %d length is out of bounds\n", i);
+                       BNX2X_ERR("Section %d length is out of bounds\n", i);
                        return -EINVAL;
                }
        }
@@ -10472,8 +11515,7 @@ static int bnx2x_check_firmware(struct bnx2x *bp)
 
        for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
                if (be16_to_cpu(ops_offsets[i]) > num_ops) {
-                       dev_err(&bp->pdev->dev,
-                               "Section offset %d is out of bounds\n", i);
+                       BNX2X_ERR("Section offset %d is out of bounds\n", i);
                        return -EINVAL;
                }
        }
@@ -10485,10 +11527,9 @@ static int bnx2x_check_firmware(struct bnx2x *bp)
            (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
            (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
            (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
-               dev_err(&bp->pdev->dev,
-                       "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
-                      fw_ver[0], fw_ver[1], fw_ver[2],
-                      fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
+               BNX2X_ERR("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
+                      fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
+                      BCM_5710_FW_MAJOR_VERSION,
                       BCM_5710_FW_MINOR_VERSION,
                       BCM_5710_FW_REVISION_VERSION,
                       BCM_5710_FW_ENGINEERING_VERSION);
@@ -10498,7 +11539,7 @@ static int bnx2x_check_firmware(struct bnx2x *bp)
        return 0;
 }
 
-static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
+static void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
 {
        const __be32 *source = (const __be32 *)_source;
        u32 *target = (u32 *)_target;
@@ -10512,7 +11553,7 @@ static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
    Ops array is stored in the following format:
    {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
  */
-static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
+static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
 {
        const __be32 *source = (const __be32 *)_source;
        struct raw_op *target = (struct raw_op *)_target;
@@ -10526,11 +11567,10 @@ static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
        }
 }
 
-/**
- * IRO array is stored in the following format:
+/* IRO array is stored in the following format:
  * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
  */
-static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
+static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
 {
        const __be32 *source = (const __be32 *)_source;
        struct iro *target = (struct iro *)_target;
@@ -10550,7 +11590,7 @@ static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
        }
 }
 
-static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
+static void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
 {
        const __be16 *source = (const __be16 *)_source;
        u16 *target = (u16 *)_target;
@@ -10564,48 +11604,44 @@ static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
 do {                                                                   \
        u32 len = be32_to_cpu(fw_hdr->arr.len);                         \
        bp->arr = kmalloc(len, GFP_KERNEL);                             \
-       if (!bp->arr) {                                                 \
-               pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
+       if (!bp->arr)                                                   \
                goto lbl;                                               \
-       }                                                               \
        func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),      \
             (u8 *)bp->arr, len);                                       \
 } while (0)
 
-int bnx2x_init_firmware(struct bnx2x *bp)
+static int bnx2x_init_firmware(struct bnx2x *bp)
 {
+       const char *fw_file_name;
        struct bnx2x_fw_file_hdr *fw_hdr;
        int rc;
 
+       if (bp->firmware)
+               return 0;
 
-       if (!bp->firmware) {
-               const char *fw_file_name;
-
-               if (CHIP_IS_E1(bp))
-                       fw_file_name = FW_FILE_NAME_E1;
-               else if (CHIP_IS_E1H(bp))
-                       fw_file_name = FW_FILE_NAME_E1H;
-               else if (!CHIP_IS_E1x(bp))
-                       fw_file_name = FW_FILE_NAME_E2;
-               else {
-                       BNX2X_ERR("Unsupported chip revision\n");
-                       return -EINVAL;
-               }
-               BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
+       if (CHIP_IS_E1(bp))
+               fw_file_name = FW_FILE_NAME_E1;
+       else if (CHIP_IS_E1H(bp))
+               fw_file_name = FW_FILE_NAME_E1H;
+       else if (!CHIP_IS_E1x(bp))
+               fw_file_name = FW_FILE_NAME_E2;
+       else {
+               BNX2X_ERR("Unsupported chip revision\n");
+               return -EINVAL;
+       }
+       BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
 
-               rc = request_firmware(&bp->firmware, fw_file_name,
-                                     &bp->pdev->dev);
-               if (rc) {
-                       BNX2X_ERR("Can't load firmware file %s\n",
-                                 fw_file_name);
-                       goto request_firmware_exit;
-               }
+       rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
+       if (rc) {
+               BNX2X_ERR("Can't load firmware file %s\n",
+                         fw_file_name);
+               goto request_firmware_exit;
+       }
 
-               rc = bnx2x_check_firmware(bp);
-               if (rc) {
-                       BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
-                       goto request_firmware_exit;
-               }
+       rc = bnx2x_check_firmware(bp);
+       if (rc) {
+               BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
+               goto request_firmware_exit;
        }
 
        fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
@@ -10651,6 +11687,7 @@ init_ops_alloc_err:
        kfree(bp->init_data);
 request_firmware_exit:
        release_firmware(bp->firmware);
+       bp->firmware = NULL;
 
        return rc;
 }
@@ -10690,13 +11727,15 @@ void bnx2x__init_func_obj(struct bnx2x *bp)
        bnx2x_init_func_obj(bp, &bp->func_obj,
                            bnx2x_sp(bp, func_rdata),
                            bnx2x_sp_mapping(bp, func_rdata),
+                           bnx2x_sp(bp, func_afex_rdata),
+                           bnx2x_sp_mapping(bp, func_afex_rdata),
                            &bnx2x_func_sp_drv);
 }
 
 /* must be called after sriov-enable */
-static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp)
+static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
 {
-       int cid_count = BNX2X_L2_CID_COUNT(bp);
+       int cid_count = BNX2X_L2_MAX_CID(bp);
 
 #ifdef BCM_CNIC
        cid_count += CNIC_CID_MAX;
@@ -10710,7 +11749,7 @@ static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp)
  * @dev:       pci device
  *
  */
-static inline int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev)
+static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev)
 {
        int pos;
        u16 control;
@@ -10741,7 +11780,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
        struct bnx2x *bp;
        int pcie_width, pcie_speed;
        int rc, max_non_def_sbs;
-       int rx_count, tx_count, rss_count;
+       int rx_count, tx_count, rss_count, doorbell_size;
        /*
         * An estimated maximum supported CoS number according to the chip
         * version.
@@ -10769,8 +11808,13 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
        case BCM57800_MF:
        case BCM57810:
        case BCM57810_MF:
-       case BCM57840:
+       case BCM57840_O:
+       case BCM57840_4_10:
+       case BCM57840_2_20:
+       case BCM57840_MFO:
        case BCM57840_MF:
+       case BCM57811:
+       case BCM57811_MF:
                max_cos_est = BNX2X_MULTI_TX_COS_E3B0;
                break;
 
@@ -10782,13 +11826,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
 
        max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev);
 
-       /* !!! FIXME !!!
-        * Do not allow the maximum SB count to grow above 16
-        * since Special CIDs starts from 16*BNX2X_MULTI_TX_COS=48.
-        * We will use the FP_SB_MAX_E1x macro for this matter.
-        */
-       max_non_def_sbs = min_t(int, FP_SB_MAX_E1x, max_non_def_sbs);
-
        WARN_ON(!max_non_def_sbs);
 
        /* Maximum number of RSS queues: one IGU SB goes to CNIC */
@@ -10799,22 +11836,17 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
 
        /*
         * Maximum number of netdev Tx queues:
-        *      Maximum TSS queues * Maximum supported number of CoS  + FCoE L2
+        * Maximum TSS queues * Maximum supported number of CoS  + FCoE L2
         */
-       tx_count = MAX_TXQS_PER_COS * max_cos_est + FCOE_PRESENT;
+       tx_count = rss_count * max_cos_est + FCOE_PRESENT;
 
        /* dev zeroed in init_etherdev */
        dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
-       if (!dev) {
-               dev_err(&pdev->dev, "Cannot allocate net device\n");
+       if (!dev)
                return -ENOMEM;
-       }
 
        bp = netdev_priv(dev);
 
-       DP(NETIF_MSG_DRV, "Allocated netdev with %d tx and %d rx queues\n",
-                         tx_count, rx_count);
-
        bp->igu_sb_cnt = max_non_def_sbs;
        bp->msg_enable = debug;
        pci_set_drvdata(pdev, dev);
@@ -10825,7 +11857,10 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
                return rc;
        }
 
-       DP(NETIF_MSG_DRV, "max_non_def_sbs %d\n", max_non_def_sbs);
+       BNX2X_DEV_INFO("max_non_def_sbs %d\n", max_non_def_sbs);
+
+       BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
+                         tx_count, rx_count);
 
        rc = bnx2x_init_bp(bp);
        if (rc)
@@ -10835,9 +11870,15 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
         * Map doorbells here as we need the real value of bp->max_cos which
         * is initialized in bnx2x_init_bp().
         */
+       doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
+       if (doorbell_size > pci_resource_len(pdev, 2)) {
+               dev_err(&bp->pdev->dev,
+                       "Cannot map doorbells, bar size too small, aborting\n");
+               rc = -ENOMEM;
+               goto init_one_exit;
+       }
        bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
-                                       min_t(u64, BNX2X_DB_SIZE(bp),
-                                             pci_resource_len(pdev, 2)));
+                                       doorbell_size);
        if (!bp->doorbells) {
                dev_err(&bp->pdev->dev,
                        "Cannot map doorbell space, aborting\n");
@@ -10855,14 +11896,15 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
 
 #endif
 
+
+       /* Set bp->num_queues for MSI-X mode*/
+       bnx2x_set_num_queues(bp);
+
        /* Configure interrupt mode: try to enable MSI-X/MSI if
-        * needed, set bp->num_queues appropriately.
+        * needed.
         */
        bnx2x_set_int_mode(bp);
 
-       /* Add all NAPI objects */
-       bnx2x_add_all_napi(bp);
-
        rc = register_netdev(dev);
        if (rc) {
                dev_err(&pdev->dev, "Cannot register net device\n");
@@ -10880,7 +11922,8 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
 
        bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
 
-       netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
+       BNX2X_DEV_INFO(
+               "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
                    board_info[ent->driver_data].name,
                    (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
                    pcie_width,
@@ -10936,9 +11979,6 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
 
        unregister_netdev(dev);
 
-       /* Delete all NAPI objects */
-       bnx2x_del_all_napi(bp);
-
        /* Power on: we can't let PCI layer write to us while we are in D3 */
        bnx2x_set_power_state(bp, PCI_D0);
 
@@ -10985,6 +12025,8 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
        bnx2x_tx_disable(bp);
 
        bnx2x_netif_stop(bp, 0);
+       /* Delete all NAPI objects */
+       bnx2x_del_all_napi(bp);
 
        del_timer_sync(&bp->timer);
 
@@ -11014,29 +12056,11 @@ static void bnx2x_eeh_recover(struct bnx2x *bp)
 
        mutex_init(&bp->port.phy_mutex);
 
-       bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
-       bp->link_params.shmem_base = bp->common.shmem_base;
-       BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
-
-       if (!bp->common.shmem_base ||
-           (bp->common.shmem_base < 0xA0000) ||
-           (bp->common.shmem_base >= 0xC0000)) {
-               BNX2X_DEV_INFO("MCP not active\n");
-               bp->flags |= NO_MCP_FLAG;
-               return;
-       }
 
        val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
        if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
                != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
                BNX2X_ERR("BAD MCP validity signature\n");
-
-       if (!BP_NOMCP(bp)) {
-               bp->fw_seq =
-                   (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
-                   DRV_MSG_SEQ_NUMBER_MASK);
-               BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
-       }
 }
 
 /**
@@ -11117,8 +12141,7 @@ static void bnx2x_io_resume(struct pci_dev *pdev)
        struct bnx2x *bp = netdev_priv(dev);
 
        if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
-               netdev_err(bp->dev, "Handling parity error recovery. "
-                                   "Try again later\n");
+               netdev_err(bp->dev, "Handling parity error recovery. Try again later\n");
                return;
        }
 
@@ -11134,7 +12157,7 @@ static void bnx2x_io_resume(struct pci_dev *pdev)
        rtnl_unlock();
 }
 
-static struct pci_error_handlers bnx2x_err_handler = {
+static const struct pci_error_handlers bnx2x_err_handler = {
        .error_detected = bnx2x_io_error_detected,
        .slot_reset     = bnx2x_io_slot_reset,
        .resume         = bnx2x_io_resume,
@@ -11172,9 +12195,18 @@ static int __init bnx2x_init(void)
 
 static void __exit bnx2x_cleanup(void)
 {
+       struct list_head *pos, *q;
        pci_unregister_driver(&bnx2x_pci_driver);
 
        destroy_workqueue(bnx2x_wq);
+
+       /* Free globally allocated resources */
+       list_for_each_safe(pos, q, &bnx2x_prev_list) {
+               struct bnx2x_prev_path_list *tmp =
+                       list_entry(pos, struct bnx2x_prev_path_list, list);
+               list_del(pos);
+               kfree(tmp);
+       }
 }
 
 void bnx2x_notify_link_changed(struct bnx2x *bp)
@@ -11195,7 +12227,7 @@ module_exit(bnx2x_cleanup);
  * This function will wait until the ramrod completion returns.
  * Return 0 if success, -ENODEV if ramrod doesn't return.
  */
-static inline int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
+static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
 {
        unsigned long ramrod_flags = 0;
 
@@ -11209,6 +12241,7 @@ static inline int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
 {
        struct eth_spe *spe;
+       int cxt_index, cxt_offset;
 
 #ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
@@ -11231,10 +12264,16 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
                 *  ramrod
                 */
                if (type == ETH_CONNECTION_TYPE) {
-                       if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
-                               bnx2x_set_ctx_validation(bp, &bp->context.
-                                       vcxt[BNX2X_ISCSI_ETH_CID].eth,
-                                       BNX2X_ISCSI_ETH_CID);
+                       if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) {
+                               cxt_index = BNX2X_ISCSI_ETH_CID(bp) /
+                                       ILT_PAGE_CIDS;
+                               cxt_offset = BNX2X_ISCSI_ETH_CID(bp) -
+                                       (cxt_index * ILT_PAGE_CIDS);
+                               bnx2x_set_ctx_validation(bp,
+                                       &bp->context[cxt_index].
+                                                        vcxt[cxt_offset].eth,
+                                       BNX2X_ISCSI_ETH_CID(bp));
+                       }
                }
 
                /*
@@ -11269,7 +12308,7 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
                spe = bnx2x_sp_get_next(bp);
                *spe = *bp->cnic_kwq_cons;
 
-               DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
+               DP(BNX2X_MSG_SP, "pending on SPQ %d, on KWQ %d count %d\n",
                   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
 
                if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
@@ -11288,10 +12327,18 @@ static int bnx2x_cnic_sp_queue(struct net_device *dev,
        int i;
 
 #ifdef BNX2X_STOP_ON_ERROR
-       if (unlikely(bp->panic))
+       if (unlikely(bp->panic)) {
+               BNX2X_ERR("Can't post to SP queue while panic\n");
                return -EIO;
+       }
 #endif
 
+       if ((bp->recovery_state != BNX2X_RECOVERY_DONE) &&
+           (bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
+               BNX2X_ERR("Handling parity error recovery. Try again later\n");
+               return -EAGAIN;
+       }
+
        spin_lock_bh(&bp->spq_lock);
 
        for (i = 0; i < count; i++) {
@@ -11304,7 +12351,7 @@ static int bnx2x_cnic_sp_queue(struct net_device *dev,
 
                bp->cnic_kwq_pending++;
 
-               DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
+               DP(BNX2X_MSG_SP, "L5 SPQE %x %x %x:%x pos %d\n",
                   spe->hdr.conn_and_cmd_data, spe->hdr.type,
                   spe->data.update_data_addr.hi,
                   spe->data.update_data_addr.lo,
@@ -11512,6 +12559,62 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
                smp_mb__after_atomic_inc();
                break;
        }
+       case DRV_CTL_ULP_REGISTER_CMD: {
+               int ulp_type = ctl->data.register_data.ulp_type;
+
+               if (CHIP_IS_E3(bp)) {
+                       int idx = BP_FW_MB_IDX(bp);
+                       u32 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
+                       int path = BP_PATH(bp);
+                       int port = BP_PORT(bp);
+                       int i;
+                       u32 scratch_offset;
+                       u32 *host_addr;
+
+                       /* first write capability to shmem2 */
+                       if (ulp_type == CNIC_ULP_ISCSI)
+                               cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
+                       else if (ulp_type == CNIC_ULP_FCOE)
+                               cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
+                       SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
+
+                       if ((ulp_type != CNIC_ULP_FCOE) ||
+                           (!SHMEM2_HAS(bp, ncsi_oem_data_addr)) ||
+                           (!(bp->flags &  BC_SUPPORTS_FCOE_FEATURES)))
+                               break;
+
+                       /* if reached here - should write fcoe capabilities */
+                       scratch_offset = SHMEM2_RD(bp, ncsi_oem_data_addr);
+                       if (!scratch_offset)
+                               break;
+                       scratch_offset += offsetof(struct glob_ncsi_oem_data,
+                                                  fcoe_features[path][port]);
+                       host_addr = (u32 *) &(ctl->data.register_data.
+                                             fcoe_features);
+                       for (i = 0; i < sizeof(struct fcoe_capabilities);
+                            i += 4)
+                               REG_WR(bp, scratch_offset + i,
+                                      *(host_addr + i/4));
+               }
+               break;
+       }
+
+       case DRV_CTL_ULP_UNREGISTER_CMD: {
+               int ulp_type = ctl->data.ulp_type;
+
+               if (CHIP_IS_E3(bp)) {
+                       int idx = BP_FW_MB_IDX(bp);
+                       u32 cap;
+
+                       cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
+                       if (ulp_type == CNIC_ULP_ISCSI)
+                               cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
+                       else if (ulp_type == CNIC_ULP_FCOE)
+                               cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
+                       SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
+               }
+               break;
+       }
 
        default:
                BNX2X_ERR("unknown command %x\n", ctl->cmd);
@@ -11547,14 +12650,31 @@ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
        cp->num_irq = 2;
 }
 
+void bnx2x_setup_cnic_info(struct bnx2x *bp)
+{
+       struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+
+       cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
+                            bnx2x_cid_ilt_lines(bp);
+       cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
+       cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
+       cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
+
+       if (NO_ISCSI_OOO(bp))
+               cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
+}
+
 static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
                               void *data)
 {
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
 
-       if (ops == NULL)
+       if (ops == NULL) {
+               BNX2X_ERR("NULL ops received\n");
                return -EINVAL;
+       }
 
        bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!bp->cnic_kwq)
@@ -11587,7 +12707,7 @@ static int bnx2x_unregister_cnic(struct net_device *dev)
 
        mutex_lock(&bp->cnic_mutex);
        cp->drv_state = 0;
-       rcu_assign_pointer(bp->cnic_ops, NULL);
+       RCU_INIT_POINTER(bp->cnic_ops, NULL);
        mutex_unlock(&bp->cnic_mutex);
        synchronize_rcu();
        kfree(bp->cnic_kwq);
@@ -11623,10 +12743,10 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
        cp->drv_ctl = bnx2x_drv_ctl;
        cp->drv_register_cnic = bnx2x_register_cnic;
        cp->drv_unregister_cnic = bnx2x_unregister_cnic;
-       cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID;
+       cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
        cp->iscsi_l2_client_id =
                bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
-       cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
+       cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
 
        if (NO_ISCSI_OOO(bp))
                cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
@@ -11637,8 +12757,8 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
        if (NO_FCOE(bp))
                cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
 
-       DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
-                        "starting cid %d\n",
+       BNX2X_DEV_INFO(
+               "page_size %d, tbl_offset %d, tbl_lines %d, starting cid %d\n",
           cp->ctx_blk_size,
           cp->ctx_tbl_offset,
           cp->ctx_tbl_len,