igb: unused variable warning in igb remove
[linux-2.6.git] drivers/net/igb/igb_main.c
index 1436326..2a5303c 100644
@@ -51,14 +51,17 @@ char igb_driver_name[] = "igb";
 char igb_driver_version[] = DRV_VERSION;
 static const char igb_driver_string[] =
                                "Intel(R) Gigabit Ethernet Network Driver";
-static const char igb_copyright[] = "Copyright (c) 2007 Intel Corporation.";
-
+static const char igb_copyright[] = "Copyright (c) 2008 Intel Corporation.";
 
 static const struct e1000_info *igb_info_tbl[] = {
        [board_82575] = &e1000_82575_info,
 };
 
 static struct pci_device_id igb_pci_tbl[] = {
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
@@ -113,6 +116,9 @@ static bool igb_clean_tx_irq(struct igb_ring *);
 static int igb_poll(struct napi_struct *, int);
 static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int);
 static void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
+#ifdef CONFIG_IGB_LRO
+static int igb_get_skb_hdr(struct sk_buff *skb, void **, void **, u64 *, void *);
+#endif
 static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
 static void igb_tx_timeout(struct net_device *);
 static void igb_reset_task(struct work_struct *);
@@ -166,6 +172,8 @@ static struct pci_driver igb_driver = {
        .err_handler = &igb_err_handler
 };
 
+static int global_quad_port_a; /* counts quad-port probes to identify port A */
+
 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
 MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
 MODULE_LICENSE("GPL");
@@ -197,6 +205,8 @@ static int __init igb_init_module(void)
 
        printk(KERN_INFO "%s\n", igb_copyright);
 
+       global_quad_port_a = 0;
+
        ret = pci_register_driver(&igb_driver);
 #ifdef CONFIG_DCA
        dca_register_notify(&dca_notifier);
@@ -262,12 +272,27 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
        return 0;
 }
 
+static void igb_free_queues(struct igb_adapter *adapter)
+{
+       int i;
+
+       for (i = 0; i < adapter->num_rx_queues; i++)
+               netif_napi_del(&adapter->rx_ring[i].napi);
+
+       kfree(adapter->tx_ring);
+       kfree(adapter->rx_ring);
+}
+
 #define IGB_N0_QUEUE -1
 static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
                              int tx_queue, int msix_vector)
 {
        u32 msixbm = 0;
        struct e1000_hw *hw = &adapter->hw;
+       u32 ivar, index;
+
+       switch (hw->mac.type) {
+       case e1000_82575:
                /* The 82575 assigns vectors using a bitmask, which matches the
                   bitmask for the EICR/EIMS/EIMC registers.  To assign one
                   or more queues to a vector, we write the appropriate bits
@@ -282,6 +307,47 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
                                  E1000_EICR_TX_QUEUE0 << tx_queue;
                }
                array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
+               break;
+       case e1000_82576:
+               /* Kawela uses a table-based method for assigning vectors.
+                  Each queue has a single entry in the table to which we write
+                  a vector number along with a "valid" bit.  Sadly, the layout
+                  of the table is somewhat counterintuitive. */
+               if (rx_queue > IGB_N0_QUEUE) {
+                       index = (rx_queue & 0x7);
+                       ivar = array_rd32(E1000_IVAR0, index);
+                       if (rx_queue < 8) {
+                               /* vector goes into low byte of register */
+                               ivar = ivar & 0xFFFFFF00;
+                               ivar |= msix_vector | E1000_IVAR_VALID;
+                       } else {
+                               /* vector goes into third byte of register */
+                               ivar = ivar & 0xFF00FFFF;
+                               ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
+                       }
+                       adapter->rx_ring[rx_queue].eims_value = 1 << msix_vector;
+                       array_wr32(E1000_IVAR0, index, ivar);
+               }
+               if (tx_queue > IGB_N0_QUEUE) {
+                       index = (tx_queue & 0x7);
+                       ivar = array_rd32(E1000_IVAR0, index);
+                       if (tx_queue < 8) {
+                               /* vector goes into second byte of register */
+                               ivar = ivar & 0xFFFF00FF;
+                               ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
+                       } else {
+                               /* vector goes into high byte of register */
+                               ivar = ivar & 0x00FFFFFF;
+                               ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
+                       }
+                       adapter->tx_ring[tx_queue].eims_value = 1 << msix_vector;
+                       array_wr32(E1000_IVAR0, index, ivar);
+               }
+               break;
+       default:
+               BUG();
+               break;
+       }
 }
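
For reference, the 82576 case above packs one vector per byte lane of each 32-bit IVAR entry: rx queues 0-7 land in byte 0, tx queues 0-7 in byte 1, rx queues 8-15 in byte 2, and tx queues 8-15 in byte 3, each tagged with the valid bit. A minimal standalone sketch of that byte-lane arithmetic (hypothetical helper, for illustration only; 0x80 is the assumed value of E1000_IVAR_VALID):

    #include <stdint.h>

    #define IVAR_VALID 0x80 /* assumed value of E1000_IVAR_VALID */

    /* Return the IVAR entry after assigning `vector` to `queue`. */
    static uint32_t ivar_set(uint32_t ivar, int queue, int is_tx, uint8_t vector)
    {
            /* byte lane 0..3: +1 for tx, +2 for queues 8..15 */
            int shift = 8 * ((is_tx ? 1 : 0) + (queue < 8 ? 0 : 2));

            ivar &= ~(0xFFu << shift);      /* clear the lane */
            return ivar | (uint32_t)(vector | IVAR_VALID) << shift;
    }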
 
 /**
@@ -297,6 +363,12 @@ static void igb_configure_msix(struct igb_adapter *adapter)
        struct e1000_hw *hw = &adapter->hw;
 
        adapter->eims_enable_mask = 0;
+       if (hw->mac.type == e1000_82576)
+               /* Turn on MSI-X capability first, or our settings
+                * won't stick.  And it will take days to debug. */
+               wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
+                          E1000_GPIE_PBA | E1000_GPIE_EIAME |
+                                  E1000_GPIE_NSICR);
 
        for (i = 0; i < adapter->num_tx_queues; i++) {
                struct igb_ring *tx_ring = &adapter->tx_ring[i];
@@ -322,6 +394,8 @@ static void igb_configure_msix(struct igb_adapter *adapter)
 
 
        /* set vector for other causes, i.e. link changes */
+       switch (hw->mac.type) {
+       case e1000_82575:
                array_wr32(E1000_MSIXBM(0), vector++,
                                      E1000_EIMS_OTHER);
 
@@ -337,6 +411,19 @@ static void igb_configure_msix(struct igb_adapter *adapter)
                adapter->eims_enable_mask |= E1000_EIMS_OTHER;
                adapter->eims_other = E1000_EIMS_OTHER;
 
+               break;
+
+       case e1000_82576:
+               tmp = (vector++ | E1000_IVAR_VALID) << 8;
+               wr32(E1000_IVAR_MISC, tmp);
+
+               adapter->eims_enable_mask = (1 << (vector)) - 1;
+               adapter->eims_other = 1 << (vector - 1);
+               break;
+       default:
+               /* do nothing, since nothing else supports MSI-X */
+               break;
+       } /* switch (hw->mac.type) */
        wrfl();
 }
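
As a worked example of the 82576 branch above: with four queue vectors assigned in the loops, the post-increment for the link-change cause leaves vector == 5, so eims_enable_mask = (1 << 5) - 1 = 0x1F covers all five vectors, and eims_other = 1 << 4 = 0x10 is the last vector, reserved for non-queue causes.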
 
@@ -401,7 +488,7 @@ static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
                pci_disable_msix(adapter->pdev);
                kfree(adapter->msix_entries);
                adapter->msix_entries = NULL;
-       } else if (adapter->msi_enabled)
+       } else if (adapter->flags & IGB_FLAG_HAS_MSI)
                pci_disable_msi(adapter->pdev);
        return;
 }
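
This patch also folds the old msi_enabled and dca_enabled booleans into a single adapter->flags word, as seen above. A minimal sketch of the set/clear/test idiom, with hypothetical bit values (the real IGB_FLAG_* definitions live in igb.h):

    #define IGB_FLAG_HAS_MSI     (1 << 0) /* hypothetical bit positions */
    #define IGB_FLAG_DCA_ENABLED (1 << 1)

    adapter->flags |= IGB_FLAG_HAS_MSI;     /* set   */
    adapter->flags &= ~IGB_FLAG_HAS_MSI;    /* clear */
    if (adapter->flags & IGB_FLAG_HAS_MSI)  /* test  */
            setup_msi_interrupt();          /* hypothetical call */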
@@ -440,7 +527,7 @@ msi_only:
        adapter->num_rx_queues = 1;
        adapter->num_tx_queues = 1;
        if (!pci_enable_msi(adapter->pdev))
-               adapter->msi_enabled = 1;
+               adapter->flags |= IGB_FLAG_HAS_MSI;
 
 #ifdef CONFIG_NETDEVICES_MULTIQUEUE
        /* Notify the stack of the (possibly) reduced Tx Queue count. */
@@ -468,24 +555,33 @@ static int igb_request_irq(struct igb_adapter *adapter)
                /* fall back to MSI */
                igb_reset_interrupt_capability(adapter);
                if (!pci_enable_msi(adapter->pdev))
-                       adapter->msi_enabled = 1;
+                       adapter->flags |= IGB_FLAG_HAS_MSI;
                igb_free_all_tx_resources(adapter);
                igb_free_all_rx_resources(adapter);
                adapter->num_rx_queues = 1;
                igb_alloc_queues(adapter);
        } else {
-               wr32(E1000_MSIXBM(0), (E1000_EICR_RX_QUEUE0 |
-                                      E1000_EIMS_OTHER));
+               switch (hw->mac.type) {
+               case e1000_82575:
+                       wr32(E1000_MSIXBM(0),
+                            (E1000_EICR_RX_QUEUE0 | E1000_EIMS_OTHER));
+                       break;
+               case e1000_82576:
+                       wr32(E1000_IVAR0, E1000_IVAR_VALID);
+                       break;
+               default:
+                       break;
+               }
        }
 
-       if (adapter->msi_enabled) {
+       if (adapter->flags & IGB_FLAG_HAS_MSI) {
                err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0,
                                  netdev->name, netdev);
                if (!err)
                        goto request_done;
                /* fall back to legacy interrupts */
                igb_reset_interrupt_capability(adapter);
-               adapter->msi_enabled = 0;
+               adapter->flags &= ~IGB_FLAG_HAS_MSI;
        }
 
        err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED,
@@ -770,16 +866,23 @@ void igb_reinit_locked(struct igb_adapter *adapter)
 void igb_reset(struct igb_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
-       struct e1000_fc_info *fc = &adapter->hw.fc;
+       struct e1000_mac_info *mac = &hw->mac;
+       struct e1000_fc_info *fc = &hw->fc;
        u32 pba = 0, tx_space, min_tx_space, min_rx_space;
        u16 hwm;
 
        /* Repartition Pba for greater than 9k mtu
         * To take effect CTRL.RST is required.
         */
-       pba = E1000_PBA_34K;
+       if (mac->type != e1000_82576)
+               pba = E1000_PBA_34K;
+       else
+               pba = E1000_PBA_64K;
 
-       if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
+       if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
+           (mac->type < e1000_82576)) {
                /* adjust PBA for jumbo frames */
                wr32(E1000_PBA, pba);
 
@@ -818,8 +921,8 @@ void igb_reset(struct igb_adapter *adapter)
                        if (pba < min_rx_space)
                                pba = min_rx_space;
                }
+               wr32(E1000_PBA, pba);
        }
-       wr32(E1000_PBA, pba);
 
        /* flow control settings */
        /* The high water mark must be low enough to fit one full frame
@@ -828,10 +931,15 @@ void igb_reset(struct igb_adapter *adapter)
         * - 90% of the Rx FIFO size, or
         * - the full Rx FIFO size minus one full frame */
        hwm = min(((pba << 10) * 9 / 10),
-                 ((pba << 10) - adapter->max_frame_size));
+                 ((pba << 10) - 2 * adapter->max_frame_size));
 
-       fc->high_water = hwm & 0xFFF8;  /* 8-byte granularity */
-       fc->low_water = fc->high_water - 8;
+       if (mac->type < e1000_82576) {
+               fc->high_water = hwm & 0xFFF8;  /* 8-byte granularity */
+               fc->low_water = fc->high_water - 8;
+       } else {
+               fc->high_water = hwm & 0xFFF0;  /* 16-byte granularity */
+               fc->low_water = fc->high_water - 16;
+       }
        fc->pause_time = 0xFFFF;
        fc->send_xon = 1;
        fc->type = fc->original_type;
@@ -1006,6 +1114,17 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 
        igb_get_bus_info_pcie(hw);
 
+       /* set flags */
+       switch (hw->mac.type) {
+       case e1000_82576:
+       case e1000_82575:
+               adapter->flags |= IGB_FLAG_HAS_DCA;
+               adapter->flags |= IGB_FLAG_NEED_CTX_IDX;
+               break;
+       default:
+               break;
+       }
+
        hw->phy.autoneg_wait_to_complete = false;
        hw->mac.adaptive_ifs = true;
 
@@ -1029,6 +1148,10 @@ static int __devinit igb_probe(struct pci_dev *pdev,
        netdev->features |= NETIF_F_TSO;
        netdev->features |= NETIF_F_TSO6;
 
+#ifdef CONFIG_IGB_LRO
+       netdev->features |= NETIF_F_LRO;
+#endif
+
        netdev->vlan_features |= NETIF_F_TSO;
        netdev->vlan_features |= NETIF_F_TSO6;
        netdev->vlan_features |= NETIF_F_HW_CSUM;
@@ -1121,11 +1244,23 @@ static int __devinit igb_probe(struct pci_dev *pdev,
                adapter->eeprom_wol = 0;
                break;
        case E1000_DEV_ID_82575EB_FIBER_SERDES:
+       case E1000_DEV_ID_82576_FIBER:
+       case E1000_DEV_ID_82576_SERDES:
                /* Wake events only supported on port A for dual fiber
                 * regardless of eeprom setting */
                if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
                        adapter->eeprom_wol = 0;
                break;
+       case E1000_DEV_ID_82576_QUAD_COPPER:
+               /* if quad port adapter, disable WoL on all but port A */
+               if (global_quad_port_a != 0)
+                       adapter->eeprom_wol = 0;
+               else
+                       adapter->flags |= IGB_FLAG_QUAD_PORT_A;
+               /* Reset for multiple quad port adapters */
+               if (++global_quad_port_a == 4)
+                       global_quad_port_a = 0;
+               break;
        }
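
Concretely: the four functions of an 82576 quad-copper adapter are probed in sequence, so the first sees global_quad_port_a == 0, keeps its WoL setting, and is flagged IGB_FLAG_QUAD_PORT_A; the next three see a nonzero count and get eeprom_wol = 0; the wrap at 4 then resets the counter so a second quad adapter in the same system starts over at its own port A.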
 
        /* initialize the wol settings based on the eeprom settings */
@@ -1152,8 +1287,9 @@ static int __devinit igb_probe(struct pci_dev *pdev,
                goto err_register;
 
 #ifdef CONFIG_DCA
-       if (dca_add_requester(&pdev->dev) == 0) {
-               adapter->dca_enabled = true;
+       if ((adapter->flags & IGB_FLAG_HAS_DCA) &&
+           (dca_add_requester(&pdev->dev) == 0)) {
+               adapter->flags |= IGB_FLAG_DCA_ENABLED;
                dev_info(&pdev->dev, "DCA enabled\n");
                /* Always use CB2 mode, difference is masked
                 * in the CB driver. */
@@ -1182,7 +1318,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
        dev_info(&pdev->dev,
                "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
                adapter->msix_entries ? "MSI-X" :
-               adapter->msi_enabled ? "MSI" : "legacy",
+               (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
                adapter->num_rx_queues, adapter->num_tx_queues);
 
        return 0;
@@ -1197,8 +1333,7 @@ err_eeprom:
                iounmap(hw->flash_address);
 
        igb_remove_device(hw);
-       kfree(adapter->tx_ring);
-       kfree(adapter->rx_ring);
+       igb_free_queues(adapter);
 err_sw_init:
 err_hw_init:
        iounmap(hw->hw_addr);
@@ -1225,7 +1360,9 @@ static void __devexit igb_remove(struct pci_dev *pdev)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igb_adapter *adapter = netdev_priv(netdev);
+#ifdef CONFIG_DCA
        struct e1000_hw *hw = &adapter->hw;
+#endif
 
        /* flush_scheduled work may reschedule our watchdog task, so
         * explicitly disable watchdog tasks from being rescheduled  */
@@ -1236,10 +1373,10 @@ static void __devexit igb_remove(struct pci_dev *pdev)
        flush_scheduled_work();
 
 #ifdef CONFIG_DCA
-       if (adapter->dca_enabled) {
+       if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
                dev_info(&pdev->dev, "DCA disabled\n");
                dca_remove_requester(&pdev->dev);
-               adapter->dca_enabled = false;
+               adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
                wr32(E1000_DCA_CTRL, 1);
        }
 #endif
@@ -1256,8 +1393,7 @@ static void __devexit igb_remove(struct pci_dev *pdev)
        igb_remove_device(&adapter->hw);
        igb_reset_interrupt_capability(adapter);
 
-       kfree(adapter->tx_ring);
-       kfree(adapter->rx_ring);
+       igb_free_queues(adapter);
 
        iounmap(adapter->hw.hw_addr);
        if (adapter->hw.flash_address)
@@ -1587,6 +1723,14 @@ int igb_setup_rx_resources(struct igb_adapter *adapter,
        struct pci_dev *pdev = adapter->pdev;
        int size, desc_len;
 
+#ifdef CONFIG_IGB_LRO
+       size = sizeof(struct net_lro_desc) * MAX_LRO_DESCRIPTORS;
+       rx_ring->lro_mgr.lro_arr = vmalloc(size);
+       if (!rx_ring->lro_mgr.lro_arr)
+               goto err;
+       memset(rx_ring->lro_mgr.lro_arr, 0, size);
+#endif
+
        size = sizeof(struct igb_buffer) * rx_ring->count;
        rx_ring->buffer_info = vmalloc(size);
        if (!rx_ring->buffer_info)
@@ -1607,13 +1751,16 @@ int igb_setup_rx_resources(struct igb_adapter *adapter,
 
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
-       rx_ring->pending_skb = NULL;
 
        rx_ring->adapter = adapter;
 
        return 0;
 
 err:
+#ifdef CONFIG_IGB_LRO
+       vfree(rx_ring->lro_mgr.lro_arr);
+       rx_ring->lro_mgr.lro_arr = NULL;
+#endif
        vfree(rx_ring->buffer_info);
        dev_err(&adapter->pdev->dev, "Unable to allocate memory for "
                "the receive descriptor ring\n");
@@ -1699,15 +1846,6 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
                        rctl |= E1000_RCTL_SZ_2048;
                        rctl &= ~E1000_RCTL_BSEX;
                        break;
-               case IGB_RXBUFFER_4096:
-                       rctl |= E1000_RCTL_SZ_4096;
-                       break;
-               case IGB_RXBUFFER_8192:
-                       rctl |= E1000_RCTL_SZ_8192;
-                       break;
-               case IGB_RXBUFFER_16384:
-                       rctl |= E1000_RCTL_SZ_16384;
-                       break;
                }
        } else {
                rctl &= ~E1000_RCTL_BSEX;
@@ -1725,10 +1863,8 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
         * so only enable packet split for jumbo frames */
        if (rctl & E1000_RCTL_LPE) {
                adapter->rx_ps_hdr_size = IGB_RXBUFFER_128;
-               srrctl = adapter->rx_ps_hdr_size <<
+               srrctl |= adapter->rx_ps_hdr_size <<
                         E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
-               /* buffer size is ALWAYS one page */
-               srrctl |= PAGE_SIZE >> E1000_SRRCTL_BSIZEPKT_SHIFT;
                srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
        } else {
                adapter->rx_ps_hdr_size = 0;
@@ -1788,6 +1924,16 @@ static void igb_configure_rx(struct igb_adapter *adapter)
                rxdctl |= IGB_RX_HTHRESH << 8;
                rxdctl |= IGB_RX_WTHRESH << 16;
                wr32(E1000_RXDCTL(i), rxdctl);
+#ifdef CONFIG_IGB_LRO
+               /* Initial LRO settings */
+               ring->lro_mgr.max_aggr = MAX_LRO_AGGR;
+               ring->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
+               ring->lro_mgr.get_skb_header = igb_get_skb_hdr;
+               ring->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
+               ring->lro_mgr.dev = adapter->netdev;
+               ring->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
+               ring->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
+#endif
        }
 
        if (adapter->num_rx_queues > 1) {
@@ -1801,7 +1947,10 @@ static void igb_configure_rx(struct igb_adapter *adapter)
 
                get_random_bytes(&random[0], 40);
 
-               shift = 6;
+               if (hw->mac.type >= e1000_82576)
+                       shift = 0;
+               else
+                       shift = 6;
                for (j = 0; j < (32 * 4); j++) {
                        reta.bytes[j & 3] =
                                (j % adapter->num_rx_queues) << shift;
@@ -1976,6 +2125,11 @@ static void igb_free_rx_resources(struct igb_ring *rx_ring)
        vfree(rx_ring->buffer_info);
        rx_ring->buffer_info = NULL;
 
+#ifdef CONFIG_IGB_LRO
+       vfree(rx_ring->lro_mgr.lro_arr);
+       rx_ring->lro_mgr.lro_arr = NULL;
+#endif /* CONFIG_IGB_LRO */
+
        pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
 
        rx_ring->desc = NULL;
@@ -2030,20 +2184,17 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
                        buffer_info->skb = NULL;
                }
                if (buffer_info->page) {
-                       pci_unmap_page(pdev, buffer_info->page_dma,
-                                      PAGE_SIZE, PCI_DMA_FROMDEVICE);
+                       if (buffer_info->page_dma)
+                               pci_unmap_page(pdev, buffer_info->page_dma,
+                                              PAGE_SIZE / 2,
+                                              PCI_DMA_FROMDEVICE);
                        put_page(buffer_info->page);
                        buffer_info->page = NULL;
                        buffer_info->page_dma = 0;
+                       buffer_info->page_offset = 0;
                }
        }
 
-       /* there also may be some cached data from a chained receive */
-       if (rx_ring->pending_skb) {
-               dev_kfree_skb(rx_ring->pending_skb);
-               rx_ring->pending_skb = NULL;
-       }
-
        size = sizeof(struct igb_buffer) * rx_ring->count;
        memset(rx_ring->buffer_info, 0, size);
 
@@ -2127,7 +2278,7 @@ static void igb_set_multi(struct net_device *netdev)
 
        if (!netdev->mc_count) {
                /* nothing to program, so clear mc list */
-               igb_update_mc_addr_list(hw, NULL, 0, 1,
+               igb_update_mc_addr_list_82575(hw, NULL, 0, 1,
                                          mac->rar_entry_count);
                return;
        }
@@ -2145,7 +2296,8 @@ static void igb_set_multi(struct net_device *netdev)
                memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
                mc_ptr = mc_ptr->next;
        }
-       igb_update_mc_addr_list(hw, mta_list, i, 1, mac->rar_entry_count);
+       igb_update_mc_addr_list_82575(hw, mta_list, i, 1,
+                                     mac->rar_entry_count);
        kfree(mta_list);
 }
 
@@ -2552,9 +2704,9 @@ static inline int igb_tso_adv(struct igb_adapter *adapter,
        mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
        mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
 
-       /* Context index must be unique per ring.  Luckily, so is the interrupt
-        * mask value. */
-       mss_l4len_idx |= tx_ring->eims_value >> 4;
+       /* Context index must be unique per ring. */
+       if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
+               mss_l4len_idx |= tx_ring->queue_index << 4;
 
        context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
        context_desc->seqnum_seed = 0;
@@ -2618,8 +2770,9 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
 
                context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
                context_desc->seqnum_seed = 0;
-               context_desc->mss_l4len_idx =
-                                         cpu_to_le32(tx_ring->queue_index << 4);
+               if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
+                       context_desc->mss_l4len_idx =
+                               cpu_to_le32(tx_ring->queue_index << 4);
 
                buffer_info->time_stamp = jiffies;
                buffer_info->dma = 0;
@@ -2720,8 +2873,9 @@ static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
                olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
        }
 
-       if (tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_TSO |
-                       IGB_TX_FLAGS_VLAN))
+       if ((adapter->flags & IGB_FLAG_NEED_CTX_IDX) &&
+           (tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_TSO |
+                        IGB_TX_FLAGS_VLAN)))
                olinfo_status |= tx_ring->queue_index << 4;
 
        olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
@@ -2967,7 +3121,11 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
        else if (max_frame <= IGB_RXBUFFER_2048)
                adapter->rx_buffer_len = IGB_RXBUFFER_2048;
        else
-               adapter->rx_buffer_len = IGB_RXBUFFER_4096;
+#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
+               adapter->rx_buffer_len = IGB_RXBUFFER_16384;
+#else
+               adapter->rx_buffer_len = PAGE_SIZE / 2;
+#endif
        /* adjust allocation if LPE protects us, and we aren't using SBP */
        if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
             (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))
@@ -3157,7 +3315,7 @@ static irqreturn_t igb_msix_tx(int irq, void *data)
        if (!tx_ring->itr_val)
                wr32(E1000_EIMC, tx_ring->eims_value);
 #ifdef CONFIG_DCA
-       if (adapter->dca_enabled)
+       if (adapter->flags & IGB_FLAG_DCA_ENABLED)
                igb_update_tx_dca(tx_ring);
 #endif
        tx_ring->total_bytes = 0;
@@ -3194,7 +3352,7 @@ static irqreturn_t igb_msix_rx(int irq, void *data)
                __netif_rx_schedule(adapter->netdev, &rx_ring->napi);
 
 #ifdef CONFIG_DCA
-       if (adapter->dca_enabled)
+       if (adapter->flags & IGB_FLAG_DCA_ENABLED)
                igb_update_rx_dca(rx_ring);
 #endif
                return IRQ_HANDLED;
@@ -3211,8 +3369,14 @@ static void igb_update_rx_dca(struct igb_ring *rx_ring)
 
        if (rx_ring->cpu != cpu) {
                dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
-               dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
-               dca_rxctrl |= dca_get_tag(cpu);
+               if (hw->mac.type == e1000_82576) {
+                       dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
+                       dca_rxctrl |= dca_get_tag(cpu) <<
+                                     E1000_DCA_RXCTRL_CPUID_SHIFT;
+               } else {
+                       dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
+                       dca_rxctrl |= dca_get_tag(cpu);
+               }
                dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
                dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
                dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
@@ -3232,8 +3396,14 @@ static void igb_update_tx_dca(struct igb_ring *tx_ring)
 
        if (tx_ring->cpu != cpu) {
                dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
-               dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
-               dca_txctrl |= dca_get_tag(cpu);
+               if (hw->mac.type == e1000_82576) {
+                       dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
+                       dca_txctrl |= dca_get_tag(cpu) <<
+                                     E1000_DCA_TXCTRL_CPUID_SHIFT;
+               } else {
+                       dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
+                       dca_txctrl |= dca_get_tag(cpu);
+               }
                dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
                wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
                tx_ring->cpu = cpu;
@@ -3245,7 +3415,7 @@ static void igb_setup_dca(struct igb_adapter *adapter)
 {
        int i;
 
-       if (!(adapter->dca_enabled))
+       if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
                return;
 
        for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -3265,12 +3435,15 @@ static int __igb_notify_dca(struct device *dev, void *data)
        struct e1000_hw *hw = &adapter->hw;
        unsigned long event = *(unsigned long *)data;
 
+       if (!(adapter->flags & IGB_FLAG_HAS_DCA))
+               goto out;
+
        switch (event) {
        case DCA_PROVIDER_ADD:
                /* if already enabled, don't do it again */
-               if (adapter->dca_enabled)
+               if (adapter->flags & IGB_FLAG_DCA_ENABLED)
                        break;
-               adapter->dca_enabled = true;
+               adapter->flags |= IGB_FLAG_DCA_ENABLED;
                /* Always use CB2 mode, difference is masked
                 * in the CB driver. */
                wr32(E1000_DCA_CTRL, 2);
@@ -3281,17 +3454,17 @@ static int __igb_notify_dca(struct device *dev, void *data)
                }
                /* Fall Through since DCA is disabled. */
        case DCA_PROVIDER_REMOVE:
-               if (adapter->dca_enabled) {
+               if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
                        /* without this a class_device is left
                         * hanging around in the sysfs model */
                        dca_remove_requester(dev);
                        dev_info(&adapter->pdev->dev, "DCA disabled\n");
-                       adapter->dca_enabled = false;
+                       adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
                        wr32(E1000_DCA_CTRL, 1);
                }
                break;
        }
-
+out:
        return 0;
 }
 
@@ -3397,13 +3570,13 @@ static int igb_poll(struct napi_struct *napi, int budget)
 
        /* this poll routine only supports one tx and one rx queue */
 #ifdef CONFIG_DCA
-       if (adapter->dca_enabled)
+       if (adapter->flags & IGB_FLAG_DCA_ENABLED)
                igb_update_tx_dca(&adapter->tx_ring[0]);
 #endif
        tx_clean_complete = igb_clean_tx_irq(&adapter->tx_ring[0]);
 
 #ifdef CONFIG_DCA
-       if (adapter->dca_enabled)
+       if (adapter->flags & IGB_FLAG_DCA_ENABLED)
                igb_update_rx_dca(&adapter->rx_ring[0]);
 #endif
        igb_clean_rx_irq_adv(&adapter->rx_ring[0], &work_done, budget);
@@ -3435,7 +3608,7 @@ static int igb_clean_rx_ring_msix(struct napi_struct *napi, int budget)
                goto quit_polling;
 
 #ifdef CONFIG_DCA
-       if (adapter->dca_enabled)
+       if (adapter->flags & IGB_FLAG_DCA_ENABLED)
                igb_update_rx_dca(rx_ring);
 #endif
        igb_clean_rx_irq_adv(rx_ring, &work_done, budget);
@@ -3572,7 +3745,7 @@ done_cleaning:
                        /* detected Tx unit hang */
                        dev_err(&adapter->pdev->dev,
                                "Detected Tx Unit Hang\n"
-                               "  Tx Queue             <%lu>\n"
+                               "  Tx Queue             <%d>\n"
                                "  TDH                  <%x>\n"
                                "  TDT                  <%x>\n"
                                "  next_to_use          <%x>\n"
@@ -3582,8 +3755,7 @@ done_cleaning:
                                "  time_stamp           <%lx>\n"
                                "  jiffies              <%lx>\n"
                                "  desc.status          <%x>\n",
-                               (unsigned long)((tx_ring - adapter->tx_ring) /
-                                       sizeof(struct igb_ring)),
+                               tx_ring->queue_index,
                                readl(adapter->hw.hw_addr + tx_ring->head),
                                readl(adapter->hw.hw_addr + tx_ring->tail),
                                tx_ring->next_to_use,
@@ -3608,22 +3780,75 @@ done_cleaning:
        return retval;
 }
 
+#ifdef CONFIG_IGB_LRO
+/**
+ * igb_get_skb_hdr - helper function for LRO header processing
+ * @skb: pointer to sk_buff to be added to LRO packet
+ * @iphdr: pointer to ip header structure
+ * @tcph: pointer to tcp header structure
+ * @hdr_flags: pointer to header flags
+ * @priv: pointer to the receive descriptor for the current sk_buff
+ **/
+static int igb_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph,
+                           u64 *hdr_flags, void *priv)
+{
+       union e1000_adv_rx_desc *rx_desc = priv;
+       u16 pkt_type = rx_desc->wb.lower.lo_dword.pkt_info &
+                      (E1000_RXDADV_PKTTYPE_IPV4 | E1000_RXDADV_PKTTYPE_TCP);
+
+       /* Verify that this is a valid IPv4 TCP packet */
+       if (pkt_type != (E1000_RXDADV_PKTTYPE_IPV4 |
+                         E1000_RXDADV_PKTTYPE_TCP))
+               return -1;
+
+       /* Set network headers */
+       skb_reset_network_header(skb);
+       skb_set_transport_header(skb, ip_hdrlen(skb));
+       *iphdr = ip_hdr(skb);
+       *tcph = tcp_hdr(skb);
+       *hdr_flags = LRO_IPV4 | LRO_TCP;
+
+       return 0;
+}
+#endif /* CONFIG_IGB_LRO */
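
As a worked example of the pkt_type test above: pkt_info is masked down to just the IPv4 and TCP packet-type bits and compared against exactly that pair, so only plain TCP-over-IPv4 frames return 0; anything else (IPv6, UDP, and so on) leaves a different residue, the callback returns -1, and inet_lro passes the skb up unaggregated.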
 
 /**
  * igb_receive_skb - helper function to handle rx indications
- * @adapter: board private structure
+ * @ring: pointer to the receive ring receiving this packet
  * @status: descriptor status field as written by hardware
- * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
+ * @rx_desc: advanced rx descriptor as written by hardware (carries the vlan tag)
  * @skb: pointer to sk_buff to be indicated to stack
  **/
-static void igb_receive_skb(struct igb_adapter *adapter, u8 status, __le16 vlan,
-                           struct sk_buff *skb)
+static void igb_receive_skb(struct igb_ring *ring, u8 status,
+                            union e1000_adv_rx_desc *rx_desc,
+                            struct sk_buff *skb)
 {
-       if (adapter->vlgrp && (status & E1000_RXD_STAT_VP))
-               vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
-                                        le16_to_cpu(vlan));
-       else
-               netif_receive_skb(skb);
+       struct igb_adapter *adapter = ring->adapter;
+       bool vlan_extracted = (adapter->vlgrp && (status & E1000_RXD_STAT_VP));
+
+#ifdef CONFIG_IGB_LRO
+       if (adapter->netdev->features & NETIF_F_LRO &&
+           skb->ip_summed == CHECKSUM_UNNECESSARY) {
+               if (vlan_extracted)
+                       lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb,
+                                          adapter->vlgrp,
+                                          le16_to_cpu(rx_desc->wb.upper.vlan),
+                                          rx_desc);
+               else
+                       lro_receive_skb(&ring->lro_mgr, skb, rx_desc);
+               ring->lro_used = 1;
+       } else {
+#endif
+               if (vlan_extracted)
+                       vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
+                                         le16_to_cpu(rx_desc->wb.upper.vlan));
+               else
+                       netif_receive_skb(skb);
+#ifdef CONFIG_IGB_LRO
+       }
+#endif
 }
 
 
@@ -3658,7 +3883,7 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
        union e1000_adv_rx_desc *rx_desc , *next_rxd;
        struct igb_buffer *buffer_info , *next_buffer;
        struct sk_buff *skb;
-       unsigned int i, j;
+       unsigned int i;
        u32 length, hlen, staterr;
        bool cleaned = false;
        int cleaned_count = 0;
@@ -3688,61 +3913,46 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
                cleaned = true;
                cleaned_count++;
 
-               if (rx_ring->pending_skb != NULL) {
-                       skb = rx_ring->pending_skb;
-                       rx_ring->pending_skb = NULL;
-                       j = rx_ring->pending_skb_page;
-               } else {
-                       skb = buffer_info->skb;
-                       prefetch(skb->data - NET_IP_ALIGN);
-                       buffer_info->skb = NULL;
-                       if (hlen) {
-                               pci_unmap_single(pdev, buffer_info->dma,
-                                                adapter->rx_ps_hdr_size +
-                                                  NET_IP_ALIGN,
-                                                PCI_DMA_FROMDEVICE);
-                               skb_put(skb, hlen);
-                       } else {
-                               pci_unmap_single(pdev, buffer_info->dma,
-                                                adapter->rx_buffer_len +
-                                                  NET_IP_ALIGN,
-                                                PCI_DMA_FROMDEVICE);
-                               skb_put(skb, length);
-                               goto send_up;
-                       }
-                       j = 0;
+               skb = buffer_info->skb;
+               prefetch(skb->data - NET_IP_ALIGN);
+               buffer_info->skb = NULL;
+               if (!adapter->rx_ps_hdr_size) {
+                       pci_unmap_single(pdev, buffer_info->dma,
+                                        adapter->rx_buffer_len +
+                                          NET_IP_ALIGN,
+                                        PCI_DMA_FROMDEVICE);
+                       skb_put(skb, length);
+                       goto send_up;
+               }
+
+               if (!skb_shinfo(skb)->nr_frags) {
+                       pci_unmap_single(pdev, buffer_info->dma,
+                                        adapter->rx_ps_hdr_size +
+                                          NET_IP_ALIGN,
+                                        PCI_DMA_FROMDEVICE);
+                       skb_put(skb, hlen);
                }
 
-               while (length) {
+               if (length) {
                        pci_unmap_page(pdev, buffer_info->page_dma,
-                               PAGE_SIZE, PCI_DMA_FROMDEVICE);
+                                      PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
                        buffer_info->page_dma = 0;
-                       skb_fill_page_desc(skb, j, buffer_info->page,
-                                               0, length);
-                       buffer_info->page = NULL;
+
+                       skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
+                                               buffer_info->page,
+                                               buffer_info->page_offset,
+                                               length);
+
+                       if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) ||
+                           (page_count(buffer_info->page) != 1))
+                               buffer_info->page = NULL;
+                       else
+                               get_page(buffer_info->page);
 
                        skb->len += length;
                        skb->data_len += length;
-                       skb->truesize += length;
-                       rx_desc->wb.upper.status_error = 0;
-                       if (staterr & E1000_RXD_STAT_EOP)
-                               break;
 
-                       j++;
-                       cleaned_count++;
-                       i++;
-                       if (i == rx_ring->count)
-                               i = 0;
-
-                       buffer_info = &rx_ring->buffer_info[i];
-                       rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
-                       staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
-                       length = le16_to_cpu(rx_desc->wb.upper.length);
-                       if (!(staterr & E1000_RXD_STAT_DD)) {
-                               rx_ring->pending_skb = skb;
-                               rx_ring->pending_skb_page = j;
-                               goto out;
-                       }
+                       skb->truesize += length;
                }
 send_up:
                i++;
@@ -3752,6 +3962,12 @@ send_up:
                prefetch(next_rxd);
                next_buffer = &rx_ring->buffer_info[i];
 
+               if (!(staterr & E1000_RXD_STAT_EOP)) {
+                       buffer_info->skb = xchg(&next_buffer->skb, skb);
+                       buffer_info->dma = xchg(&next_buffer->dma, 0);
+                       goto next_desc;
+               }
+
                if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
                        dev_kfree_skb_irq(skb);
                        goto next_desc;
@@ -3765,7 +3981,7 @@ send_up:
 
                skb->protocol = eth_type_trans(skb, netdev);
 
-               igb_receive_skb(adapter, staterr, rx_desc->wb.upper.vlan, skb);
+               igb_receive_skb(rx_ring, staterr, rx_desc, skb);
 
                netdev->last_rx = jiffies;
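
Tracing the non-EOP path above for a packet spanning two descriptors: on the first, EOP is clear, so the partially built skb is parked in next_buffer->skb via xchg() (which also hands this slot next_buffer's old skb for reuse) and the loop moves on; on the second, that parked skb is picked up as buffer_info->skb, the page fragment is appended, EOP is set, and the completed skb goes to igb_receive_skb().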
 
@@ -3784,10 +4000,17 @@ next_desc:
 
                staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
        }
-out:
+
        rx_ring->next_to_clean = i;
        cleaned_count = IGB_DESC_UNUSED(rx_ring);
 
+#ifdef CONFIG_IGB_LRO
+       if (rx_ring->lro_used) {
+               lro_flush_all(&rx_ring->lro_mgr);
+               rx_ring->lro_used = 0;
+       }
+#endif
+
        if (cleaned_count)
                igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
 
@@ -3822,16 +4045,22 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
        while (cleaned_count--) {
                rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
 
-               if (adapter->rx_ps_hdr_size && !buffer_info->page) {
-                       buffer_info->page = alloc_page(GFP_ATOMIC);
+               if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) {
                        if (!buffer_info->page) {
-                               adapter->alloc_rx_buff_failed++;
-                               goto no_buffers;
+                               buffer_info->page = alloc_page(GFP_ATOMIC);
+                               if (!buffer_info->page) {
+                                       adapter->alloc_rx_buff_failed++;
+                                       goto no_buffers;
+                               }
+                               buffer_info->page_offset = 0;
+                       } else {
+                               buffer_info->page_offset ^= PAGE_SIZE / 2;
                        }
                        buffer_info->page_dma =
                                pci_map_page(pdev,
                                             buffer_info->page,
-                                            0, PAGE_SIZE,
+                                            buffer_info->page_offset,
+                                            PAGE_SIZE / 2,
                                             PCI_DMA_FROMDEVICE);
                }
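
The allocator above ping-pongs each page between its two halves: page_offset toggles 0 -> PAGE_SIZE/2 -> 0 via the XOR, each half gets its own half-page DMA mapping, and the page_count()/get_page() logic in igb_clean_rx_irq_adv only reuses a page while the stack holds no extra reference. On a 4 KB-page system that yields two 2 KB receive buffers per page allocation.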
 
@@ -4098,7 +4327,7 @@ static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
-       u32 ctrl, ctrl_ext, rctl, status;
+       u32 ctrl, rctl, status;
        u32 wufc = adapter->wol;
 #ifdef CONFIG_PM
        int retval = 0;
@@ -4106,11 +4335,12 @@ static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
 
        netif_device_detach(netdev);
 
-       if (netif_running(netdev)) {
-               WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
-               igb_down(adapter);
-               igb_free_irq(adapter);
-       }
+       if (netif_running(netdev))
+               igb_close(netdev);
+
+       igb_reset_interrupt_capability(adapter);
+
+       igb_free_queues(adapter);
 
 #ifdef CONFIG_PM
        retval = pci_save_state(pdev);
@@ -4141,33 +4371,24 @@ static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
                ctrl |= E1000_CTRL_ADVD3WUC;
                wr32(E1000_CTRL, ctrl);
 
-               if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
-                  adapter->hw.phy.media_type ==
-                                       e1000_media_type_internal_serdes) {
-                       /* keep the laser running in D3 */
-                       ctrl_ext = rd32(E1000_CTRL_EXT);
-                       ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
-                       wr32(E1000_CTRL_EXT, ctrl_ext);
-               }
-
                /* Allow time for pending master requests to run */
                igb_disable_pcie_master(&adapter->hw);
 
                wr32(E1000_WUC, E1000_WUC_PME_EN);
                wr32(E1000_WUFC, wufc);
-               pci_enable_wake(pdev, PCI_D3hot, 1);
-               pci_enable_wake(pdev, PCI_D3cold, 1);
        } else {
                wr32(E1000_WUC, 0);
                wr32(E1000_WUFC, 0);
-               pci_enable_wake(pdev, PCI_D3hot, 0);
-               pci_enable_wake(pdev, PCI_D3cold, 0);
        }
 
-       /* make sure adapter isn't asleep if manageability is enabled */
-       if (adapter->en_mng_pt) {
+       /* make sure adapter isn't asleep if manageability/wol is enabled */
+       if (wufc || adapter->en_mng_pt) {
                pci_enable_wake(pdev, PCI_D3hot, 1);
                pci_enable_wake(pdev, PCI_D3cold, 1);
+       } else {
+               igb_shutdown_fiber_serdes_link_82575(hw);
+               pci_enable_wake(pdev, PCI_D3hot, 0);
+               pci_enable_wake(pdev, PCI_D3cold, 0);
        }
 
        /* Release control of h/w to f/w.  If f/w is AMT enabled, this
@@ -4206,10 +4427,11 @@ static int igb_resume(struct pci_dev *pdev)
        pci_enable_wake(pdev, PCI_D3hot, 0);
        pci_enable_wake(pdev, PCI_D3cold, 0);
 
-       if (netif_running(netdev)) {
-               err = igb_request_irq(adapter);
-               if (err)
-                       return err;
+       igb_set_interrupt_capability(adapter);
+
+       if (igb_alloc_queues(adapter)) {
+               dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+               return -ENOMEM;
        }
 
        /* e1000_power_up_phy(adapter); */
@@ -4217,10 +4439,11 @@ static int igb_resume(struct pci_dev *pdev)
        igb_reset(adapter);
        wr32(E1000_WUS, ~0);
 
-       igb_init_manageability(adapter);
-
-       if (netif_running(netdev))
-               igb_up(adapter);
+       if (netif_running(netdev)) {
+               err = igb_open(netdev);
+               if (err)
+                       return err;
+       }
 
        netif_device_attach(netdev);
 
@@ -4250,6 +4473,8 @@ static void igb_netpoll(struct net_device *netdev)
        int work_done = 0;
 
        igb_irq_disable(adapter);
+       adapter->flags |= IGB_FLAG_IN_NETPOLL;
+
        for (i = 0; i < adapter->num_tx_queues; i++)
                igb_clean_tx_irq(&adapter->tx_ring[i]);
 
@@ -4258,6 +4483,7 @@ static void igb_netpoll(struct net_device *netdev)
                                     &work_done,
                                     adapter->rx_ring[i].napi.weight);
 
+       adapter->flags &= ~IGB_FLAG_IN_NETPOLL;
        igb_irq_enable(adapter);
 }
 #endif /* CONFIG_NET_POLL_CONTROLLER */