Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index d143e8b..fabcded 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -48,6 +48,9 @@ static atomic_t devices_found;
 static int enable_mq = 1;
 static int irq_share_mode;
 
+static void
+vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);
+
 /*
  *    Enable/Disable the given intr
  */
@@ -139,9 +142,13 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
 {
        u32 ret;
        int i;
+       unsigned long flags;
 
+       spin_lock_irqsave(&adapter->cmd_lock, flags);
        VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
        ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+       spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+
        adapter->link_speed = ret >> 16;
        if (ret & 1) { /* Link is up. */
                printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n",
@@ -171,6 +178,7 @@ static void
 vmxnet3_process_events(struct vmxnet3_adapter *adapter)
 {
        int i;
+       unsigned long flags;
        u32 events = le32_to_cpu(adapter->shared->ecr);
        if (!events)
                return;
@@ -183,8 +191,10 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter)
 
        /* Check if there is an error on xmit/recv queues */
        if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
+               spin_lock_irqsave(&adapter->cmd_lock, flags);
                VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                                       VMXNET3_CMD_GET_QUEUE_STATUS);
+               spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 
                for (i = 0; i < adapter->num_tx_queues; i++)
                        if (adapter->tqd_start[i].status.stopped)
@@ -395,10 +405,8 @@ vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
 
        while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
                struct vmxnet3_tx_buf_info *tbi;
-               union Vmxnet3_GenericDesc *gdesc;
 
                tbi = tq->buf_info + tq->tx_ring.next2comp;
-               gdesc = tq->tx_ring.base + tq->tx_ring.next2comp;
 
                vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
                if (tbi->skb) {
@@ -565,7 +573,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
        struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
        u32 val;
 
-       while (num_allocated < num_to_alloc) {
+       while (num_allocated <= num_to_alloc) {
                struct vmxnet3_rx_buf_info *rbi;
                union Vmxnet3_GenericDesc *gd;
 
@@ -611,9 +619,15 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
 
                BUG_ON(rbi->dma_addr == 0);
                gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
-               gd->dword[2] = cpu_to_le32((ring->gen << VMXNET3_RXD_GEN_SHIFT)
+               gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
                                           | val | rbi->len);
 
+               /* Fill the last buffer but don't mark it ready, or else the
+                * device will think that the queue is full */
+               if (num_allocated == num_to_alloc)
+                       break;
+
+               gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
                num_allocated++;
                vmxnet3_cmd_ring_adv_next2fill(ring);
        }
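
The rewritten allocation loop relies on the descriptor generation bit: a descriptor is owned by the device only while its gen field matches the ring's current generation, which flips on every wrap. Writing the descriptor with the inverted gen first and OR-ing the real gen in afterwards publishes it atomically, and leaving the last staged descriptor unpublished keeps the device from ever seeing a completely full ring (which would be indistinguishable from an empty one). A compilable condensation of the handoff (illustrative, not driver code):

#include <stdint.h>

struct desc { uint64_t addr; uint32_t len; uint32_t gen; };

struct ring {
        struct desc *base;
        uint32_t size, next2fill, gen;
};

static void publish(struct ring *r, uint64_t addr, uint32_t len)
{
        struct desc *d = &r->base[r->next2fill];

        d->addr = addr;
        d->len  = len;
        d->gen  = !r->gen;      /* staged: still owned by the driver */
        /* in real code a write barrier would order the stores above */
        d->gen  = r->gen;       /* flipped: now owned by the device  */

        if (++r->next2fill == r->size) {
                r->next2fill = 0;
                r->gen = !r->gen;       /* wrapping flips the generation */
        }
}
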
@@ -804,30 +818,25 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
                                   skb_transport_header(skb))->doff * 4;
                ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
        } else {
-               unsigned int pull_size;
-
                if (skb->ip_summed == CHECKSUM_PARTIAL) {
                        ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);
 
                        if (ctx->ipv4) {
                                struct iphdr *iph = (struct iphdr *)
                                                    skb_network_header(skb);
-                               if (iph->protocol == IPPROTO_TCP) {
-                                       pull_size = ctx->eth_ip_hdr_size +
-                                                   sizeof(struct tcphdr);
-
-                                       if (unlikely(!pskb_may_pull(skb,
-                                                               pull_size))) {
-                                               goto err;
-                                       }
+                               if (iph->protocol == IPPROTO_TCP)
                                        ctx->l4_hdr_size = ((struct tcphdr *)
                                           skb_transport_header(skb))->doff * 4;
-                               } else if (iph->protocol == IPPROTO_UDP) {
+                               else if (iph->protocol == IPPROTO_UDP)
+                                       /*
+                                        * Use tcp header size so that bytes to
+                                        * be copied are more than required by
+                                        * the device.
+                                        */
                                        ctx->l4_hdr_size =
-                                                       sizeof(struct udphdr);
-                               } else {
+                                                       sizeof(struct tcphdr);
+                               else
                                        ctx->l4_hdr_size = 0;
-                               }
                        } else {
                                /* for simplicity, don't copy L4 headers */
                                ctx->l4_hdr_size = 0;
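
For the copy_size computed in this hunk, a worked example may help: an untagged IPv4/TCP frame with no IP options copies 14 (Ethernet) + 20 (IP) + doff*4 (TCP) bytes (numbers illustrative):

#include <stdio.h>

int main(void)
{
        unsigned eth_ip_hdr_size = 14 + 20;  /* Ethernet + IPv4, no options */
        unsigned l4_hdr_size     = 5 * 4;    /* tcphdr doff = 5 32-bit words */

        printf("copy_size = %u\n", eth_ip_hdr_size + l4_hdr_size);  /* 54 */
        return 0;
}
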
@@ -888,7 +897,7 @@ vmxnet3_prepare_tso(struct sk_buff *skb,
  * Transmits a pkt thru a given tq
  * Returns:
  *    NETDEV_TX_OK:      descriptors are setup successfully
- *    NETDEV_TX_OK:      error occured, the pkt is dropped
+ *    NETDEV_TX_OK:      error occurred, the pkt is dropped
  *    NETDEV_TX_BUSY:    tx ring is full, queue is stopped
  *
  * Side-effects:
@@ -915,7 +924,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
        count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) +
                skb_shinfo(skb)->nr_frags + 1;
 
-       ctx.ipv4 = (skb->protocol == cpu_to_be16(ETH_P_IP));
+       ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));
 
        ctx.mss = skb_shinfo(skb)->gso_size;
        if (ctx.mss) {
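
The ctx.ipv4 fix above matters for VLAN-tagged frames: skb->protocol is ETH_P_8021Q for those, so the inner ethertype must be read through the tag, which is what vlan_get_protocol() does. A simplified standalone model (not the kernel helper; type and field names invented):

#include <stdint.h>
#include <arpa/inet.h>

#define EXAMPLE_ETH_P_8021Q 0x8100

struct example_vlan_hdr { uint16_t tci; uint16_t inner_proto; };

/* Returns the L3 ethertype in host order, looking through one
 * 802.1Q tag if present. */
static uint16_t frame_l3_proto(uint16_t outer_proto_be,
                               const struct example_vlan_hdr *vh)
{
        if (ntohs(outer_proto_be) == EXAMPLE_ETH_P_8021Q)
                return ntohs(vh->inner_proto);
        return ntohs(outer_proto_be);
}
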
@@ -1078,7 +1087,7 @@ vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
                struct sk_buff *skb,
                union Vmxnet3_GenericDesc *gdesc)
 {
-       if (!gdesc->rcd.cnc && adapter->rxcsum) {
+       if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
                /* typical case: TCP/UDP over IP and both csums are correct */
                if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) ==
                                                        VMXNET3_RCD_CSUM_OK) {
@@ -1135,6 +1144,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
                VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
        };
        u32 num_rxd = 0;
+       bool skip_page_frags = false;
        struct Vmxnet3_RxCompDesc *rcd;
        struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
 #ifdef __BIG_ENDIAN_BITFIELD
@@ -1145,11 +1155,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
                          &rxComp);
        while (rcd->gen == rq->comp_ring.gen) {
                struct vmxnet3_rx_buf_info *rbi;
-               struct sk_buff *skb;
+               struct sk_buff *skb, *new_skb = NULL;
+               struct page *new_page = NULL;
                int num_to_alloc;
                struct Vmxnet3_RxDesc *rxd;
                u32 idx, ring_idx;
-
+               struct vmxnet3_cmd_ring *ring = NULL;
                if (num_rxd >= quota) {
                        /* we may stop even before we see the EOP desc of
                         * the current pkt
@@ -1160,6 +1171,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
                BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
                idx = rcd->rxdIdx;
                ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
+               ring = rq->rx_ring + ring_idx;
                vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
                                  &rxCmdDesc);
                rbi = rq->buf_info[ring_idx] + idx;
@@ -1188,37 +1200,80 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
                                goto rcd_done;
                        }
 
+                       skip_page_frags = false;
                        ctx->skb = rbi->skb;
-                       rbi->skb = NULL;
+                       new_skb = dev_alloc_skb(rbi->len + NET_IP_ALIGN);
+                       if (new_skb == NULL) {
+                               /* Skb allocation failed, do not hand this
+                                * skb over to the stack. Reuse it. Drop the
+                                * existing pkt.
+                                */
+                               rq->stats.rx_buf_alloc_failure++;
+                               ctx->skb = NULL;
+                               rq->stats.drop_total++;
+                               skip_page_frags = true;
+                               goto rcd_done;
+                       }
 
                        pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len,
                                         PCI_DMA_FROMDEVICE);
 
                        skb_put(ctx->skb, rcd->len);
+
+                       /* Immediate refill */
+                       new_skb->dev = adapter->netdev;
+                       skb_reserve(new_skb, NET_IP_ALIGN);
+                       rbi->skb = new_skb;
+                       rbi->dma_addr = pci_map_single(adapter->pdev,
+                                       rbi->skb->data, rbi->len,
+                                       PCI_DMA_FROMDEVICE);
+                       rxd->addr = cpu_to_le64(rbi->dma_addr);
+                       rxd->len = rbi->len;
+
                } else {
-                       BUG_ON(ctx->skb == NULL);
+                       BUG_ON(ctx->skb == NULL && !skip_page_frags);
+
                        /* non SOP buffer must be type 1 in most cases */
-                       if (rbi->buf_type == VMXNET3_RX_BUF_PAGE) {
-                               BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);
+                       BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE);
+                       BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);
 
-                               if (rcd->len) {
-                                       pci_unmap_page(adapter->pdev,
-                                                      rbi->dma_addr, rbi->len,
-                                                      PCI_DMA_FROMDEVICE);
+                       /* If an sop buffer was dropped, skip all
+                        * following non-sop fragments. They will be reused.
+                        */
+                       if (skip_page_frags)
+                               goto rcd_done;
 
-                                       vmxnet3_append_frag(ctx->skb, rcd, rbi);
-                                       rbi->page = NULL;
-                               }
-                       } else {
-                               /*
-                                * The only time a non-SOP buffer is type 0 is
-                                * when it's EOP and error flag is raised, which
-                                * has already been handled.
+                       new_page = alloc_page(GFP_ATOMIC);
+                       if (unlikely(new_page == NULL)) {
+                               /* Replacement page frag could not be allocated.
+                                * Reuse this page. Drop the pkt and free the
+                                * skb which contained this page as a frag. Skip
+                                * processing all the following non-sop frags.
                                 */
-                               BUG_ON(true);
+                               rq->stats.rx_buf_alloc_failure++;
+                               dev_kfree_skb(ctx->skb);
+                               ctx->skb = NULL;
+                               skip_page_frags = true;
+                               goto rcd_done;
+                       }
+
+                       if (rcd->len) {
+                               pci_unmap_page(adapter->pdev,
+                                              rbi->dma_addr, rbi->len,
+                                              PCI_DMA_FROMDEVICE);
+
+                               vmxnet3_append_frag(ctx->skb, rcd, rbi);
                        }
+
+                       /* Immediate refill */
+                       rbi->page = new_page;
+                       rbi->dma_addr = pci_map_page(adapter->pdev, rbi->page,
+                                                    0, PAGE_SIZE,
+                                                    PCI_DMA_FROMDEVICE);
+                       rxd->addr = cpu_to_le64(rbi->dma_addr);
+                       rxd->len = rbi->len;
                }
 
+
                skb = ctx->skb;
                if (rcd->eop) {
                        skb->len += skb->data_len;
@@ -1228,37 +1283,39 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
                                        (union Vmxnet3_GenericDesc *)rcd);
                        skb->protocol = eth_type_trans(skb, adapter->netdev);
 
-                       if (unlikely(adapter->vlan_grp && rcd->ts)) {
-                               vlan_hwaccel_receive_skb(skb,
-                                               adapter->vlan_grp, rcd->tci);
-                       } else {
+                       if (unlikely(rcd->ts))
+                               __vlan_hwaccel_put_tag(skb, rcd->tci);
+
+                       if (adapter->netdev->features & NETIF_F_LRO)
                                netif_receive_skb(skb);
-                       }
+                       else
+                               napi_gro_receive(&rq->napi, skb);
 
                        ctx->skb = NULL;
                }
 
 rcd_done:
-               /* device may skip some rx descs */
-               rq->rx_ring[ring_idx].next2comp = idx;
-               VMXNET3_INC_RING_IDX_ONLY(rq->rx_ring[ring_idx].next2comp,
-                                         rq->rx_ring[ring_idx].size);
-
-               /* refill rx buffers frequently to avoid starving the h/w */
-               num_to_alloc = vmxnet3_cmd_ring_desc_avail(rq->rx_ring +
-                                                          ring_idx);
-               if (unlikely(num_to_alloc > VMXNET3_RX_ALLOC_THRESHOLD(rq,
-                                                       ring_idx, adapter))) {
-                       vmxnet3_rq_alloc_rx_buf(rq, ring_idx, num_to_alloc,
-                                               adapter);
-
-                       /* if needed, update the register */
-                       if (unlikely(rq->shared->updateRxProd)) {
-                               VMXNET3_WRITE_BAR0_REG(adapter,
-                                       rxprod_reg[ring_idx] + rq->qid * 8,
-                                       rq->rx_ring[ring_idx].next2fill);
-                               rq->uncommitted[ring_idx] = 0;
-                       }
+               /* device may have skipped some rx descs */
+               ring->next2comp = idx;
+               num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
+               ring = rq->rx_ring + ring_idx;
+               while (num_to_alloc) {
+                       vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
+                                         &rxCmdDesc);
+                       BUG_ON(!rxd->addr);
+
+                       /* Recv desc is ready to be used by the device */
+                       rxd->gen = ring->gen;
+                       vmxnet3_cmd_ring_adv_next2fill(ring);
+                       num_to_alloc--;
+               }
+
+               /* if needed, update the register */
+               if (unlikely(rq->shared->updateRxProd)) {
+                       VMXNET3_WRITE_BAR0_REG(adapter,
+                               rxprod_reg[ring_idx] + rq->qid * 8,
+                               ring->next2fill);
+                       rq->uncommitted[ring_idx] = 0;
                }
 
                vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
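
The skip_page_frags logic threaded through this function is easier to see in isolation: when the replacement buffer for a start-of-packet descriptor cannot be allocated, the packet is dropped and every following non-SOP fragment is recycled in place until the next SOP arrives. A compilable condensation (all names illustrative, not driver code):

#include <stdbool.h>
#include <stddef.h>

struct comp { bool sop; };

static bool alloc_replacement(void)
{
        return true;    /* stub: pretend the allocation succeeds */
}

static void process(const struct comp *c, size_t n)
{
        bool skip_page_frags = false;

        for (size_t i = 0; i < n; i++) {
                if (c[i].sop) {
                        /* a fresh SOP always re-arms processing */
                        skip_page_frags = !alloc_replacement();
                        continue;
                }
                if (skip_page_frags)
                        continue;       /* buffer recycled, pkt dropped */
                /* otherwise append this fragment to the in-flight skb */
        }
}
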
@@ -1853,84 +1910,18 @@ vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
        }
 }
 
-static void
-vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
-{
-       struct vmxnet3_adapter *adapter = netdev_priv(netdev);
-       struct Vmxnet3_DriverShared *shared = adapter->shared;
-       u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
-
-       if (grp) {
-               /* add vlan rx stripping. */
-               if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) {
-                       int i;
-                       struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
-                       adapter->vlan_grp = grp;
-
-                       /* update FEATURES to device */
-                       devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
-                       VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
-                                              VMXNET3_CMD_UPDATE_FEATURE);
-                       /*
-                        *  Clear entire vfTable; then enable untagged pkts.
-                        *  Note: setting one entry in vfTable to non-zero turns
-                        *  on VLAN rx filtering.
-                        */
-                       for (i = 0; i < VMXNET3_VFT_SIZE; i++)
-                               vfTable[i] = 0;
-
-                       VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
-                       VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
-                                              VMXNET3_CMD_UPDATE_VLAN_FILTERS);
-               } else {
-                       printk(KERN_ERR "%s: vlan_rx_register when device has "
-                              "no NETIF_F_HW_VLAN_RX\n", netdev->name);
-               }
-       } else {
-               /* remove vlan rx stripping. */
-               struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
-               adapter->vlan_grp = NULL;
-
-               if (devRead->misc.uptFeatures & UPT1_F_RXVLAN) {
-                       int i;
-
-                       for (i = 0; i < VMXNET3_VFT_SIZE; i++) {
-                               /* clear entire vfTable; this also disables
-                                * VLAN rx filtering
-                                */
-                               vfTable[i] = 0;
-                       }
-                       VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
-                                              VMXNET3_CMD_UPDATE_VLAN_FILTERS);
-
-                       /* update FEATURES to device */
-                       devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
-                       VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
-                                              VMXNET3_CMD_UPDATE_FEATURE);
-               }
-       }
-}
-
 
 static void
 vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
 {
-       if (adapter->vlan_grp) {
-               u16 vid;
-               u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
-               bool activeVlan = false;
+       u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
+       u16 vid;
 
-               for (vid = 0; vid < VLAN_N_VID; vid++) {
-                       if (vlan_group_get_device(adapter->vlan_grp, vid)) {
-                               VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
-                               activeVlan = true;
-                       }
-               }
-               if (activeVlan) {
-                       /* continue to allow untagged pkts */
-                       VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
-               }
-       }
+       /* allow untagged pkts */
+       VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
+
+       for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
+               VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
 }
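
With vlan_grp gone, the driver tracks active VIDs in a per-adapter bitmap (active_vlans) that add_vid/kill_vid update and restore_vlan replays into the vfTable. A minimal userspace model of the three operations, assuming a BITS_TO_LONGS-style layout (names illustrative):

#include <limits.h>

#define VLAN_N_VID 4096
#define LBITS (sizeof(unsigned long) * CHAR_BIT)

static unsigned long active_vlans[VLAN_N_VID / LBITS];

static void vid_add(unsigned v)
{
        active_vlans[v / LBITS] |= 1UL << (v % LBITS);
}

static void vid_kill(unsigned v)
{
        active_vlans[v / LBITS] &= ~(1UL << (v % LBITS));
}

static void restore_vlan(void (*set_entry)(unsigned vid))
{
        set_entry(0);                           /* allow untagged pkts */
        for (unsigned v = 1; v < VLAN_N_VID; v++)
                if (active_vlans[v / LBITS] & (1UL << (v % LBITS)))
                        set_entry(v);
}
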
 
 
@@ -1939,10 +1930,15 @@ vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);
        u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
+       unsigned long flags;
 
        VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
+       spin_lock_irqsave(&adapter->cmd_lock, flags);
        VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                               VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+       spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+
+       set_bit(vid, adapter->active_vlans);
 }
 
 
@@ -1951,10 +1947,15 @@ vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);
        u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
+       unsigned long flags;
 
        VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
+       spin_lock_irqsave(&adapter->cmd_lock, flags);
        VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                               VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+       spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+
+       clear_bit(vid, adapter->active_vlans);
 }
 
 
@@ -1985,13 +1986,20 @@ static void
 vmxnet3_set_mc(struct net_device *netdev)
 {
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+       unsigned long flags;
        struct Vmxnet3_RxFilterConf *rxConf =
                                        &adapter->shared->devRead.rxFilterConf;
        u8 *new_table = NULL;
        u32 new_mode = VMXNET3_RXM_UCAST;
 
-       if (netdev->flags & IFF_PROMISC)
+       if (netdev->flags & IFF_PROMISC) {
+               u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
+               memset(vfTable, 0, VMXNET3_VFT_SIZE * sizeof(*vfTable));
+
                new_mode |= VMXNET3_RXM_PROMISC;
+       } else {
+               vmxnet3_restore_vlan(adapter);
+       }
 
        if (netdev->flags & IFF_BROADCAST)
                new_mode |= VMXNET3_RXM_BCAST;
@@ -2020,14 +2028,18 @@ vmxnet3_set_mc(struct net_device *netdev)
                rxConf->mfTablePA = 0;
        }
 
+       spin_lock_irqsave(&adapter->cmd_lock, flags);
        if (new_mode != rxConf->rxMode) {
                rxConf->rxMode = cpu_to_le32(new_mode);
                VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                                       VMXNET3_CMD_UPDATE_RX_MODE);
+               VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+                                      VMXNET3_CMD_UPDATE_VLAN_FILTERS);
        }
 
        VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                               VMXNET3_CMD_UPDATE_MAC_FILTERS);
+       spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 
        kfree(new_table);
 }
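
The promiscuous-mode branch added above follows from the rule noted earlier in this file: setting any one vfTable entry to non-zero turns VLAN rx filtering on, so truly promiscuous reception requires zeroing the whole table (and restoring it on the way out). A small model of that invariant (sizes per VMXNET3_VFT_SIZE = 4096/32; names illustrative):

#include <stdbool.h>
#include <string.h>

#define VFT_SIZE (4096 / 32)            /* one bit per possible VID */
static unsigned int vf_table[VFT_SIZE];

/* The device enables VLAN rx filtering iff any entry is non-zero. */
static bool vlan_filtering_active(void)
{
        for (int i = 0; i < VFT_SIZE; i++)
                if (vf_table[i])
                        return true;
        return false;
}

static void enter_promiscuous(void)
{
        /* must clear every entry, or tagged frames keep being filtered */
        memset(vf_table, 0, sizeof(vf_table));
}
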
@@ -2073,17 +2085,15 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
        devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));
 
        /* set up feature flags */
-       if (adapter->rxcsum)
+       if (adapter->netdev->features & NETIF_F_RXCSUM)
                devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
 
-       if (adapter->lro) {
+       if (adapter->netdev->features & NETIF_F_LRO) {
                devRead->misc.uptFeatures |= UPT1_F_LRO;
                devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
        }
-       if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX) &&
-           adapter->vlan_grp) {
+       if (adapter->netdev->features & NETIF_F_HW_VLAN_RX)
                devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
-       }
 
        devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
        devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
@@ -2168,6 +2178,8 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
        /* rx filter settings */
        devRead->rxFilterConf.rxMode = 0;
        vmxnet3_restore_vlan(adapter);
+       vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);
+
        /* the rest are already zeroed */
 }
 
@@ -2177,6 +2189,7 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
 {
        int err, i;
        u32 ret;
+       unsigned long flags;
 
        dev_dbg(&adapter->netdev->dev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
                " ring sizes %u %u %u\n", adapter->netdev->name,
@@ -2206,9 +2219,11 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
                               adapter->shared_pa));
        VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
                               adapter->shared_pa));
+       spin_lock_irqsave(&adapter->cmd_lock, flags);
        VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                               VMXNET3_CMD_ACTIVATE_DEV);
        ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+       spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 
        if (ret != 0) {
                printk(KERN_ERR "Failed to activate dev %s: error %u\n",
@@ -2255,7 +2270,10 @@ rq_err:
 void
 vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
 {
+       unsigned long flags;
+       spin_lock_irqsave(&adapter->cmd_lock, flags);
        VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
+       spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 }
 
 
@@ -2263,12 +2281,15 @@ int
 vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
 {
        int i;
+       unsigned long flags;
        if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
                return 0;
 
 
+       spin_lock_irqsave(&adapter->cmd_lock, flags);
        VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                               VMXNET3_CMD_QUIESCE_DEV);
+       spin_unlock_irqrestore(&adapter->cmd_lock, flags);
        vmxnet3_disable_all_intrs(adapter);
 
        for (i = 0; i < adapter->num_rx_queues; i++)
@@ -2426,7 +2447,7 @@ vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
        sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
        ring0_size = adapter->rx_queue[0].rx_ring[0].size;
        ring0_size = (ring0_size + sz - 1) / sz * sz;
-       ring0_size = min_t(u32, rq->rx_ring[0].size, VMXNET3_RX_RING_MAX_SIZE /
+       ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
                           sz * sz);
        ring1_size = adapter->rx_queue[0].rx_ring[1].size;
        comp_size = ring0_size + ring1_size;
@@ -2576,9 +2597,6 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
        if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU)
                return -EINVAL;
 
-       if (new_mtu > 1500 && !adapter->jumbo_frame)
-               return -EINVAL;
-
        netdev->mtu = new_mtu;
 
        /*
@@ -2624,28 +2642,19 @@ vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
 {
        struct net_device *netdev = adapter->netdev;
 
-       netdev->features = NETIF_F_SG |
-               NETIF_F_HW_CSUM |
-               NETIF_F_HW_VLAN_TX |
-               NETIF_F_HW_VLAN_RX |
-               NETIF_F_HW_VLAN_FILTER |
-               NETIF_F_TSO |
-               NETIF_F_TSO6 |
+       netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
+               NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_TX |
+               NETIF_F_HW_VLAN_RX | NETIF_F_TSO | NETIF_F_TSO6 |
                NETIF_F_LRO;
-
-       printk(KERN_INFO "features: sg csum vlan jf tso tsoIPv6 lro");
-
-       adapter->rxcsum = true;
-       adapter->jumbo_frame = true;
-       adapter->lro = true;
-
-       if (dma64) {
+       if (dma64)
                netdev->features |= NETIF_F_HIGHDMA;
-               printk(" highDMA");
-       }
+       netdev->vlan_features = netdev->hw_features &
+                               ~(NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
+       netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_FILTER;
 
-       netdev->vlan_features = netdev->features;
-       printk("\n");
+       netdev_info(adapter->netdev,
+               "features: sg csum vlan jf tso tsoIPv6 lro%s\n",
+               dma64 ? " highDMA" : "");
 }
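
The hw_features conversion above moves feature toggling out of driver-private bools (adapter->rxcsum, adapter->lro): ethtool now flips bits and the core calls ndo_set_features, wired up further down in this diff as vmxnet3_set_features (its body lives in vmxnet3_ethtool.c and is not shown here). A hedged sketch of the general handler shape for a driver of this era, using the u32 signature that predates netdev_features_t; the function name is hypothetical:

static int example_set_features(struct net_device *netdev, u32 features)
{
        u32 changed = features ^ netdev->features;

        if (changed & (NETIF_F_RXCSUM | NETIF_F_LRO)) {
                /* push the new feature set to the device here, e.g.
                 * update uptFeatures and issue UPDATE_FEATURE under
                 * cmd_lock */
        }
        return 0;
}
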
 
 
@@ -2668,7 +2677,7 @@ vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
  * Enable MSIx vectors.
  * Returns :
  *     0 on successful enabling of required vectors,
- *     VMXNET3_LINUX_MIN_MSIX_VECT when only minumum number of vectors required
+ *     VMXNET3_LINUX_MIN_MSIX_VECT when only minimum number of vectors required
  *      could be enabled.
  *     number of vectors which can be enabled otherwise (this number is smaller
  *      than VMXNET3_LINUX_MIN_MSIX_VECT)
@@ -2695,7 +2704,7 @@ vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter,
                        break;
                } else {
                        /* If fails to enable required number of MSI-x vectors
-                        * try enabling 3 of them. One each for rx, tx and event
+                        * try enabling minimum number of vectors required.
                         */
                        vectors = vector_threshold;
                        printk(KERN_ERR "Failed to enable %d MSI-X for %s, try"
@@ -2716,11 +2725,14 @@ static void
 vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
 {
        u32 cfg;
+       unsigned long flags;
 
        /* intr settings */
+       spin_lock_irqsave(&adapter->cmd_lock, flags);
        VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                               VMXNET3_CMD_GET_CONF_INTR);
        cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+       spin_unlock_irqrestore(&adapter->cmd_lock, flags);
        adapter->intr.type = cfg & 0x3;
        adapter->intr.mask_mode = (cfg >> 2) & 0x3;
 
@@ -2755,7 +2767,7 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
                 */
                if (err == VMXNET3_LINUX_MIN_MSIX_VECT) {
                        if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
-                           || adapter->num_rx_queues != 2) {
+                           || adapter->num_rx_queues != 1) {
                                adapter->share_intr = VMXNET3_INTR_TXSHARE;
                                printk(KERN_ERR "Number of rx queues : 1\n");
                                adapter->num_rx_queues = 1;
@@ -2855,10 +2867,10 @@ vmxnet3_probe_device(struct pci_dev *pdev,
                .ndo_start_xmit = vmxnet3_xmit_frame,
                .ndo_set_mac_address = vmxnet3_set_mac_addr,
                .ndo_change_mtu = vmxnet3_change_mtu,
-               .ndo_get_stats = vmxnet3_get_stats,
+               .ndo_set_features = vmxnet3_set_features,
+               .ndo_get_stats64 = vmxnet3_get_stats64,
                .ndo_tx_timeout = vmxnet3_tx_timeout,
                .ndo_set_multicast_list = vmxnet3_set_mc,
-               .ndo_vlan_rx_register = vmxnet3_vlan_rx_register,
                .ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
                .ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -2875,6 +2887,9 @@ vmxnet3_probe_device(struct pci_dev *pdev,
        int num_tx_queues;
        int num_rx_queues;
 
+       if (!pci_msi_enabled())
+               enable_mq = 0;
+
 #ifdef VMXNET3_RSS
        if (enable_mq)
                num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
@@ -2905,6 +2920,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
        adapter->netdev = netdev;
        adapter->pdev = pdev;
 
+       spin_lock_init(&adapter->cmd_lock);
        adapter->shared = pci_alloc_consistent(adapter->pdev,
                          sizeof(struct Vmxnet3_DriverShared),
                          &adapter->shared_pa);
@@ -3108,11 +3124,15 @@ vmxnet3_suspend(struct device *device)
        u8 *arpreq;
        struct in_device *in_dev;
        struct in_ifaddr *ifa;
+       unsigned long flags;
        int i = 0;
 
        if (!netif_running(netdev))
                return 0;
 
+       for (i = 0; i < adapter->num_rx_queues; i++)
+               napi_disable(&adapter->rx_queue[i].napi);
+
        vmxnet3_disable_all_intrs(adapter);
        vmxnet3_free_irqs(adapter);
        vmxnet3_free_intr_resources(adapter);
@@ -3188,8 +3208,10 @@ skip_arp:
        adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
                                                                 pmConf));
 
+       spin_lock_irqsave(&adapter->cmd_lock, flags);
        VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                               VMXNET3_CMD_UPDATE_PMCFG);
+       spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 
        pci_save_state(pdev);
        pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
@@ -3204,7 +3226,8 @@ skip_arp:
 static int
 vmxnet3_resume(struct device *device)
 {
-       int err;
+       int err, i = 0;
+       unsigned long flags;
        struct pci_dev *pdev = to_pci_dev(device);
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);
@@ -3232,10 +3255,14 @@ vmxnet3_resume(struct device *device)
 
        pci_enable_wake(pdev, PCI_D0, 0);
 
+       spin_lock_irqsave(&adapter->cmd_lock, flags);
        VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                               VMXNET3_CMD_UPDATE_PMCFG);
+       spin_unlock_irqrestore(&adapter->cmd_lock, flags);
        vmxnet3_alloc_intr_resources(adapter);
        vmxnet3_request_irqs(adapter);
+       for (i = 0; i < adapter->num_rx_queues; i++)
+               napi_enable(&adapter->rx_queue[i].napi);
        vmxnet3_enable_all_intrs(adapter);
 
        return 0;