Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index e212f2c5448b13cb40ede8bec2339b28ab64e264..5ed8f9f9419f163c26703412bb03512537a7e966 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -8,9 +8,10 @@
  *
  * Author: Andy Fleming
  * Maintainer: Kumar Gala
+ * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
  *
- * Copyright (c) 2002-2006 Freescale Semiconductor, Inc.
- * Copyright (c) 2007 MontaVista Software, Inc.
+ * Copyright 2002-2009 Freescale Semiconductor, Inc.
+ * Copyright 2007 MontaVista Software, Inc.
  *
  * This program is free software; you can redistribute  it and/or modify it
  * under  the terms of  the GNU General  Public License as published by the
 #include <linux/tcp.h>
 #include <linux/udp.h>
 #include <linux/in.h>
+#include <linux/net_tstamp.h>
 
 #include <asm/io.h>
+#include <asm/reg.h>
 #include <asm/irq.h>
 #include <asm/uaccess.h>
 #include <linux/module.h>
@@ -92,6 +95,7 @@
 #include <linux/phy.h>
 #include <linux/phy_fixed.h>
 #include <linux/of.h>
+#include <linux/of_net.h>
 
 #include "gianfar.h"
 #include "fsl_pq_mdio.h"
@@ -109,7 +113,7 @@ static void gfar_reset_task(struct work_struct *work);
 static void gfar_timeout(struct net_device *dev);
 static int gfar_close(struct net_device *dev);
 struct sk_buff *gfar_new_skb(struct net_device *dev);
-static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
+static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
                struct sk_buff *skb);
 static int gfar_set_mac_address(struct net_device *dev);
 static int gfar_change_mtu(struct net_device *dev, int new_mtu);
@@ -119,9 +123,9 @@ static irqreturn_t gfar_interrupt(int irq, void *dev_id);
 static void adjust_link(struct net_device *dev);
 static void init_registers(struct net_device *dev);
 static int init_phy(struct net_device *dev);
-static int gfar_probe(struct of_device *ofdev,
+static int gfar_probe(struct platform_device *ofdev,
                const struct of_device_id *match);
-static int gfar_remove(struct of_device *ofdev);
+static int gfar_remove(struct platform_device *ofdev);
 static void free_skb_resources(struct gfar_private *priv);
 static void gfar_set_multi(struct net_device *dev);
 static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
@@ -130,8 +134,8 @@ static int gfar_poll(struct napi_struct *napi, int budget);
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void gfar_netpoll(struct net_device *dev);
 #endif
-int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
-static int gfar_clean_tx_ring(struct net_device *dev);
+int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
+static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
 static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
                              int amount_pull);
 static void gfar_vlan_rx_register(struct net_device *netdev,
@@ -140,13 +144,321 @@ void gfar_halt(struct net_device *dev);
 static void gfar_halt_nodisable(struct net_device *dev);
 void gfar_start(struct net_device *dev);
 static void gfar_clear_exact_match(struct net_device *dev);
-static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
+static void gfar_set_mac_for_addr(struct net_device *dev, int num,
+                                 const u8 *addr);
 static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 
 MODULE_AUTHOR("Freescale Semiconductor, Inc");
 MODULE_DESCRIPTION("Gianfar Ethernet Driver");
 MODULE_LICENSE("GPL");
 
+static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
+                           dma_addr_t buf)
+{
+       u32 lstatus;
+
+       bdp->bufPtr = buf;
+
+       lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
+       if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
+               lstatus |= BD_LFLAG(RXBD_WRAP);
+
+       /* make sure bufPtr is written out before the descriptor
+        * is handed back to the controller */
+       eieio();
+
+       bdp->lstatus = lstatus;
+}
+
+static int gfar_init_bds(struct net_device *ndev)
+{
+       struct gfar_private *priv = netdev_priv(ndev);
+       struct gfar_priv_tx_q *tx_queue = NULL;
+       struct gfar_priv_rx_q *rx_queue = NULL;
+       struct txbd8 *txbdp;
+       struct rxbd8 *rxbdp;
+       int i, j;
+
+       for (i = 0; i < priv->num_tx_queues; i++) {
+               tx_queue = priv->tx_queue[i];
+               /* Initialize some variables in our dev structure */
+               tx_queue->num_txbdfree = tx_queue->tx_ring_size;
+               tx_queue->dirty_tx = tx_queue->tx_bd_base;
+               tx_queue->cur_tx = tx_queue->tx_bd_base;
+               tx_queue->skb_curtx = 0;
+               tx_queue->skb_dirtytx = 0;
+
+               /* Initialize Transmit Descriptor Ring */
+               txbdp = tx_queue->tx_bd_base;
+               for (j = 0; j < tx_queue->tx_ring_size; j++) {
+                       txbdp->lstatus = 0;
+                       txbdp->bufPtr = 0;
+                       txbdp++;
+               }
+
+               /* Set the last descriptor in the ring to indicate wrap */
+               txbdp--;
+               txbdp->status |= TXBD_WRAP;
+       }
+
+       for (i = 0; i < priv->num_rx_queues; i++) {
+               rx_queue = priv->rx_queue[i];
+               rx_queue->cur_rx = rx_queue->rx_bd_base;
+               rx_queue->skb_currx = 0;
+               rxbdp = rx_queue->rx_bd_base;
+
+               for (j = 0; j < rx_queue->rx_ring_size; j++) {
+                       struct sk_buff *skb = rx_queue->rx_skbuff[j];
+
+                       if (skb) {
+                               gfar_init_rxbdp(rx_queue, rxbdp,
+                                               rxbdp->bufPtr);
+                       } else {
+                               skb = gfar_new_skb(ndev);
+                               if (!skb) {
+                                       pr_err("%s: Can't allocate RX buffers\n",
+                                                       ndev->name);
+                                       goto err_rxalloc_fail;
+                               }
+                               rx_queue->rx_skbuff[j] = skb;
+
+                               gfar_new_rxbdp(rx_queue, rxbdp, skb);
+                       }
+
+                       rxbdp++;
+               }
+
+       }
+
+       return 0;
+
+err_rxalloc_fail:
+       free_skb_resources(priv);
+       return -ENOMEM;
+}
+
+static int gfar_alloc_skb_resources(struct net_device *ndev)
+{
+       void *vaddr;
+       dma_addr_t addr;
+       int i, j, k;
+       struct gfar_private *priv = netdev_priv(ndev);
+       struct device *dev = &priv->ofdev->dev;
+       struct gfar_priv_tx_q *tx_queue = NULL;
+       struct gfar_priv_rx_q *rx_queue = NULL;
+
+       priv->total_tx_ring_size = 0;
+       for (i = 0; i < priv->num_tx_queues; i++)
+               priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;
+
+       priv->total_rx_ring_size = 0;
+       for (i = 0; i < priv->num_rx_queues; i++)
+               priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
+
+       /* Allocate memory for the buffer descriptors */
+       vaddr = dma_alloc_coherent(dev,
+                       sizeof(struct txbd8) * priv->total_tx_ring_size +
+                       sizeof(struct rxbd8) * priv->total_rx_ring_size,
+                       &addr, GFP_KERNEL);
+       if (!vaddr) {
+               if (netif_msg_ifup(priv))
+                       pr_err("%s: Could not allocate buffer descriptors!\n",
+                              ndev->name);
+               return -ENOMEM;
+       }
+
+       for (i = 0; i < priv->num_tx_queues; i++) {
+               tx_queue = priv->tx_queue[i];
+               tx_queue->tx_bd_base = (struct txbd8 *) vaddr;
+               tx_queue->tx_bd_dma_base = addr;
+               tx_queue->dev = ndev;
+               /* enet DMA only understands physical addresses */
+               addr  += sizeof(struct txbd8) * tx_queue->tx_ring_size;
+               vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
+       }
+
+       /* Start the rx descriptor ring where the tx ring leaves off */
+       for (i = 0; i < priv->num_rx_queues; i++) {
+               rx_queue = priv->rx_queue[i];
+               rx_queue->rx_bd_base = (struct rxbd8 *) vaddr;
+               rx_queue->rx_bd_dma_base = addr;
+               rx_queue->dev = ndev;
+               addr  += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
+               vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
+       }
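To make the carving above concrete, a sketch of the resulting layout of the single dma_alloc_coherent() block (illustrative only; queue counts come from the device tree):

/*
 *  base -> | tx0 BDs | tx1 BDs | ... | rx0 BDs | rx1 BDs | ... |
 *
 * tx_bd_base/rx_bd_base and the *_bd_dma_base values are simply
 * cursors advanced through this one contiguous allocation.
 */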
+
+       /* Setup the skbuff rings */
+       for (i = 0; i < priv->num_tx_queues; i++) {
+               tx_queue = priv->tx_queue[i];
+               tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
+                                 tx_queue->tx_ring_size, GFP_KERNEL);
+               if (!tx_queue->tx_skbuff) {
+                       if (netif_msg_ifup(priv))
+                               pr_err("%s: Could not allocate tx_skbuff\n",
+                                               ndev->name);
+                       goto cleanup;
+               }
+
+               for (k = 0; k < tx_queue->tx_ring_size; k++)
+                       tx_queue->tx_skbuff[k] = NULL;
+       }
+
+       for (i = 0; i < priv->num_rx_queues; i++) {
+               rx_queue = priv->rx_queue[i];
+               rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
+                                 rx_queue->rx_ring_size, GFP_KERNEL);
+
+               if (!rx_queue->rx_skbuff) {
+                       if (netif_msg_ifup(priv))
+                               pr_err("%s: Could not allocate rx_skbuff\n",
+                                      ndev->name);
+                       goto cleanup;
+               }
+
+               for (j = 0; j < rx_queue->rx_ring_size; j++)
+                       rx_queue->rx_skbuff[j] = NULL;
+       }
+
+       if (gfar_init_bds(ndev))
+               goto cleanup;
+
+       return 0;
+
+cleanup:
+       free_skb_resources(priv);
+       return -ENOMEM;
+}
+
+static void gfar_init_tx_rx_base(struct gfar_private *priv)
+{
+       struct gfar __iomem *regs = priv->gfargrp[0].regs;
+       u32 __iomem *baddr;
+       int i;
+
+       baddr = &regs->tbase0;
+       /* TBASEn/RBASEn registers are laid out on 8-byte strides,
+        * hence the += 2 on a u32 pointer */
+       for (i = 0; i < priv->num_tx_queues; i++) {
+               gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
+               baddr   += 2;
+       }
+
+       baddr = &regs->rbase0;
+       for (i = 0; i < priv->num_rx_queues; i++) {
+               gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
+               baddr   += 2;
+       }
+}
+
+static void gfar_init_mac(struct net_device *ndev)
+{
+       struct gfar_private *priv = netdev_priv(ndev);
+       struct gfar __iomem *regs = priv->gfargrp[0].regs;
+       u32 rctrl = 0;
+       u32 tctrl = 0;
+       u32 attrs = 0;
+
+       /* write the tx/rx base registers */
+       gfar_init_tx_rx_base(priv);
+
+       /* Configure the coalescing support */
+       gfar_configure_coalescing(priv, 0xFF, 0xFF);
+
+       if (priv->rx_filer_enable) {
+               rctrl |= RCTRL_FILREN;
+               /* Program the RIR0 reg with the required distribution */
+               gfar_write(&regs->rir0, DEFAULT_RIR0);
+       }
+
+       if (priv->rx_csum_enable)
+               rctrl |= RCTRL_CHECKSUMMING;
+
+       if (priv->extended_hash) {
+               rctrl |= RCTRL_EXTHASH;
+
+               gfar_clear_exact_match(ndev);
+               rctrl |= RCTRL_EMEN;
+       }
+
+       if (priv->padding) {
+               rctrl &= ~RCTRL_PAL_MASK;
+               rctrl |= RCTRL_PADDING(priv->padding);
+       }
+
+       /* Insert receive time stamps into padding alignment bytes */
+       if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
+               rctrl &= ~RCTRL_PAL_MASK;
+               rctrl |= RCTRL_PADDING(8);
+               priv->padding = 8;
+       }
+
+       /* Enable HW time stamping if requested from user space */
+       if (priv->hwts_rx_en)
+               rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
+
+       /* keep vlan related bits if it's enabled */
+       if (priv->vlgrp) {
+               rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
+               tctrl |= TCTRL_VLINS;
+       }
+
+       /* Init rctrl based on our settings */
+       gfar_write(&regs->rctrl, rctrl);
+
+       if (ndev->features & NETIF_F_IP_CSUM)
+               tctrl |= TCTRL_INIT_CSUM;
+
+       tctrl |= TCTRL_TXSCHED_PRIO;
+
+       gfar_write(&regs->tctrl, tctrl);
+
+       /* Set the extraction length and index */
+       attrs = ATTRELI_EL(priv->rx_stash_size) |
+               ATTRELI_EI(priv->rx_stash_index);
+
+       gfar_write(&regs->attreli, attrs);
+
+       /* Start with defaults, and add stashing or locking
+        * depending on the appropriate variables */
+       attrs = ATTR_INIT_SETTINGS;
+
+       if (priv->bd_stash_en)
+               attrs |= ATTR_BDSTASH;
+
+       if (priv->rx_stash_size != 0)
+               attrs |= ATTR_BUFSTASH;
+
+       gfar_write(&regs->attr, attrs);
+
+       gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
+       gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
+       gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
+}
+
+static struct net_device_stats *gfar_get_stats(struct net_device *dev)
+{
+       struct gfar_private *priv = netdev_priv(dev);
+       unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
+       unsigned long tx_packets = 0, tx_bytes = 0;
+       int i = 0;
+
+       for (i = 0; i < priv->num_rx_queues; i++) {
+               rx_packets += priv->rx_queue[i]->stats.rx_packets;
+               rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
+               rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
+       }
+
+       dev->stats.rx_packets = rx_packets;
+       dev->stats.rx_bytes = rx_bytes;
+       dev->stats.rx_dropped = rx_dropped;
+
+       for (i = 0; i < priv->num_tx_queues; i++) {
+               tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
+               tx_packets += priv->tx_queue[i]->stats.tx_packets;
+       }
+
+       dev->stats.tx_bytes = tx_bytes;
+       dev->stats.tx_packets = tx_packets;
+
+       return &dev->stats;
+}
+
 static const struct net_device_ops gfar_netdev_ops = {
        .ndo_open = gfar_enet_open,
        .ndo_start_xmit = gfar_start_xmit,
@@ -155,6 +467,7 @@ static const struct net_device_ops gfar_netdev_ops = {
        .ndo_set_multicast_list = gfar_set_multi,
        .ndo_tx_timeout = gfar_timeout,
        .ndo_do_ioctl = gfar_ioctl,
+       .ndo_get_stats = gfar_get_stats,
        .ndo_vlan_rx_register = gfar_vlan_rx_register,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_validate_addr = eth_validate_addr,
@@ -163,56 +476,243 @@ static const struct net_device_ops gfar_netdev_ops = {
 #endif
 };
 
+unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
+unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
+
+void lock_rx_qs(struct gfar_private *priv)
+{
+       int i;
+
+       for (i = 0; i < priv->num_rx_queues; i++)
+               spin_lock(&priv->rx_queue[i]->rxlock);
+}
+
+void lock_tx_qs(struct gfar_private *priv)
+{
+       int i;
+
+       for (i = 0; i < priv->num_tx_queues; i++)
+               spin_lock(&priv->tx_queue[i]->txlock);
+}
+
+void unlock_rx_qs(struct gfar_private *priv)
+{
+       int i;
+
+       for (i = 0; i < priv->num_rx_queues; i++)
+               spin_unlock(&priv->rx_queue[i]->rxlock);
+}
+
+void unlock_tx_qs(struct gfar_private *priv)
+{
+       int i;
+
+       for (i = 0; i < priv->num_tx_queues; i++)
+               spin_unlock(&priv->tx_queue[i]->txlock);
+}
+
 /* Returns 1 if incoming frames use an FCB */
 static inline int gfar_uses_fcb(struct gfar_private *priv)
 {
-       return priv->vlgrp || priv->rx_csum_enable;
+       return priv->vlgrp || priv->rx_csum_enable ||
+               (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
+}
+
+static void free_tx_pointers(struct gfar_private *priv)
+{
+       int i = 0;
+
+       for (i = 0; i < priv->num_tx_queues; i++)
+               kfree(priv->tx_queue[i]);
 }
 
-static int gfar_of_init(struct net_device *dev)
+static void free_rx_pointers(struct gfar_private *priv)
+{
+       int i = 0;
+
+       for (i = 0; i < priv->num_rx_queues; i++)
+               kfree(priv->rx_queue[i]);
+}
+
+static void unmap_group_regs(struct gfar_private *priv)
+{
+       int i = 0;
+
+       for (i = 0; i < MAXGROUPS; i++)
+               if (priv->gfargrp[i].regs)
+                       iounmap(priv->gfargrp[i].regs);
+}
+
+static void disable_napi(struct gfar_private *priv)
+{
+       int i = 0;
+
+       for (i = 0; i < priv->num_grps; i++)
+               napi_disable(&priv->gfargrp[i].napi);
+}
+
+static void enable_napi(struct gfar_private *priv)
+{
+       int i = 0;
+
+       for (i = 0; i < priv->num_grps; i++)
+               napi_enable(&priv->gfargrp[i].napi);
+}
+
+static int gfar_parse_group(struct device_node *np,
+               struct gfar_private *priv, const char *model)
+{
+       u32 *queue_mask;
+
+       priv->gfargrp[priv->num_grps].regs = of_iomap(np, 0);
+       if (!priv->gfargrp[priv->num_grps].regs)
+               return -ENOMEM;
+
+       priv->gfargrp[priv->num_grps].interruptTransmit =
+                       irq_of_parse_and_map(np, 0);
+
+       /* If we aren't the FEC we have multiple interrupts */
+       if (model && strcasecmp(model, "FEC")) {
+               priv->gfargrp[priv->num_grps].interruptReceive =
+                       irq_of_parse_and_map(np, 1);
+               priv->gfargrp[priv->num_grps].interruptError =
+                       irq_of_parse_and_map(np, 2);
+               if (priv->gfargrp[priv->num_grps].interruptTransmit == NO_IRQ ||
+                   priv->gfargrp[priv->num_grps].interruptReceive  == NO_IRQ ||
+                   priv->gfargrp[priv->num_grps].interruptError    == NO_IRQ)
+                       return -EINVAL;
+       }
+
+       priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
+       priv->gfargrp[priv->num_grps].priv = priv;
+       spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
+       if (priv->mode == MQ_MG_MODE) {
+               queue_mask = (u32 *)of_get_property(np,
+                                       "fsl,rx-bit-map", NULL);
+               priv->gfargrp[priv->num_grps].rx_bit_map =
+                       queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
+               queue_mask = (u32 *)of_get_property(np,
+                                       "fsl,tx-bit-map", NULL);
+               priv->gfargrp[priv->num_grps].tx_bit_map =
+                       queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
+       } else {
+               priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
+               priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
+       }
+       priv->num_grps++;
+
+       return 0;
+}
+
+static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
 {
        const char *model;
        const char *ctype;
        const void *mac_addr;
-       u64 addr, size;
-       int err = 0;
-       struct gfar_private *priv = netdev_priv(dev);
-       struct device_node *np = priv->node;
+       int err = 0, i;
+       struct net_device *dev = NULL;
+       struct gfar_private *priv = NULL;
+       struct device_node *np = ofdev->dev.of_node;
+       struct device_node *child = NULL;
        const u32 *stash;
        const u32 *stash_len;
        const u32 *stash_idx;
+       unsigned int num_tx_qs, num_rx_qs;
+       u32 *tx_queues, *rx_queues;
 
        if (!np || !of_device_is_available(np))
                return -ENODEV;
 
-       /* get a pointer to the register memory */
-       addr = of_translate_address(np, of_get_address(np, 0, &size, NULL));
-       priv->regs = ioremap(addr, size);
+       /* parse the num of tx and rx queues */
+       tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
+       num_tx_qs = tx_queues ? *tx_queues : 1;
+
+       if (num_tx_qs > MAX_TX_QS) {
+               printk(KERN_ERR "num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
+                               num_tx_qs, MAX_TX_QS);
+               printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
+               return -EINVAL;
+       }
+
+       rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
+       num_rx_qs = rx_queues ? *rx_queues : 1;
+
+       if (num_rx_qs > MAX_RX_QS) {
+               printk(KERN_ERR "num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
+                               num_rx_qs, MAX_RX_QS);
+               printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
+               return -EINVAL;
+       }
 
-       if (priv->regs == NULL)
+       *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
+       dev = *pdev;
+       if (!dev)
                return -ENOMEM;
 
-       priv->interruptTransmit = irq_of_parse_and_map(np, 0);
+       priv = netdev_priv(dev);
+       priv->node = ofdev->dev.of_node;
+       priv->ndev = dev;
+
+       priv->num_tx_queues = num_tx_qs;
+       netif_set_real_num_rx_queues(dev, num_rx_qs);
+       priv->num_rx_queues = num_rx_qs;
+       priv->num_grps = 0x0;
 
        model = of_get_property(np, "model", NULL);
 
-       /* If we aren't the FEC we have multiple interrupts */
-       if (model && strcasecmp(model, "FEC")) {
-               priv->interruptReceive = irq_of_parse_and_map(np, 1);
+       for (i = 0; i < MAXGROUPS; i++)
+               priv->gfargrp[i].regs = NULL;
 
-               priv->interruptError = irq_of_parse_and_map(np, 2);
+       /* Parse and initialize group specific information */
+       if (of_device_is_compatible(np, "fsl,etsec2")) {
+               priv->mode = MQ_MG_MODE;
+               for_each_child_of_node(np, child) {
+                       err = gfar_parse_group(child, priv, model);
+                       if (err)
+                               goto err_grp_init;
+               }
+       } else {
+               priv->mode = SQ_SG_MODE;
+               err = gfar_parse_group(np, priv, model);
+               if (err)
+                       goto err_grp_init;
+       }
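For reference, a sketch of the device-tree node shape this parsing expects; the compatible string and property names are taken from the code above, all values are hypothetical:

/* ethernet@24000 {
 *         compatible = "fsl,etsec2";
 *         fsl,num_tx_queues = <8>;
 *         fsl,num_rx_queues = <8>;
 *
 *         queue-group@0 {
 *                 reg = <0x24000 0x1000>;
 *                 interrupts = <29 2 30 2 34 2>;  // tx, rx, error
 *                 fsl,rx-bit-map = <0x0f>;
 *                 fsl,tx-bit-map = <0x0f>;
 *         };
 *         // further queue-group children for MQ_MG_MODE parts ...
 * };
 */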
 
-               if (priv->interruptTransmit < 0 ||
-                               priv->interruptReceive < 0 ||
-                               priv->interruptError < 0) {
-                       err = -EINVAL;
-                       goto err_out;
+       for (i = 0; i < priv->num_tx_queues; i++)
+               priv->tx_queue[i] = NULL;
+       for (i = 0; i < priv->num_rx_queues; i++)
+               priv->rx_queue[i] = NULL;
+
+       for (i = 0; i < priv->num_tx_queues; i++) {
+               priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
+                                           GFP_KERNEL);
+               if (!priv->tx_queue[i]) {
+                       err = -ENOMEM;
+                       goto tx_alloc_failed;
                }
+               priv->tx_queue[i]->tx_skbuff = NULL;
+               priv->tx_queue[i]->qindex = i;
+               priv->tx_queue[i]->dev = dev;
+               spin_lock_init(&(priv->tx_queue[i]->txlock));
        }
 
+       for (i = 0; i < priv->num_rx_queues; i++) {
+               priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
+                                           GFP_KERNEL);
+               if (!priv->rx_queue[i]) {
+                       err = -ENOMEM;
+                       goto rx_alloc_failed;
+               }
+               priv->rx_queue[i]->rx_skbuff = NULL;
+               priv->rx_queue[i]->qindex = i;
+               priv->rx_queue[i]->dev = dev;
+               spin_lock_init(&(priv->rx_queue[i]->rxlock));
+       }
+
        stash = of_get_property(np, "bd-stash", NULL);
 
-       if(stash) {
+       if (stash) {
                priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
                priv->bd_stash_en = 1;
        }
@@ -250,7 +750,8 @@ static int gfar_of_init(struct net_device *dev)
                        FSL_GIANFAR_DEV_HAS_CSUM |
                        FSL_GIANFAR_DEV_HAS_VLAN |
                        FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
-                       FSL_GIANFAR_DEV_HAS_EXTENDED_HASH;
+                       FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
+                       FSL_GIANFAR_DEV_HAS_TIMER;
 
        ctype = of_get_property(np, "phy-connection-type", NULL);
 
@@ -270,11 +771,66 @@ static int gfar_of_init(struct net_device *dev)
 
        return 0;
 
-err_out:
-       iounmap(priv->regs);
+rx_alloc_failed:
+       free_rx_pointers(priv);
+tx_alloc_failed:
+       free_tx_pointers(priv);
+err_grp_init:
+       unmap_group_regs(priv);
+       free_netdev(dev);
        return err;
 }
 
+static int gfar_hwtstamp_ioctl(struct net_device *netdev,
+                       struct ifreq *ifr, int cmd)
+{
+       struct hwtstamp_config config;
+       struct gfar_private *priv = netdev_priv(netdev);
+
+       if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+               return -EFAULT;
+
+       /* reserved for future extensions */
+       if (config.flags)
+               return -EINVAL;
+
+       switch (config.tx_type) {
+       case HWTSTAMP_TX_OFF:
+               priv->hwts_tx_en = 0;
+               break;
+       case HWTSTAMP_TX_ON:
+               if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
+                       return -ERANGE;
+               priv->hwts_tx_en = 1;
+               break;
+       default:
+               return -ERANGE;
+       }
+
+       switch (config.rx_filter) {
+       case HWTSTAMP_FILTER_NONE:
+               if (priv->hwts_rx_en) {
+                       stop_gfar(netdev);
+                       priv->hwts_rx_en = 0;
+                       startup_gfar(netdev);
+               }
+               break;
+       default:
+               if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
+                       return -ERANGE;
+               if (!priv->hwts_rx_en) {
+                       stop_gfar(netdev);
+                       priv->hwts_rx_en = 1;
+                       startup_gfar(netdev);
+               }
+               config.rx_filter = HWTSTAMP_FILTER_ALL;
+               break;
+       }
+
+       return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+               -EFAULT : 0;
+}
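For context, user space drives this handler through the standard SIOCSHWTSTAMP ioctl. A minimal sketch of such a caller, assuming a hypothetical interface name "eth0" and any ordinary socket fd (e.g. from socket(AF_INET, SOCK_DGRAM, 0)):

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

/* Ask the driver for TX timestamps and timestamps on all RX packets.
 * Returns 0 on success, -1 with errno set on failure. */
static int enable_hw_timestamping(int sock)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;

	memset(&cfg, 0, sizeof(cfg));
	cfg.flags = 0;			/* must be zero, see the check above */
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_ALL;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	/* the driver writes the granted rx_filter back into cfg */
	return ioctl(sock, SIOCSHWTSTAMP, &ifr);
}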
+
 /* Ioctl MII Interface */
 static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
@@ -283,80 +839,193 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
        if (!netif_running(dev))
                return -EINVAL;
 
+       if (cmd == SIOCSHWTSTAMP)
+               return gfar_hwtstamp_ioctl(dev, rq, cmd);
+
        if (!priv->phydev)
                return -ENODEV;
 
-       return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
+       return phy_mii_ioctl(priv->phydev, rq, cmd);
+}
+
+static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
+{
+       unsigned int new_bit_map = 0;
+       int mask = 1 << (max_qs - 1), i;
+
+       for (i = 0; i < max_qs; i++) {
+               if (bit_map & mask)
+                       new_bit_map |= 1 << i;
+               mask >>= 1;
+       }
+       return new_bit_map;
+}
+
+static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
+                                  u32 class)
+{
+       u32 rqfpr = FPR_FILER_MASK;
+       u32 rqfcr = 0x0;
+
+       rqfar--;
+       rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
+       ftp_rqfpr[rqfar] = rqfpr;
+       ftp_rqfcr[rqfar] = rqfcr;
+       gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
+
+       rqfar--;
+       rqfcr = RQFCR_CMP_NOMATCH;
+       ftp_rqfpr[rqfar] = rqfpr;
+       ftp_rqfcr[rqfar] = rqfcr;
+       gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
+
+       rqfar--;
+       rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
+       rqfpr = class;
+       ftp_rqfcr[rqfar] = rqfcr;
+       ftp_rqfpr[rqfar] = rqfpr;
+       gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
+
+       rqfar--;
+       rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
+       rqfpr = class;
+       ftp_rqfcr[rqfar] = rqfcr;
+       ftp_rqfpr[rqfar] = rqfpr;
+       gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
+
+       return rqfar;
+}
+
+static void gfar_init_filer_table(struct gfar_private *priv)
+{
+       int i;
+       u32 rqfar = MAX_FILER_IDX;
+       u32 rqfcr = 0x0;
+       u32 rqfpr = FPR_FILER_MASK;
+
+       /* Default rule */
+       rqfcr = RQFCR_CMP_MATCH;
+       ftp_rqfcr[rqfar] = rqfcr;
+       ftp_rqfpr[rqfar] = rqfpr;
+       gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
+
+       rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
+       rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
+       rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
+       rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
+       rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
+       rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);
+
+       /* cur_filer_idx indicates the first non-masked rule */
+       priv->cur_filer_idx = rqfar;
+
+       /* Rest are masked rules */
+       rqfcr = RQFCR_CMP_NOMATCH;
+       for (i = 0; i < rqfar; i++) {
+               ftp_rqfcr[i] = rqfcr;
+               ftp_rqfpr[i] = rqfpr;
+               gfar_write_filer(priv, i, rqfcr, rqfpr);
+       }
+}
+
+static void gfar_detect_errata(struct gfar_private *priv)
+{
+       struct device *dev = &priv->ofdev->dev;
+       unsigned int pvr = mfspr(SPRN_PVR);
+       unsigned int svr = mfspr(SPRN_SVR);
+       unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
+       unsigned int rev = svr & 0xffff;
+
+       /* MPC8313 Rev 2.0 and higher; All MPC837x */
+       if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
+                       (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
+               priv->errata |= GFAR_ERRATA_74;
+
+       /* MPC8313 and MPC837x all rev */
+       if ((pvr == 0x80850010 && mod == 0x80b0) ||
+                       (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
+               priv->errata |= GFAR_ERRATA_76;
+
+       /* MPC8313 and MPC837x all rev */
+       if ((pvr == 0x80850010 && mod == 0x80b0) ||
+                       (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
+               priv->errata |= GFAR_ERRATA_A002;
+
+       if (priv->errata)
+               dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
+                        priv->errata);
 }
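A worked decode of the SVR/PVR checks, with hypothetical register values purely for illustration:

/* Example: pvr = 0x80850010, svr = 0x80b00021
 *   mod = (svr >> 16) & 0xfff6 = 0x80b0   (E-suffix bit masked off)
 *   rev =  svr        & 0xffff = 0x0021   (rev 2.1, >= 0x0020)
 * This matches the "MPC8313 Rev 2.0 and higher" test, so GFAR_ERRATA_74
 * is set; the two all-rev MPC8313 tests then add _76 and _A002 as well.
 */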
 
 /* Set up the ethernet device structure, private data,
  * and anything else we need before we start */
-static int gfar_probe(struct of_device *ofdev,
+static int gfar_probe(struct platform_device *ofdev,
                const struct of_device_id *match)
 {
        u32 tempval;
        struct net_device *dev = NULL;
        struct gfar_private *priv = NULL;
-       DECLARE_MAC_BUF(mac);
-       int err = 0;
+       struct gfar __iomem *regs = NULL;
+       int err = 0, i, grp_idx = 0;
        int len_devname;
+       u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
+       u32 isrg = 0;
+       u32 __iomem *baddr;
 
-       /* Create an ethernet device instance */
-       dev = alloc_etherdev(sizeof (*priv));
+       err = gfar_of_init(ofdev, &dev);
 
-       if (NULL == dev)
-               return -ENOMEM;
+       if (err)
+               return err;
 
        priv = netdev_priv(dev);
        priv->ndev = dev;
        priv->ofdev = ofdev;
-       priv->node = ofdev->node;
+       priv->node = ofdev->dev.of_node;
        SET_NETDEV_DEV(dev, &ofdev->dev);
 
-       err = gfar_of_init(dev);
-
-       if (err)
-               goto regs_fail;
-
-       spin_lock_init(&priv->txlock);
-       spin_lock_init(&priv->rxlock);
        spin_lock_init(&priv->bflock);
        INIT_WORK(&priv->reset_task, gfar_reset_task);
 
        dev_set_drvdata(&ofdev->dev, priv);
+       regs = priv->gfargrp[0].regs;
+
+       gfar_detect_errata(priv);
 
        /* Stop the DMA engine now, in case it was running before */
        /* (The firmware could have used it, and left it running). */
        gfar_halt(dev);
 
        /* Reset MAC layer */
-       gfar_write(&priv->regs->maccfg1, MACCFG1_SOFT_RESET);
+       gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
 
        /* We need to delay at least 3 TX clocks */
        udelay(2);
 
        tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
-       gfar_write(&priv->regs->maccfg1, tempval);
+       gfar_write(&regs->maccfg1, tempval);
 
        /* Initialize MACCFG2. */
-       gfar_write(&priv->regs->maccfg2, MACCFG2_INIT_SETTINGS);
+       tempval = MACCFG2_INIT_SETTINGS;
+       if (gfar_has_errata(priv, GFAR_ERRATA_74))
+               tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
+       gfar_write(&regs->maccfg2, tempval);
 
        /* Initialize ECNTRL */
-       gfar_write(&priv->regs->ecntrl, ECNTRL_INIT_SETTINGS);
+       gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
 
        /* Set the dev->base_addr to the gfar reg region */
-       dev->base_addr = (unsigned long) (priv->regs);
+       dev->base_addr = (unsigned long) regs;
 
        SET_NETDEV_DEV(dev, &ofdev->dev);
 
        /* Fill in the dev structure */
        dev->watchdog_timeo = TX_TIMEOUT;
-       netif_napi_add(dev, &priv->napi, gfar_poll, GFAR_DEV_WEIGHT);
        dev->mtu = 1500;
-
        dev->netdev_ops = &gfar_netdev_ops;
        dev->ethtool_ops = &gfar_ethtool_ops;
 
+       /* Register for napi ...We are registering NAPI for each grp */
+       for (i = 0; i < priv->num_grps; i++)
+               netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT);
+
        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
                priv->rx_csum_enable = 1;
                dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
@@ -372,35 +1041,35 @@ static int gfar_probe(struct of_device *ofdev,
                priv->extended_hash = 1;
                priv->hash_width = 9;
 
-               priv->hash_regs[0] = &priv->regs->igaddr0;
-               priv->hash_regs[1] = &priv->regs->igaddr1;
-               priv->hash_regs[2] = &priv->regs->igaddr2;
-               priv->hash_regs[3] = &priv->regs->igaddr3;
-               priv->hash_regs[4] = &priv->regs->igaddr4;
-               priv->hash_regs[5] = &priv->regs->igaddr5;
-               priv->hash_regs[6] = &priv->regs->igaddr6;
-               priv->hash_regs[7] = &priv->regs->igaddr7;
-               priv->hash_regs[8] = &priv->regs->gaddr0;
-               priv->hash_regs[9] = &priv->regs->gaddr1;
-               priv->hash_regs[10] = &priv->regs->gaddr2;
-               priv->hash_regs[11] = &priv->regs->gaddr3;
-               priv->hash_regs[12] = &priv->regs->gaddr4;
-               priv->hash_regs[13] = &priv->regs->gaddr5;
-               priv->hash_regs[14] = &priv->regs->gaddr6;
-               priv->hash_regs[15] = &priv->regs->gaddr7;
+               priv->hash_regs[0] = &regs->igaddr0;
+               priv->hash_regs[1] = &regs->igaddr1;
+               priv->hash_regs[2] = &regs->igaddr2;
+               priv->hash_regs[3] = &regs->igaddr3;
+               priv->hash_regs[4] = &regs->igaddr4;
+               priv->hash_regs[5] = &regs->igaddr5;
+               priv->hash_regs[6] = &regs->igaddr6;
+               priv->hash_regs[7] = &regs->igaddr7;
+               priv->hash_regs[8] = &regs->gaddr0;
+               priv->hash_regs[9] = &regs->gaddr1;
+               priv->hash_regs[10] = &regs->gaddr2;
+               priv->hash_regs[11] = &regs->gaddr3;
+               priv->hash_regs[12] = &regs->gaddr4;
+               priv->hash_regs[13] = &regs->gaddr5;
+               priv->hash_regs[14] = &regs->gaddr6;
+               priv->hash_regs[15] = &regs->gaddr7;
 
        } else {
                priv->extended_hash = 0;
                priv->hash_width = 8;
 
-               priv->hash_regs[0] = &priv->regs->gaddr0;
-               priv->hash_regs[1] = &priv->regs->gaddr1;
-               priv->hash_regs[2] = &priv->regs->gaddr2;
-               priv->hash_regs[3] = &priv->regs->gaddr3;
-               priv->hash_regs[4] = &priv->regs->gaddr4;
-               priv->hash_regs[5] = &priv->regs->gaddr5;
-               priv->hash_regs[6] = &priv->regs->gaddr6;
-               priv->hash_regs[7] = &priv->regs->gaddr7;
+               priv->hash_regs[0] = &regs->gaddr0;
+               priv->hash_regs[1] = &regs->gaddr1;
+               priv->hash_regs[2] = &regs->gaddr2;
+               priv->hash_regs[3] = &regs->gaddr3;
+               priv->hash_regs[4] = &regs->gaddr4;
+               priv->hash_regs[5] = &regs->gaddr5;
+               priv->hash_regs[6] = &regs->gaddr6;
+               priv->hash_regs[7] = &regs->gaddr7;
        }
 
        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
@@ -408,19 +1077,78 @@ static int gfar_probe(struct of_device *ofdev,
        else
                priv->padding = 0;
 
-       if (dev->features & NETIF_F_IP_CSUM)
+       if (dev->features & NETIF_F_IP_CSUM ||
+                       priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
                dev->hard_header_len += GMAC_FCB_LEN;
 
+       /* Program the isrg regs only if number of grps > 1 */
+       if (priv->num_grps > 1) {
+               baddr = &regs->isrg0;
+               for (i = 0; i < priv->num_grps; i++) {
+                       isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
+                       isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
+                       gfar_write(baddr, isrg);
+                       baddr++;
+                       isrg = 0x0;
+               }
+       }
+
+       /* Need to reverse the bit maps as bit_map's MSB is q0
+        * but for_each_set_bit parses from right to left, which
+        * basically reverses the queue numbers */
+       for (i = 0; i < priv->num_grps; i++) {
+               priv->gfargrp[i].tx_bit_map = reverse_bitmap(
+                               priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
+               priv->gfargrp[i].rx_bit_map = reverse_bitmap(
+                               priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
+       }
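A quick sanity check of reverse_bitmap() against the comment above, taking max_qs = 8 for the example:

/* reverse_bitmap(0x80, 8) == 0x01    q0 (the MSB) lands on bit 0
 * reverse_bitmap(0x03, 8) == 0xc0    the two lowest bits move to the top
 * After the reversal, for_each_set_bit() therefore visits q0 first.
 */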
+
+       /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
+        * also assign queues to groups */
+       for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
+               priv->gfargrp[grp_idx].num_rx_queues = 0x0;
+               for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
+                               priv->num_rx_queues) {
+                       priv->gfargrp[grp_idx].num_rx_queues++;
+                       priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
+                       rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
+                       rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
+               }
+               priv->gfargrp[grp_idx].num_tx_queues = 0x0;
+               for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
+                               priv->num_tx_queues) {
+                       priv->gfargrp[grp_idx].num_tx_queues++;
+                       priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
+                       tstat = tstat | (TSTAT_CLEAR_THALT >> i);
+                       tqueue = tqueue | (TQUEUE_EN0 >> i);
+               }
+               priv->gfargrp[grp_idx].rstat = rstat;
+               priv->gfargrp[grp_idx].tstat = tstat;
+               rstat = tstat = 0;
+       }
+
+       gfar_write(&regs->rqueue, rqueue);
+       gfar_write(&regs->tqueue, tqueue);
+
        priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
-       priv->tx_ring_size = DEFAULT_TX_RING_SIZE;
-       priv->rx_ring_size = DEFAULT_RX_RING_SIZE;
-       priv->num_txbdfree = DEFAULT_TX_RING_SIZE;
 
-       priv->txcoalescing = DEFAULT_TX_COALESCE;
-       priv->txic = DEFAULT_TXIC;
-       priv->rxcoalescing = DEFAULT_RX_COALESCE;
-       priv->rxic = DEFAULT_RXIC;
+       /* Initializing some of the rx/tx queue level parameters */
+       for (i = 0; i < priv->num_tx_queues; i++) {
+               priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
+               priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
+               priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
+               priv->tx_queue[i]->txic = DEFAULT_TXIC;
+       }
 
+       for (i = 0; i < priv->num_rx_queues; i++) {
+               priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
+               priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
+               priv->rx_queue[i]->rxic = DEFAULT_RXIC;
+       }
+
+       /* enable filer if using multiple RX queues */
+       if (priv->num_rx_queues > 1)
+               priv->rx_filer_enable = 1;
+
        /* Enable most messages by default */
        priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
 
@@ -440,20 +1168,43 @@ static int gfar_probe(struct of_device *ofdev,
 
        /* fill out IRQ number and name fields */
        len_devname = strlen(dev->name);
-       strncpy(&priv->int_name_tx[0], dev->name, len_devname);
-       if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
-               strncpy(&priv->int_name_tx[len_devname],
-                       "_tx", sizeof("_tx") + 1);
-
-               strncpy(&priv->int_name_rx[0], dev->name, len_devname);
-               strncpy(&priv->int_name_rx[len_devname],
-                       "_rx", sizeof("_rx") + 1);
+       for (i = 0; i < priv->num_grps; i++) {
+               strncpy(&priv->gfargrp[i].int_name_tx[0], dev->name,
+                               len_devname);
+               if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
+                       strncpy(&priv->gfargrp[i].int_name_tx[len_devname],
+                               "_g", sizeof("_g"));
+                       /* append the group index as an ASCII digit ('0' + i) */
+                       priv->gfargrp[i].int_name_tx[
+                               strlen(priv->gfargrp[i].int_name_tx)] = i + 48;
+                       strncpy(&priv->gfargrp[i].int_name_tx[strlen(
+                               priv->gfargrp[i].int_name_tx)],
+                               "_tx", sizeof("_tx") + 1);
+
+                       strncpy(&priv->gfargrp[i].int_name_rx[0], dev->name,
+                                       len_devname);
+                       strncpy(&priv->gfargrp[i].int_name_rx[len_devname],
+                                       "_g", sizeof("_g"));
+                       priv->gfargrp[i].int_name_rx[
+                               strlen(priv->gfargrp[i].int_name_rx)] = i + 48;
+                       strncpy(&priv->gfargrp[i].int_name_rx[strlen(
+                               priv->gfargrp[i].int_name_rx)],
+                               "_rx", sizeof("_rx") + 1);
+
+                       strncpy(&priv->gfargrp[i].int_name_er[0], dev->name,
+                                       len_devname);
+                       strncpy(&priv->gfargrp[i].int_name_er[len_devname],
+                               "_g", sizeof("_g"));
+                       priv->gfargrp[i].int_name_er[strlen(
+                                       priv->gfargrp[i].int_name_er)] = i + 48;
+                       strncpy(&priv->gfargrp[i].int_name_er[strlen(
+                               priv->gfargrp[i].int_name_er)],
+                               "_er", sizeof("_er") + 1);
+               } else
+                       priv->gfargrp[i].int_name_tx[len_devname] = '\0';
+       }
 
-               strncpy(&priv->int_name_er[0], dev->name, len_devname);
-               strncpy(&priv->int_name_er[len_devname],
-                       "_er", sizeof("_er") + 1);
-       } else
-               priv->int_name_tx[len_devname] = '\0';
+       /* Initialize the filer table */
+       gfar_init_filer_table(priv);
 
        /* Create all the sysfs files */
        gfar_init_sysfs(dev);
@@ -464,14 +1215,19 @@ static int gfar_probe(struct of_device *ofdev,
        /* Even more device info helps when determining which kernel */
        /* provided which set of benchmarks. */
        printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
-       printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n",
-              dev->name, priv->rx_ring_size, priv->tx_ring_size);
+       for (i = 0; i < priv->num_rx_queues; i++)
+               printk(KERN_INFO "%s: RX BD ring size for Q[%d]: %d\n",
+                       dev->name, i, priv->rx_queue[i]->rx_ring_size);
+       for (i = 0; i < priv->num_tx_queues; i++)
+               printk(KERN_INFO "%s: TX BD ring size for Q[%d]: %d\n",
+                       dev->name, i, priv->tx_queue[i]->tx_ring_size);
 
        return 0;
 
 register_fail:
-       iounmap(priv->regs);
-regs_fail:
+       unmap_group_regs(priv);
+       free_tx_pointers(priv);
+       free_rx_pointers(priv);
        if (priv->phy_node)
                of_node_put(priv->phy_node);
        if (priv->tbi_node)
@@ -480,7 +1236,7 @@ regs_fail:
        return err;
 }
 
-static int gfar_remove(struct of_device *ofdev)
+static int gfar_remove(struct platform_device *ofdev)
 {
        struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);
 
@@ -491,54 +1247,60 @@ static int gfar_remove(struct of_device *ofdev)
 
        dev_set_drvdata(&ofdev->dev, NULL);
 
-       iounmap(priv->regs);
+       unregister_netdev(priv->ndev);
+       unmap_group_regs(priv);
        free_netdev(priv->ndev);
 
        return 0;
 }
 
 #ifdef CONFIG_PM
-static int gfar_suspend(struct of_device *ofdev, pm_message_t state)
+
+static int gfar_suspend(struct device *dev)
 {
-       struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);
-       struct net_device *dev = priv->ndev;
+       struct gfar_private *priv = dev_get_drvdata(dev);
+       struct net_device *ndev = priv->ndev;
+       struct gfar __iomem *regs = priv->gfargrp[0].regs;
        unsigned long flags;
        u32 tempval;
 
        int magic_packet = priv->wol_en &&
                (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
 
-       netif_device_detach(dev);
+       netif_device_detach(ndev);
+
+       if (netif_running(ndev)) {
-       if (netif_running(dev)) {
-               spin_lock_irqsave(&priv->txlock, flags);
-               spin_lock(&priv->rxlock);
+               local_irq_save(flags);
+               lock_tx_qs(priv);
+               lock_rx_qs(priv);
 
-               gfar_halt_nodisable(dev);
+               gfar_halt_nodisable(ndev);
 
                /* Disable Tx, and Rx if wake-on-LAN is disabled. */
-               tempval = gfar_read(&priv->regs->maccfg1);
+               tempval = gfar_read(&regs->maccfg1);
 
                tempval &= ~MACCFG1_TX_EN;
 
                if (!magic_packet)
                        tempval &= ~MACCFG1_RX_EN;
 
-               gfar_write(&priv->regs->maccfg1, tempval);
+               gfar_write(&regs->maccfg1, tempval);
 
-               spin_unlock(&priv->rxlock);
-               spin_unlock_irqrestore(&priv->txlock, flags);
+               unlock_rx_qs(priv);
+               unlock_tx_qs(priv);
+               local_irq_restore(flags);
 
-               napi_disable(&priv->napi);
+               disable_napi(priv);
 
                if (magic_packet) {
                        /* Enable interrupt on Magic Packet */
-                       gfar_write(&priv->regs->imask, IMASK_MAG);
+                       gfar_write(&regs->imask, IMASK_MAG);
 
                        /* Enable Magic Packet mode */
-                       tempval = gfar_read(&priv->regs->maccfg2);
+                       tempval = gfar_read(&regs->maccfg2);
                        tempval |= MACCFG2_MPEN;
-                       gfar_write(&priv->regs->maccfg2, tempval);
+                       gfar_write(&regs->maccfg2, tempval);
                } else {
                        phy_stop(priv->phydev);
                }
@@ -547,17 +1309,18 @@ static int gfar_suspend(struct of_device *ofdev, pm_message_t state)
        return 0;
 }
 
-static int gfar_resume(struct of_device *ofdev)
+static int gfar_resume(struct device *dev)
 {
-       struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);
-       struct net_device *dev = priv->ndev;
+       struct gfar_private *priv = dev_get_drvdata(dev);
+       struct net_device *ndev = priv->ndev;
+       struct gfar __iomem *regs = priv->gfargrp[0].regs;
        unsigned long flags;
        u32 tempval;
        int magic_packet = priv->wol_en &&
                (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
 
-       if (!netif_running(dev)) {
-               netif_device_attach(dev);
+       if (!netif_running(ndev)) {
+               netif_device_attach(ndev);
                return 0;
        }
 
@@ -567,28 +1330,68 @@ static int gfar_resume(struct of_device *ofdev)
        /* Disable Magic Packet mode, in case something
         * else woke us up.
         */
+       local_irq_save(flags);
+       lock_tx_qs(priv);
+       lock_rx_qs(priv);
 
-       spin_lock_irqsave(&priv->txlock, flags);
-       spin_lock(&priv->rxlock);
-
-       tempval = gfar_read(&priv->regs->maccfg2);
+       tempval = gfar_read(&regs->maccfg2);
        tempval &= ~MACCFG2_MPEN;
-       gfar_write(&priv->regs->maccfg2, tempval);
+       gfar_write(&regs->maccfg2, tempval);
 
-       gfar_start(dev);
+       gfar_start(ndev);
 
-       spin_unlock(&priv->rxlock);
-       spin_unlock_irqrestore(&priv->txlock, flags);
+       unlock_rx_qs(priv);
+       unlock_tx_qs(priv);
+       local_irq_restore(flags);
 
-       netif_device_attach(dev);
+       netif_device_attach(ndev);
 
-       napi_enable(&priv->napi);
+       enable_napi(priv);
 
        return 0;
 }
+
+static int gfar_restore(struct device *dev)
+{
+       struct gfar_private *priv = dev_get_drvdata(dev);
+       struct net_device *ndev = priv->ndev;
+
+       if (!netif_running(ndev))
+               return 0;
+
+       gfar_init_bds(ndev);
+       init_registers(ndev);
+       gfar_set_mac_address(ndev);
+       gfar_init_mac(ndev);
+       gfar_start(ndev);
+
+       priv->oldlink = 0;
+       priv->oldspeed = 0;
+       priv->oldduplex = -1;
+
+       if (priv->phydev)
+               phy_start(priv->phydev);
+
+       netif_device_attach(ndev);
+       enable_napi(priv);
+
+       return 0;
+}
+
+static struct dev_pm_ops gfar_pm_ops = {
+       .suspend = gfar_suspend,
+       .resume = gfar_resume,
+       .freeze = gfar_suspend,
+       .thaw = gfar_resume,
+       .restore = gfar_restore,
+};
+
+#define GFAR_PM_OPS (&gfar_pm_ops)
+
 #else
-#define gfar_suspend NULL
-#define gfar_resume NULL
+
+#define GFAR_PM_OPS NULL
+
 #endif
 
 /* Reads the controller's registers to determine what interface
@@ -597,7 +1400,10 @@ static int gfar_resume(struct of_device *ofdev)
 static phy_interface_t gfar_get_interface(struct net_device *dev)
 {
        struct gfar_private *priv = netdev_priv(dev);
-       u32 ecntrl = gfar_read(&priv->regs->ecntrl);
+       struct gfar __iomem *regs = priv->gfargrp[0].regs;
+       u32 ecntrl;
+
+       ecntrl = gfar_read(&regs->ecntrl);
 
        if (ecntrl & ECNTRL_SGMII_MODE)
                return PHY_INTERFACE_MODE_SGMII;
@@ -719,72 +1525,112 @@ static void gfar_configure_serdes(struct net_device *dev)
 static void init_registers(struct net_device *dev)
 {
        struct gfar_private *priv = netdev_priv(dev);
+       struct gfar __iomem *regs = NULL;
+       int i = 0;
 
-       /* Clear IEVENT */
-       gfar_write(&priv->regs->ievent, IEVENT_INIT_CLEAR);
+       for (i = 0; i < priv->num_grps; i++) {
+               regs = priv->gfargrp[i].regs;
+               /* Clear IEVENT */
+               gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
 
-       /* Initialize IMASK */
-       gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR);
+               /* Initialize IMASK */
+               gfar_write(&regs->imask, IMASK_INIT_CLEAR);
+       }
 
+       regs = priv->gfargrp[0].regs;
        /* Init hash registers to zero */
-       gfar_write(&priv->regs->igaddr0, 0);
-       gfar_write(&priv->regs->igaddr1, 0);
-       gfar_write(&priv->regs->igaddr2, 0);
-       gfar_write(&priv->regs->igaddr3, 0);
-       gfar_write(&priv->regs->igaddr4, 0);
-       gfar_write(&priv->regs->igaddr5, 0);
-       gfar_write(&priv->regs->igaddr6, 0);
-       gfar_write(&priv->regs->igaddr7, 0);
-
-       gfar_write(&priv->regs->gaddr0, 0);
-       gfar_write(&priv->regs->gaddr1, 0);
-       gfar_write(&priv->regs->gaddr2, 0);
-       gfar_write(&priv->regs->gaddr3, 0);
-       gfar_write(&priv->regs->gaddr4, 0);
-       gfar_write(&priv->regs->gaddr5, 0);
-       gfar_write(&priv->regs->gaddr6, 0);
-       gfar_write(&priv->regs->gaddr7, 0);
+       gfar_write(&regs->igaddr0, 0);
+       gfar_write(&regs->igaddr1, 0);
+       gfar_write(&regs->igaddr2, 0);
+       gfar_write(&regs->igaddr3, 0);
+       gfar_write(&regs->igaddr4, 0);
+       gfar_write(&regs->igaddr5, 0);
+       gfar_write(&regs->igaddr6, 0);
+       gfar_write(&regs->igaddr7, 0);
+
+       gfar_write(&regs->gaddr0, 0);
+       gfar_write(&regs->gaddr1, 0);
+       gfar_write(&regs->gaddr2, 0);
+       gfar_write(&regs->gaddr3, 0);
+       gfar_write(&regs->gaddr4, 0);
+       gfar_write(&regs->gaddr5, 0);
+       gfar_write(&regs->gaddr6, 0);
+       gfar_write(&regs->gaddr7, 0);
 
        /* Zero out the rmon mib registers if it has them */
        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
-               memset_io(&(priv->regs->rmon), 0, sizeof (struct rmon_mib));
+               memset_io(&(regs->rmon), 0, sizeof (struct rmon_mib));
 
                /* Mask off the CAM interrupts */
-               gfar_write(&priv->regs->rmon.cam1, 0xffffffff);
-               gfar_write(&priv->regs->rmon.cam2, 0xffffffff);
+               gfar_write(&regs->rmon.cam1, 0xffffffff);
+               gfar_write(&regs->rmon.cam2, 0xffffffff);
        }
 
        /* Initialize the max receive buffer length */
-       gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
+       gfar_write(&regs->mrblr, priv->rx_buffer_size);
 
        /* Initialize the Minimum Frame Length Register */
-       gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS);
+       gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
 }
 
+static int __gfar_is_rx_idle(struct gfar_private *priv)
+{
+       u32 res;
+
+       /*
+        * Normally TSEC should not hang on GRS commands, so we should
+        * actually wait for IEVENT_GRSC flag.
+        */
+       if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002)))
+               return 0;
+
+       /*
+        * Read the eTSEC register at offset 0xD1C. If bits 7-14 are
+        * the same as bits 23-30, the eTSEC Rx is assumed to be idle
+        * and the Rx can be safely reset.
+        */
+       res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
+       res &= 0x7f807f80;
+       if ((res & 0xffff) == (res >> 16))
+               return 1;
+
+       return 0;
+}
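Concretely, the 0x7f807f80 mask keeps the same field in both halfwords of the register so they can be compared directly; with hypothetical values:

/* res = 0x92b412b4:  masked -> 0x12801280, 0x1280 == 0x1280, Rx idle
 * res = 0x12b4d1b4:  masked -> 0x12805180, 0x5180 != 0x1280, still busy
 */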
 
 /* Halt the receive and transmit queues */
 static void gfar_halt_nodisable(struct net_device *dev)
 {
        struct gfar_private *priv = netdev_priv(dev);
-       struct gfar __iomem *regs = priv->regs;
+       struct gfar __iomem *regs = NULL;
        u32 tempval;
+       int i = 0;
 
-       /* Mask all interrupts */
-       gfar_write(&regs->imask, IMASK_INIT_CLEAR);
+       for (i = 0; i < priv->num_grps; i++) {
+               regs = priv->gfargrp[i].regs;
+               /* Mask all interrupts */
+               gfar_write(&regs->imask, IMASK_INIT_CLEAR);
 
-       /* Clear all interrupts */
-       gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
+               /* Clear all interrupts */
+               gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
+       }
 
+       regs = priv->gfargrp[0].regs;
        /* Stop the DMA, and wait for it to stop */
-       tempval = gfar_read(&priv->regs->dmactrl);
+       tempval = gfar_read(&regs->dmactrl);
        if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
            != (DMACTRL_GRS | DMACTRL_GTS)) {
-               tempval |= (DMACTRL_GRS | DMACTRL_GTS);
-               gfar_write(&priv->regs->dmactrl, tempval);
+               int ret;
 
-               while (!(gfar_read(&priv->regs->ievent) &
-                        (IEVENT_GRSC | IEVENT_GTSC)))
-                       cpu_relax();
+               tempval |= (DMACTRL_GRS | DMACTRL_GTS);
+               gfar_write(&regs->dmactrl, tempval);
+
+               do {
+                       ret = spin_event_timeout(((gfar_read(&regs->ievent) &
+                                (IEVENT_GRSC | IEVENT_GTSC)) ==
+                                (IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0);
+                       if (!ret && !(gfar_read(&regs->ievent) & IEVENT_GRSC))
+                               ret = __gfar_is_rx_idle(priv);
+               } while (!ret);
        }
 }
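
/*
 * spin_event_timeout() above is a powerpc helper from <asm/delay.h>:
 * it busy-waits until the condition becomes true or the timeout (in
 * microseconds here) expires, and returns the last value of the
 * condition, so ret == 0 means the wait timed out. On a timeout
 * without IEVENT_GRSC, the A002 idle check decides whether the Rx
 * side may be treated as halted anyway. A rough, illustrative
 * equivalent of the helper (not the real definition):
 */
#define spin_event_timeout_sketch(cond, timeout, delay)	\
({							\
	unsigned long __loops = (timeout);		\
	typeof(cond) __ret;				\
	while (!(__ret = (cond)) && __loops--) {	\
		if (delay)				\
			udelay(delay);			\
		else					\
			cpu_relax();			\
	}						\
	__ret;						\
})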
 
@@ -792,7 +1638,7 @@ static void gfar_halt_nodisable(struct net_device *dev)
 void gfar_halt(struct net_device *dev)
 {
        struct gfar_private *priv = netdev_priv(dev);
-       struct gfar __iomem *regs = priv->regs;
+       struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 tempval;
 
        gfar_halt_nodisable(dev);
@@ -803,101 +1649,132 @@ void gfar_halt(struct net_device *dev)
        gfar_write(&regs->maccfg1, tempval);
 }
 
+static void free_grp_irqs(struct gfar_priv_grp *grp)
+{
+       free_irq(grp->interruptError, grp);
+       free_irq(grp->interruptTransmit, grp);
+       free_irq(grp->interruptReceive, grp);
+}
+
 void stop_gfar(struct net_device *dev)
 {
        struct gfar_private *priv = netdev_priv(dev);
-       struct gfar __iomem *regs = priv->regs;
        unsigned long flags;
+       int i;
 
        phy_stop(priv->phydev);
 
        /* Lock it down */
-       spin_lock_irqsave(&priv->txlock, flags);
-       spin_lock(&priv->rxlock);
+       local_irq_save(flags);
+       lock_tx_qs(priv);
+       lock_rx_qs(priv);
 
        gfar_halt(dev);
 
-       spin_unlock(&priv->rxlock);
-       spin_unlock_irqrestore(&priv->txlock, flags);
+       unlock_rx_qs(priv);
+       unlock_tx_qs(priv);
+       local_irq_restore(flags);
 
        /* Free the IRQs */
        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
-               free_irq(priv->interruptError, dev);
-               free_irq(priv->interruptTransmit, dev);
-               free_irq(priv->interruptReceive, dev);
+               for (i = 0; i < priv->num_grps; i++)
+                       free_grp_irqs(&priv->gfargrp[i]);
        } else {
-               free_irq(priv->interruptTransmit, dev);
+               for (i = 0; i < priv->num_grps; i++)
+                       free_irq(priv->gfargrp[i].interruptTransmit,
+                                       &priv->gfargrp[i]);
        }
 
        free_skb_resources(priv);
-
-       dma_free_coherent(&priv->ofdev->dev,
-                       sizeof(struct txbd8)*priv->tx_ring_size
-                       + sizeof(struct rxbd8)*priv->rx_ring_size,
-                       priv->tx_bd_base,
-                       gfar_read(&regs->tbase0));
 }
 
-/* If there are any tx skbs or rx skbs still around, free them.
- * Then free tx_skbuff and rx_skbuff */
-static void free_skb_resources(struct gfar_private *priv)
+static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
 {
-       struct rxbd8 *rxbdp;
        struct txbd8 *txbdp;
+       struct gfar_private *priv = netdev_priv(tx_queue->dev);
        int i, j;
 
-       /* Go through all the buffer descriptors and free their data buffers */
-       txbdp = priv->tx_bd_base;
+       txbdp = tx_queue->tx_bd_base;
 
-       for (i = 0; i < priv->tx_ring_size; i++) {
-               if (!priv->tx_skbuff[i])
+       for (i = 0; i < tx_queue->tx_ring_size; i++) {
+               if (!tx_queue->tx_skbuff[i])
                        continue;
 
                dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
                                txbdp->length, DMA_TO_DEVICE);
                txbdp->lstatus = 0;
-               for (j = 0; j < skb_shinfo(priv->tx_skbuff[i])->nr_frags; j++) {
+               for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
+                               j++) {
                        txbdp++;
                        dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
                                        txbdp->length, DMA_TO_DEVICE);
                }
                txbdp++;
-               dev_kfree_skb_any(priv->tx_skbuff[i]);
-               priv->tx_skbuff[i] = NULL;
+               dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
+               tx_queue->tx_skbuff[i] = NULL;
        }
+       kfree(tx_queue->tx_skbuff);
+}
 
-       kfree(priv->tx_skbuff);
-
-       rxbdp = priv->rx_bd_base;
+static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
+{
+       struct rxbd8 *rxbdp;
+       struct gfar_private *priv = netdev_priv(rx_queue->dev);
+       int i;
 
-       /* rx_skbuff is not guaranteed to be allocated, so only
-        * free it and its contents if it is allocated */
-       if(priv->rx_skbuff != NULL) {
-               for (i = 0; i < priv->rx_ring_size; i++) {
-                       if (priv->rx_skbuff[i]) {
-                               dma_unmap_single(&priv->ofdev->dev, rxbdp->bufPtr,
-                                               priv->rx_buffer_size,
-                                               DMA_FROM_DEVICE);
+       rxbdp = rx_queue->rx_bd_base;
 
-                               dev_kfree_skb_any(priv->rx_skbuff[i]);
-                               priv->rx_skbuff[i] = NULL;
-                       }
+       for (i = 0; i < rx_queue->rx_ring_size; i++) {
+               if (rx_queue->rx_skbuff[i]) {
+                       dma_unmap_single(&priv->ofdev->dev,
+                                       rxbdp->bufPtr, priv->rx_buffer_size,
+                                       DMA_FROM_DEVICE);
+                       dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
+                       rx_queue->rx_skbuff[i] = NULL;
+               }
+               rxbdp->lstatus = 0;
+               rxbdp->bufPtr = 0;
+               rxbdp++;
+       }
+       kfree(rx_queue->rx_skbuff);
+}
 
-                       rxbdp->lstatus = 0;
-                       rxbdp->bufPtr = 0;
+/* If there are any tx skbs or rx skbs still around, free them.
+ * Then free tx_skbuff and rx_skbuff */
+static void free_skb_resources(struct gfar_private *priv)
+{
+       struct gfar_priv_tx_q *tx_queue = NULL;
+       struct gfar_priv_rx_q *rx_queue = NULL;
+       int i;
 
-                       rxbdp++;
-               }
+       /* Go through all the buffer descriptors and free their data buffers */
+       for (i = 0; i < priv->num_tx_queues; i++) {
+               tx_queue = priv->tx_queue[i];
+               if (tx_queue->tx_skbuff)
+                       free_skb_tx_queue(tx_queue);
+       }
 
-               kfree(priv->rx_skbuff);
+       for (i = 0; i < priv->num_rx_queues; i++) {
+               rx_queue = priv->rx_queue[i];
+               if (rx_queue->rx_skbuff)
+                       free_skb_rx_queue(rx_queue);
        }
+
+       dma_free_coherent(&priv->ofdev->dev,
+                       sizeof(struct txbd8) * priv->total_tx_ring_size +
+                       sizeof(struct rxbd8) * priv->total_rx_ring_size,
+                       priv->tx_queue[0]->tx_bd_base,
+                       priv->tx_queue[0]->tx_bd_dma_base);
+       skb_queue_purge(&priv->rx_recycle);
 }
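
/*
 * The single dma_free_coherent() above implies that all descriptor
 * rings live in one coherent region: every Tx ring first, then every
 * Rx ring. A sketch of the allocation side under that assumption
 * (field names as in this driver; the real code is in
 * gfar_alloc_skb_resources(), which additionally chains the bases
 * for queues 1..n):
 */
static int gfar_alloc_bd_region_sketch(struct gfar_private *priv)
{
	size_t size = sizeof(struct txbd8) * priv->total_tx_ring_size +
		      sizeof(struct rxbd8) * priv->total_rx_ring_size;
	dma_addr_t dma_base;
	void *vaddr;

	vaddr = dma_alloc_coherent(&priv->ofdev->dev, size, &dma_base,
				   GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	/* Tx descriptors come first in the region */
	priv->tx_queue[0]->tx_bd_base = vaddr;
	priv->tx_queue[0]->tx_bd_dma_base = dma_base;

	/* Rx descriptors start where the last Tx ring ends */
	priv->rx_queue[0]->rx_bd_base = vaddr +
		sizeof(struct txbd8) * priv->total_tx_ring_size;

	return 0;
}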
 
 void gfar_start(struct net_device *dev)
 {
        struct gfar_private *priv = netdev_priv(dev);
-       struct gfar __iomem *regs = priv->regs;
+       struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 tempval;
+       int i = 0;
 
        /* Enable Rx and Tx in MACCFG1 */
        tempval = gfar_read(&regs->maccfg1);
@@ -905,269 +1782,159 @@ void gfar_start(struct net_device *dev)
        gfar_write(&regs->maccfg1, tempval);
 
        /* Initialize DMACTRL to have WWR and WOP */
-       tempval = gfar_read(&priv->regs->dmactrl);
+       tempval = gfar_read(&regs->dmactrl);
        tempval |= DMACTRL_INIT_SETTINGS;
-       gfar_write(&priv->regs->dmactrl, tempval);
+       gfar_write(&regs->dmactrl, tempval);
 
        /* Make sure we aren't stopped */
-       tempval = gfar_read(&priv->regs->dmactrl);
+       tempval = gfar_read(&regs->dmactrl);
        tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
-       gfar_write(&priv->regs->dmactrl, tempval);
-
-       /* Clear THLT/RHLT, so that the DMA starts polling now */
-       gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);
-       gfar_write(&regs->rstat, RSTAT_CLEAR_RHALT);
-
-       /* Unmask the interrupts we look for */
-       gfar_write(&regs->imask, IMASK_DEFAULT);
+       gfar_write(&regs->dmactrl, tempval);
+
+       for (i = 0; i < priv->num_grps; i++) {
+               regs = priv->gfargrp[i].regs;
+               /* Clear THLT/RHLT, so that the DMA starts polling now */
+               gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
+               gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
+               /* Unmask the interrupts we look for */
+               gfar_write(&regs->imask, IMASK_DEFAULT);
+       }
 
-       dev->trans_start = jiffies;
+       dev->trans_start = jiffies; /* prevent tx timeout */
 }
 
-/* Bring the controller up and running */
-int startup_gfar(struct net_device *dev)
+void gfar_configure_coalescing(struct gfar_private *priv,
+       unsigned long tx_mask, unsigned long rx_mask)
 {
-       struct txbd8 *txbdp;
-       struct rxbd8 *rxbdp;
-       dma_addr_t addr = 0;
-       unsigned long vaddr;
-       int i;
-       struct gfar_private *priv = netdev_priv(dev);
-       struct gfar __iomem *regs = priv->regs;
-       int err = 0;
-       u32 rctrl = 0;
-       u32 tctrl = 0;
-       u32 attrs = 0;
-
-       gfar_write(&regs->imask, IMASK_INIT_CLEAR);
-
-       /* Allocate memory for the buffer descriptors */
-       vaddr = (unsigned long) dma_alloc_coherent(&priv->ofdev->dev,
-                       sizeof (struct txbd8) * priv->tx_ring_size +
-                       sizeof (struct rxbd8) * priv->rx_ring_size,
-                       &addr, GFP_KERNEL);
-
-       if (vaddr == 0) {
-               if (netif_msg_ifup(priv))
-                       printk(KERN_ERR "%s: Could not allocate buffer descriptors!\n",
-                                       dev->name);
-               return -ENOMEM;
-       }
-
-       priv->tx_bd_base = (struct txbd8 *) vaddr;
-
-       /* enet DMA only understands physical addresses */
-       gfar_write(&regs->tbase0, addr);
-
-       /* Start the rx descriptor ring where the tx ring leaves off */
-       addr = addr + sizeof (struct txbd8) * priv->tx_ring_size;
-       vaddr = vaddr + sizeof (struct txbd8) * priv->tx_ring_size;
-       priv->rx_bd_base = (struct rxbd8 *) vaddr;
-       gfar_write(&regs->rbase0, addr);
-
-       /* Setup the skbuff rings */
-       priv->tx_skbuff =
-           (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
-                                       priv->tx_ring_size, GFP_KERNEL);
-
-       if (NULL == priv->tx_skbuff) {
-               if (netif_msg_ifup(priv))
-                       printk(KERN_ERR "%s: Could not allocate tx_skbuff\n",
-                                       dev->name);
-               err = -ENOMEM;
-               goto tx_skb_fail;
-       }
-
-       for (i = 0; i < priv->tx_ring_size; i++)
-               priv->tx_skbuff[i] = NULL;
-
-       priv->rx_skbuff =
-           (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
-                                       priv->rx_ring_size, GFP_KERNEL);
-
-       if (NULL == priv->rx_skbuff) {
-               if (netif_msg_ifup(priv))
-                       printk(KERN_ERR "%s: Could not allocate rx_skbuff\n",
-                                       dev->name);
-               err = -ENOMEM;
-               goto rx_skb_fail;
-       }
-
-       for (i = 0; i < priv->rx_ring_size; i++)
-               priv->rx_skbuff[i] = NULL;
-
-       /* Initialize some variables in our dev structure */
-       priv->num_txbdfree = priv->tx_ring_size;
-       priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
-       priv->cur_rx = priv->rx_bd_base;
-       priv->skb_curtx = priv->skb_dirtytx = 0;
-       priv->skb_currx = 0;
-
-       /* Initialize Transmit Descriptor Ring */
-       txbdp = priv->tx_bd_base;
-       for (i = 0; i < priv->tx_ring_size; i++) {
-               txbdp->lstatus = 0;
-               txbdp->bufPtr = 0;
-               txbdp++;
-       }
-
-       /* Set the last descriptor in the ring to indicate wrap */
-       txbdp--;
-       txbdp->status |= TXBD_WRAP;
+       struct gfar __iomem *regs = priv->gfargrp[0].regs;
+       u32 __iomem *baddr;
+       int i = 0;
 
-       rxbdp = priv->rx_bd_base;
-       for (i = 0; i < priv->rx_ring_size; i++) {
-               struct sk_buff *skb;
-
-               skb = gfar_new_skb(dev);
-
-               if (!skb) {
-                       printk(KERN_ERR "%s: Can't allocate RX buffers\n",
-                                       dev->name);
+       /* Backward-compatible case: even with multiple queues enabled,
+        * there is only a single register to program
+        */
+       gfar_write(&regs->txic, 0);
+       if (likely(priv->tx_queue[0]->txcoalescing))
+               gfar_write(&regs->txic, priv->tx_queue[0]->txic);
 
-                       goto err_rxalloc_fail;
+       gfar_write(&regs->rxic, 0);
+       if (unlikely(priv->rx_queue[0]->rxcoalescing))
+               gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
+
+       if (priv->mode == MQ_MG_MODE) {
+               baddr = &regs->txic0;
+               for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
+                       if (likely(priv->tx_queue[i]->txcoalescing)) {
+                               gfar_write(baddr + i, 0);
+                               gfar_write(baddr + i, priv->tx_queue[i]->txic);
+                       }
                }
 
-               priv->rx_skbuff[i] = skb;
-
-               gfar_new_rxbdp(dev, rxbdp, skb);
-
-               rxbdp++;
+               baddr = &regs->rxic0;
+               for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
+                       if (likely(priv->rx_queue[i]->rxcoalescing)) {
+                               gfar_write(baddr + i, 0);
+                               gfar_write(baddr + i, priv->rx_queue[i]->rxic);
+                       }
+               }
        }
+}
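
/*
 * The txic/rxic values programmed above pack an enable bit together
 * with a frame-count threshold and a timer threshold, assuming the
 * mk_ic_value() helper from gianfar.h. A usage sketch with arbitrary
 * example thresholds:
 */
static void gfar_coalescing_example(struct gfar_private *priv)
{
	/* interrupt after 8 frames or 256 timer ticks, whichever first */
	priv->tx_queue[0]->txcoalescing = 1;
	priv->tx_queue[0]->txic = mk_ic_value(8, 256);

	priv->rx_queue[0]->rxcoalescing = 1;
	priv->rx_queue[0]->rxic = mk_ic_value(8, 256);

	/* reprogram every Tx and Rx queue, as startup_gfar() does */
	gfar_configure_coalescing(priv, 0xFF, 0xFF);
}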
 
-       /* Set the last descriptor in the ring to wrap */
-       rxbdp--;
-       rxbdp->status |= RXBD_WRAP;
+static int register_grp_irqs(struct gfar_priv_grp *grp)
+{
+       struct gfar_private *priv = grp->priv;
+       struct net_device *dev = priv->ndev;
+       int err;
 
        /* If the device has multiple interrupts, register for
         * them.  Otherwise, only register for the one */
        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
                /* Install our interrupt handlers for Error,
                 * Transmit, and Receive */
-               if (request_irq(priv->interruptError, gfar_error,
-                               0, priv->int_name_er, dev) < 0) {
+               if ((err = request_irq(grp->interruptError, gfar_error, 0,
+                               grp->int_name_er, grp)) < 0) {
                        if (netif_msg_intr(priv))
                                printk(KERN_ERR "%s: Can't get IRQ %d\n",
-                                       dev->name, priv->interruptError);
+                                       dev->name, grp->interruptError);
 
-                       err = -1;
                        goto err_irq_fail;
                }
 
-               if (request_irq(priv->interruptTransmit, gfar_transmit,
-                               0, priv->int_name_tx, dev) < 0) {
+               if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
+                               0, grp->int_name_tx, grp)) < 0) {
                        if (netif_msg_intr(priv))
                                printk(KERN_ERR "%s: Can't get IRQ %d\n",
-                                       dev->name, priv->interruptTransmit);
-
-                       err = -1;
-
+                                       dev->name, grp->interruptTransmit);
                        goto tx_irq_fail;
                }
 
-               if (request_irq(priv->interruptReceive, gfar_receive,
-                               0, priv->int_name_rx, dev) < 0) {
+               if ((err = request_irq(grp->interruptReceive, gfar_receive, 0,
+                               grp->int_name_rx, grp)) < 0) {
                        if (netif_msg_intr(priv))
-                               printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n",
-                                               dev->name, priv->interruptReceive);
-
-                       err = -1;
+                               printk(KERN_ERR "%s: Can't get IRQ %d\n",
+                                       dev->name, grp->interruptReceive);
                        goto rx_irq_fail;
                }
        } else {
-               if (request_irq(priv->interruptTransmit, gfar_interrupt,
-                               0, priv->int_name_tx, dev) < 0) {
+               if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0,
+                               grp->int_name_tx, grp)) < 0) {
                        if (netif_msg_intr(priv))
                                printk(KERN_ERR "%s: Can't get IRQ %d\n",
-                                       dev->name, priv->interruptTransmit);
-
-                       err = -1;
+                                       dev->name, grp->interruptTransmit);
                        goto err_irq_fail;
                }
        }
 
-       phy_start(priv->phydev);
-
-       /* Configure the coalescing support */
-       gfar_write(&regs->txic, 0);
-       if (priv->txcoalescing)
-               gfar_write(&regs->txic, priv->txic);
-
-       gfar_write(&regs->rxic, 0);
-       if (priv->rxcoalescing)
-               gfar_write(&regs->rxic, priv->rxic);
-
-       if (priv->rx_csum_enable)
-               rctrl |= RCTRL_CHECKSUMMING;
+       return 0;
 
-       if (priv->extended_hash) {
-               rctrl |= RCTRL_EXTHASH;
+rx_irq_fail:
+       free_irq(grp->interruptTransmit, grp);
+tx_irq_fail:
+       free_irq(grp->interruptError, grp);
+err_irq_fail:
+       return err;
 
-               gfar_clear_exact_match(dev);
-               rctrl |= RCTRL_EMEN;
-       }
+}
 
-       if (priv->padding) {
-               rctrl &= ~RCTRL_PAL_MASK;
-               rctrl |= RCTRL_PADDING(priv->padding);
-       }
+/* Bring the controller up and running */
+int startup_gfar(struct net_device *ndev)
+{
+       struct gfar_private *priv = netdev_priv(ndev);
+       struct gfar __iomem *regs = NULL;
+       int err, i, j;
 
-       /* keep vlan related bits if it's enabled */
-       if (priv->vlgrp) {
-               rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
-               tctrl |= TCTRL_VLINS;
+       for (i = 0; i < priv->num_grps; i++) {
+               regs = priv->gfargrp[i].regs;
+               gfar_write(&regs->imask, IMASK_INIT_CLEAR);
        }
 
-       /* Init rctrl based on our settings */
-       gfar_write(&priv->regs->rctrl, rctrl);
-
-       if (dev->features & NETIF_F_IP_CSUM)
-               tctrl |= TCTRL_INIT_CSUM;
-
-       gfar_write(&priv->regs->tctrl, tctrl);
-
-       /* Set the extraction length and index */
-       attrs = ATTRELI_EL(priv->rx_stash_size) |
-               ATTRELI_EI(priv->rx_stash_index);
-
-       gfar_write(&priv->regs->attreli, attrs);
-
-       /* Start with defaults, and add stashing or locking
-        * depending on the approprate variables */
-       attrs = ATTR_INIT_SETTINGS;
+       regs = priv->gfargrp[0].regs;
+       err = gfar_alloc_skb_resources(ndev);
+       if (err)
+               return err;
 
-       if (priv->bd_stash_en)
-               attrs |= ATTR_BDSTASH;
+       gfar_init_mac(ndev);
 
-       if (priv->rx_stash_size != 0)
-               attrs |= ATTR_BUFSTASH;
+       for (i = 0; i < priv->num_grps; i++) {
+               err = register_grp_irqs(&priv->gfargrp[i]);
+               if (err) {
+                       for (j = 0; j < i; j++)
+                               free_grp_irqs(&priv->gfargrp[j]);
+                       goto irq_fail;
+               }
+       }
 
-       gfar_write(&priv->regs->attr, attrs);
+       /* Start the controller */
+       gfar_start(ndev);
 
-       gfar_write(&priv->regs->fifo_tx_thr, priv->fifo_threshold);
-       gfar_write(&priv->regs->fifo_tx_starve, priv->fifo_starve);
-       gfar_write(&priv->regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
+       phy_start(priv->phydev);
 
-       /* Start the controller */
-       gfar_start(dev);
+       gfar_configure_coalescing(priv, 0xFF, 0xFF);
 
        return 0;
 
-rx_irq_fail:
-       free_irq(priv->interruptTransmit, dev);
-tx_irq_fail:
-       free_irq(priv->interruptError, dev);
-err_irq_fail:
-err_rxalloc_fail:
-rx_skb_fail:
+irq_fail:
        free_skb_resources(priv);
-tx_skb_fail:
-       dma_free_coherent(&priv->ofdev->dev,
-                       sizeof(struct txbd8)*priv->tx_ring_size
-                       + sizeof(struct rxbd8)*priv->rx_ring_size,
-                       priv->tx_bd_base,
-                       gfar_read(&regs->tbase0));
-
        return err;
 }
 
@@ -1178,7 +1945,7 @@ static int gfar_enet_open(struct net_device *dev)
        struct gfar_private *priv = netdev_priv(dev);
        int err;
 
-       napi_enable(&priv->napi);
+       enable_napi(priv);
 
        skb_queue_head_init(&priv->rx_recycle);
 
@@ -1189,18 +1956,18 @@ static int gfar_enet_open(struct net_device *dev)
 
        err = init_phy(dev);
 
-       if(err) {
-               napi_disable(&priv->napi);
+       if (err) {
+               disable_napi(priv);
                return err;
        }
 
        err = startup_gfar(dev);
        if (err) {
-               napi_disable(&priv->napi);
+               disable_napi(priv);
                return err;
        }
 
-       netif_start_queue(dev);
+       netif_tx_start_all_queues(dev);
 
        device_set_wakeup_enable(&dev->dev, priv->wol_en);
 
@@ -1269,19 +2036,46 @@ static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct gfar_private *priv = netdev_priv(dev);
+       struct gfar_priv_tx_q *tx_queue = NULL;
+       struct netdev_queue *txq;
+       struct gfar __iomem *regs = NULL;
        struct txfcb *fcb = NULL;
-       struct txbd8 *txbdp, *txbdp_start, *base;
+       struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
        u32 lstatus;
-       int i;
+       int i, rq = 0, do_tstamp = 0;
        u32 bufaddr;
        unsigned long flags;
-       unsigned int nr_frags, length;
+       unsigned int nr_frags, nr_txbds, length;
+
+       /*
+        * TOE=1 frames larger than 2500 bytes may see excess delays
+        * before start of transmission.
+        */
+       if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) &&
+                       skb->ip_summed == CHECKSUM_PARTIAL &&
+                       skb->len > 2500)) {
+               int ret;
+
+               ret = skb_checksum_help(skb);
+               if (ret)
+                       return ret;
+       }
 
-       base = priv->tx_bd_base;
+       rq = skb->queue_mapping;
+       tx_queue = priv->tx_queue[rq];
+       txq = netdev_get_tx_queue(dev, rq);
+       base = tx_queue->tx_bd_base;
+       regs = tx_queue->grp->regs;
+
+       /* check if time stamp should be generated */
+       if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+                    priv->hwts_tx_en))
+               do_tstamp = 1;
 
        /* make space for additional header when fcb is needed */
        if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
-                       (priv->vlgrp && vlan_tx_tag_present(skb))) &&
+                       vlan_tx_tag_present(skb) ||
+                       unlikely(do_tstamp)) &&
                        (skb_headroom(skb) < GMAC_FCB_LEN)) {
                struct sk_buff *skb_new;
 
@@ -1298,29 +2092,43 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
        /* total number of fragments in the SKB */
        nr_frags = skb_shinfo(skb)->nr_frags;
 
-       spin_lock_irqsave(&priv->txlock, flags);
+       /* calculate the required number of TxBDs for this skb */
+       if (unlikely(do_tstamp))
+               nr_txbds = nr_frags + 2;
+       else
+               nr_txbds = nr_frags + 1;
 
        /* check if there is space to queue this packet */
-       if ((nr_frags+1) > priv->num_txbdfree) {
+       if (nr_txbds > tx_queue->num_txbdfree) {
                /* no space, stop the queue */
-               netif_stop_queue(dev);
+               netif_tx_stop_queue(txq);
                dev->stats.tx_fifo_errors++;
-               spin_unlock_irqrestore(&priv->txlock, flags);
                return NETDEV_TX_BUSY;
        }
 
        /* Update transmit stats */
-       dev->stats.tx_bytes += skb->len;
+       tx_queue->stats.tx_bytes += skb->len;
+       tx_queue->stats.tx_packets++;
 
-       txbdp = txbdp_start = priv->cur_tx;
+       txbdp = txbdp_start = tx_queue->cur_tx;
+       lstatus = txbdp->lstatus;
+
+       /* Time stamp insertion requires one additional TxBD */
+       if (unlikely(do_tstamp))
+               txbdp_tstamp = txbdp = next_txbd(txbdp, base,
+                               tx_queue->tx_ring_size);
 
        if (nr_frags == 0) {
-               lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+               if (unlikely(do_tstamp))
+                       txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
+                                       TXBD_INTERRUPT);
+               else
+                       lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
        } else {
                /* Place the fragment addresses and lengths into the TxBDs */
                for (i = 0; i < nr_frags; i++) {
                        /* Point at the next BD, wrapping as needed */
-                       txbdp = next_txbd(txbdp, base, priv->tx_ring_size);
+                       txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
 
                        length = skb_shinfo(skb)->frags[i].size;
 
@@ -1352,7 +2160,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
                gfar_tx_checksum(skb, fcb);
        }
 
-       if (priv->vlgrp && vlan_tx_tag_present(skb)) {
+       if (vlan_tx_tag_present(skb)) {
                if (unlikely(NULL == fcb)) {
                        fcb = gfar_add_fcb(skb);
                        lstatus |= BD_LFLAG(TXBD_TOE);
@@ -1361,12 +2169,46 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
                gfar_tx_vlan(skb, fcb);
        }
 
-       /* setup the TxBD length and buffer pointer for the first BD */
-       priv->tx_skbuff[priv->skb_curtx] = skb;
+       /* Setup tx hardware time stamping if requested */
+       if (unlikely(do_tstamp)) {
+               skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+               if (fcb == NULL)
+                       fcb = gfar_add_fcb(skb);
+               fcb->ptp = 1;
+               lstatus |= BD_LFLAG(TXBD_TOE);
+       }
+
        txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
                        skb_headlen(skb), DMA_TO_DEVICE);
 
-       lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
+       /*
+        * If time stamping is requested one additional TxBD must be set up. The
+        * first TxBD points to the FCB and must have a data length of
+        * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
+        * the full frame length.
+        */
+       if (unlikely(do_tstamp)) {
+               txbdp_tstamp->bufPtr = txbdp_start->bufPtr + GMAC_FCB_LEN;
+               txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
+                               (skb_headlen(skb) - GMAC_FCB_LEN);
+               lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
+       } else {
+               lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
+       }
+
+       /*
+        * We can work in parallel with gfar_clean_tx_ring(), except
+        * when modifying num_txbdfree. Note that we didn't grab the lock
+        * when we were reading the num_txbdfree and checking for available
+        * space, that's because outside of this function it can only grow,
+        * and once we've got needed space, it cannot suddenly disappear.
+        *
+        * The lock also protects us from gfar_error(), which can modify
+        * regs->tstat and thus retrigger the transfers, which is why we
+        * also must grab the lock before setting ready bit for the first
+        * to be transmitted BD.
+        */
+       spin_lock_irqsave(&tx_queue->txlock, flags);
 
        /*
         * The powerpc-specific eieio() is used, as wmb() has too strong
@@ -1380,31 +2222,33 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        txbdp_start->lstatus = lstatus;
 
+       eieio(); /* force lstatus write before tx_skbuff */
+
+       tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
+
        /* Update the current skb pointer to the next entry we will use
         * (wrapping if necessary) */
-       priv->skb_curtx = (priv->skb_curtx + 1) &
-               TX_RING_MOD_MASK(priv->tx_ring_size);
+       tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
+               TX_RING_MOD_MASK(tx_queue->tx_ring_size);
 
-       priv->cur_tx = next_txbd(txbdp, base, priv->tx_ring_size);
+       tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
 
        /* reduce TxBD free count */
-       priv->num_txbdfree -= (nr_frags + 1);
-
-       dev->trans_start = jiffies;
+       tx_queue->num_txbdfree -= (nr_txbds);
 
        /* If the next BD still needs to be cleaned up, then the bds
           are full.  We need to tell the kernel to stop sending us stuff. */
-       if (!priv->num_txbdfree) {
-               netif_stop_queue(dev);
+       if (!tx_queue->num_txbdfree) {
+               netif_tx_stop_queue(txq);
 
                dev->stats.tx_fifo_errors++;
        }
 
        /* Tell the DMA to go go go */
-       gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
+       gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
 
        /* Unlock priv */
-       spin_unlock_irqrestore(&priv->txlock, flags);
+       spin_unlock_irqrestore(&tx_queue->txlock, flags);
 
        return NETDEV_TX_OK;
 }
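
/*
 * Resulting BD chain for a time-stamped, unfragmented frame, as set
 * up by the do_tstamp paths above (illustrative summary):
 *
 *   txbdp_start:  bufPtr -> FCB (fcb->ptp = 1)
 *                 length = GMAC_FCB_LEN
 *                 flags  = TXBD_CRC | TXBD_READY | TXBD_TOE
 *   txbdp_tstamp: bufPtr -> frame data (FCB skipped)
 *                 length = skb_headlen(skb) - GMAC_FCB_LEN
 *                 flags  = TXBD_READY | TXBD_LAST | TXBD_INTERRUPT
 *
 * Without time stamping, one BD carries the FCB and the data with
 * length = skb_headlen(skb).
 */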
@@ -1414,9 +2258,8 @@ static int gfar_close(struct net_device *dev)
 {
        struct gfar_private *priv = netdev_priv(dev);
 
-       napi_disable(&priv->napi);
+       disable_napi(priv);
 
-       skb_queue_purge(&priv->rx_recycle);
        cancel_work_sync(&priv->reset_task);
        stop_gfar(dev);
 
@@ -1424,7 +2267,7 @@ static int gfar_close(struct net_device *dev)
        phy_disconnect(priv->phydev);
        priv->phydev = NULL;
 
-       netif_stop_queue(dev);
+       netif_tx_stop_all_queues(dev);
 
        return 0;
 }
@@ -1443,50 +2286,55 @@ static void gfar_vlan_rx_register(struct net_device *dev,
                struct vlan_group *grp)
 {
        struct gfar_private *priv = netdev_priv(dev);
+       struct gfar __iomem *regs = NULL;
        unsigned long flags;
        u32 tempval;
 
-       spin_lock_irqsave(&priv->rxlock, flags);
+       regs = priv->gfargrp[0].regs;
+       local_irq_save(flags);
+       lock_rx_qs(priv);
 
        priv->vlgrp = grp;
 
        if (grp) {
                /* Enable VLAN tag insertion */
-               tempval = gfar_read(&priv->regs->tctrl);
+               tempval = gfar_read(&regs->tctrl);
                tempval |= TCTRL_VLINS;
 
-               gfar_write(&priv->regs->tctrl, tempval);
+               gfar_write(&regs->tctrl, tempval);
 
                /* Enable VLAN tag extraction */
-               tempval = gfar_read(&priv->regs->rctrl);
+               tempval = gfar_read(&regs->rctrl);
                tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
-               gfar_write(&priv->regs->rctrl, tempval);
+               gfar_write(&regs->rctrl, tempval);
        } else {
                /* Disable VLAN tag insertion */
-               tempval = gfar_read(&priv->regs->tctrl);
+               tempval = gfar_read(&regs->tctrl);
                tempval &= ~TCTRL_VLINS;
-               gfar_write(&priv->regs->tctrl, tempval);
+               gfar_write(&regs->tctrl, tempval);
 
                /* Disable VLAN tag extraction */
-               tempval = gfar_read(&priv->regs->rctrl);
+               tempval = gfar_read(&regs->rctrl);
                tempval &= ~RCTRL_VLEX;
                /* If parse is no longer required, then disable parser */
                if (tempval & RCTRL_REQ_PARSER)
                        tempval |= RCTRL_PRSDEP_INIT;
                else
                        tempval &= ~RCTRL_PRSDEP_INIT;
-               gfar_write(&priv->regs->rctrl, tempval);
+               gfar_write(&regs->rctrl, tempval);
        }
 
        gfar_change_mtu(dev, dev->mtu);
 
-       spin_unlock_irqrestore(&priv->rxlock, flags);
+       unlock_rx_qs(priv);
+       local_irq_restore(flags);
 }
 
 static int gfar_change_mtu(struct net_device *dev, int new_mtu)
 {
        int tempsize, tempval;
        struct gfar_private *priv = netdev_priv(dev);
+       struct gfar __iomem *regs = priv->gfargrp[0].regs;
        int oldsize = priv->rx_buffer_size;
        int frame_size = new_mtu + ETH_HLEN;
 
@@ -1518,20 +2366,21 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
 
        dev->mtu = new_mtu;
 
-       gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
-       gfar_write(&priv->regs->maxfrm, priv->rx_buffer_size);
+       gfar_write(&regs->mrblr, priv->rx_buffer_size);
+       gfar_write(&regs->maxfrm, priv->rx_buffer_size);
 
        /* If the mtu is larger than the max size for standard
         * ethernet frames (ie, a jumbo frame), then set maccfg2
         * to allow huge frames, and to check the length */
-       tempval = gfar_read(&priv->regs->maccfg2);
+       tempval = gfar_read(&regs->maccfg2);
 
-       if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
+       if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
+                       gfar_has_errata(priv, GFAR_ERRATA_74))
                tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
        else
                tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
 
-       gfar_write(&priv->regs->maccfg2, tempval);
+       gfar_write(&regs->maccfg2, tempval);
 
        if ((oldsize != tempsize) && (dev->flags & IFF_UP))
                startup_gfar(dev);
@@ -1551,10 +2400,10 @@ static void gfar_reset_task(struct work_struct *work)
        struct net_device *dev = priv->ndev;
 
        if (dev->flags & IFF_UP) {
-               netif_stop_queue(dev);
+               netif_tx_stop_all_queues(dev);
                stop_gfar(dev);
                startup_gfar(dev);
-               netif_start_queue(dev);
+               netif_tx_start_all_queues(dev);
        }
 
        netif_tx_schedule_all(dev);
@@ -1568,27 +2417,52 @@ static void gfar_timeout(struct net_device *dev)
        schedule_work(&priv->reset_task);
 }
 
+static void gfar_align_skb(struct sk_buff *skb)
+{
+       /* We need the data buffer to be aligned properly.  Reserve as
+        * many bytes as needed to align skb->data on an RXBUF_ALIGNMENT
+        * boundary
+        */
+       skb_reserve(skb, RXBUF_ALIGNMENT -
+               (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
+}
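
/*
 * Worked example of the reservation above, assuming RXBUF_ALIGNMENT
 * is 64 and skb->data currently ends in ...0x34:
 *
 *   offset  = 0x34 & (64 - 1) = 52
 *   reserve = 64 - 52 = 12 bytes
 *
 * so skb->data advances to the next 64-byte boundary. Note that an
 * already-aligned buffer gets a full RXBUF_ALIGNMENT reserved, which
 * is why gfar_alloc_skb() over-allocates by RXBUF_ALIGNMENT.
 */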
+
 /* Interrupt Handler for Transmit complete */
-static int gfar_clean_tx_ring(struct net_device *dev)
+static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 {
+       struct net_device *dev = tx_queue->dev;
        struct gfar_private *priv = netdev_priv(dev);
-       struct txbd8 *bdp;
+       struct gfar_priv_rx_q *rx_queue = NULL;
+       struct txbd8 *bdp, *next = NULL;
        struct txbd8 *lbdp = NULL;
-       struct txbd8 *base = priv->tx_bd_base;
+       struct txbd8 *base = tx_queue->tx_bd_base;
        struct sk_buff *skb;
        int skb_dirtytx;
-       int tx_ring_size = priv->tx_ring_size;
-       int frags = 0;
+       int tx_ring_size = tx_queue->tx_ring_size;
+       int frags = 0, nr_txbds = 0;
        int i;
        int howmany = 0;
        u32 lstatus;
+       size_t buflen;
+
+       rx_queue = priv->rx_queue[tx_queue->qindex];
+       bdp = tx_queue->dirty_tx;
+       skb_dirtytx = tx_queue->skb_dirtytx;
 
-       bdp = priv->dirty_tx;
-       skb_dirtytx = priv->skb_dirtytx;
+       while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
+               unsigned long flags;
 
-       while ((skb = priv->tx_skbuff[skb_dirtytx])) {
                frags = skb_shinfo(skb)->nr_frags;
-               lbdp = skip_txbd(bdp, frags, base, tx_ring_size);
+
+               /*
+                * When time stamping, one additional TxBD must be freed.
+                * Also, we need to dma_unmap_single() the TxPAL.
+                */
+               if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
+                       nr_txbds = frags + 2;
+               else
+                       nr_txbds = frags + 1;
+
+               lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
 
                lstatus = lbdp->lstatus;
 
@@ -1597,10 +2471,24 @@ static int gfar_clean_tx_ring(struct net_device *dev)
                                (lstatus & BD_LENGTH_MASK))
                        break;
 
-               dma_unmap_single(&priv->ofdev->dev,
-                               bdp->bufPtr,
-                               bdp->length,
-                               DMA_TO_DEVICE);
+               if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
+                       next = next_txbd(bdp, base, tx_ring_size);
+                       buflen = next->length + GMAC_FCB_LEN;
+               } else
+                       buflen = bdp->length;
+
+               dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
+                               buflen, DMA_TO_DEVICE);
+
+               if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
+                       struct skb_shared_hwtstamps shhwtstamps;
+                       u64 *ns = (u64 *)(((u32)skb->data + 0x10) & ~0x7);
+                       memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+                       shhwtstamps.hwtstamp = ns_to_ktime(*ns);
+                       skb_tstamp_tx(skb, &shhwtstamps);
+                       bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
+                       bdp = next;
+               }
 
                bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
                bdp = next_txbd(bdp, base, tx_ring_size);
@@ -1618,106 +2506,96 @@ static int gfar_clean_tx_ring(struct net_device *dev)
                 * If there's room in the recycle queue (limited to
                 * rx_ring_size entries), we add this skb back into the
                 * pool, if it's the right size
                 */
-               if (skb_queue_len(&priv->rx_recycle) < priv->rx_ring_size &&
+               if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
                                skb_recycle_check(skb, priv->rx_buffer_size +
-                                       RXBUF_ALIGNMENT))
-                       __skb_queue_head(&priv->rx_recycle, skb);
-               else
+                                       RXBUF_ALIGNMENT)) {
+                       gfar_align_skb(skb);
+                       skb_queue_head(&priv->rx_recycle, skb);
+               } else
                        dev_kfree_skb_any(skb);
 
-               priv->tx_skbuff[skb_dirtytx] = NULL;
+               tx_queue->tx_skbuff[skb_dirtytx] = NULL;
 
                skb_dirtytx = (skb_dirtytx + 1) &
                        TX_RING_MOD_MASK(tx_ring_size);
 
                howmany++;
-               priv->num_txbdfree += frags + 1;
+               spin_lock_irqsave(&tx_queue->txlock, flags);
+               tx_queue->num_txbdfree += nr_txbds;
+               spin_unlock_irqrestore(&tx_queue->txlock, flags);
        }
 
        /* If we freed a buffer, we can restart transmission, if necessary */
-       if (netif_queue_stopped(dev) && priv->num_txbdfree)
-               netif_wake_queue(dev);
+       if (__netif_subqueue_stopped(dev, tx_queue->qindex) && tx_queue->num_txbdfree)
+               netif_wake_subqueue(dev, tx_queue->qindex);
 
        /* Update dirty indicators */
-       priv->skb_dirtytx = skb_dirtytx;
-       priv->dirty_tx = bdp;
-
-       dev->stats.tx_packets += howmany;
+       tx_queue->skb_dirtytx = skb_dirtytx;
+       tx_queue->dirty_tx = bdp;
 
        return howmany;
 }
 
-static void gfar_schedule_cleanup(struct net_device *dev)
+static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
 {
-       struct gfar_private *priv = netdev_priv(dev);
        unsigned long flags;
 
-       spin_lock_irqsave(&priv->txlock, flags);
-       spin_lock(&priv->rxlock);
-
-       if (napi_schedule_prep(&priv->napi)) {
-               gfar_write(&priv->regs->imask, IMASK_RTX_DISABLED);
-               __napi_schedule(&priv->napi);
+       spin_lock_irqsave(&gfargrp->grplock, flags);
+       if (napi_schedule_prep(&gfargrp->napi)) {
+               gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
+               __napi_schedule(&gfargrp->napi);
        } else {
                /*
                 * Clear IEVENT, so interrupts aren't called again
                 * because of the packets that have already arrived.
                 */
-               gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
+               gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
        }
+       spin_unlock_irqrestore(&gfargrp->grplock, flags);
 
-       spin_unlock(&priv->rxlock);
-       spin_unlock_irqrestore(&priv->txlock, flags);
 }
 
 /* Interrupt Handler for Transmit complete */
-static irqreturn_t gfar_transmit(int irq, void *dev_id)
+static irqreturn_t gfar_transmit(int irq, void *grp_id)
 {
-       gfar_schedule_cleanup((struct net_device *)dev_id);
+       gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
        return IRQ_HANDLED;
 }
 
-static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
+static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
                struct sk_buff *skb)
 {
+       struct net_device *dev = rx_queue->dev;
        struct gfar_private *priv = netdev_priv(dev);
-       u32 lstatus;
+       dma_addr_t buf;
 
-       bdp->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
-                       priv->rx_buffer_size, DMA_FROM_DEVICE);
+       buf = dma_map_single(&priv->ofdev->dev, skb->data,
+                            priv->rx_buffer_size, DMA_FROM_DEVICE);
+       gfar_init_rxbdp(rx_queue, bdp, buf);
+}
 
-       lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
+static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
+{
+       struct gfar_private *priv = netdev_priv(dev);
+       struct sk_buff *skb = NULL;
 
-       if (bdp == priv->rx_bd_base + priv->rx_ring_size - 1)
-               lstatus |= BD_LFLAG(RXBD_WRAP);
+       skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
+       if (!skb)
+               return NULL;
 
-       eieio();
+       gfar_align_skb(skb);
 
-       bdp->lstatus = lstatus;
+       return skb;
 }
 
-
 struct sk_buff *gfar_new_skb(struct net_device *dev)
 {
-       unsigned int alignamount;
        struct gfar_private *priv = netdev_priv(dev);
        struct sk_buff *skb = NULL;
 
-       skb = __skb_dequeue(&priv->rx_recycle);
-       if (!skb)
-               skb = netdev_alloc_skb(dev,
-                               priv->rx_buffer_size + RXBUF_ALIGNMENT);
-
+       skb = skb_dequeue(&priv->rx_recycle);
        if (!skb)
-               return NULL;
-
-       alignamount = RXBUF_ALIGNMENT -
-               (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1));
-
-       /* We need the data buffer to be aligned properly.  We will reserve
-        * as many bytes as needed to align the data properly
-        */
-       skb_reserve(skb, alignamount);
+               skb = gfar_alloc_skb(dev);
 
        return skb;
 }
@@ -1760,9 +2638,9 @@ static inline void count_errors(unsigned short status, struct net_device *dev)
        }
 }
 
-irqreturn_t gfar_receive(int irq, void *dev_id)
+irqreturn_t gfar_receive(int irq, void *grp_id)
 {
-       gfar_schedule_cleanup((struct net_device *)dev_id);
+       gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
        return IRQ_HANDLED;
 }
 
@@ -1774,7 +2652,7 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
        if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
-               skb->ip_summed = CHECKSUM_NONE;
+               skb_checksum_none_assert(skb);
 }
 
 
@@ -1793,8 +2671,21 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
 
        /* Remove the FCB from the skb */
        /* Remove the padded bytes, if there are any */
-       if (amount_pull)
+       if (amount_pull) {
+               skb_record_rx_queue(skb, fcb->rq);
                skb_pull(skb, amount_pull);
+       }
+
+       /* Get receive timestamp from the skb */
+       if (priv->hwts_rx_en) {
+               struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
+               u64 *ns = (u64 *) skb->data;
+               memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+               shhwtstamps->hwtstamp = ns_to_ktime(*ns);
+       }
+
+       if (priv->padding)
+               skb_pull(skb, priv->padding);
 
        if (priv->rx_csum_enable)
                gfar_rx_checksum(skb, fcb);
@@ -1818,8 +2709,9 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
  *   until the budget/quota has been reached. Returns the number
  *   of frames handled
  */
-int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
+int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 {
+       struct net_device *dev = rx_queue->dev;
        struct rxbd8 *bdp, *base;
        struct sk_buff *skb;
        int pkt_len;
@@ -1828,11 +2720,10 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
        struct gfar_private *priv = netdev_priv(dev);
 
        /* Get the first full descriptor */
-       bdp = priv->cur_rx;
-       base = priv->rx_bd_base;
+       bdp = rx_queue->cur_rx;
+       base = rx_queue->rx_bd_base;
 
-       amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) +
-               priv->padding;
+       amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0);
 
        while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
                struct sk_buff *newskb;
@@ -1841,11 +2732,15 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
                /* Add another skb for the future */
                newskb = gfar_new_skb(dev);
 
-               skb = priv->rx_skbuff[priv->skb_currx];
+               skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
 
                dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
                                priv->rx_buffer_size, DMA_FROM_DEVICE);
 
+               if (unlikely(!(bdp->status & RXBD_ERR) &&
+                               bdp->length > priv->rx_buffer_size))
+                       bdp->status = RXBD_LARGE;
+
                /* We drop the frame if we failed to allocate a new buffer */
                if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
                                 bdp->status & RXBD_ERR)) {
@@ -1853,82 +2748,95 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
 
                        if (unlikely(!newskb))
                                newskb = skb;
-                       else if (skb) {
-                               /*
-                                * We need to reset ->data to what it
-                                * was before gfar_new_skb() re-aligned
-                                * it to an RXBUF_ALIGNMENT boundary
-                                * before we put the skb back on the
-                                * recycle list.
-                                */
-                               skb->data = skb->head + NET_SKB_PAD;
-                               __skb_queue_head(&priv->rx_recycle, skb);
-                       }
+                       else if (skb)
+                               skb_queue_head(&priv->rx_recycle, skb);
                } else {
                        /* Increment the number of packets */
-                       dev->stats.rx_packets++;
+                       rx_queue->stats.rx_packets++;
                        howmany++;
 
                        if (likely(skb)) {
                                pkt_len = bdp->length - ETH_FCS_LEN;
                                /* Remove the FCS from the packet length */
                                skb_put(skb, pkt_len);
-                               dev->stats.rx_bytes += pkt_len;
-
-                               if (in_irq() || irqs_disabled())
-                                       printk("Interrupt problem!\n");
+                               rx_queue->stats.rx_bytes += pkt_len;
+                               skb_record_rx_queue(skb, rx_queue->qindex);
                                gfar_process_frame(dev, skb, amount_pull);
 
                        } else {
                                if (netif_msg_rx_err(priv))
                                        printk(KERN_WARNING
                                               "%s: Missing skb!\n", dev->name);
-                               dev->stats.rx_dropped++;
+                               rx_queue->stats.rx_dropped++;
                                priv->extra_stats.rx_skbmissing++;
                        }
 
                }
 
-               priv->rx_skbuff[priv->skb_currx] = newskb;
+               rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
 
                /* Setup the new bdp */
-               gfar_new_rxbdp(dev, bdp, newskb);
+               gfar_new_rxbdp(rx_queue, bdp, newskb);
 
                /* Update to the next pointer */
-               bdp = next_bd(bdp, base, priv->rx_ring_size);
+               bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
 
                /* update to point at the next skb */
-               priv->skb_currx =
-                   (priv->skb_currx + 1) &
-                   RX_RING_MOD_MASK(priv->rx_ring_size);
+               rx_queue->skb_currx =
+                   (rx_queue->skb_currx + 1) &
+                   RX_RING_MOD_MASK(rx_queue->rx_ring_size);
        }
 
        /* Update the current rxbd pointer to be the next one */
-       priv->cur_rx = bdp;
+       rx_queue->cur_rx = bdp;
 
        return howmany;
 }
 
 static int gfar_poll(struct napi_struct *napi, int budget)
 {
-       struct gfar_private *priv = container_of(napi, struct gfar_private, napi);
-       struct net_device *dev = priv->ndev;
-       int tx_cleaned = 0;
-       int rx_cleaned = 0;
-       unsigned long flags;
+       struct gfar_priv_grp *gfargrp = container_of(napi,
+                       struct gfar_priv_grp, napi);
+       struct gfar_private *priv = gfargrp->priv;
+       struct gfar __iomem *regs = gfargrp->regs;
+       struct gfar_priv_tx_q *tx_queue = NULL;
+       struct gfar_priv_rx_q *rx_queue = NULL;
+       int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0;
+       int tx_cleaned = 0, i, left_over_budget = budget;
+       unsigned long serviced_queues = 0;
+       int num_queues = 0;
+
+       num_queues = gfargrp->num_rx_queues;
+       budget_per_queue = budget / num_queues;
 
        /* Clear IEVENT, so interrupts aren't called again
         * because of the packets that have already arrived */
-       gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
-
-       /* If we fail to get the lock, don't bother with the TX BDs */
-       if (spin_trylock_irqsave(&priv->txlock, flags)) {
-               tx_cleaned = gfar_clean_tx_ring(dev);
-               spin_unlock_irqrestore(&priv->txlock, flags);
+       gfar_write(&regs->ievent, IEVENT_RTX_MASK);
+
+       while (num_queues && left_over_budget) {
+
+               budget_per_queue = left_over_budget / num_queues;
+               left_over_budget = 0;
+
+               for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
+                       if (test_bit(i, &serviced_queues))
+                               continue;
+                       rx_queue = priv->rx_queue[i];
+                       tx_queue = priv->tx_queue[rx_queue->qindex];
+
+                       tx_cleaned += gfar_clean_tx_ring(tx_queue);
+                       rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
+                                                       budget_per_queue);
+                       rx_cleaned += rx_cleaned_per_queue;
+                       if (rx_cleaned_per_queue < budget_per_queue) {
+                               left_over_budget = left_over_budget +
+                                       (budget_per_queue - rx_cleaned_per_queue);
+                               set_bit(i, &serviced_queues);
+                               num_queues--;
+                       }
+               }
        }
 
-       rx_cleaned = gfar_clean_rx_ring(dev, budget);
-
        if (tx_cleaned)
                return budget;
 
@@ -1936,20 +2844,14 @@ static int gfar_poll(struct napi_struct *napi, int budget)
                napi_complete(napi);
 
                /* Clear the halt bit in RSTAT */
-               gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
+               gfar_write(&regs->rstat, gfargrp->rstat);
 
-               gfar_write(&priv->regs->imask, IMASK_DEFAULT);
+               gfar_write(&regs->imask, IMASK_DEFAULT);
 
                /* If we are coalescing interrupts, update the timer */
                /* Otherwise, clear it */
-               if (likely(priv->rxcoalescing)) {
-                       gfar_write(&priv->regs->rxic, 0);
-                       gfar_write(&priv->regs->rxic, priv->rxic);
-               }
-               if (likely(priv->txcoalescing)) {
-                       gfar_write(&priv->regs->txic, 0);
-                       gfar_write(&priv->regs->txic, priv->txic);
-               }
+               gfar_configure_coalescing(priv,
+                               gfargrp->rx_bit_map, gfargrp->tx_bit_map);
        }
 
        return rx_cleaned;
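
/*
 * Standalone sketch of the budget redistribution performed by
 * gfar_poll() above: the NAPI budget is split evenly across the
 * group's Rx queues, and whatever a drained queue leaves unused is
 * re-split among the queues that are still busy. Plain C with
 * hypothetical pending-frame counts:
 */
#include <stdio.h>

#define NQ 4

int main(void)
{
	int pending[NQ] = { 2, 50, 3, 40 };	/* frames waiting per queue */
	int serviced[NQ] = { 0 };
	int left_over = 64;			/* total NAPI budget */
	int num_queues = NQ, i;

	while (num_queues && left_over) {
		int per_queue = left_over / num_queues;

		left_over = 0;
		for (i = 0; i < NQ; i++) {
			int cleaned;

			if (serviced[i])
				continue;
			cleaned = pending[i] < per_queue ?
					pending[i] : per_queue;
			pending[i] -= cleaned;
			if (cleaned < per_queue) {
				/* drained: hand back unused budget */
				left_over += per_queue - cleaned;
				serviced[i] = 1;
				num_queues--;
			}
		}
	}
	for (i = 0; i < NQ; i++)
		printf("queue %d: %d frames still pending\n", i, pending[i]);
	return 0;
}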
@@ -1964,44 +2866,50 @@ static int gfar_poll(struct napi_struct *napi, int budget)
 static void gfar_netpoll(struct net_device *dev)
 {
        struct gfar_private *priv = netdev_priv(dev);
+       int i = 0;
 
        /* If the device has multiple interrupts, run tx/rx */
        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
-               disable_irq(priv->interruptTransmit);
-               disable_irq(priv->interruptReceive);
-               disable_irq(priv->interruptError);
-               gfar_interrupt(priv->interruptTransmit, dev);
-               enable_irq(priv->interruptError);
-               enable_irq(priv->interruptReceive);
-               enable_irq(priv->interruptTransmit);
+               for (i = 0; i < priv->num_grps; i++) {
+                       disable_irq(priv->gfargrp[i].interruptTransmit);
+                       disable_irq(priv->gfargrp[i].interruptReceive);
+                       disable_irq(priv->gfargrp[i].interruptError);
+                       gfar_interrupt(priv->gfargrp[i].interruptTransmit,
+                                               &priv->gfargrp[i]);
+                       enable_irq(priv->gfargrp[i].interruptError);
+                       enable_irq(priv->gfargrp[i].interruptReceive);
+                       enable_irq(priv->gfargrp[i].interruptTransmit);
+               }
        } else {
-               disable_irq(priv->interruptTransmit);
-               gfar_interrupt(priv->interruptTransmit, dev);
-               enable_irq(priv->interruptTransmit);
+               for (i = 0; i < priv->num_grps; i++) {
+                       disable_irq(priv->gfargrp[i].interruptTransmit);
+                       gfar_interrupt(priv->gfargrp[i].interruptTransmit,
+                                               &priv->gfargrp[i]);
+                       enable_irq(priv->gfargrp[i].interruptTransmit);
+               }
        }
 }
 #endif
 
 /* The interrupt handler for devices with one interrupt */
-static irqreturn_t gfar_interrupt(int irq, void *dev_id)
+static irqreturn_t gfar_interrupt(int irq, void *grp_id)
 {
-       struct net_device *dev = dev_id;
-       struct gfar_private *priv = netdev_priv(dev);
+       struct gfar_priv_grp *gfargrp = grp_id;
 
        /* Save ievent for future reference */
-       u32 events = gfar_read(&priv->regs->ievent);
+       u32 events = gfar_read(&gfargrp->regs->ievent);
 
        /* Check for reception */
        if (events & IEVENT_RX_MASK)
-               gfar_receive(irq, dev_id);
+               gfar_receive(irq, grp_id);
 
        /* Check for transmit completion */
        if (events & IEVENT_TX_MASK)
-               gfar_transmit(irq, dev_id);
+               gfar_transmit(irq, grp_id);
 
        /* Check for errors */
        if (events & IEVENT_ERR_MASK)
-               gfar_error(irq, dev_id);
+               gfar_error(irq, grp_id);
 
        return IRQ_HANDLED;
 }
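
Handing the handler a struct gfar_priv_grp instead of the net_device only works because the same pointer is passed as the cookie when the interrupt is registered. A sketch of the corresponding setup side, under that assumption (the name string, flags, and example_request_group_irq are illustrative; the real registration happens in the open path):

/* Illustrative only: register the shared handler with the group as
 * the cookie, so gfar_interrupt() gets a struct gfar_priv_grp back. */
static int example_request_group_irq(struct gfar_priv_grp *grp)
{
        return request_irq(grp->interruptTransmit, gfar_interrupt, 0,
                           "gianfar", grp);
}
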
@@ -2015,12 +2923,14 @@ static irqreturn_t gfar_interrupt(int irq, void *dev_id)
 static void adjust_link(struct net_device *dev)
 {
        struct gfar_private *priv = netdev_priv(dev);
-       struct gfar __iomem *regs = priv->regs;
+       struct gfar __iomem *regs = priv->gfargrp[0].regs;
        unsigned long flags;
        struct phy_device *phydev = priv->phydev;
        int new_state = 0;
 
-       spin_lock_irqsave(&priv->txlock, flags);
+       local_irq_save(flags);
+       lock_tx_qs(priv);
+
        if (phydev->link) {
                u32 tempval = gfar_read(&regs->maccfg2);
                u32 ecntrl = gfar_read(&regs->ecntrl);
@@ -2085,8 +2995,8 @@ static void adjust_link(struct net_device *dev)
 
        if (new_state && netif_msg_link(priv))
                phy_print_status(phydev);
-
-       spin_unlock_irqrestore(&priv->txlock, flags);
+       unlock_tx_qs(priv);
+       local_irq_restore(flags);
 }
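
With per-queue tx locks replacing the single priv->txlock, "lock the transmitter" now means taking every tx queue's lock. Hoisting local_irq_save() out of the loop lets it use plain spin_lock() rather than repeated irqsave variants, preserving the old single-lock semantics at lower cost. A sketch of what lock_tx_qs()/unlock_tx_qs() are assumed to do (they are defined elsewhere in this series):

/* Sketch: take every tx queue's spinlock, always in ascending index
 * order so concurrent callers cannot deadlock on lock ordering. */
static void example_lock_tx_qs(struct gfar_private *priv)
{
        int i;

        for (i = 0; i < priv->num_tx_queues; i++)
                spin_lock(&priv->tx_queue[i]->txlock);
}

static void example_unlock_tx_qs(struct gfar_private *priv)
{
        int i;

        for (i = 0; i < priv->num_tx_queues; i++)
                spin_unlock(&priv->tx_queue[i]->txlock);
}
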
 
 /* Update the hash table based on the current list of multicast
@@ -2095,12 +3005,12 @@ static void adjust_link(struct net_device *dev)
  * whenever dev->flags is changed */
 static void gfar_set_multi(struct net_device *dev)
 {
-       struct dev_mc_list *mc_ptr;
+       struct netdev_hw_addr *ha;
        struct gfar_private *priv = netdev_priv(dev);
-       struct gfar __iomem *regs = priv->regs;
+       struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 tempval;
 
-       if(dev->flags & IFF_PROMISC) {
+       if (dev->flags & IFF_PROMISC) {
                /* Set RCTRL to PROM */
                tempval = gfar_read(&regs->rctrl);
                tempval |= RCTRL_PROM;
@@ -2112,7 +3022,7 @@ static void gfar_set_multi(struct net_device *dev)
                gfar_write(&regs->rctrl, tempval);
        }
 
-       if(dev->flags & IFF_ALLMULTI) {
+       if (dev->flags & IFF_ALLMULTI) {
                /* Set the hash to rx all multicast frames */
                gfar_write(&regs->igaddr0, 0xffffffff);
                gfar_write(&regs->igaddr1, 0xffffffff);
@@ -2164,21 +3074,18 @@ static void gfar_set_multi(struct net_device *dev)
                        em_num = 0;
                }
 
-               if(dev->mc_count == 0)
+               if (netdev_mc_empty(dev))
                        return;
 
                /* Parse the list, and set the appropriate bits */
-               for(mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
+               netdev_for_each_mc_addr(ha, dev) {
                        if (idx < em_num) {
-                               gfar_set_mac_for_addr(dev, idx,
-                                               mc_ptr->dmi_addr);
+                               gfar_set_mac_for_addr(dev, idx, ha->addr);
                                idx++;
                        } else
-                               gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
+                               gfar_set_hash_for_addr(dev, ha->addr);
                }
        }
-
-       return;
 }
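
netdev_for_each_mc_addr() and netdev_mc_empty() replace the open-coded dev->mc_list walk; the filtering policy itself is unchanged: fill the exact-match registers first, then fall back to the imperfect hash filter for any overflow. Condensed into a sketch (example_walk_mc_list is illustrative, and the starting index is an assumption based on the extended-hash path reserving entry 0 for the station address):

/* Illustrative reduction of the multicast walk above. */
static void example_walk_mc_list(struct net_device *dev, int em_num)
{
        struct netdev_hw_addr *ha;
        int idx = 1;    /* assume entry 0 holds the station address */

        if (netdev_mc_empty(dev))
                return;

        netdev_for_each_mc_addr(ha, dev) {
                if (idx < em_num)
                        gfar_set_mac_for_addr(dev, idx++, ha->addr);
                else
                        gfar_set_hash_for_addr(dev, ha->addr);
        }
}
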
 
 
@@ -2187,10 +3094,10 @@ static void gfar_set_multi(struct net_device *dev)
 static void gfar_clear_exact_match(struct net_device *dev)
 {
        int idx;
-       u8 zero_arr[MAC_ADDR_LEN] = {0,0,0,0,0,0};
+       static const u8 zero_arr[MAC_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
 
        for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
-               gfar_set_mac_for_addr(dev, idx, (u8 *)zero_arr);
+               gfar_set_mac_for_addr(dev, idx, zero_arr);
 }
 
 /* Set the appropriate hash bit for the given addr */
@@ -2219,21 +3126,21 @@ static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
        tempval = gfar_read(priv->hash_regs[whichreg]);
        tempval |= value;
        gfar_write(priv->hash_regs[whichreg], tempval);
-
-       return;
 }
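
The elided first half of gfar_set_hash_for_addr() derives whichreg and whichbit from a CRC of the address. A sketch of that derivation, assuming the stock ether_crc() helper and MSB-first indexing (the exact field widths are assumptions; check the full source):

/* Sketch: hash the address and split the top CRC bits into a register
 * index and a bit position within that 32-bit register. */
static u32 example_hash_slot(struct gfar_private *priv, const u8 *addr,
                             u8 *whichreg)
{
        u32 result = ether_crc(MAC_ADDR_LEN, addr);
        int width = priv->hash_width;   /* total filter bits */
        u8 whichbit = (result >> (32 - width)) & 0x1f;

        *whichreg = result >> (32 - width + 5);
        return 1U << (31 - whichbit);   /* bit value for the register */
}
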
 
 
 /* There are multiple MAC address register pairs on some controllers.
  * This function sets the num'th pair to the given address.
  */
-static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
+static void gfar_set_mac_for_addr(struct net_device *dev, int num,
+                                 const u8 *addr)
 {
        struct gfar_private *priv = netdev_priv(dev);
+       struct gfar __iomem *regs = priv->gfargrp[0].regs;
        int idx;
        char tmpbuf[MAC_ADDR_LEN];
        u32 tempval;
-       u32 __iomem *macptr = &priv->regs->macstnaddr1;
+       u32 __iomem *macptr = &regs->macstnaddr1;
 
        macptr += num*2;
 
@@ -2250,16 +3157,18 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
 }
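
The elided body byte-reverses the address into tmpbuf before issuing two 32-bit register writes. An equivalent sketch that builds the words explicitly, assuming the big-endian SoCs this driver targets (example_set_exact_match is an illustrative name, and building the words directly avoids reading past a 6-byte buffer):

/* Sketch: the exact-match registers take the address byte-reversed,
 * split across two 32-bit writes (low word first). */
static void example_set_exact_match(u32 __iomem *macptr, const u8 *addr)
{
        u32 lo = ((u32)addr[5] << 24) | (addr[4] << 16) |
                 (addr[3] << 8) | addr[2];
        u32 hi = ((u32)addr[1] << 24) | (addr[0] << 16);

        gfar_write(macptr, lo);
        gfar_write(macptr + 1, hi);
}
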
 
 /* GFAR error interrupt handler */
-static irqreturn_t gfar_error(int irq, void *dev_id)
+static irqreturn_t gfar_error(int irq, void *grp_id)
 {
-       struct net_device *dev = dev_id;
-       struct gfar_private *priv = netdev_priv(dev);
+       struct gfar_priv_grp *gfargrp = grp_id;
+       struct gfar __iomem *regs = gfargrp->regs;
+       struct gfar_private *priv = gfargrp->priv;
+       struct net_device *dev = priv->ndev;
 
        /* Save ievent for future reference */
-       u32 events = gfar_read(&priv->regs->ievent);
+       u32 events = gfar_read(&regs->ievent);
 
        /* Clear IEVENT */
-       gfar_write(&priv->regs->ievent, events & IEVENT_ERR_MASK);
+       gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
 
        /* Magic Packet is not an error. */
        if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
@@ -2269,7 +3178,7 @@ static irqreturn_t gfar_error(int irq, void *dev_id)
        /* Log unexpected error events when error messaging is enabled */
        if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
                printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
-                      dev->name, events, gfar_read(&priv->regs->imask));
+                      dev->name, events, gfar_read(&regs->imask));
 
        /* Update the error counters */
        if (events & IEVENT_TXE) {
@@ -2280,14 +3189,22 @@ static irqreturn_t gfar_error(int irq, void *dev_id)
                if (events & IEVENT_CRL)
                        dev->stats.tx_aborted_errors++;
                if (events & IEVENT_XFUN) {
+                       unsigned long flags;
+
                        if (netif_msg_tx_err(priv))
                                printk(KERN_DEBUG "%s: TX FIFO underrun, "
                                       "packet dropped.\n", dev->name);
                        dev->stats.tx_dropped++;
                        priv->extra_stats.tx_underrun++;
 
+                       local_irq_save(flags);
+                       lock_tx_qs(priv);
+
                        /* Reactivate the Tx Queues */
-                       gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
+                       gfar_write(&regs->tstat, gfargrp->tstat);
+
+                       unlock_tx_qs(priv);
+                       local_irq_restore(flags);
                }
                if (netif_msg_tx_err(priv))
                        printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
@@ -2296,11 +3213,11 @@ static irqreturn_t gfar_error(int irq, void *dev_id)
                dev->stats.rx_errors++;
                priv->extra_stats.rx_bsy++;
 
-               gfar_receive(irq, dev_id);
+               gfar_receive(irq, grp_id);
 
                if (netif_msg_rx_err(priv))
                        printk(KERN_DEBUG "%s: busy error (rstat: %x)\n",
-                              dev->name, gfar_read(&priv->regs->rstat));
+                              dev->name, gfar_read(&regs->rstat));
        }
        if (events & IEVENT_BABR) {
                dev->stats.rx_errors++;
@@ -2325,27 +3242,29 @@ static irqreturn_t gfar_error(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:fsl-gianfar");
-
 static struct of_device_id gfar_match[] =
 {
        {
                .type = "network",
                .compatible = "gianfar",
        },
+       {
+               .compatible = "fsl,etsec2",
+       },
        {},
 };
+MODULE_DEVICE_TABLE(of, gfar_match);
 
 /* Structure for a device driver */
 static struct of_platform_driver gfar_driver = {
-       .name = "fsl-gianfar",
-       .match_table = gfar_match,
-
+       .driver = {
+               .name = "fsl-gianfar",
+               .owner = THIS_MODULE,
+               .pm = GFAR_PM_OPS,
+               .of_match_table = gfar_match,
+       },
        .probe = gfar_probe,
        .remove = gfar_remove,
-       .suspend = gfar_suspend,
-       .resume = gfar_resume,
 };
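
With the match table and PM ops moved under the .driver substructure, module init and exit reduce to the usual of_platform register/unregister pair; the real gfar_init() begins just below. A sketch under that assumption (example_init/example_exit are illustrative names):

/* Sketch: standard registration boilerplate for an of_platform
 * driver of this era. */
static int __init example_init(void)
{
        return of_register_platform_driver(&gfar_driver);
}

static void __exit example_exit(void)
{
        of_unregister_platform_driver(&gfar_driver);
}
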
 
 static int __init gfar_init(void)