Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
David S. Miller [Mon, 9 Nov 2009 07:00:54 +0000 (23:00 -0800)]
Conflicts:
drivers/net/can/usb/ems_usb.c

12 files changed:
1  2 
drivers/net/benet/be.h
drivers/net/benet/be_ethtool.c
drivers/net/benet/be_main.c
drivers/net/can/dev.c
drivers/net/can/usb/ems_usb.c
drivers/net/fsl_pq_mdio.c
drivers/net/ixgbe/ixgbe_main.c
drivers/net/qlge/qlge_main.c
drivers/net/qlge/qlge_mpi.c
drivers/net/wireless/ipw2x00/ipw2200.c
net/can/bcm.c
net/ipv4/ipip.c

diff --combined drivers/net/benet/be.h
  #define DRV_VER                       "2.101.205"
  #define DRV_NAME              "be2net"
  #define BE_NAME                       "ServerEngines BladeEngine2 10Gbps NIC"
 +#define BE3_NAME              "ServerEngines BladeEngine3 10Gbps NIC"
  #define OC_NAME                       "Emulex OneConnect 10Gbps NIC"
 +#define OC_NAME1              "Emulex OneConnect 10Gbps NIC (be3)"
  #define DRV_DESC              BE_NAME "Driver"
  
  #define BE_VENDOR_ID          0x19a2
  #define BE_DEVICE_ID1         0x211
 +#define BE_DEVICE_ID2         0x221
  #define OC_DEVICE_ID1         0x700
  #define OC_DEVICE_ID2         0x701
 +#define OC_DEVICE_ID3         0x710
  
  static inline char *nic_name(struct pci_dev *pdev)
  {
 -      if (pdev->device == OC_DEVICE_ID1 || pdev->device == OC_DEVICE_ID2)
 +      switch (pdev->device) {
 +      case OC_DEVICE_ID1:
 +      case OC_DEVICE_ID2:
                return OC_NAME;
 -      else
 +      case OC_DEVICE_ID3:
 +              return OC_NAME1;
 +      case BE_DEVICE_ID2:
 +              return BE3_NAME;
 +      default:
                return BE_NAME;
 +      }
  }
  
  /* Number of bytes of an RX frame that are copied to skb->data */
@@@ -192,6 -181,7 +192,6 @@@ struct be_drvr_stats 
  
  struct be_stats_obj {
        struct be_drvr_stats drvr_stats;
 -      struct net_device_stats net_stats;
        struct be_dma_mem cmd;
  };
  
@@@ -269,6 -259,8 +269,8 @@@ struct be_adapter 
        u32 port_num;
        bool promiscuous;
        u32 cap;
+       u32 rx_fc;              /* Rx flow control */
+       u32 tx_fc;              /* Tx flow control */
  };
  
  extern const struct ethtool_ops be_ethtool_ops;
@@@ -234,7 -234,7 +234,7 @@@ be_get_ethtool_stats(struct net_device 
        struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats *port_stats =
                        &rxf_stats->port[adapter->port_num];
 -      struct net_device_stats *net_stats = &adapter->stats.net_stats;
 +      struct net_device_stats *net_stats = &netdev->stats;
        struct be_erx_stats *erx_stats = &hw_stats->erx;
        void *p = NULL;
        int i;
@@@ -281,55 -281,16 +281,55 @@@ be_get_stat_strings(struct net_device *
        }
  }
  
 -static int be_get_stats_count(struct net_device *netdev)
 +static int be_get_sset_count(struct net_device *netdev, int stringset)
  {
 -      return ETHTOOL_STATS_NUM;
 +      switch (stringset) {
 +      case ETH_SS_STATS:
 +              return ETHTOOL_STATS_NUM;
 +      default:
 +              return -EINVAL;
 +      }
  }
  
  static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
  {
 -      ecmd->speed = SPEED_10000;
 +      struct be_adapter *adapter = netdev_priv(netdev);
 +      u8 mac_speed = 0, connector = 0;
 +      u16 link_speed = 0;
 +      bool link_up = false;
 +
 +      be_cmd_link_status_query(adapter, &link_up, &mac_speed, &link_speed);
 +
 +      /* link_speed is in units of 10 Mbps */
 +      if (link_speed) {
 +              ecmd->speed = link_speed*10;
 +      } else {
 +              switch (mac_speed) {
 +              case PHY_LINK_SPEED_1GBPS:
 +                      ecmd->speed = SPEED_1000;
 +                      break;
 +              case PHY_LINK_SPEED_10GBPS:
 +                      ecmd->speed = SPEED_10000;
 +                      break;
 +              }
 +      }
        ecmd->duplex = DUPLEX_FULL;
        ecmd->autoneg = AUTONEG_DISABLE;
 +      ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_TP);
 +
 +      be_cmd_read_port_type(adapter, adapter->port_num, &connector);
 +      switch (connector) {
 +      case 7:
 +              ecmd->port = PORT_FIBRE;
 +              break;
 +      default:
 +              ecmd->port = PORT_TP;
 +              break;
 +      }
 +
 +      ecmd->phy_address = adapter->port_num;
 +      ecmd->transceiver = XCVR_INTERNAL;
 +
        return 0;
  }
  
@@@ -362,45 -323,18 +362,47 @@@ be_set_pauseparam(struct net_device *ne
  
        if (ecmd->autoneg != 0)
                return -EINVAL;
+       adapter->tx_fc = ecmd->tx_pause;
+       adapter->rx_fc = ecmd->rx_pause;
  
-       status = be_cmd_set_flow_control(adapter, ecmd->tx_pause,
-                       ecmd->rx_pause);
-       if (!status)
+       status = be_cmd_set_flow_control(adapter,
+                                       adapter->tx_fc, adapter->rx_fc);
+       if (status)
                dev_warn(&adapter->pdev->dev, "Pause param set failed.\n");
  
        return status;
  }
  
  static int
 +be_phys_id(struct net_device *netdev, u32 data)
 +{
 +      struct be_adapter *adapter = netdev_priv(netdev);
 +      int status;
 +      u32 cur;
 +
 +      if (!netif_running(netdev))
 +              return 0;
 +
 +      be_cmd_get_beacon_state(adapter, adapter->port_num, &cur);
 +
 +      if (cur == BEACON_STATE_ENABLED)
 +              return 0;
 +
 +      if (data < 2)
 +              data = 2;
 +
 +      status = be_cmd_set_beacon_state(adapter, adapter->port_num, 0, 0,
 +                      BEACON_STATE_ENABLED);
 +      set_current_state(TASK_INTERRUPTIBLE);
 +      schedule_timeout(data*HZ);
 +
 +      status = be_cmd_set_beacon_state(adapter, adapter->port_num, 0, 0,
 +                      BEACON_STATE_DISABLED);
 +
 +      return status;
 +}
 +
 +static int
  be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
  {
        struct be_adapter *adapter = netdev_priv(netdev);
@@@ -432,8 -366,7 +434,8 @@@ const struct ethtool_ops be_ethtool_op
        .get_tso = ethtool_op_get_tso,
        .set_tso = ethtool_op_set_tso,
        .get_strings = be_get_stat_strings,
 -      .get_stats_count = be_get_stats_count,
 +      .phys_id = be_phys_id,
 +      .get_sset_count = be_get_sset_count,
        .get_ethtool_stats = be_get_ethtool_stats,
        .flash_device = be_do_flash,
  };
@@@ -31,10 -31,8 +31,10 @@@ MODULE_PARM_DESC(rx_frag_size, "Size o
  
  static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
 +      { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
 +      { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
        { 0 }
  };
  MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@@ -143,7 -141,7 +143,7 @@@ void netdev_stats_update(struct be_adap
        struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats *port_stats =
                        &rxf_stats->port[adapter->port_num];
 -      struct net_device_stats *dev_stats = &adapter->stats.net_stats;
 +      struct net_device_stats *dev_stats = &adapter->netdev->stats;
        struct be_erx_stats *erx_stats = &hw_stats->erx;
  
        dev_stats->rx_packets = port_stats->rx_total_frames;
@@@ -271,7 -269,9 +271,7 @@@ static void be_rx_eqd_update(struct be_
  
  static struct net_device_stats *be_get_stats(struct net_device *dev)
  {
 -      struct be_adapter *adapter = netdev_priv(dev);
 -
 -      return &adapter->stats.net_stats;
 +      return &dev->stats;
  }
  
  static u32 be_calc_rate(u64 bytes, unsigned long ticks)
@@@ -758,7 -758,7 +758,7 @@@ static void be_rx_compl_process(struct 
        if ((adapter->cap == 0x400) && !vtm)
                vlanf = 0;
  
 -      skb = netdev_alloc_skb(adapter->netdev, BE_HDR_LEN + NET_IP_ALIGN);
 +      skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
        if (!skb) {
                if (net_ratelimit())
                        dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
                return;
        }
  
 -      skb_reserve(skb, NET_IP_ALIGN);
 -
        skb_fill_rx_data(adapter, skb, rxcp);
  
        if (do_pkt_csum(rxcp, adapter->rx_csum))
@@@ -1588,8 -1590,6 +1588,8 @@@ static int be_open(struct net_device *n
        struct be_eq_obj *tx_eq = &adapter->tx_eq;
        bool link_up;
        int status;
 +      u8 mac_speed;
 +      u16 link_speed;
  
        /* First time posting */
        be_post_rx_frags(adapter);
        /* Rx compl queue may be in unarmed state; rearm it */
        be_cq_notify(adapter, adapter->rx_obj.cq.id, true, 0);
  
 -      status = be_cmd_link_status_query(adapter, &link_up);
 +      status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
 +                      &link_speed);
        if (status)
-               return status;
+               goto ret_sts;
        be_link_status_update(adapter, link_up);
  
+       status = be_vid_config(adapter);
+       if (status)
+               goto ret_sts;
+       status = be_cmd_set_flow_control(adapter,
+                                       adapter->tx_fc, adapter->rx_fc);
+       if (status)
+               goto ret_sts;
        schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
-       return 0;
+ ret_sts:
+       return status;
  }
  
  static int be_setup(struct be_adapter *adapter)
        if (status != 0)
                goto rx_qs_destroy;
  
-       status = be_vid_config(adapter);
-       if (status != 0)
-               goto mccqs_destroy;
-       status = be_cmd_set_flow_control(adapter, true, true);
-       if (status != 0)
-               goto mccqs_destroy;
        return 0;
  
- mccqs_destroy:
-       be_mcc_queues_destroy(adapter);
  rx_qs_destroy:
        be_rx_queues_destroy(adapter);
  tx_qs_destroy:
@@@ -1910,6 -1910,10 +1911,10 @@@ static void be_netdev_init(struct net_d
  
        adapter->rx_csum = true;
  
+       /* Default settings for Rx and Tx flow control */
+       adapter->rx_fc = true;
+       adapter->tx_fc = true;
        netif_set_gso_max_size(netdev, 65535);
  
        BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
@@@ -2172,6 -2176,7 +2177,7 @@@ static int be_suspend(struct pci_dev *p
                be_close(netdev);
                rtnl_unlock();
        }
+       be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
        be_clear(adapter);
  
        pci_save_state(pdev);
diff --combined drivers/net/can/dev.c
@@@ -245,7 -245,7 +245,7 @@@ static void can_flush_echo_skb(struct n
        struct net_device_stats *stats = &dev->stats;
        int i;
  
 -      for (i = 0; i < CAN_ECHO_SKB_MAX; i++) {
 +      for (i = 0; i < priv->echo_skb_max; i++) {
                if (priv->echo_skb[i]) {
                        kfree_skb(priv->echo_skb[i]);
                        priv->echo_skb[i] = NULL;
   * of the device driver. The driver must protect access to
   * priv->echo_skb, if necessary.
   */
 -void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, int idx)
 +void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
 +                    unsigned int idx)
  {
        struct can_priv *priv = netdev_priv(dev);
  
 +      BUG_ON(idx >= priv->echo_skb_max);
 +
        /* check flag whether this packet has to be looped back */
        if (!(dev->flags & IFF_ECHO) || skb->pkt_type != PACKET_LOOPBACK) {
                kfree_skb(skb);
@@@ -314,12 -311,10 +314,12 @@@ EXPORT_SYMBOL_GPL(can_put_echo_skb)
   * is handled in the device driver. The driver must protect
   * access to priv->echo_skb, if necessary.
   */
 -void can_get_echo_skb(struct net_device *dev, int idx)
 +void can_get_echo_skb(struct net_device *dev, unsigned int idx)
  {
        struct can_priv *priv = netdev_priv(dev);
  
 +      BUG_ON(idx >= priv->echo_skb_max);
 +
        if (priv->echo_skb[idx]) {
                netif_rx(priv->echo_skb[idx]);
                priv->echo_skb[idx] = NULL;
@@@ -332,12 -327,10 +332,12 @@@ EXPORT_SYMBOL_GPL(can_get_echo_skb)
    *
    * The function is typically called when TX failed.
    */
 -void can_free_echo_skb(struct net_device *dev, int idx)
 +void can_free_echo_skb(struct net_device *dev, unsigned int idx)
  {
        struct can_priv *priv = netdev_priv(dev);
  
 +      BUG_ON(idx >= priv->echo_skb_max);
 +
        if (priv->echo_skb[idx]) {
                kfree_skb(priv->echo_skb[idx]);
                priv->echo_skb[idx] = NULL;
@@@ -366,12 -359,17 +366,12 @@@ void can_restart(unsigned long data
        can_flush_echo_skb(dev);
  
        /* send restart message upstream */
 -      skb = dev_alloc_skb(sizeof(struct can_frame));
 +      skb = alloc_can_err_skb(dev, &cf);
        if (skb == NULL) {
                err = -ENOMEM;
                goto restart;
        }
 -      skb->dev = dev;
 -      skb->protocol = htons(ETH_P_CAN);
 -      cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
 -      memset(cf, 0, sizeof(struct can_frame));
 -      cf->can_id = CAN_ERR_FLAG | CAN_ERR_RESTARTED;
 -      cf->can_dlc = CAN_ERR_DLC;
 +      cf->can_id |= CAN_ERR_RESTARTED;
  
        netif_rx(skb);
  
@@@ -444,66 -442,20 +444,66 @@@ static void can_setup(struct net_devic
        dev->features = NETIF_F_NO_CSUM;
  }
  
 +struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
 +{
 +      struct sk_buff *skb;
 +
 +      skb = netdev_alloc_skb(dev, sizeof(struct can_frame));
 +      if (unlikely(!skb))
 +              return NULL;
 +
 +      skb->protocol = htons(ETH_P_CAN);
 +      skb->pkt_type = PACKET_BROADCAST;
 +      skb->ip_summed = CHECKSUM_UNNECESSARY;
 +      *cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
 +      memset(*cf, 0, sizeof(struct can_frame));
 +
 +      return skb;
 +}
 +EXPORT_SYMBOL_GPL(alloc_can_skb);
 +
 +struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf)
 +{
 +      struct sk_buff *skb;
 +
 +      skb = alloc_can_skb(dev, cf);
 +      if (unlikely(!skb))
 +              return NULL;
 +
 +      (*cf)->can_id = CAN_ERR_FLAG;
 +      (*cf)->can_dlc = CAN_ERR_DLC;
 +
 +      return skb;
 +}
 +EXPORT_SYMBOL_GPL(alloc_can_err_skb);
 +
  /*
   * Allocate and setup space for the CAN network device
   */
 -struct net_device *alloc_candev(int sizeof_priv)
 +struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max)
  {
        struct net_device *dev;
        struct can_priv *priv;
 +      int size;
  
 -      dev = alloc_netdev(sizeof_priv, "can%d", can_setup);
 +      if (echo_skb_max)
 +              size = ALIGN(sizeof_priv, sizeof(struct sk_buff *)) +
 +                      echo_skb_max * sizeof(struct sk_buff *);
 +      else
 +              size = sizeof_priv;
 +
 +      dev = alloc_netdev(size, "can%d", can_setup);
        if (!dev)
                return NULL;
  
        priv = netdev_priv(dev);
  
 +      if (echo_skb_max) {
 +              priv->echo_skb_max = echo_skb_max;
 +              priv->echo_skb = (void *)priv +
 +                      ALIGN(sizeof_priv, sizeof(struct sk_buff *));
 +      }
 +
        priv->state = CAN_STATE_STOPPED;
  
        init_timer(&priv->restart_timer);
@@@ -637,6 -589,22 +637,22 @@@ static int can_changelink(struct net_de
        return 0;
  }
  
+ static size_t can_get_size(const struct net_device *dev)
+ {
+       struct can_priv *priv = netdev_priv(dev);
+       size_t size;
+       size = nla_total_size(sizeof(u32));   /* IFLA_CAN_STATE */
+       size += sizeof(struct can_ctrlmode);  /* IFLA_CAN_CTRLMODE */
+       size += nla_total_size(sizeof(u32));  /* IFLA_CAN_RESTART_MS */
+       size += sizeof(struct can_bittiming); /* IFLA_CAN_BITTIMING */
+       size += sizeof(struct can_clock);     /* IFLA_CAN_CLOCK */
+       if (priv->bittiming_const)            /* IFLA_CAN_BITTIMING_CONST */
+               size += sizeof(struct can_bittiming_const);
+       return size;
+ }
  static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
  {
        struct can_priv *priv = netdev_priv(dev);
@@@ -674,7 -642,7 +690,7 @@@ nla_put_failure
        return -EMSGSIZE;
  }
  
 -static int can_newlink(struct net_device *dev,
 +static int can_newlink(struct net *src_net, struct net_device *dev,
                       struct nlattr *tb[], struct nlattr *data[])
  {
        return -EOPNOTSUPP;
@@@ -687,6 -655,7 +703,7 @@@ static struct rtnl_link_ops can_link_op
        .setup          = can_setup,
        .newlink        = can_newlink,
        .changelink     = can_changelink,
+       .get_size       = can_get_size,
        .fill_info      = can_fill_info,
        .fill_xstats    = can_fill_xstats,
  };
@@@ -232,7 -232,7 +232,7 @@@ MODULE_DEVICE_TABLE(usb, ems_usb_table)
  #define INTR_IN_BUFFER_SIZE 4
  
  #define MAX_RX_URBS 10
 -#define MAX_TX_URBS CAN_ECHO_SKB_MAX
 +#define MAX_TX_URBS 10
  
  struct ems_usb;
  
@@@ -311,11 -311,15 +311,11 @@@ static void ems_usb_rx_can_msg(struct e
        int i;
        struct net_device_stats *stats = &dev->netdev->stats;
  
 -      skb = netdev_alloc_skb(dev->netdev, sizeof(struct can_frame));
 +      skb = alloc_can_skb(dev->netdev, &cf);
        if (skb == NULL)
                return;
  
-       cf->can_id = msg->msg.can_msg.id;
 -      skb->protocol = htons(ETH_P_CAN);
 -
 -      cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
 -
+       cf->can_id = le32_to_cpu(msg->msg.can_msg.id);
        cf->can_dlc = min_t(u8, msg->msg.can_msg.length, 8);
  
        if (msg->type == CPC_MSG_TYPE_EXT_CAN_FRAME
@@@ -342,10 -346,18 +342,10 @@@ static void ems_usb_rx_err(struct ems_u
        struct sk_buff *skb;
        struct net_device_stats *stats = &dev->netdev->stats;
  
 -      skb = netdev_alloc_skb(dev->netdev, sizeof(struct can_frame));
 +      skb = alloc_can_err_skb(dev->netdev, &cf);
        if (skb == NULL)
                return;
  
 -      skb->protocol = htons(ETH_P_CAN);
 -
 -      cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
 -      memset(cf, 0, sizeof(struct can_frame));
 -
 -      cf->can_id = CAN_ERR_FLAG;
 -      cf->can_dlc = CAN_ERR_DLC;
 -
        if (msg->type == CPC_MSG_TYPE_CAN_STATE) {
                u8 state = msg->msg.can_state;
  
@@@ -801,6 -813,9 +801,9 @@@ static netdev_tx_t ems_usb_start_xmit(s
                msg->length = CPC_CAN_MSG_MIN_SIZE + cf->can_dlc;
        }
  
+       /* Respect byte order */
+       msg->msg.can_msg.id = cpu_to_le32(msg->msg.can_msg.id);
        for (i = 0; i < MAX_TX_URBS; i++) {
                if (dev->tx_contexts[i].echo_index == MAX_TX_URBS) {
                        context = &dev->tx_contexts[i];
@@@ -1000,7 -1015,7 +1003,7 @@@ static int ems_usb_probe(struct usb_int
        struct ems_usb *dev;
        int i, err = -ENOMEM;
  
 -      netdev = alloc_candev(sizeof(struct ems_usb));
 +      netdev = alloc_candev(sizeof(struct ems_usb), MAX_TX_URBS);
        if (!netdev) {
                dev_err(netdev->dev.parent, "Couldn't alloc candev\n");
                return -ENOMEM;
@@@ -3,9 -3,8 +3,9 @@@
   * Provides Bus interface for MIIM regs
   *
   * Author: Andy Fleming <afleming@freescale.com>
 + * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
   *
 - * Copyright (c) 2002-2004,2008 Freescale Semiconductor, Inc.
 + * Copyright 2002-2004, 2008-2009 Freescale Semiconductor, Inc.
   *
   * Based on gianfar_mii.c and ucc_geth_mii.c (Li Yang, Kim Phillips)
   *
@@@ -103,18 -102,13 +103,18 @@@ int fsl_pq_local_mdio_read(struct fsl_p
        return value;
  }
  
 +static struct fsl_pq_mdio __iomem *fsl_pq_mdio_get_regs(struct mii_bus *bus)
 +{
 +      return (void __iomem __force *)bus->priv;
 +}
 +
  /*
   * Write value to the PHY at mii_id at register regnum,
   * on the bus, waiting until the write is done before returning.
   */
  int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value)
  {
 -      struct fsl_pq_mdio __iomem *regs = (void __iomem *)bus->priv;
 +      struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
  
        /* Write to the local MII regs */
        return(fsl_pq_local_mdio_write(regs, mii_id, regnum, value));
   */
  int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
  {
 -      struct fsl_pq_mdio __iomem *regs = (void __iomem *)bus->priv;
 +      struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
  
        /* Read the local MII regs */
        return(fsl_pq_local_mdio_read(regs, mii_id, regnum));
  /* Reset the MIIM registers, and wait for the bus to free */
  static int fsl_pq_mdio_reset(struct mii_bus *bus)
  {
 -      struct fsl_pq_mdio __iomem *regs = (void __iomem *)bus->priv;
 +      struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
        int timeout = PHY_INIT_TIMEOUT;
  
        mutex_lock(&bus->mdio_lock);
@@@ -195,29 -189,19 +195,29 @@@ static int fsl_pq_mdio_find_free(struc
  
  
  #if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
 -static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs)
 +static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np)
  {
        struct gfar __iomem *enet_regs;
 +      u32 __iomem *ioremap_tbipa;
 +      u64 addr, size;
  
        /*
         * This is mildly evil, but so is our hardware for doing this.
         * Also, we have to cast back to struct gfar because of
         * definition weirdness done in gianfar.h.
         */
 -      enet_regs = (struct gfar __iomem *)
 -              ((char __iomem *)regs - offsetof(struct gfar, gfar_mii_regs));
 -
 -      return &enet_regs->tbipa;
 +      if(of_device_is_compatible(np, "fsl,gianfar-mdio") ||
 +              of_device_is_compatible(np, "fsl,gianfar-tbi") ||
 +              of_device_is_compatible(np, "gianfar")) {
 +              enet_regs = (struct gfar __iomem *)regs;
 +              return &enet_regs->tbipa;
 +      } else if (of_device_is_compatible(np, "fsl,etsec2-mdio") ||
 +                      of_device_is_compatible(np, "fsl,etsec2-tbi")) {
 +              addr = of_translate_address(np, of_get_address(np, 1, &size, NULL));
 +              ioremap_tbipa = ioremap(addr, size);
 +              return ioremap_tbipa;
 +      } else
 +              return NULL;
  }
  #endif
  
@@@ -266,12 -250,11 +266,12 @@@ static int fsl_pq_mdio_probe(struct of_
  {
        struct device_node *np = ofdev->node;
        struct device_node *tbi;
 -      struct fsl_pq_mdio __iomem *regs;
 +      struct fsl_pq_mdio __iomem *regs = NULL;
 +      void __iomem *map;
        u32 __iomem *tbipa;
        struct mii_bus *new_bus;
        int tbiaddr = -1;
 -      u64 addr, size;
 +      u64 addr = 0, size = 0;
        int err = 0;
  
        new_bus = mdiobus_alloc();
  
        /* Set the PHY base address */
        addr = of_translate_address(np, of_get_address(np, 0, &size, NULL));
 -      regs = ioremap(addr, size);
 -
 -      if (NULL == regs) {
 +      map = ioremap(addr, size);
 +      if (!map) {
                err = -ENOMEM;
                goto err_free_bus;
        }
  
 +      if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
 +                      of_device_is_compatible(np, "fsl,gianfar-tbi") ||
 +                      of_device_is_compatible(np, "fsl,ucc-mdio") ||
 +                      of_device_is_compatible(np, "ucc_geth_phy"))
 +              map -= offsetof(struct fsl_pq_mdio, miimcfg);
 +      regs = map;
 +
        new_bus->priv = (void __force *)regs;
  
        new_bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
  
        if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
                        of_device_is_compatible(np, "fsl,gianfar-tbi") ||
 +                      of_device_is_compatible(np, "fsl,etsec2-mdio") ||
 +                      of_device_is_compatible(np, "fsl,etsec2-tbi") ||
                        of_device_is_compatible(np, "gianfar")) {
  #if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
 -              tbipa = get_gfar_tbipa(regs);
 +              tbipa = get_gfar_tbipa(regs, np);
 +              if (!tbipa) {
 +                      err = -EINVAL;
 +                      goto err_free_irqs;
 +              }
  #else
                err = -ENODEV;
                goto err_free_irqs;
@@@ -409,7 -380,7 +409,7 @@@ static int fsl_pq_mdio_remove(struct of
  
        dev_set_drvdata(device, NULL);
  
 -      iounmap((void __iomem *)bus->priv);
 +      iounmap(fsl_pq_mdio_get_regs(bus));
        bus->priv = NULL;
        mdiobus_free(bus);
  
@@@ -434,12 -405,6 +434,12 @@@ static struct of_device_id fsl_pq_mdio_
        {
                .compatible = "fsl,gianfar-mdio",
        },
 +      {
 +              .compatible = "fsl,etsec2-tbi",
 +      },
 +      {
 +              .compatible = "fsl,etsec2-mdio",
 +      },
        {},
  };
  MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match);
@@@ -462,3 -427,4 +462,4 @@@ void fsl_pq_mdio_exit(void
        of_unregister_platform_driver(&fsl_pq_mdio_driver);
  }
  module_exit(fsl_pq_mdio_exit);
+ MODULE_LICENSE("GPL");
@@@ -44,6 -44,7 +44,7 @@@
  
  #include "ixgbe.h"
  #include "ixgbe_common.h"
+ #include "ixgbe_dcb_82599.h"
  
  char ixgbe_driver_name[] = "ixgbe";
  static const char ixgbe_driver_string[] =
@@@ -97,8 -98,6 +98,8 @@@ static struct pci_device_id ixgbe_pci_t
         board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
         board_82599 },
 +      {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM),
 +       board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ),
         board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
@@@ -228,6 -227,56 +229,56 @@@ static void ixgbe_unmap_and_free_tx_res
        /* tx_buffer_info must be completely set up in the transmit path */
  }
  
+ /**
+  * ixgbe_tx_is_paused - check if the tx ring is paused
+  * @adapter: the ixgbe adapter
+  * @tx_ring: the corresponding tx_ring
+  *
+  * If not in DCB mode, checks TFCS.TXOFF, otherwise, find out the
+  * corresponding TC of this tx_ring when checking TFCS.
+  *
+  * Returns : true if paused
+  */
+ static inline bool ixgbe_tx_is_paused(struct ixgbe_adapter *adapter,
+                                       struct ixgbe_ring *tx_ring)
+ {
+       int tc;
+       u32 txoff = IXGBE_TFCS_TXOFF;
+ #ifdef CONFIG_IXGBE_DCB
+       if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+               int reg_idx = tx_ring->reg_idx;
+               int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
+               if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+                       tc = reg_idx >> 2;
+                       txoff = IXGBE_TFCS_TXOFF0;
+               } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+                       tc = 0;
+                       txoff = IXGBE_TFCS_TXOFF;
+                       if (dcb_i == 8) {
+                               /* TC0, TC1 */
+                               tc = reg_idx >> 5;
+                               if (tc == 2) /* TC2, TC3 */
+                                       tc += (reg_idx - 64) >> 4;
+                               else if (tc == 3) /* TC4, TC5, TC6, TC7 */
+                                       tc += 1 + ((reg_idx - 96) >> 3);
+                       } else if (dcb_i == 4) {
+                               /* TC0, TC1 */
+                               tc = reg_idx >> 6;
+                               if (tc == 1) {
+                                       tc += (reg_idx - 64) >> 5;
+                                       if (tc == 2) /* TC2, TC3 */
+                                               tc += (reg_idx - 96) >> 4;
+                               }
+                       }
+               }
+               txoff <<= tc;
+       }
+ #endif
+       return IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & txoff;
+ }
  static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
                                         struct ixgbe_ring *tx_ring,
                                         unsigned int eop)
        adapter->detect_tx_hung = false;
        if (tx_ring->tx_buffer_info[eop].time_stamp &&
            time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
-           !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) {
+           !ixgbe_tx_is_paused(adapter, tx_ring)) {
                /* detected Tx unit hang */
                union ixgbe_adv_tx_desc *tx_desc;
                tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
@@@ -374,8 -423,8 +425,8 @@@ static bool ixgbe_clean_tx_irq(struct i
        tx_ring->total_packets += total_packets;
        tx_ring->stats.packets += total_packets;
        tx_ring->stats.bytes += total_bytes;
 -      adapter->net_stats.tx_bytes += total_bytes;
 -      adapter->net_stats.tx_packets += total_packets;
 +      netdev->stats.tx_bytes += total_bytes;
 +      netdev->stats.tx_packets += total_packets;
        return (count < tx_ring->work_limit);
  }
  
@@@ -414,19 -463,23 +465,23 @@@ static void ixgbe_update_tx_dca(struct 
        u32 txctrl;
        int cpu = get_cpu();
        int q = tx_ring - adapter->tx_ring;
+       struct ixgbe_hw *hw = &adapter->hw;
  
        if (tx_ring->cpu != cpu) {
-               txctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q));
                if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+                       txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(q));
                        txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
                        txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
+                       txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
+                       IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(q), txctrl);
                } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+                       txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(q));
                        txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
                        txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
-                                  IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
+                                 IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
+                       txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
+                       IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(q), txctrl);
                }
-               txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q), txctrl);
                tx_ring->cpu = cpu;
        }
        put_cpu();
@@@ -616,14 -669,22 +671,14 @@@ static void ixgbe_alloc_rx_buffers(stru
  
                if (!bi->skb) {
                        struct sk_buff *skb;
 -                      skb = netdev_alloc_skb(adapter->netdev,
 -                                             (rx_ring->rx_buf_len +
 -                                              NET_IP_ALIGN));
 +                      skb = netdev_alloc_skb_ip_align(adapter->netdev,
 +                                                      rx_ring->rx_buf_len);
  
                        if (!skb) {
                                adapter->alloc_rx_buff_failed++;
                                goto no_buffers;
                        }
  
 -                      /*
 -                       * Make buffer alignment 2 beyond a 16 byte boundary
 -                       * this will result in a 16 byte aligned IP header after
 -                       * the 14 byte MAC header is removed
 -                       */
 -                      skb_reserve(skb, NET_IP_ALIGN);
 -
                        bi->skb = skb;
                        bi->dma = pci_map_single(pdev, skb->data,
                                                 rx_ring->rx_buf_len,
@@@ -703,7 -764,6 +758,7 @@@ static bool ixgbe_clean_rx_irq(struct i
                                 int *work_done, int work_to_do)
  {
        struct ixgbe_adapter *adapter = q_vector->adapter;
 +      struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
        struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
@@@ -875,8 -935,8 +930,8 @@@ next_desc
  
        rx_ring->total_packets += total_rx_packets;
        rx_ring->total_bytes += total_rx_bytes;
 -      adapter->net_stats.rx_bytes += total_rx_bytes;
 -      adapter->net_stats.rx_packets += total_rx_packets;
 +      netdev->stats.rx_bytes += total_rx_bytes;
 +      netdev->stats.rx_packets += total_rx_packets;
  
        return cleaned;
  }
@@@ -1908,11 -1968,25 +1963,25 @@@ static void ixgbe_configure_tx(struct i
                        break;
                }
        }
        if (hw->mac.type == ixgbe_mac_82599EB) {
+               u32 rttdcs;
+               /* disable the arbiter while setting MTQC */
+               rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+               rttdcs |= IXGBE_RTTDCS_ARBDIS;
+               IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
                /* We enable 8 traffic classes, DCB only */
                if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
                        IXGBE_WRITE_REG(hw, IXGBE_MTQC, (IXGBE_MTQC_RT_ENA |
                                        IXGBE_MTQC_8TC_8TQ));
+               else
+                       IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
+               /* re-enable the arbiter */
+               rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
+               IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
        }
  }
  
@@@ -2466,7 -2540,10 +2535,10 @@@ static void ixgbe_configure(struct ixgb
        ixgbe_restore_vlan(adapter);
  #ifdef CONFIG_IXGBE_DCB
        if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-               netif_set_gso_max_size(netdev, 32768);
+               if (hw->mac.type == ixgbe_mac_82598EB)
+                       netif_set_gso_max_size(netdev, 32768);
+               else
+                       netif_set_gso_max_size(netdev, 65536);
                ixgbe_configure_dcb(adapter);
        } else {
                netif_set_gso_max_size(netdev, 65536);
@@@ -4398,7 -4475,6 +4470,7 @@@ static void ixgbe_shutdown(struct pci_d
   **/
  void ixgbe_update_stats(struct ixgbe_adapter *adapter)
  {
 +      struct net_device *netdev = adapter->netdev;
        struct ixgbe_hw *hw = &adapter->hw;
        u64 total_mpc = 0;
        u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
        adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
  
        /* Fill out the OS statistics structure */
 -      adapter->net_stats.multicast = adapter->stats.mprc;
 +      netdev->stats.multicast = adapter->stats.mprc;
  
        /* Rx Errors */
 -      adapter->net_stats.rx_errors = adapter->stats.crcerrs +
 +      netdev->stats.rx_errors = adapter->stats.crcerrs +
                                       adapter->stats.rlec;
 -      adapter->net_stats.rx_dropped = 0;
 -      adapter->net_stats.rx_length_errors = adapter->stats.rlec;
 -      adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
 -      adapter->net_stats.rx_missed_errors = total_mpc;
 +      netdev->stats.rx_dropped = 0;
 +      netdev->stats.rx_length_errors = adapter->stats.rlec;
 +      netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
 +      netdev->stats.rx_missed_errors = total_mpc;
  }
  
  /**
@@@ -5296,8 -5372,10 +5368,8 @@@ static netdev_tx_t ixgbe_xmit_frame(str
   **/
  static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev)
  {
 -      struct ixgbe_adapter *adapter = netdev_priv(netdev);
 -
        /* only return the current stats */
 -      return &adapter->net_stats;
 +      return &netdev->stats;
  }
  
  /**
@@@ -5449,7 -5527,6 +5521,7 @@@ static const struct net_device_ops ixgb
        .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
        .ndo_fcoe_enable = ixgbe_fcoe_enable,
        .ndo_fcoe_disable = ixgbe_fcoe_disable,
 +      .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
  #endif /* IXGBE_FCOE */
  };
  
        return status;
  }
  
 +static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
 +{
 +      return PAGE_SIZE << qdev->lbq_buf_order;
 +}
 +
  /* Get the next large buffer. */
  static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
  {
        return lbq_desc;
  }
  
 +static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
 +              struct rx_ring *rx_ring)
 +{
 +      struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
 +
 +      pci_dma_sync_single_for_cpu(qdev->pdev,
 +                                      pci_unmap_addr(lbq_desc, mapaddr),
 +                                  rx_ring->lbq_buf_size,
 +                                      PCI_DMA_FROMDEVICE);
 +
 +      /* If it's the last chunk of our master page then
 +       * we unmap it.
 +       */
 +      if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
 +                                      == ql_lbq_block_size(qdev))
 +              pci_unmap_page(qdev->pdev,
 +                              lbq_desc->p.pg_chunk.map,
 +                              ql_lbq_block_size(qdev),
 +                              PCI_DMA_FROMDEVICE);
 +      return lbq_desc;
 +}
 +
  /* Get the next small buffer. */
  static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
  {
@@@ -1090,53 -1063,6 +1090,53 @@@ static void ql_write_cq_idx(struct rx_r
        ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
  }
  
 +static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
 +                                              struct bq_desc *lbq_desc)
 +{
 +      if (!rx_ring->pg_chunk.page) {
 +              u64 map;
 +              rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
 +                                              GFP_ATOMIC,
 +                                              qdev->lbq_buf_order);
 +              if (unlikely(!rx_ring->pg_chunk.page)) {
 +                      QPRINTK(qdev, DRV, ERR,
 +                              "page allocation failed.\n");
 +                      return -ENOMEM;
 +              }
 +              rx_ring->pg_chunk.offset = 0;
 +              map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
 +                                      0, ql_lbq_block_size(qdev),
 +                                      PCI_DMA_FROMDEVICE);
 +              if (pci_dma_mapping_error(qdev->pdev, map)) {
 +                      __free_pages(rx_ring->pg_chunk.page,
 +                                      qdev->lbq_buf_order);
 +                      QPRINTK(qdev, DRV, ERR,
 +                              "PCI mapping failed.\n");
 +                      return -ENOMEM;
 +              }
 +              rx_ring->pg_chunk.map = map;
 +              rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
 +      }
 +
 +      /* Copy the current master pg_chunk info
 +       * to the current descriptor.
 +       */
 +      lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
 +
 +      /* Adjust the master page chunk for next
 +       * buffer get.
 +       */
 +      rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
 +      if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
 +              rx_ring->pg_chunk.page = NULL;
 +              lbq_desc->p.pg_chunk.last_flag = 1;
 +      } else {
 +              rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
 +              get_page(rx_ring->pg_chunk.page);
 +              lbq_desc->p.pg_chunk.last_flag = 0;
 +      }
 +      return 0;
 +}
  /* Process (refill) a large buffer queue. */
  static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
  {
        u64 map;
        int i;
  
 -      while (rx_ring->lbq_free_cnt > 16) {
 +      while (rx_ring->lbq_free_cnt > 32) {
                for (i = 0; i < 16; i++) {
                        QPRINTK(qdev, RX_STATUS, DEBUG,
                                "lbq: try cleaning clean_idx = %d.\n",
                                clean_idx);
                        lbq_desc = &rx_ring->lbq[clean_idx];
 -                      if (lbq_desc->p.lbq_page == NULL) {
 -                              QPRINTK(qdev, RX_STATUS, DEBUG,
 -                                      "lbq: getting new page for index %d.\n",
 -                                      lbq_desc->index);
 -                              lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
 -                              if (lbq_desc->p.lbq_page == NULL) {
 -                                      rx_ring->lbq_clean_idx = clean_idx;
 -                                      QPRINTK(qdev, RX_STATUS, ERR,
 -                                              "Couldn't get a page.\n");
 -                                      return;
 -                              }
 -                              map = pci_map_page(qdev->pdev,
 -                                                 lbq_desc->p.lbq_page,
 -                                                 0, PAGE_SIZE,
 -                                                 PCI_DMA_FROMDEVICE);
 -                              if (pci_dma_mapping_error(qdev->pdev, map)) {
 -                                      rx_ring->lbq_clean_idx = clean_idx;
 -                                      put_page(lbq_desc->p.lbq_page);
 -                                      lbq_desc->p.lbq_page = NULL;
 -                                      QPRINTK(qdev, RX_STATUS, ERR,
 -                                              "PCI mapping failed.\n");
 +                      if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
 +                              QPRINTK(qdev, IFUP, ERR,
 +                                      "Could not get a page chunk.\n");
                                        return;
                                }
 +
 +                      map = lbq_desc->p.pg_chunk.map +
 +                              lbq_desc->p.pg_chunk.offset;
                                pci_unmap_addr_set(lbq_desc, mapaddr, map);
 -                              pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
 +                      pci_unmap_len_set(lbq_desc, maplen,
 +                                      rx_ring->lbq_buf_size);
                                *lbq_desc->addr = cpu_to_le64(map);
 -                      }
 +
 +                      pci_dma_sync_single_for_device(qdev->pdev, map,
 +                                              rx_ring->lbq_buf_size,
 +                                              PCI_DMA_FROMDEVICE);
                        clean_idx++;
                        if (clean_idx == rx_ring->lbq_len)
                                clean_idx = 0;
@@@ -1210,7 -1147,7 +1210,7 @@@ static void ql_update_sbq(struct ql_ada
                                        sbq_desc->index);
                                sbq_desc->p.skb =
                                    netdev_alloc_skb(qdev->ndev,
 -                                                   rx_ring->sbq_buf_size);
 +                                                   SMALL_BUFFER_SIZE);
                                if (sbq_desc->p.skb == NULL) {
                                        QPRINTK(qdev, PROBE, ERR,
                                                "Couldn't get an skb.\n");
                                skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
                                map = pci_map_single(qdev->pdev,
                                                     sbq_desc->p.skb->data,
 -                                                   rx_ring->sbq_buf_size /
 -                                                   2, PCI_DMA_FROMDEVICE);
 +                                                   rx_ring->sbq_buf_size,
 +                                                   PCI_DMA_FROMDEVICE);
                                if (pci_dma_mapping_error(qdev->pdev, map)) {
                                        QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
                                        rx_ring->sbq_clean_idx = clean_idx;
                                }
                                pci_unmap_addr_set(sbq_desc, mapaddr, map);
                                pci_unmap_len_set(sbq_desc, maplen,
 -                                                rx_ring->sbq_buf_size / 2);
 +                                                rx_ring->sbq_buf_size);
                                *sbq_desc->addr = cpu_to_le64(map);
                        }
  
@@@ -1543,24 -1480,27 +1543,24 @@@ static struct sk_buff *ql_build_rx_skb(
                         * chain it to the header buffer's skb and let
                         * it rip.
                         */
 -                      lbq_desc = ql_get_curr_lbuf(rx_ring);
 -                      pci_unmap_page(qdev->pdev,
 -                                     pci_unmap_addr(lbq_desc,
 -                                                    mapaddr),
 -                                     pci_unmap_len(lbq_desc, maplen),
 -                                     PCI_DMA_FROMDEVICE);
 +                      lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
                        QPRINTK(qdev, RX_STATUS, DEBUG,
 -                              "Chaining page to skb.\n");
 -                      skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
 -                                         0, length);
 +                              "Chaining page at offset = %d,"
 +                              "for %d bytes  to skb.\n",
 +                              lbq_desc->p.pg_chunk.offset, length);
 +                      skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
 +                                              lbq_desc->p.pg_chunk.offset,
 +                                              length);
                        skb->len += length;
                        skb->data_len += length;
                        skb->truesize += length;
 -                      lbq_desc->p.lbq_page = NULL;
                } else {
                        /*
                         * The headers and data are in a single large buffer. We
                         * copy it to a new skb and let it go. This can happen with
                         * jumbo mtu on a non-TCP/UDP frame.
                         */
 -                      lbq_desc = ql_get_curr_lbuf(rx_ring);
 +                      lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
                        skb = netdev_alloc_skb(qdev->ndev, length);
                        if (skb == NULL) {
                                QPRINTK(qdev, PROBE, DEBUG,
                        skb_reserve(skb, NET_IP_ALIGN);
                        QPRINTK(qdev, RX_STATUS, DEBUG,
                                "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
 -                      skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
 -                                         0, length);
 +                      skb_fill_page_desc(skb, 0,
 +                                              lbq_desc->p.pg_chunk.page,
 +                                              lbq_desc->p.pg_chunk.offset,
 +                                              length);
                        skb->len += length;
                        skb->data_len += length;
                        skb->truesize += length;
                        length -= length;
 -                      lbq_desc->p.lbq_page = NULL;
                        __pskb_pull_tail(skb,
                                (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
                                VLAN_ETH_HLEN : ETH_HLEN);
                 *         frames.  If the MTU goes up we could
                 *          eventually be in trouble.
                 */
 -              int size, offset, i = 0;
 -              __le64 *bq, bq_array[8];
 +              int size, i = 0;
                sbq_desc = ql_get_curr_sbuf(rx_ring);
                pci_unmap_single(qdev->pdev,
                                 pci_unmap_addr(sbq_desc, mapaddr),
                        QPRINTK(qdev, RX_STATUS, DEBUG,
                                "%d bytes of headers & data in chain of large.\n", length);
                        skb = sbq_desc->p.skb;
 -                      bq = &bq_array[0];
 -                      memcpy(bq, skb->data, sizeof(bq_array));
                        sbq_desc->p.skb = NULL;
                        skb_reserve(skb, NET_IP_ALIGN);
 -              } else {
 -                      QPRINTK(qdev, RX_STATUS, DEBUG,
 -                              "Headers in small, %d bytes of data in chain of large.\n", length);
 -                      bq = (__le64 *)sbq_desc->p.skb->data;
                }
                while (length > 0) {
 -                      lbq_desc = ql_get_curr_lbuf(rx_ring);
 -                      pci_unmap_page(qdev->pdev,
 -                                     pci_unmap_addr(lbq_desc,
 -                                                    mapaddr),
 -                                     pci_unmap_len(lbq_desc,
 -                                                   maplen),
 -                                     PCI_DMA_FROMDEVICE);
 -                      size = (length < PAGE_SIZE) ? length : PAGE_SIZE;
 -                      offset = 0;
 +                      lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
 +                      size = (length < rx_ring->lbq_buf_size) ? length :
 +                              rx_ring->lbq_buf_size;
  
                        QPRINTK(qdev, RX_STATUS, DEBUG,
                                "Adding page %d to skb for %d bytes.\n",
                                i, size);
 -                      skb_fill_page_desc(skb, i, lbq_desc->p.lbq_page,
 -                                         offset, size);
 +                      skb_fill_page_desc(skb, i,
 +                                              lbq_desc->p.pg_chunk.page,
 +                                              lbq_desc->p.pg_chunk.offset,
 +                                              size);
                        skb->len += size;
                        skb->data_len += size;
                        skb->truesize += size;
                        length -= size;
 -                      lbq_desc->p.lbq_page = NULL;
 -                      bq++;
                        i++;
                }
                __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
@@@ -1661,7 -1613,6 +1661,7 @@@ static void ql_process_mac_rx_intr(stru
        if (unlikely(!skb)) {
                QPRINTK(qdev, RX_STATUS, DEBUG,
                        "No skb available, drop packet.\n");
 +              rx_ring->rx_dropped++;
                return;
        }
  
                QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
                                        ib_mac_rsp->flags2);
                dev_kfree_skb_any(skb);
 +              rx_ring->rx_errors++;
                return;
        }
  
         */
        if (skb->len > ndev->mtu + ETH_HLEN) {
                dev_kfree_skb_any(skb);
 +              rx_ring->rx_dropped++;
 +              return;
 +      }
 +
 +      /* loopback self test for ethtool */
 +      if (test_bit(QL_SELFTEST, &qdev->flags)) {
 +              ql_check_lb_frame(qdev, skb);
 +              dev_kfree_skb_any(skb);
                return;
        }
  
                        IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
                        (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
                        IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
 +              rx_ring->rx_multicast++;
        }
        if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
                QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
                }
        }
  
 -      qdev->stats.rx_packets++;
 -      qdev->stats.rx_bytes += skb->len;
 +      rx_ring->rx_packets++;
 +      rx_ring->rx_bytes += skb->len;
        skb_record_rx_queue(skb, rx_ring->cq_id);
        if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
                if (qdev->vlgrp &&
@@@ -1764,8 -1705,8 +1764,8 @@@ static void ql_process_mac_tx_intr(stru
        tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
        tx_ring_desc = &tx_ring->q[mac_rsp->tid];
        ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
 -      qdev->stats.tx_bytes += (tx_ring_desc->skb)->len;
 -      qdev->stats.tx_packets++;
 +      tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
 +      tx_ring->tx_packets++;
        dev_kfree_skb(tx_ring_desc->skb);
        tx_ring_desc->skb = NULL;
  
@@@ -1988,7 -1929,7 +1988,7 @@@ static int ql_napi_poll_msix(struct nap
        return work_done;
  }
  
 -static void ql_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
 +static void qlge_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
  {
        struct ql_adapter *qdev = netdev_priv(ndev);
  
        }
  }
  
 -static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
 +static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
  {
        struct ql_adapter *qdev = netdev_priv(ndev);
        u32 enable_bit = MAC_ADDR_E;
        ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
  }
  
 -static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
 +static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
  {
        struct ql_adapter *qdev = netdev_priv(ndev);
        u32 enable_bit = 0;
@@@ -2105,12 -2046,12 +2105,12 @@@ static irqreturn_t qlge_isr(int irq, vo
         */
        var = ql_read32(qdev, ISR1);
        if (var & intr_context->irq_mask) {
 -                              QPRINTK(qdev, INTR, INFO,
 +              QPRINTK(qdev, INTR, INFO,
                        "Waking handler for rx_ring[0].\n");
                ql_disable_completion_interrupt(qdev, intr_context->intr);
 -                                      napi_schedule(&rx_ring->napi);
 -                              work_done++;
 -                      }
 +              napi_schedule(&rx_ring->napi);
 +              work_done++;
 +      }
        ql_enable_completion_interrupt(qdev, intr_context->intr);
        return work_done ? IRQ_HANDLED : IRQ_NONE;
  }
@@@ -2208,7 -2149,6 +2208,7 @@@ static netdev_tx_t qlge_send(struct sk_
                        __func__, tx_ring_idx);
                netif_stop_subqueue(ndev, tx_ring->wq_id);
                atomic_inc(&tx_ring->queue_stopped);
 +              tx_ring->tx_errors++;
                return NETDEV_TX_BUSY;
        }
        tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
                        NETDEV_TX_OK) {
                QPRINTK(qdev, TX_QUEUED, ERR,
                                "Could not map the segments.\n");
 +              tx_ring->tx_errors++;
                return NETDEV_TX_BUSY;
        }
        QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
        return NETDEV_TX_OK;
  }
  
 +
  static void ql_free_shadow_space(struct ql_adapter *qdev)
  {
        if (qdev->rx_ring_shadow_reg_area) {
  
  static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
  {
 -      int i;
        struct bq_desc *lbq_desc;
  
 -      for (i = 0; i < rx_ring->lbq_len; i++) {
 -              lbq_desc = &rx_ring->lbq[i];
 -              if (lbq_desc->p.lbq_page) {
 +      uint32_t  curr_idx, clean_idx;
 +
 +      curr_idx = rx_ring->lbq_curr_idx;
 +      clean_idx = rx_ring->lbq_clean_idx;
 +      while (curr_idx != clean_idx) {
 +              lbq_desc = &rx_ring->lbq[curr_idx];
 +
 +              if (lbq_desc->p.pg_chunk.last_flag) {
                        pci_unmap_page(qdev->pdev,
 -                                     pci_unmap_addr(lbq_desc, mapaddr),
 -                                     pci_unmap_len(lbq_desc, maplen),
 +                              lbq_desc->p.pg_chunk.map,
 +                              ql_lbq_block_size(qdev),
                                       PCI_DMA_FROMDEVICE);
 -
 -                      put_page(lbq_desc->p.lbq_page);
 -                      lbq_desc->p.lbq_page = NULL;
 +                      lbq_desc->p.pg_chunk.last_flag = 0;
                }
 +
 +              put_page(lbq_desc->p.pg_chunk.page);
 +              lbq_desc->p.pg_chunk.page = NULL;
 +
 +              if (++curr_idx == rx_ring->lbq_len)
 +                      curr_idx = 0;
 +
        }
  }
  
@@@ -2686,7 -2615,6 +2686,7 @@@ static int ql_start_rx_ring(struct ql_a
        /* Set up the shadow registers for this ring. */
        rx_ring->prod_idx_sh_reg = shadow_reg;
        rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
 +      *rx_ring->prod_idx_sh_reg = 0;
        shadow_reg += sizeof(u64);
        shadow_reg_dma += sizeof(u64);
        rx_ring->lbq_base_indirect = shadow_reg;
                cqicb->sbq_addr =
                    cpu_to_le64(rx_ring->sbq_base_indirect_dma);
                cqicb->sbq_buf_size =
 -                  cpu_to_le16((u16)(rx_ring->sbq_buf_size/2));
 +                  cpu_to_le16((u16)(rx_ring->sbq_buf_size));
                bq_len = (rx_ring->sbq_len == 65536) ? 0 :
                        (u16) rx_ring->sbq_len;
                cqicb->sbq_len = cpu_to_le16(bq_len);
@@@ -3340,7 -3268,7 +3340,7 @@@ static int ql_adapter_initialize(struc
        ql_write32(qdev, FSC, mask | value);
  
        ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP |
 -              min(SMALL_BUFFER_SIZE, MAX_SPLIT_SIZE));
 +              min(SMALL_BUF_MAP_SIZE, MAX_SPLIT_SIZE));
  
        /* Set RX packet routing to use port/pci function on which the
         * packet arrived on in addition to usual frame routing.
         * the same MAC address.
         */
        ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
 +      /* Reroute all packets to our Interface.
 +       * They may have been routed to MPI firmware
 +       * due to WOL.
 +       */
 +      value = ql_read32(qdev, MGMT_RCV_CFG);
 +      value &= ~MGMT_RCV_CFG_RM;
 +      mask = 0xffff0000;
 +
 +      /* Sticky reg needs clearing due to WOL. */
 +      ql_write32(qdev, MGMT_RCV_CFG, mask);
 +      ql_write32(qdev, MGMT_RCV_CFG, mask | value);
 +
 +      /* Default WOL is enable on Mezz cards */
 +      if (qdev->pdev->subsystem_device == 0x0068 ||
 +                      qdev->pdev->subsystem_device == 0x0180)
 +              qdev->wol = WAKE_MAGIC;
  
        /* Start up the rx queues. */
        for (i = 0; i < qdev->rx_ring_count; i++) {
  
        /* Initialize the port and set the max framesize. */
        status = qdev->nic_ops->port_initialize(qdev);
 -       if (status) {
 -              QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
 -              return status;
 -       }
 +      if (status)
 +              QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
  
        /* Set up the MAC address and frame routing filter. */
        status = ql_cam_route_initialize(qdev);
@@@ -3478,55 -3392,6 +3478,55 @@@ static void ql_display_dev_info(struct 
        QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr);
  }
  
 +int ql_wol(struct ql_adapter *qdev)
 +{
 +      int status = 0;
 +      u32 wol = MB_WOL_DISABLE;
 +
 +      /* The CAM is still intact after a reset, but if we
 +       * are doing WOL, then we may need to program the
 +       * routing regs. We would also need to issue the mailbox
 +       * commands to instruct the MPI what to do per the ethtool
 +       * settings.
 +       */
 +
 +      if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
 +                      WAKE_MCAST | WAKE_BCAST)) {
 +              QPRINTK(qdev, IFDOWN, ERR,
 +                      "Unsupported WOL paramter. qdev->wol = 0x%x.\n",
 +                      qdev->wol);
 +              return -EINVAL;
 +      }
 +
 +      if (qdev->wol & WAKE_MAGIC) {
 +              status = ql_mb_wol_set_magic(qdev, 1);
 +              if (status) {
 +                      QPRINTK(qdev, IFDOWN, ERR,
 +                              "Failed to set magic packet on %s.\n",
 +                              qdev->ndev->name);
 +                      return status;
 +              } else
 +                      QPRINTK(qdev, DRV, INFO,
 +                              "Enabled magic packet successfully on %s.\n",
 +                              qdev->ndev->name);
 +
 +              wol |= MB_WOL_MAGIC_PKT;
 +      }
 +
 +      if (qdev->wol) {
 +              /* Reroute all packets to Management Interface */
 +              ql_write32(qdev, MGMT_RCV_CFG, (MGMT_RCV_CFG_RM |
 +                      (MGMT_RCV_CFG_RM << 16)));
 +              wol |= MB_WOL_MODE_ON;
 +              status = ql_mb_wol_mode(qdev, wol);
 +              QPRINTK(qdev, DRV, ERR, "WOL %s (wol code 0x%x) on %s\n",
 +                      (status == 0) ? "Sucessfully set" : "Failed", wol,
 +                      qdev->ndev->name);
 +      }
 +
 +      return status;
 +}
 +
  static int ql_adapter_down(struct ql_adapter *qdev)
  {
        int i, status = 0;
@@@ -3632,10 -3497,6 +3632,10 @@@ static int ql_configure_rings(struct ql
        struct rx_ring *rx_ring;
        struct tx_ring *tx_ring;
        int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
 +      unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
 +              LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
 +
 +      qdev->lbq_buf_order = get_order(lbq_buf_len);
  
        /* In a perfect world we have one RSS ring for each CPU
         * and each has it's own vector.  To do that we ask for
                        rx_ring->lbq_len = NUM_LARGE_BUFFERS;
                        rx_ring->lbq_size =
                            rx_ring->lbq_len * sizeof(__le64);
 -                      rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
 +                      rx_ring->lbq_buf_size = (u16)lbq_buf_len;
 +                      QPRINTK(qdev, IFUP, DEBUG,
 +                              "lbq_buf_size %d, order = %d\n",
 +                              rx_ring->lbq_buf_size, qdev->lbq_buf_order);
                        rx_ring->sbq_len = NUM_SMALL_BUFFERS;
                        rx_ring->sbq_size =
                            rx_ring->sbq_len * sizeof(__le64);
 -                      rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
 +                      rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
                        rx_ring->type = RX_Q;
                } else {
                        /*
@@@ -3736,63 -3594,14 +3736,63 @@@ error_up
        return err;
  }
  
 +static int ql_change_rx_buffers(struct ql_adapter *qdev)
 +{
 +      struct rx_ring *rx_ring;
 +      int i, status;
 +      u32 lbq_buf_len;
 +
 +      /* Wait for an outstanding reset to complete. */
 +      if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
 +              int i = 3;
 +              while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
 +                      QPRINTK(qdev, IFUP, ERR,
 +                               "Waiting for adapter UP...\n");
 +                      ssleep(1);
 +              }
 +
 +              if (!i) {
 +                      QPRINTK(qdev, IFUP, ERR,
 +                       "Timed out waiting for adapter UP\n");
 +                      return -ETIMEDOUT;
 +              }
 +      }
 +
 +      status = ql_adapter_down(qdev);
 +      if (status)
 +              goto error;
 +
 +      /* Get the new rx buffer size. */
 +      lbq_buf_len = (qdev->ndev->mtu > 1500) ?
 +              LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
 +      qdev->lbq_buf_order = get_order(lbq_buf_len);
 +
 +      for (i = 0; i < qdev->rss_ring_count; i++) {
 +              rx_ring = &qdev->rx_ring[i];
 +              /* Set the new size. */
 +              rx_ring->lbq_buf_size = lbq_buf_len;
 +      }
 +
 +      status = ql_adapter_up(qdev);
 +      if (status)
 +              goto error;
 +
 +      return status;
 +error:
 +      QPRINTK(qdev, IFUP, ALERT,
 +              "Driver up/down cycle failed, closing device.\n");
 +      set_bit(QL_ADAPTER_UP, &qdev->flags);
 +      dev_close(qdev->ndev);
 +      return status;
 +}
 +
  static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
  {
        struct ql_adapter *qdev = netdev_priv(ndev);
 +      int status;
  
        if (ndev->mtu == 1500 && new_mtu == 9000) {
                QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
 -              queue_delayed_work(qdev->workqueue,
 -                              &qdev->mpi_port_cfg_work, 0);
        } else if (ndev->mtu == 9000 && new_mtu == 1500) {
                QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
        } else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
                return 0;
        } else
                return -EINVAL;
 +
 +      queue_delayed_work(qdev->workqueue,
 +                      &qdev->mpi_port_cfg_work, 3*HZ);
 +
 +      if (!netif_running(qdev->ndev)) {
 +              ndev->mtu = new_mtu;
 +              return 0;
 +      }
 +
        ndev->mtu = new_mtu;
 -      return 0;
 +      status = ql_change_rx_buffers(qdev);
 +      if (status) {
 +              QPRINTK(qdev, IFUP, ERR,
 +                      "Changing MTU failed.\n");
 +      }
 +
 +      return status;
  }
  
  static struct net_device_stats *qlge_get_stats(struct net_device
                                               *ndev)
  {
        struct ql_adapter *qdev = netdev_priv(ndev);
 -      return &qdev->stats;
 +      struct rx_ring *rx_ring = &qdev->rx_ring[0];
 +      struct tx_ring *tx_ring = &qdev->tx_ring[0];
 +      unsigned long pkts, mcast, dropped, errors, bytes;
 +      int i;
 +
 +      /* Get RX stats. */
 +      pkts = mcast = dropped = errors = bytes = 0;
 +      for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
 +                      pkts += rx_ring->rx_packets;
 +                      bytes += rx_ring->rx_bytes;
 +                      dropped += rx_ring->rx_dropped;
 +                      errors += rx_ring->rx_errors;
 +                      mcast += rx_ring->rx_multicast;
 +      }
 +      ndev->stats.rx_packets = pkts;
 +      ndev->stats.rx_bytes = bytes;
 +      ndev->stats.rx_dropped = dropped;
 +      ndev->stats.rx_errors = errors;
 +      ndev->stats.multicast = mcast;
 +
 +      /* Get TX stats. */
 +      pkts = errors = bytes = 0;
 +      for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
 +                      pkts += tx_ring->tx_packets;
 +                      bytes += tx_ring->tx_bytes;
 +                      errors += tx_ring->tx_errors;
 +      }
 +      ndev->stats.tx_packets = pkts;
 +      ndev->stats.tx_bytes = bytes;
 +      ndev->stats.tx_errors = errors;
 +      return &ndev->stats;
  }
  
  static void qlge_set_multicast_list(struct net_device *ndev)
@@@ -4104,7 -3868,8 +4104,7 @@@ static int __devinit ql_init_device(str
                                    struct net_device *ndev, int cards_found)
  {
        struct ql_adapter *qdev = netdev_priv(ndev);
 -      int pos, err = 0;
 -      u16 val16;
 +      int err = 0;
  
        memset((void *)qdev, 0, sizeof(*qdev));
        err = pci_enable_device(pdev);
        qdev->ndev = ndev;
        qdev->pdev = pdev;
        pci_set_drvdata(pdev, ndev);
 -      pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
 -      if (pos <= 0) {
 -              dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, "
 -                      "aborting.\n");
 -              return pos;
 -      } else {
 -              pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
 -              val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
 -              val16 |= (PCI_EXP_DEVCTL_CERE |
 -                        PCI_EXP_DEVCTL_NFERE |
 -                        PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE);
 -              pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);
 +
 +      /* Set PCIe read request size */
 +      err = pcie_set_readrq(pdev, 4096);
 +      if (err) {
 +              dev_err(&pdev->dev, "Set readrq failed.\n");
 +              goto err_out;
        }
  
        err = pci_request_regions(pdev, DRV_NAME);
                goto err_out;
        }
  
+       /* Set PCIe reset type for EEH to fundamental. */
+       pdev->needs_freset = 1;
        pci_save_state(pdev);
        qdev->reg_base =
            ioremap_nocache(pci_resource_start(pdev, 1),
@@@ -4218,6 -3991,7 +4220,6 @@@ err_out
        return err;
  }
  
 -
  static const struct net_device_ops qlge_netdev_ops = {
        .ndo_open               = qlge_open,
        .ndo_stop               = qlge_close,
        .ndo_set_mac_address    = qlge_set_mac_address,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_tx_timeout         = qlge_tx_timeout,
 -      .ndo_vlan_rx_register   = ql_vlan_rx_register,
 -      .ndo_vlan_rx_add_vid    = ql_vlan_rx_add_vid,
 -      .ndo_vlan_rx_kill_vid   = ql_vlan_rx_kill_vid,
 +      .ndo_vlan_rx_register   = qlge_vlan_rx_register,
 +      .ndo_vlan_rx_add_vid    = qlge_vlan_rx_add_vid,
 +      .ndo_vlan_rx_kill_vid   = qlge_vlan_rx_kill_vid,
  };
  
  static int __devinit qlge_probe(struct pci_dev *pdev,
        }
        ql_link_off(qdev);
        ql_display_dev_info(ndev);
 +      atomic_set(&qdev->lb_count, 0);
        cards_found++;
        return 0;
  }
  
 +netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
 +{
 +      return qlge_send(skb, ndev);
 +}
 +
 +int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
 +{
 +      return ql_clean_inbound_rx_ring(rx_ring, budget);
 +}
 +
  static void __devexit qlge_remove(struct pci_dev *pdev)
  {
        struct net_device *ndev = pci_get_drvdata(pdev);
@@@ -4430,7 -4193,6 +4432,7 @@@ static int qlge_suspend(struct pci_dev 
                        return err;
        }
  
 +      ql_wol(qdev);
        err = pci_save_state(pdev);
        if (err)
                return err;
@@@ -1,5 -1,25 +1,5 @@@
  #include "qlge.h"
  
 -static void ql_display_mb_sts(struct ql_adapter *qdev,
 -                                              struct mbox_params *mbcp)
 -{
 -      int i;
 -      static char *err_sts[] = {
 -              "Command Complete",
 -              "Command Not Supported",
 -              "Host Interface Error",
 -              "Checksum Error",
 -              "Unused Completion Status",
 -              "Test Failed",
 -              "Command Parameter Error"};
 -
 -      QPRINTK(qdev, DRV, DEBUG, "%s.\n",
 -              err_sts[mbcp->mbox_out[0] & 0x0000000f]);
 -      for (i = 0; i < mbcp->out_count; i++)
 -              QPRINTK(qdev, DRV, DEBUG, "mbox_out[%d] = 0x%.08x.\n",
 -                              i, mbcp->mbox_out[i]);
 -}
 -
  int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
  {
        int status;
@@@ -297,7 -317,6 +297,7 @@@ static void ql_init_fw_done(struct ql_a
        } else {
                QPRINTK(qdev, DRV, ERR, "Firmware Revision  = 0x%.08x.\n",
                        mbcp->mbox_out[1]);
 +              qdev->fw_rev_id = mbcp->mbox_out[1];
                status = ql_cam_route_initialize(qdev);
                if (status)
                        QPRINTK(qdev, IFUP, ERR,
@@@ -427,9 -446,6 +427,9 @@@ static int ql_mpi_handler(struct ql_ada
                ql_aen_lost(qdev, mbcp);
                break;
  
 +      case AEN_DCBX_CHG:
 +              /* Need to support AEN 8110 */
 +              break;
        default:
                QPRINTK(qdev, DRV, ERR,
                        "Unsupported AE %.08x.\n", mbcp->mbox_out[0]);
@@@ -483,7 -499,7 +483,7 @@@ static int ql_mailbox_command(struct ql
                /* Wait for the interrupt to come in. */
                status = ql_wait_mbx_cmd_cmplt(qdev);
                if (status)
-                       goto end;
+                       continue;
  
                /* Process the event.  If it's an AEN, it
                 * will be handled in-line or a worker
@@@ -521,6 -537,7 +521,6 @@@ done
                                        MB_CMD_STS_GOOD) &&
                ((mbcp->mbox_out[0] & 0x0000f000) !=
                                        MB_CMD_STS_INTRMDT)) {
 -              ql_display_mb_sts(qdev, mbcp);
                status = -EIO;
        }
  end:
@@@ -638,7 -655,7 +638,7 @@@ int ql_mb_idc_ack(struct ql_adapter *qd
   * for the current port.
   * Most likely will block.
   */
 -static int ql_mb_set_port_cfg(struct ql_adapter *qdev)
 +int ql_mb_set_port_cfg(struct ql_adapter *qdev)
  {
        struct mbox_params mbc;
        struct mbox_params *mbcp = &mbc;
   * for the current port.
   * Most likely will block.
   */
 -static int ql_mb_get_port_cfg(struct ql_adapter *qdev)
 +int ql_mb_get_port_cfg(struct ql_adapter *qdev)
  {
        struct mbox_params mbc;
        struct mbox_params *mbcp = &mbc;
        return status;
  }
  
 +int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol)
 +{
 +      struct mbox_params mbc;
 +      struct mbox_params *mbcp = &mbc;
 +      int status;
 +
 +      memset(mbcp, 0, sizeof(struct mbox_params));
 +
 +      mbcp->in_count = 2;
 +      mbcp->out_count = 1;
 +
 +      mbcp->mbox_in[0] = MB_CMD_SET_WOL_MODE;
 +      mbcp->mbox_in[1] = wol;
 +
 +
 +      status = ql_mailbox_command(qdev, mbcp);
 +      if (status)
 +              return status;
 +
 +      if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
 +              QPRINTK(qdev, DRV, ERR,
 +                      "Failed to set WOL mode.\n");
 +              status = -EIO;
 +      }
 +      return status;
 +}
 +
 +int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol)
 +{
 +      struct mbox_params mbc;
 +      struct mbox_params *mbcp = &mbc;
 +      int status;
 +      u8 *addr = qdev->ndev->dev_addr;
 +
 +      memset(mbcp, 0, sizeof(struct mbox_params));
 +
 +      mbcp->in_count = 8;
 +      mbcp->out_count = 1;
 +
 +      mbcp->mbox_in[0] = MB_CMD_SET_WOL_MAGIC;
 +      if (enable_wol) {
 +              mbcp->mbox_in[1] = (u32)addr[0];
 +              mbcp->mbox_in[2] = (u32)addr[1];
 +              mbcp->mbox_in[3] = (u32)addr[2];
 +              mbcp->mbox_in[4] = (u32)addr[3];
 +              mbcp->mbox_in[5] = (u32)addr[4];
 +              mbcp->mbox_in[6] = (u32)addr[5];
 +              mbcp->mbox_in[7] = 0;
 +      } else {
 +              mbcp->mbox_in[1] = 0;
 +              mbcp->mbox_in[2] = 1;
 +              mbcp->mbox_in[3] = 1;
 +              mbcp->mbox_in[4] = 1;
 +              mbcp->mbox_in[5] = 1;
 +              mbcp->mbox_in[6] = 1;
 +              mbcp->mbox_in[7] = 0;
 +      }
 +
 +      status = ql_mailbox_command(qdev, mbcp);
 +      if (status)
 +              return status;
 +
 +      if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
 +              QPRINTK(qdev, DRV, ERR,
 +                      "Failed to set WOL mode.\n");
 +              status = -EIO;
 +      }
 +      return status;
 +}
 +
  /* IDC - Inter Device Communication...
   * Some firmware commands require consent of adjacent FCOE
   * function.  This function waits for the OK, or a
@@@ -822,61 -769,6 +822,61 @@@ static int ql_idc_wait(struct ql_adapte
        return status;
  }
  
 +int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config)
 +{
 +      struct mbox_params mbc;
 +      struct mbox_params *mbcp = &mbc;
 +      int status;
 +
 +      memset(mbcp, 0, sizeof(struct mbox_params));
 +
 +      mbcp->in_count = 2;
 +      mbcp->out_count = 1;
 +
 +      mbcp->mbox_in[0] = MB_CMD_SET_LED_CFG;
 +      mbcp->mbox_in[1] = led_config;
 +
 +
 +      status = ql_mailbox_command(qdev, mbcp);
 +      if (status)
 +              return status;
 +
 +      if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
 +              QPRINTK(qdev, DRV, ERR,
 +                      "Failed to set LED Configuration.\n");
 +              status = -EIO;
 +      }
 +
 +      return status;
 +}
 +
 +int ql_mb_get_led_cfg(struct ql_adapter *qdev)
 +{
 +      struct mbox_params mbc;
 +      struct mbox_params *mbcp = &mbc;
 +      int status;
 +
 +      memset(mbcp, 0, sizeof(struct mbox_params));
 +
 +      mbcp->in_count = 1;
 +      mbcp->out_count = 2;
 +
 +      mbcp->mbox_in[0] = MB_CMD_GET_LED_CFG;
 +
 +      status = ql_mailbox_command(qdev, mbcp);
 +      if (status)
 +              return status;
 +
 +      if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
 +              QPRINTK(qdev, DRV, ERR,
 +                      "Failed to get LED Configuration.\n");
 +              status = -EIO;
 +      } else
 +              qdev->led_config = mbcp->mbox_out[1];
 +
 +      return status;
 +}
 +
  int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control)
  {
        struct mbox_params mbc;
@@@ -1038,11 -930,8 +1038,11 @@@ void ql_mpi_idc_work(struct work_struc
        int status;
        struct mbox_params *mbcp = &qdev->idc_mbc;
        u32 aen;
 +      int timeout;
  
 +      rtnl_lock();
        aen = mbcp->mbox_out[1] >> 16;
 +      timeout = (mbcp->mbox_out[1] >> 8) & 0xf;
  
        switch (aen) {
        default:
                        "Bug: Unhandled IDC action.\n");
                break;
        case MB_CMD_PORT_RESET:
 -      case MB_CMD_SET_PORT_CFG:
        case MB_CMD_STOP_FW:
                ql_link_off(qdev);
 +      case MB_CMD_SET_PORT_CFG:
                /* Signal the resulting link up AEN
                 * that the frame routing and mac addr
                 * needs to be set.
                 * */
                set_bit(QL_CAM_RT_SET, &qdev->flags);
 -              rtnl_lock();
 -              status = ql_mb_idc_ack(qdev);
 -              rtnl_unlock();
 -              if (status) {
 -                      QPRINTK(qdev, DRV, ERR,
 -                      "Bug: No pending IDC!\n");
 +              /* Do ACK if required */
 +              if (timeout) {
 +                      status = ql_mb_idc_ack(qdev);
 +                      if (status)
 +                              QPRINTK(qdev, DRV, ERR,
 +                                      "Bug: No pending IDC!\n");
 +              } else {
 +                      QPRINTK(qdev, DRV, DEBUG,
 +                                  "IDC ACK not required\n");
 +                      status = 0; /* success */
                }
 +              break;
 +
 +      /* These sub-commands issued by another (FCoE)
 +       * function are requesting to do an operation
 +       * on the shared resource (MPI environment).
 +       * We currently don't issue these so we just
 +       * ACK the request.
 +       */
 +      case MB_CMD_IOP_RESTART_MPI:
 +      case MB_CMD_IOP_PREP_LINK_DOWN:
 +              /* Drop the link, reload the routing
 +               * table when link comes up.
 +               */
 +              ql_link_off(qdev);
 +              set_bit(QL_CAM_RT_SET, &qdev->flags);
 +              /* Fall through. */
 +      case MB_CMD_IOP_DVR_START:
 +      case MB_CMD_IOP_FLASH_ACC:
 +      case MB_CMD_IOP_CORE_DUMP_MPI:
 +      case MB_CMD_IOP_PREP_UPDATE_MPI:
 +      case MB_CMD_IOP_COMP_UPDATE_MPI:
 +      case MB_CMD_IOP_NONE:   /*  an IDC without params */
 +              /* Do ACK if required */
 +              if (timeout) {
 +                      status = ql_mb_idc_ack(qdev);
 +                      if (status)
 +                              QPRINTK(qdev, DRV, ERR,
 +                                  "Bug: No pending IDC!\n");
 +              } else {
 +                      QPRINTK(qdev, DRV, DEBUG,
 +                          "IDC ACK not required\n");
 +                      status = 0; /* success */
 +              }
 +              break;
        }
 +      rtnl_unlock();
  }
  
  void ql_mpi_work(struct work_struct *work)
@@@ -11275,7 -11275,6 +11275,7 @@@ static int ipw_up(struct ipw_priv *priv
                if (!(priv->config & CFG_CUSTOM_MAC))
                        eeprom_parse_mac(priv, priv->mac_addr);
                memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
 +              memcpy(priv->net_dev->perm_addr, priv->mac_addr, ETH_ALEN);
  
                for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
                        if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
@@@ -11822,7 -11821,6 +11822,6 @@@ static int __devinit ipw_pci_probe(stru
                if (err) {
                        IPW_ERROR("Failed to register promiscuous network "
                                  "device (error %d).\n", err);
-                       unregister_ieee80211(priv->ieee);
                        unregister_netdev(priv->net_dev);
                        goto out_remove_sysfs;
                }
@@@ -11873,7 -11871,6 +11872,6 @@@ static void __devexit ipw_pci_remove(st
  
        mutex_unlock(&priv->mutex);
  
-       unregister_ieee80211(priv->ieee);
        unregister_netdev(priv->net_dev);
  
        if (priv->rxq) {
diff --combined net/can/bcm.c
@@@ -132,23 -132,27 +132,27 @@@ static inline struct bcm_sock *bcm_sk(c
  /*
   * procfs functions
   */
- static char *bcm_proc_getifname(int ifindex)
+ static char *bcm_proc_getifname(char *result, int ifindex)
  {
        struct net_device *dev;
  
        if (!ifindex)
                return "any";
  
-       /* no usage counting */
+       read_lock(&dev_base_lock);
        dev = __dev_get_by_index(&init_net, ifindex);
        if (dev)
-               return dev->name;
+               strcpy(result, dev->name);
+       else
+               strcpy(result, "???");
+       read_unlock(&dev_base_lock);
  
-       return "???";
+       return result;
  }
  
  static int bcm_proc_show(struct seq_file *m, void *v)
  {
+       char ifname[IFNAMSIZ];
        struct sock *sk = (struct sock *)m->private;
        struct bcm_sock *bo = bcm_sk(sk);
        struct bcm_op *op;
        seq_printf(m, " / sk %p", sk);
        seq_printf(m, " / bo %p", bo);
        seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
-       seq_printf(m, " / bound %s", bcm_proc_getifname(bo->ifindex));
+       seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
        seq_printf(m, " <<<\n");
  
        list_for_each_entry(op, &bo->rx_ops, list) {
                        continue;
  
                seq_printf(m, "rx_op: %03X %-5s ",
-                               op->can_id, bcm_proc_getifname(op->ifindex));
+                               op->can_id, bcm_proc_getifname(ifname, op->ifindex));
                seq_printf(m, "[%d]%c ", op->nframes,
                                (op->flags & RX_CHECK_DLC)?'d':' ');
                if (op->kt_ival1.tv64)
        list_for_each_entry(op, &bo->tx_ops, list) {
  
                seq_printf(m, "tx_op: %03X %s [%d] ",
-                               op->can_id, bcm_proc_getifname(op->ifindex),
+                               op->can_id,
+                               bcm_proc_getifname(ifname, op->ifindex),
                                op->nframes);
  
                if (op->kt_ival1.tv64)
@@@ -1534,7 -1539,7 +1539,7 @@@ static int bcm_recvmsg(struct kiocb *io
                return err;
        }
  
 -      sock_recv_timestamp(msg, sk, skb);
 +      sock_recv_ts_and_drops(msg, sk, skb);
  
        if (msg->msg_name) {
                msg->msg_namelen = sizeof(struct sockaddr_can);
@@@ -1576,6 -1581,7 +1581,6 @@@ static struct proto bcm_proto __read_mo
  static struct can_proto bcm_can_proto __read_mostly = {
        .type       = SOCK_DGRAM,
        .protocol   = CAN_BCM,
 -      .capability = -1,
        .ops        = &bcm_ops,
        .prot       = &bcm_proto,
  };
diff --combined net/ipv4/ipip.c
@@@ -134,13 -134,7 +134,13 @@@ static void ipip_fb_tunnel_init(struct 
  static void ipip_tunnel_init(struct net_device *dev);
  static void ipip_tunnel_setup(struct net_device *dev);
  
 -static DEFINE_RWLOCK(ipip_lock);
 +/*
 + * Locking : hash tables are protected by RCU and a spinlock
 + */
 +static DEFINE_SPINLOCK(ipip_lock);
 +
 +#define for_each_ip_tunnel_rcu(start) \
 +      for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
  
  static struct ip_tunnel * ipip_tunnel_lookup(struct net *net,
                __be32 remote, __be32 local)
        struct ip_tunnel *t;
        struct ipip_net *ipn = net_generic(net, ipip_net_id);
  
 -      for (t = ipn->tunnels_r_l[h0^h1]; t; t = t->next) {
 +      for_each_ip_tunnel_rcu(ipn->tunnels_r_l[h0 ^ h1])
                if (local == t->parms.iph.saddr &&
                    remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
                        return t;
 -      }
 -      for (t = ipn->tunnels_r[h0]; t; t = t->next) {
 +
 +      for_each_ip_tunnel_rcu(ipn->tunnels_r[h0])
                if (remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
                        return t;
 -      }
 -      for (t = ipn->tunnels_l[h1]; t; t = t->next) {
 +
 +      for_each_ip_tunnel_rcu(ipn->tunnels_l[h1])
                if (local == t->parms.iph.saddr && (t->dev->flags&IFF_UP))
                        return t;
 -      }
 -      if ((t = ipn->tunnels_wc[0]) != NULL && (t->dev->flags&IFF_UP))
 +
 +      t = rcu_dereference(ipn->tunnels_wc[0]);
 +      if (t && (t->dev->flags&IFF_UP))
                return t;
        return NULL;
  }
@@@ -200,9 -193,9 +200,9 @@@ static void ipip_tunnel_unlink(struct i
  
        for (tp = ipip_bucket(ipn, t); *tp; tp = &(*tp)->next) {
                if (t == *tp) {
 -                      write_lock_bh(&ipip_lock);
 +                      spin_lock_bh(&ipip_lock);
                        *tp = t->next;
 -                      write_unlock_bh(&ipip_lock);
 +                      spin_unlock_bh(&ipip_lock);
                        break;
                }
        }
@@@ -212,10 -205,10 +212,10 @@@ static void ipip_tunnel_link(struct ipi
  {
        struct ip_tunnel **tp = ipip_bucket(ipn, t);
  
 +      spin_lock_bh(&ipip_lock);
        t->next = *tp;
 -      write_lock_bh(&ipip_lock);
 -      *tp = t;
 -      write_unlock_bh(&ipip_lock);
 +      rcu_assign_pointer(*tp, t);
 +      spin_unlock_bh(&ipip_lock);
  }
  
  static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
@@@ -274,9 -267,9 +274,9 @@@ static void ipip_tunnel_uninit(struct n
        struct ipip_net *ipn = net_generic(net, ipip_net_id);
  
        if (dev == ipn->fb_tunnel_dev) {
 -              write_lock_bh(&ipip_lock);
 +              spin_lock_bh(&ipip_lock);
                ipn->tunnels_wc[0] = NULL;
 -              write_unlock_bh(&ipip_lock);
 +              spin_unlock_bh(&ipip_lock);
        } else
                ipip_tunnel_unlink(ipn, netdev_priv(dev));
        dev_put(dev);
@@@ -325,7 -318,7 +325,7 @@@ static int ipip_err(struct sk_buff *skb
  
        err = -ENOENT;
  
 -      read_lock(&ipip_lock);
 +      rcu_read_lock();
        t = ipip_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr);
        if (t == NULL || t->parms.iph.daddr == 0)
                goto out;
                t->err_count = 1;
        t->err_time = jiffies;
  out:
 -      read_unlock(&ipip_lock);
 +      rcu_read_unlock();
        return err;
  }
  
@@@ -358,11 -351,11 +358,11 @@@ static int ipip_rcv(struct sk_buff *skb
        struct ip_tunnel *tunnel;
        const struct iphdr *iph = ip_hdr(skb);
  
 -      read_lock(&ipip_lock);
 +      rcu_read_lock();
        if ((tunnel = ipip_tunnel_lookup(dev_net(skb->dev),
                                        iph->saddr, iph->daddr)) != NULL) {
                if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
 -                      read_unlock(&ipip_lock);
 +                      rcu_read_unlock();
                        kfree_skb(skb);
                        return 0;
                }
                nf_reset(skb);
                ipip_ecn_decapsulate(iph, skb);
                netif_rx(skb);
 -              read_unlock(&ipip_lock);
 +              rcu_read_unlock();
                return 0;
        }
 -      read_unlock(&ipip_lock);
 +      rcu_read_unlock();
  
        return -1;
  }
  static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
  {
        struct ip_tunnel *tunnel = netdev_priv(dev);
 -      struct net_device_stats *stats = &tunnel->dev->stats;
 +      struct net_device_stats *stats = &dev->stats;
 +      struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
        struct iphdr  *tiph = &tunnel->parms.iph;
        u8     tos = tunnel->parms.iph.tos;
        __be16 df = tiph->frag_off;
                goto tx_error;
        }
  
-       if (tiph->frag_off)
+       df |= old_iph->frag_off & htons(IP_DF);
+       if (df) {
                mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr);
-       else
-               mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
  
-       if (mtu < 68) {
-               stats->collisions++;
-               ip_rt_put(rt);
-               goto tx_error;
-       }
-       if (skb_dst(skb))
-               skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
+               if (mtu < 68) {
+                       stats->collisions++;
+                       ip_rt_put(rt);
+                       goto tx_error;
+               }
  
-       df |= (old_iph->frag_off&htons(IP_DF));
+               if (skb_dst(skb))
+                       skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
  
-       if ((old_iph->frag_off&htons(IP_DF)) && mtu < ntohs(old_iph->tot_len)) {
-               icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
-               ip_rt_put(rt);
-               goto tx_error;
+               if ((old_iph->frag_off & htons(IP_DF)) &&
+                   mtu < ntohs(old_iph->tot_len)) {
+                       icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+                                 htonl(mtu));
+                       ip_rt_put(rt);
+                       goto tx_error;
+               }
        }
  
        if (tunnel->err_count > 0) {
                struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
                if (!new_skb) {
                        ip_rt_put(rt);
 -                      stats->tx_dropped++;
 +                      txq->tx_dropped++;
                        dev_kfree_skb(skb);
                        return NETDEV_TX_OK;
                }
@@@ -754,19 -748,16 +756,19 @@@ static struct xfrm_tunnel ipip_handler 
  static const char banner[] __initconst =
        KERN_INFO "IPv4 over IPv4 tunneling driver\n";
  
 -static void ipip_destroy_tunnels(struct ipip_net *ipn)
 +static void ipip_destroy_tunnels(struct ipip_net *ipn, struct list_head *head)
  {
        int prio;
  
        for (prio = 1; prio < 4; prio++) {
                int h;
                for (h = 0; h < HASH_SIZE; h++) {
 -                      struct ip_tunnel *t;
 -                      while ((t = ipn->tunnels[prio][h]) != NULL)
 -                              unregister_netdevice(t->dev);
 +                      struct ip_tunnel *t = ipn->tunnels[prio][h];
 +
 +                      while (t != NULL) {
 +                              unregister_netdevice_queue(t->dev, head);
 +                              t = t->next;
 +                      }
                }
        }
  }
@@@ -819,13 -810,11 +821,13 @@@ err_alloc
  static void ipip_exit_net(struct net *net)
  {
        struct ipip_net *ipn;
 +      LIST_HEAD(list);
  
        ipn = net_generic(net, ipip_net_id);
        rtnl_lock();
 -      ipip_destroy_tunnels(ipn);
 -      unregister_netdevice(ipn->fb_tunnel_dev);
 +      ipip_destroy_tunnels(ipn, &list);
 +      unregister_netdevice_queue(ipn->fb_tunnel_dev, &list);
 +      unregister_netdevice_many(&list);
        rtnl_unlock();
        kfree(ipn);
  }