Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
author Linus Torvalds <torvalds@g5.osdl.org>
Tue, 20 Jun 2006 01:55:56 +0000 (18:55 -0700)
committer Linus Torvalds <torvalds@g5.osdl.org>
Tue, 20 Jun 2006 01:55:56 +0000 (18:55 -0700)
* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6: (109 commits)
  [ETHTOOL]: Fix UFO typo
  [SCTP]: Fix persistent slowdown in sctp when a gap ack consumes rx buffer.
  [SCTP]: Send only 1 window update SACK per message.
  [SCTP]: Don't do CRC32C checksum over loopback.
  [SCTP] Reset rtt_in_progress for the chunk when processing its sack.
  [SCTP]: Reject sctp packets with broadcast addresses.
  [SCTP]: Limit association max_retrans setting in setsockopt.
  [PFKEYV2]: Fix inconsistent typing in struct sadb_x_kmprivate.
  [IPV6]: Sum real space for RTAs.
  [IRDA]: Use put_unaligned() in irlmp_do_discovery().
  [BRIDGE]: Add support for NETIF_F_HW_CSUM devices
  [NET]: Add NETIF_F_GEN_CSUM and NETIF_F_ALL_CSUM
  [TG3]: Convert to non-LLTX
  [TG3]: Remove unnecessary tx_lock
  [TCP]: Add tcp_slow_start_after_idle sysctl.
  [BNX2]: Update version and reldate
  [BNX2]: Use CPU native page size
  [BNX2]: Use compressed firmware
  [BNX2]: Add firmware decompression
  [BNX2]: Allow WoL settings on new 5708 chips
  ...

Manual fixup for conflict in drivers/net/tulip/winbond-840.c

1  2 
drivers/net/Kconfig
drivers/net/forcedeth.c
drivers/net/tulip/winbond-840.c
drivers/net/wireless/orinoco.c
include/linux/pci_ids.h

diff --combined drivers/net/Kconfig
index 20bdb9732a095d486ea86bd4c1de9b0c7ace4c16,1fb0a195b6106b7b120f5f9f52b2187888a14e5b..0c6b45a11d15f876819d395314b5e8fd5f56078b
@@@ -447,7 -447,6 +447,7 @@@ config MIPS_GT96100ET
  config MIPS_AU1X00_ENET
        bool "MIPS AU1000 Ethernet support"
        depends on NET_ETHERNET && SOC_AU1X00
 +      select PHYLIB
        select CRC32
        help
          If you have an Alchemy Semi AU1X00 based system
@@@ -866,22 -865,6 +866,22 @@@ config DM900
          <file:Documentation/networking/net-modules.txt>.  The module will be
          called dm9000.
  
 +config SMC911X
 +      tristate "SMSC LAN911[5678] support"
 +      select CRC32
 +      select MII
 +      depends on NET_ETHERNET && ARCH_PXA
 +      help
 +        This is a driver for SMSC's LAN911x series of Ethernet chipsets
 +        including the new LAN9115, LAN9116, LAN9117, and LAN9118.
 +        Say Y if you want it compiled into the kernel, 
 +        and read the Ethernet-HOWTO, available from
 +        <http://www.linuxdoc.org/docs.html#howto>.
 +
 +        This driver is also available as a module. The module will be 
 +        called smc911x.  If you want to compile it as a module, say M 
 +        here and read <file:Documentation/modules.txt>
 +
  config NET_VENDOR_RACAL
        bool "Racal-Interlan (Micom) NI cards"
        depends on NET_ETHERNET && ISA
@@@ -2180,6 -2163,8 +2180,8 @@@ config TIGON
  config BNX2
        tristate "Broadcom NetXtremeII support"
        depends on PCI
+       select CRC32
+       select ZLIB_INFLATE
        help
          This driver supports Broadcom NetXtremeII gigabit Ethernet cards.
  
@@@ -2328,23 -2313,6 +2330,23 @@@ config S2IO_NAP
  
          If in doubt, say N.
  
 +config MYRI10GE
 +      tristate "Myricom Myri-10G Ethernet support"
 +      depends on PCI
 +      select FW_LOADER
 +      select CRC32
 +      ---help---
 +        This driver supports Myricom Myri-10G Dual Protocol interface in
 +        Ethernet mode. If the eeprom on your board is not recent enough,
 +        you will need a newer firmware image.
 +        You may get this image or more information, at:
 +
 +        <http://www.myri.com/Myri-10G/>
 +
 +        To compile this driver as a module, choose M here and read
 +        <file:Documentation/networking/net-modules.txt>.  The module
 +        will be called myri10ge.
 +
  endmenu
  
  source "drivers/net/tokenring/Kconfig"
diff --combined drivers/net/forcedeth.c
index 4ab39c554d0d995c684434a6b78758b498a30091,5a8651b4b01dd297e7d42a4c5dc4f69bc67985c9..04a53f1dfdbdb8d58b8f61767457be0b000bbc4e
   *    0.52: 20 Jan 2006: Add MSI/MSIX support.
   *    0.53: 19 Mar 2006: Fix init from low power mode and add hw reset.
   *    0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
 + *    0.55: 22 Mar 2006: Add flow control (pause frame).
 + *    0.56: 22 Mar 2006: Additional ethtool config and moduleparam support.
   *
   * Known bugs:
   * We suspect that on some hardware no TX done interrupts are generated.
   * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
   * superfluous timer interrupts from the nic.
   */
 -#define FORCEDETH_VERSION             "0.54"
 +#define FORCEDETH_VERSION             "0.56"
  #define DRV_NAME                      "forcedeth"
  
  #include <linux/module.h>
  #define DEV_HAS_MSI             0x0040  /* device supports MSI */
  #define DEV_HAS_MSI_X           0x0080  /* device supports MSI-X */
  #define DEV_HAS_POWER_CNTRL     0x0100  /* device supports power savings */
 +#define DEV_HAS_PAUSEFRAME_TX   0x0200  /* device supports tx pause frames */
 +#define DEV_HAS_STATISTICS      0x0400  /* device supports hw statistics */
 +#define DEV_HAS_TEST_EXTENDED   0x0800  /* device supports extended diagnostic test */
  
  enum {
        NvRegIrqStatus = 0x000,
        NvRegMSIIrqMask = 0x030,
  #define NVREG_MSI_VECTOR_0_ENABLED 0x01
        NvRegMisc1 = 0x080,
 +#define NVREG_MISC1_PAUSE_TX  0x01
  #define NVREG_MISC1_HD                0x02
  #define NVREG_MISC1_FORCE     0x3b0f3c
  
  #define NVREG_XMITSTAT_BUSY   0x01
  
        NvRegPacketFilterFlags = 0x8c,
 -#define NVREG_PFF_ALWAYS      0x7F0008
 +#define NVREG_PFF_PAUSE_RX    0x08
 +#define NVREG_PFF_ALWAYS      0x7F0000
  #define NVREG_PFF_PROMISC     0x80
  #define NVREG_PFF_MYADDR      0x20
 +#define NVREG_PFF_LOOPBACK    0x10
  
        NvRegOffloadConfig = 0x90,
  #define NVREG_OFFLOAD_HOMEPHY 0x601
  #define NVREG_TXRXCTL_VLANINS 0x00080
        NvRegTxRingPhysAddrHigh = 0x148,
        NvRegRxRingPhysAddrHigh = 0x14C,
 +      NvRegTxPauseFrame = 0x170,
 +#define NVREG_TX_PAUSEFRAME_DISABLE   0x1ff0080
 +#define NVREG_TX_PAUSEFRAME_ENABLE    0x0c00030
        NvRegMIIStatus = 0x180,
  #define NVREG_MIISTAT_ERROR           0x0001
  #define NVREG_MIISTAT_LINKCHANGE      0x0008
  #define NVREG_POWERSTATE_D1           0x0001
  #define NVREG_POWERSTATE_D2           0x0002
  #define NVREG_POWERSTATE_D3           0x0003
 +      NvRegTxCnt = 0x280,
 +      NvRegTxZeroReXmt = 0x284,
 +      NvRegTxOneReXmt = 0x288,
 +      NvRegTxManyReXmt = 0x28c,
 +      NvRegTxLateCol = 0x290,
 +      NvRegTxUnderflow = 0x294,
 +      NvRegTxLossCarrier = 0x298,
 +      NvRegTxExcessDef = 0x29c,
 +      NvRegTxRetryErr = 0x2a0,
 +      NvRegRxFrameErr = 0x2a4,
 +      NvRegRxExtraByte = 0x2a8,
 +      NvRegRxLateCol = 0x2ac,
 +      NvRegRxRunt = 0x2b0,
 +      NvRegRxFrameTooLong = 0x2b4,
 +      NvRegRxOverflow = 0x2b8,
 +      NvRegRxFCSErr = 0x2bc,
 +      NvRegRxFrameAlignErr = 0x2c0,
 +      NvRegRxLenErr = 0x2c4,
 +      NvRegRxUnicast = 0x2c8,
 +      NvRegRxMulticast = 0x2cc,
 +      NvRegRxBroadcast = 0x2d0,
 +      NvRegTxDef = 0x2d4,
 +      NvRegTxFrame = 0x2d8,
 +      NvRegRxCnt = 0x2dc,
 +      NvRegTxPause = 0x2e0,
 +      NvRegRxPause = 0x2e4,
 +      NvRegRxDropFrame = 0x2e8,
        NvRegVlanControl = 0x300,
  #define NVREG_VLANCONTROL_ENABLE      0x2000
        NvRegMSIXMap0 = 0x3e0,
@@@ -487,18 -449,16 +487,18 @@@ typedef union _ring_type 
  /* General driver defaults */
  #define NV_WATCHDOG_TIMEO     (5*HZ)
  
 -#define RX_RING               128
 -#define TX_RING               256
 -/* 
 - * If your nic mysteriously hangs then try to reduce the limits
 - * to 1/0: It might be required to set NV_TX_LASTPACKET in the
 - * last valid ring entry. But this would be impossible to
 - * implement - probably a disassembly error.
 +#define RX_RING_DEFAULT               128
 +#define TX_RING_DEFAULT               256
 +#define RX_RING_MIN           128
 +#define TX_RING_MIN           64
 +#define RING_MAX_DESC_VER_1   1024
 +#define RING_MAX_DESC_VER_2_3 16384
 +/*
 + * Difference between the get and put pointers for the tx ring.
 + * This is used to throttle the amount of data outstanding in the
 + * tx ring.
   */
 -#define TX_LIMIT_STOP 255
 -#define TX_LIMIT_START        254
 +#define TX_LIMIT_DIFFERENCE   1
  
  /* rx/tx mac addr + type + vlan + align + slack*/
  #define NV_RX_HEADERS         (64)
  #define OOM_REFILL    (1+HZ/20)
  #define POLL_WAIT     (1+HZ/100)
  #define LINK_TIMEOUT  (3*HZ)
 +#define STATS_INTERVAL        (10*HZ)
  
 -/* 
 +/*
   * desc_ver values:
   * The nic supports three different descriptor types:
   * - DESC_VER_1: Original
  #define PHY_1000      0x2
  #define PHY_HALF      0x100
  
 -/* FIXME: MII defines that should be added to <linux/mii.h> */
 -#define MII_1000BT_CR 0x09
 -#define MII_1000BT_SR 0x0a
 -#define ADVERTISE_1000FULL    0x0200
 -#define ADVERTISE_1000HALF    0x0100
 -#define LPA_1000FULL  0x0800
 -#define LPA_1000HALF  0x0400
 +#define NV_PAUSEFRAME_RX_CAPABLE 0x0001
 +#define NV_PAUSEFRAME_TX_CAPABLE 0x0002
 +#define NV_PAUSEFRAME_RX_ENABLE  0x0004
 +#define NV_PAUSEFRAME_TX_ENABLE  0x0008
 +#define NV_PAUSEFRAME_RX_REQ     0x0010
 +#define NV_PAUSEFRAME_TX_REQ     0x0020
 +#define NV_PAUSEFRAME_AUTONEG    0x0040
  
  /* MSI/MSI-X defines */
  #define NV_MSI_X_MAX_VECTORS  8
  #define NV_MSI_X_VECTOR_TX    0x1
  #define NV_MSI_X_VECTOR_OTHER 0x2
  
 +/* statistics */
 +struct nv_ethtool_str {
 +      char name[ETH_GSTRING_LEN];
 +};
 +
 +static const struct nv_ethtool_str nv_estats_str[] = {
 +      { "tx_bytes" },
 +      { "tx_zero_rexmt" },
 +      { "tx_one_rexmt" },
 +      { "tx_many_rexmt" },
 +      { "tx_late_collision" },
 +      { "tx_fifo_errors" },
 +      { "tx_carrier_errors" },
 +      { "tx_excess_deferral" },
 +      { "tx_retry_error" },
 +      { "tx_deferral" },
 +      { "tx_packets" },
 +      { "tx_pause" },
 +      { "rx_frame_error" },
 +      { "rx_extra_byte" },
 +      { "rx_late_collision" },
 +      { "rx_runt" },
 +      { "rx_frame_too_long" },
 +      { "rx_over_errors" },
 +      { "rx_crc_errors" },
 +      { "rx_frame_align_error" },
 +      { "rx_length_error" },
 +      { "rx_unicast" },
 +      { "rx_multicast" },
 +      { "rx_broadcast" },
 +      { "rx_bytes" },
 +      { "rx_pause" },
 +      { "rx_drop_frame" },
 +      { "rx_packets" },
 +      { "rx_errors_total" }
 +};
 +
 +struct nv_ethtool_stats {
 +      u64 tx_bytes;
 +      u64 tx_zero_rexmt;
 +      u64 tx_one_rexmt;
 +      u64 tx_many_rexmt;
 +      u64 tx_late_collision;
 +      u64 tx_fifo_errors;
 +      u64 tx_carrier_errors;
 +      u64 tx_excess_deferral;
 +      u64 tx_retry_error;
 +      u64 tx_deferral;
 +      u64 tx_packets;
 +      u64 tx_pause;
 +      u64 rx_frame_error;
 +      u64 rx_extra_byte;
 +      u64 rx_late_collision;
 +      u64 rx_runt;
 +      u64 rx_frame_too_long;
 +      u64 rx_over_errors;
 +      u64 rx_crc_errors;
 +      u64 rx_frame_align_error;
 +      u64 rx_length_error;
 +      u64 rx_unicast;
 +      u64 rx_multicast;
 +      u64 rx_broadcast;
 +      u64 rx_bytes;
 +      u64 rx_pause;
 +      u64 rx_drop_frame;
 +      u64 rx_packets;
 +      u64 rx_errors_total;
 +};
 +
 +/* diagnostics */
 +#define NV_TEST_COUNT_BASE 3
 +#define NV_TEST_COUNT_EXTENDED 4
 +
 +static const struct nv_ethtool_str nv_etests_str[] = {
 +      { "link      (online/offline)" },
 +      { "register  (offline)       " },
 +      { "interrupt (offline)       " },
 +      { "loopback  (offline)       " }
 +};
 +
 +struct register_test {
 +      u32 reg;
 +      u32 mask;
 +};
 +
 +static const struct register_test nv_registers_test[] = {
 +      { NvRegUnknownSetupReg6, 0x01 },
 +      { NvRegMisc1, 0x03c },
 +      { NvRegOffloadConfig, 0x03ff },
 +      { NvRegMulticastAddrA, 0xffffffff },
 +      { NvRegUnknownSetupReg3, 0x0ff },
 +      { NvRegWakeUpFlags, 0x07777 },
 +      { 0,0 }
 +};
 +
  /*
   * SMP locking:
   * All hardware access under dev->priv->lock, except the performance
   * critical parts:
   * - rx is (pseudo-) lockless: it relies on the single-threading provided
   *    by the arch code for interrupts.
-  * - tx setup is lockless: it relies on dev->xmit_lock. Actual submission
+  * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
   *    needs dev->priv->lock :-(
-  * - set_multicast_list: preparation lockless, relies on dev->xmit_lock.
+  * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
   */
  
  /* in dev: base, irq */
@@@ -681,7 -545,6 +681,7 @@@ struct fe_priv 
        /* General data:
         * Locking: spin_lock(&np->lock); */
        struct net_device_stats stats;
 +      struct nv_ethtool_stats estats;
        int in_shutdown;
        u32 linkspeed;
        int duplex;
        int wolenabled;
        unsigned int phy_oui;
        u16 gigabit;
 +      int intr_test;
  
        /* General data: RO fields */
        dma_addr_t ring_addr;
         */
        ring_type rx_ring;
        unsigned int cur_rx, refill_rx;
 -      struct sk_buff *rx_skbuff[RX_RING];
 -      dma_addr_t rx_dma[RX_RING];
 +      struct sk_buff **rx_skbuff;
 +      dma_addr_t *rx_dma;
        unsigned int rx_buf_sz;
        unsigned int pkt_limit;
        struct timer_list oom_kick;
        struct timer_list nic_poll;
 +      struct timer_list stats_poll;
        u32 nic_poll_irq;
 +      int rx_ring_size;
  
        /* media detection workaround.
         * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
         */
        ring_type tx_ring;
        unsigned int next_tx, nic_tx;
 -      struct sk_buff *tx_skbuff[TX_RING];
 -      dma_addr_t tx_dma[TX_RING];
 -      unsigned int tx_dma_len[TX_RING];
 +      struct sk_buff **tx_skbuff;
 +      dma_addr_t *tx_dma;
 +      unsigned int *tx_dma_len;
        u32 tx_flags;
 +      int tx_ring_size;
 +      int tx_limit_start;
 +      int tx_limit_stop;
  
        /* vlan fields */
        struct vlan_group *vlangrp;
        /* msi/msi-x fields */
        u32 msi_flags;
        struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];
 +
 +      /* flow control */
 +      u32 pause_flags;
  };
  
  /*
@@@ -758,14 -612,12 +758,14 @@@ static int max_interrupt_work = 5
  
  /*
   * Optimization can be either throuput mode or cpu mode
 - * 
 + *
   * Throughput Mode: Every tx and rx packet will generate an interrupt.
   * CPU Mode: Interrupts are controlled by a timer.
   */
 -#define NV_OPTIMIZATION_MODE_THROUGHPUT 0
 -#define NV_OPTIMIZATION_MODE_CPU        1
 +enum {
 +      NV_OPTIMIZATION_MODE_THROUGHPUT,
 +      NV_OPTIMIZATION_MODE_CPU
 +};
  static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
  
  /*
  static int poll_interval = -1;
  
  /*
 - * Disable MSI interrupts
 + * MSI interrupts
 + */
 +enum {
 +      NV_MSI_INT_DISABLED,
 +      NV_MSI_INT_ENABLED
 +};
 +static int msi = NV_MSI_INT_ENABLED;
 +
 +/*
 + * MSIX interrupts
   */
 -static int disable_msi = 0;
 +enum {
 +      NV_MSIX_INT_DISABLED,
 +      NV_MSIX_INT_ENABLED
 +};
 +static int msix = NV_MSIX_INT_ENABLED;
  
  /*
 - * Disable MSIX interrupts
 + * DMA 64bit
   */
 -static int disable_msix = 0;
 +enum {
 +      NV_DMA_64BIT_DISABLED,
 +      NV_DMA_64BIT_ENABLED
 +};
 +static int dma_64bit = NV_DMA_64BIT_ENABLED;
  
  static inline struct fe_priv *get_nvpriv(struct net_device *dev)
  {
@@@ -862,7 -697,7 +862,7 @@@ static void setup_hw_rings(struct net_d
                        writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
                }
                if (rxtx_flags & NV_SETUP_TX_RING) {
 -                      writel((u32) cpu_to_le64(np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
 +                      writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
                }
        } else {
                if (rxtx_flags & NV_SETUP_RX_RING) {
                        writel((u32) (cpu_to_le64(np->ring_addr) >> 32), base + NvRegRxRingPhysAddrHigh);
                }
                if (rxtx_flags & NV_SETUP_TX_RING) {
 -                      writel((u32) cpu_to_le64(np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
 -                      writel((u32) (cpu_to_le64(np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)) >> 32), base + NvRegTxRingPhysAddrHigh);
 +                      writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
 +                      writel((u32) (cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)) >> 32), base + NvRegTxRingPhysAddrHigh);
                }
        }
  }
  
 +static void free_rings(struct net_device *dev)
 +{
 +      struct fe_priv *np = get_nvpriv(dev);
 +
 +      if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
 +              if(np->rx_ring.orig)
 +                      pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
 +                                          np->rx_ring.orig, np->ring_addr);
 +      } else {
 +              if (np->rx_ring.ex)
 +                      pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
 +                                          np->rx_ring.ex, np->ring_addr);
 +      }
 +      if (np->rx_skbuff)
 +              kfree(np->rx_skbuff);
 +      if (np->rx_dma)
 +              kfree(np->rx_dma);
 +      if (np->tx_skbuff)
 +              kfree(np->tx_skbuff);
 +      if (np->tx_dma)
 +              kfree(np->tx_dma);
 +      if (np->tx_dma_len)
 +              kfree(np->tx_dma_len);
 +}
 +
  static int using_multi_irqs(struct net_device *dev)
  {
        struct fe_priv *np = get_nvpriv(dev);
@@@ -1050,7 -860,7 +1050,7 @@@ static int phy_init(struct net_device *
  
        /* set advertise register */
        reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
 -      reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|0x800|0x400);
 +      reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
        if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
                printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
                return PHY_ERROR;
        mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
        if (mii_status & PHY_GIGABIT) {
                np->gigabit = PHY_GIGABIT;
 -              mii_control_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
 +              mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
                mii_control_1000 &= ~ADVERTISE_1000HALF;
                if (phyinterface & PHY_RGMII)
                        mii_control_1000 |= ADVERTISE_1000FULL;
                else
                        mii_control_1000 &= ~ADVERTISE_1000FULL;
  
 -              if (mii_rw(dev, np->phyaddr, MII_1000BT_CR, mii_control_1000)) {
 +              if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
                        printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
                        return PHY_ERROR;
                }
                        return PHY_ERROR;
                }
        }
 +      /* some phys clear out pause advertisment on reset, set it back */
 +      mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);
  
        /* restart auto negotiation */
        mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
@@@ -1239,7 -1047,7 +1239,7 @@@ static int nv_alloc_rx(struct net_devic
        while (np->cur_rx != refill_rx) {
                struct sk_buff *skb;
  
 -              nr = refill_rx % RX_RING;
 +              nr = refill_rx % np->rx_ring_size;
                if (np->rx_skbuff[nr] == NULL) {
  
                        skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
                refill_rx++;
        }
        np->refill_rx = refill_rx;
 -      if (np->cur_rx - refill_rx == RX_RING)
 +      if (np->cur_rx - refill_rx == np->rx_ring_size)
                return 1;
        return 0;
  }
@@@ -1302,14 -1110,14 +1302,14 @@@ static void nv_do_rx_refill(unsigned lo
        }
  }
  
 -static void nv_init_rx(struct net_device *dev) 
 +static void nv_init_rx(struct net_device *dev)
  {
        struct fe_priv *np = netdev_priv(dev);
        int i;
  
 -      np->cur_rx = RX_RING;
 +      np->cur_rx = np->rx_ring_size;
        np->refill_rx = 0;
 -      for (i = 0; i < RX_RING; i++)
 +      for (i = 0; i < np->rx_ring_size; i++)
                if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
                        np->rx_ring.orig[i].FlagLen = 0;
                else
@@@ -1322,7 -1130,7 +1322,7 @@@ static void nv_init_tx(struct net_devic
        int i;
  
        np->next_tx = np->nic_tx = 0;
 -      for (i = 0; i < TX_RING; i++) {
 +      for (i = 0; i < np->tx_ring_size; i++) {
                if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
                        np->tx_ring.orig[i].FlagLen = 0;
                else
@@@ -1366,8 -1174,8 +1366,8 @@@ static void nv_drain_tx(struct net_devi
  {
        struct fe_priv *np = netdev_priv(dev);
        unsigned int i;
 -      
 -      for (i = 0; i < TX_RING; i++) {
 +
 +      for (i = 0; i < np->tx_ring_size; i++) {
                if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
                        np->tx_ring.orig[i].FlagLen = 0;
                else
@@@ -1381,7 -1189,7 +1381,7 @@@ static void nv_drain_rx(struct net_devi
  {
        struct fe_priv *np = netdev_priv(dev);
        int i;
 -      for (i = 0; i < RX_RING; i++) {
 +      for (i = 0; i < np->rx_ring_size; i++) {
                if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
                        np->rx_ring.orig[i].FlagLen = 0;
                else
@@@ -1405,7 -1213,7 +1405,7 @@@ static void drain_ring(struct net_devic
  
  /*
   * nv_start_xmit: dev->hard_start_xmit function
-  * Called with dev->xmit_lock held.
+  * Called with netif_tx_lock held.
   */
  static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
  {
        u32 tx_flags = 0;
        u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
        unsigned int fragments = skb_shinfo(skb)->nr_frags;
 -      unsigned int nr = (np->next_tx - 1) % TX_RING;
 -      unsigned int start_nr = np->next_tx % TX_RING;
 +      unsigned int nr = (np->next_tx - 1) % np->tx_ring_size;
 +      unsigned int start_nr = np->next_tx % np->tx_ring_size;
        unsigned int i;
        u32 offset = 0;
        u32 bcnt;
  
        spin_lock_irq(&np->lock);
  
 -      if ((np->next_tx - np->nic_tx + entries - 1) > TX_LIMIT_STOP) {
 +      if ((np->next_tx - np->nic_tx + entries - 1) > np->tx_limit_stop) {
                spin_unlock_irq(&np->lock);
                netif_stop_queue(dev);
                return NETDEV_TX_BUSY;
        /* setup the header buffer */
        do {
                bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
 -              nr = (nr + 1) % TX_RING;
 +              nr = (nr + 1) % np->tx_ring_size;
  
                np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
                                                PCI_DMA_TODEVICE);
  
                do {
                        bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
 -                      nr = (nr + 1) % TX_RING;
 +                      nr = (nr + 1) % np->tx_ring_size;
  
                        np->tx_dma[nr] = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
                                                      PCI_DMA_TODEVICE);
        } else {
                np->tx_ring.ex[start_nr].TxVlan = cpu_to_le32(tx_flags_vlan);
                np->tx_ring.ex[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
 -      }       
 +      }
  
        dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. tx_flags_extra: %x\n",
                dev->name, np->next_tx, entries, tx_flags_extra);
@@@ -1548,7 -1356,7 +1548,7 @@@ static void nv_tx_done(struct net_devic
        struct sk_buff *skb;
  
        while (np->nic_tx != np->next_tx) {
 -              i = np->nic_tx % TX_RING;
 +              i = np->nic_tx % np->tx_ring_size;
  
                if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
                        Flags = le32_to_cpu(np->tx_ring.orig[i].FlagLen);
                                } else {
                                        np->stats.tx_packets++;
                                        np->stats.tx_bytes += skb->len;
 -                              }                               
 +                              }
                        }
                }
                nv_release_txskb(dev, i);
                np->nic_tx++;
        }
 -      if (np->next_tx - np->nic_tx < TX_LIMIT_START)
 +      if (np->next_tx - np->nic_tx < np->tx_limit_start)
                netif_wake_queue(dev);
  }
  
  /*
   * nv_tx_timeout: dev->tx_timeout function
-  * Called with dev->xmit_lock held.
+  * Called with netif_tx_lock held.
   */
  static void nv_tx_timeout(struct net_device *dev)
  {
                                        readl(base + i + 24), readl(base + i + 28));
                }
                printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
 -              for (i=0;i<TX_RING;i+= 4) {
 +              for (i=0;i<np->tx_ring_size;i+= 4) {
                        if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
                                printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
 -                                     i, 
 +                                     i,
                                       le32_to_cpu(np->tx_ring.orig[i].PacketBuffer),
                                       le32_to_cpu(np->tx_ring.orig[i].FlagLen),
                                       le32_to_cpu(np->tx_ring.orig[i+1].PacketBuffer),
                                       le32_to_cpu(np->tx_ring.orig[i+3].FlagLen));
                        } else {
                                printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
 -                                     i, 
 +                                     i,
                                       le32_to_cpu(np->tx_ring.ex[i].PacketBufferHigh),
                                       le32_to_cpu(np->tx_ring.ex[i].PacketBufferLow),
                                       le32_to_cpu(np->tx_ring.ex[i].FlagLen),
@@@ -1742,14 -1550,15 +1742,14 @@@ static void nv_rx_process(struct net_de
        u32 Flags;
        u32 vlanflags = 0;
  
 -
        for (;;) {
                struct sk_buff *skb;
                int len;
                int i;
 -              if (np->cur_rx - np->refill_rx >= RX_RING)
 +              if (np->cur_rx - np->refill_rx >= np->rx_ring_size)
                        break;  /* we scanned the whole ring - do not continue */
  
 -              i = np->cur_rx % RX_RING;
 +              i = np->cur_rx % np->rx_ring_size;
                if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
                        Flags = le32_to_cpu(np->rx_ring.orig[i].FlagLen);
                        len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver);
                                        }
                                }
                        }
 -                      Flags &= NV_RX2_CHECKSUMMASK;
 -                      if (Flags == NV_RX2_CHECKSUMOK1 ||
 -                                      Flags == NV_RX2_CHECKSUMOK2 ||
 -                                      Flags == NV_RX2_CHECKSUMOK3) {
 -                              dprintk(KERN_DEBUG "%s: hw checksum hit!.\n", dev->name);
 -                              np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY;
 -                      } else {
 -                              dprintk(KERN_DEBUG "%s: hwchecksum miss!.\n", dev->name);
 +                      if (np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) {
 +                              Flags &= NV_RX2_CHECKSUMMASK;
 +                              if (Flags == NV_RX2_CHECKSUMOK1 ||
 +                                  Flags == NV_RX2_CHECKSUMOK2 ||
 +                                  Flags == NV_RX2_CHECKSUMOK3) {
 +                                      dprintk(KERN_DEBUG "%s: hw checksum hit!.\n", dev->name);
 +                                      np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY;
 +                              } else {
 +                                      dprintk(KERN_DEBUG "%s: hwchecksum miss!.\n", dev->name);
 +                              }
                        }
                }
                /* got a valid packet - forward it to the network core */
@@@ -1930,7 -1737,7 +1930,7 @@@ static int nv_change_mtu(struct net_dev
                 * Changing the MTU is a rare event, it shouldn't matter.
                 */
                nv_disable_irq(dev);
-               spin_lock_bh(&dev->xmit_lock);
+               netif_tx_lock_bh(dev);
                spin_lock(&np->lock);
                /* stop engines */
                nv_stop_rx(dev);
                nv_drain_rx(dev);
                nv_drain_tx(dev);
                /* reinit driver view of the rx queue */
 -              nv_init_rx(dev);
 -              nv_init_tx(dev);
 -              /* alloc new rx buffers */
                set_bufsize(dev);
 -              if (nv_alloc_rx(dev)) {
 +              if (nv_init_ring(dev)) {
                        if (!np->in_shutdown)
                                mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
                }
                /* reinit nic view of the rx queue */
                writel(np->rx_buf_sz, base + NvRegOffloadConfig);
                setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
 -              writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
 +              writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
                        base + NvRegRingSizes);
                pci_push(base);
                writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
                nv_start_rx(dev);
                nv_start_tx(dev);
                spin_unlock(&np->lock);
-               spin_unlock_bh(&dev->xmit_lock);
+               netif_tx_unlock_bh(dev);
                nv_enable_irq(dev);
        }
        return 0;
@@@ -1993,7 -1803,7 +1993,7 @@@ static int nv_set_mac_address(struct ne
        memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
  
        if (netif_running(dev)) {
-               spin_lock_bh(&dev->xmit_lock);
+               netif_tx_lock_bh(dev);
                spin_lock_irq(&np->lock);
  
                /* stop rx engine */
                /* restart rx engine */
                nv_start_rx(dev);
                spin_unlock_irq(&np->lock);
-               spin_unlock_bh(&dev->xmit_lock);
+               netif_tx_unlock_bh(dev);
        } else {
                nv_copy_mac_to_hw(dev);
        }
  
  /*
   * nv_set_multicast: dev->set_multicast function
-  * Called with dev->xmit_lock held.
+  * Called with netif_tx_lock held.
   */
  static void nv_set_multicast(struct net_device *dev)
  {
        u8 __iomem *base = get_hwbase(dev);
        u32 addr[2];
        u32 mask[2];
 -      u32 pff;
 +      u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX;
  
        memset(addr, 0, sizeof(addr));
        memset(mask, 0, sizeof(mask));
  
        if (dev->flags & IFF_PROMISC) {
                printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
 -              pff = NVREG_PFF_PROMISC;
 +              pff |= NVREG_PFF_PROMISC;
        } else {
 -              pff = NVREG_PFF_MYADDR;
 +              pff |= NVREG_PFF_MYADDR;
  
                if (dev->flags & IFF_ALLMULTI || dev->mc_list) {
                        u32 alwaysOff[2];
        spin_unlock_irq(&np->lock);
  }
  
 +void nv_update_pause(struct net_device *dev, u32 pause_flags)
 +{
 +      struct fe_priv *np = netdev_priv(dev);
 +      u8 __iomem *base = get_hwbase(dev);
 +
 +      np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);
 +
 +      if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
 +              u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
 +              if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) {
 +                      writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
 +                      np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
 +              } else {
 +                      writel(pff, base + NvRegPacketFilterFlags);
 +              }
 +      }
 +      if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
 +              u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
 +              if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
 +                      writel(NVREG_TX_PAUSEFRAME_ENABLE,  base + NvRegTxPauseFrame);
 +                      writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
 +                      np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
 +              } else {
 +                      writel(NVREG_TX_PAUSEFRAME_DISABLE,  base + NvRegTxPauseFrame);
 +                      writel(regmisc, base + NvRegMisc1);
 +              }
 +      }
 +}
 +
  /**
   * nv_update_linkspeed: Setup the MAC according to the link partner
   * @dev: Network device to be configured
@@@ -2120,14 -1901,12 +2120,14 @@@ static int nv_update_linkspeed(struct n
  {
        struct fe_priv *np = netdev_priv(dev);
        u8 __iomem *base = get_hwbase(dev);
 -      int adv, lpa;
 +      int adv = 0;
 +      int lpa = 0;
 +      int adv_lpa, adv_pause, lpa_pause;
        int newls = np->linkspeed;
        int newdup = np->duplex;
        int mii_status;
        int retval = 0;
 -      u32 control_1000, status_1000, phyreg;
 +      u32 control_1000, status_1000, phyreg, pause_flags;
  
        /* BMSR_LSTATUS is latched, read it twice:
         * we want the current value.
                goto set_speed;
        }
  
 +      adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
 +      lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
 +      dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
 +                              dev->name, adv, lpa);
 +
        retval = 1;
        if (np->gigabit == PHY_GIGABIT) {
 -              control_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
 -              status_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_SR, MII_READ);
 +              control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
 +              status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);
  
                if ((control_1000 & ADVERTISE_1000FULL) &&
                        (status_1000 & LPA_1000FULL)) {
                }
        }
  
 -      adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
 -      lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
 -      dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
 -                              dev->name, adv, lpa);
 -
        /* FIXME: handle parallel detection properly */
 -      lpa = lpa & adv;
 -      if (lpa & LPA_100FULL) {
 +      adv_lpa = lpa & adv;
 +      if (adv_lpa & LPA_100FULL) {
                newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
                newdup = 1;
 -      } else if (lpa & LPA_100HALF) {
 +      } else if (adv_lpa & LPA_100HALF) {
                newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
                newdup = 0;
 -      } else if (lpa & LPA_10FULL) {
 +      } else if (adv_lpa & LPA_10FULL) {
                newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
                newdup = 1;
 -      } else if (lpa & LPA_10HALF) {
 +      } else if (adv_lpa & LPA_10HALF) {
                newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
                newdup = 0;
        } else {
 -              dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, lpa);
 +              dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa);
                newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
                newdup = 0;
        }
@@@ -2251,46 -2030,6 +2251,46 @@@ set_speed
        writel(np->linkspeed, base + NvRegLinkSpeed);
        pci_push(base);
  
 +      pause_flags = 0;
 +      /* setup pause frame */
 +      if (np->duplex != 0) {
 +              if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
 +                      adv_pause = adv & (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM);
 +                      lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM);
 +
 +                      switch (adv_pause) {
 +                      case (ADVERTISE_PAUSE_CAP):
 +                              if (lpa_pause & LPA_PAUSE_CAP) {
 +                                      pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
 +                                      if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
 +                                              pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
 +                              }
 +                              break;
 +                      case (ADVERTISE_PAUSE_ASYM):
 +                              if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM))
 +                              {
 +                                      pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
 +                              }
 +                              break;
 +                      case (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM):
 +                              if (lpa_pause & LPA_PAUSE_CAP)
 +                              {
 +                                      pause_flags |=  NV_PAUSEFRAME_RX_ENABLE;
 +                                      if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
 +                                              pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
 +                              }
 +                              if (lpa_pause == LPA_PAUSE_ASYM)
 +                              {
 +                                      pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
 +                              }
 +                              break;
 +                      }
 +              } else {
 +                      pause_flags = np->pause_flags;
 +              }
 +      }
 +      nv_update_pause(dev, pause_flags);
 +
        return retval;
  }
  
@@@ -2351,7 -2090,7 +2351,7 @@@ static irqreturn_t nv_nic_irq(int foo, 
                spin_lock(&np->lock);
                nv_tx_done(dev);
                spin_unlock(&np->lock);
 -              
 +
                nv_rx_process(dev);
                if (nv_alloc_rx(dev)) {
                        spin_lock(&np->lock);
                                mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
                        spin_unlock(&np->lock);
                }
 -              
 +
                if (events & NVREG_IRQ_LINK) {
                        spin_lock(&np->lock);
                        nv_link_irq(dev);
@@@ -2424,7 -2163,7 +2424,7 @@@ static irqreturn_t nv_nic_irq_tx(int fo
                spin_lock_irq(&np->lock);
                nv_tx_done(dev);
                spin_unlock_irq(&np->lock);
 -              
 +
                if (events & (NVREG_IRQ_TX_ERR)) {
                        dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
                                                dev->name, events);
@@@ -2467,7 -2206,7 +2467,7 @@@ static irqreturn_t nv_nic_irq_rx(int fo
                dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
                if (!(events & np->irqmask))
                        break;
 -              
 +
                nv_rx_process(dev);
                if (nv_alloc_rx(dev)) {
                        spin_lock_irq(&np->lock);
                                mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
                        spin_unlock_irq(&np->lock);
                }
 -              
 +
                if (i > max_interrupt_work) {
                        spin_lock_irq(&np->lock);
                        /* disable interrupts on the nic */
@@@ -2514,7 -2253,7 +2514,7 @@@ static irqreturn_t nv_nic_irq_other(in
                dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
                if (!(events & np->irqmask))
                        break;
 -              
 +
                if (events & NVREG_IRQ_LINK) {
                        spin_lock_irq(&np->lock);
                        nv_link_irq(dev);
        return IRQ_RETVAL(i);
  }
  
 +static irqreturn_t nv_nic_irq_test(int foo, void *data, struct pt_regs *regs)
 +{
 +      struct net_device *dev = (struct net_device *) data;
 +      struct fe_priv *np = netdev_priv(dev);
 +      u8 __iomem *base = get_hwbase(dev);
 +      u32 events;
 +
 +      dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name);
 +
 +      if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
 +              events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
 +              writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus);
 +      } else {
 +              events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
 +              writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
 +      }
 +      pci_push(base);
 +      dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
 +      if (!(events & NVREG_IRQ_TIMER))
 +              return IRQ_RETVAL(0);
 +
 +      spin_lock(&np->lock);
 +      np->intr_test = 1;
 +      spin_unlock(&np->lock);
 +
 +      dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name);
 +
 +      return IRQ_RETVAL(1);
 +}
 +
 +static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
 +{
 +      u8 __iomem *base = get_hwbase(dev);
 +      int i;
 +      u32 msixmap = 0;
 +
 +      /* Each interrupt bit can be mapped to a MSIX vector (4 bits).
 +       * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
 +       * the remaining 8 interrupts.
 +       */
 +      for (i = 0; i < 8; i++) {
 +              if ((irqmask >> i) & 0x1) {
 +                      msixmap |= vector << (i << 2);
 +              }
 +      }
 +      writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
 +
 +      msixmap = 0;
 +      for (i = 0; i < 8; i++) {
 +              if ((irqmask >> (i + 8)) & 0x1) {
 +                      msixmap |= vector << (i << 2);
 +              }
 +      }
 +      writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
 +}
 +
 +static int nv_request_irq(struct net_device *dev, int intr_test)
 +{
 +      struct fe_priv *np = get_nvpriv(dev);
 +      u8 __iomem *base = get_hwbase(dev);
 +      int ret = 1;
 +      int i;
 +
 +      if (np->msi_flags & NV_MSI_X_CAPABLE) {
 +              for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
 +                      np->msi_x_entry[i].entry = i;
 +              }
 +              if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
 +                      np->msi_flags |= NV_MSI_X_ENABLED;
 +                      if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
 +                              /* Request irq for rx handling */
 +                              if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) {
 +                                      printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
 +                                      pci_disable_msix(np->pci_dev);
 +                                      np->msi_flags &= ~NV_MSI_X_ENABLED;
 +                                      goto out_err;
 +                              }
 +                              /* Request irq for tx handling */
 +                              if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) {
 +                                      printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
 +                                      pci_disable_msix(np->pci_dev);
 +                                      np->msi_flags &= ~NV_MSI_X_ENABLED;
 +                                      goto out_free_rx;
 +                              }
 +                              /* Request irq for link and timer handling */
 +                              if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) {
 +                                      printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
 +                                      pci_disable_msix(np->pci_dev);
 +                                      np->msi_flags &= ~NV_MSI_X_ENABLED;
 +                                      goto out_free_tx;
 +                              }
 +                              /* map interrupts to their respective vector */
 +                              writel(0, base + NvRegMSIXMap0);
 +                              writel(0, base + NvRegMSIXMap1);
 +                              set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
 +                              set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
 +                              set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
 +                      } else {
 +                              /* Request irq for all interrupts */
 +                              if ((!intr_test &&
 +                                   request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) ||
 +                                  (intr_test &&
 +                                   request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq_test, SA_SHIRQ, dev->name, dev) != 0)) {
 +                                      printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
 +                                      pci_disable_msix(np->pci_dev);
 +                                      np->msi_flags &= ~NV_MSI_X_ENABLED;
 +                                      goto out_err;
 +                              }
 +
 +                              /* map interrupts to vector 0 */
 +                              writel(0, base + NvRegMSIXMap0);
 +                              writel(0, base + NvRegMSIXMap1);
 +                      }
 +              }
 +      }
 +      if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
 +              if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
 +                      np->msi_flags |= NV_MSI_ENABLED;
 +                      if ((!intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) ||
 +                          (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, SA_SHIRQ, dev->name, dev) != 0)) {
 +                              printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
 +                              pci_disable_msi(np->pci_dev);
 +                              np->msi_flags &= ~NV_MSI_ENABLED;
 +                              goto out_err;
 +                      }
 +
 +                      /* map interrupts to vector 0 */
 +                      writel(0, base + NvRegMSIMap0);
 +                      writel(0, base + NvRegMSIMap1);
 +                      /* enable msi vector 0 */
 +                      writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
 +              }
 +      }
 +      if (ret != 0) {
 +              if ((!intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) ||
 +                  (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, SA_SHIRQ, dev->name, dev) != 0))
 +                      goto out_err;
 +
 +      }
 +
 +      return 0;
 +out_free_tx:
 +      free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
 +out_free_rx:
 +      free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
 +out_err:
 +      return 1;
 +}
 +
 +static void nv_free_irq(struct net_device *dev)
 +{
 +      struct fe_priv *np = get_nvpriv(dev);
 +      int i;
 +
 +      if (np->msi_flags & NV_MSI_X_ENABLED) {
 +              for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
 +                      free_irq(np->msi_x_entry[i].vector, dev);
 +              }
 +              pci_disable_msix(np->pci_dev);
 +              np->msi_flags &= ~NV_MSI_X_ENABLED;
 +      } else {
 +              free_irq(np->pci_dev->irq, dev);
 +              if (np->msi_flags & NV_MSI_ENABLED) {
 +                      pci_disable_msi(np->pci_dev);
 +                      np->msi_flags &= ~NV_MSI_ENABLED;
 +              }
 +      }
 +}
 +
  static void nv_do_nic_poll(unsigned long data)
  {
        struct net_device *dev = (struct net_device *) data;
        np->nic_poll_irq = 0;
  
        /* FIXME: Do we need synchronize_irq(dev->irq) here? */
 -      
 +
        writel(mask, base + NvRegIrqMask);
        pci_push(base);
  
@@@ -2789,56 -2359,6 +2789,56 @@@ static void nv_poll_controller(struct n
  }
  #endif
  
 +static void nv_do_stats_poll(unsigned long data)
 +{
 +      struct net_device *dev = (struct net_device *) data;
 +      struct fe_priv *np = netdev_priv(dev);
 +      u8 __iomem *base = get_hwbase(dev);
 +
 +      np->estats.tx_bytes += readl(base + NvRegTxCnt);
 +      np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
 +      np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
 +      np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
 +      np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
 +      np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
 +      np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
 +      np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
 +      np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
 +      np->estats.tx_deferral += readl(base + NvRegTxDef);
 +      np->estats.tx_packets += readl(base + NvRegTxFrame);
 +      np->estats.tx_pause += readl(base + NvRegTxPause);
 +      np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
 +      np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
 +      np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
 +      np->estats.rx_runt += readl(base + NvRegRxRunt);
 +      np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
 +      np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
 +      np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
 +      np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
 +      np->estats.rx_length_error += readl(base + NvRegRxLenErr);
 +      np->estats.rx_unicast += readl(base + NvRegRxUnicast);
 +      np->estats.rx_multicast += readl(base + NvRegRxMulticast);
 +      np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
 +      np->estats.rx_bytes += readl(base + NvRegRxCnt);
 +      np->estats.rx_pause += readl(base + NvRegRxPause);
 +      np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
 +      np->estats.rx_packets =
 +              np->estats.rx_unicast +
 +              np->estats.rx_multicast +
 +              np->estats.rx_broadcast;
 +      np->estats.rx_errors_total =
 +              np->estats.rx_crc_errors +
 +              np->estats.rx_over_errors +
 +              np->estats.rx_frame_error +
 +              (np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
 +              np->estats.rx_late_collision +
 +              np->estats.rx_runt +
 +              np->estats.rx_frame_too_long;
 +
 +      if (!np->in_shutdown)
 +              mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);
 +}
 +
  static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
  {
        struct fe_priv *np = netdev_priv(dev);
@@@ -2862,19 -2382,17 +2862,19 @@@ static int nv_set_wol(struct net_devic
  {
        struct fe_priv *np = netdev_priv(dev);
        u8 __iomem *base = get_hwbase(dev);
 +      u32 flags = 0;
  
 -      spin_lock_irq(&np->lock);
        if (wolinfo->wolopts == 0) {
 -              writel(0, base + NvRegWakeUpFlags);
                np->wolenabled = 0;
 -      }
 -      if (wolinfo->wolopts & WAKE_MAGIC) {
 -              writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);
 +      } else if (wolinfo->wolopts & WAKE_MAGIC) {
                np->wolenabled = 1;
 +              flags = NVREG_WAKEUPFLAGS_ENABLE;
 +      }
 +      if (netif_running(dev)) {
 +              spin_lock_irq(&np->lock);
 +              writel(flags, base + NvRegWakeUpFlags);
 +              spin_unlock_irq(&np->lock);
        }
 -      spin_unlock_irq(&np->lock);
        return 0;
  }
  
@@@ -2888,17 -2406,9 +2888,17 @@@ static int nv_get_settings(struct net_d
        if (!netif_running(dev)) {
                /* We do not track link speed / duplex setting if the
                 * interface is disabled. Force a link check */
 -              nv_update_linkspeed(dev);
 +              if (nv_update_linkspeed(dev)) {
 +                      if (!netif_carrier_ok(dev))
 +                              netif_carrier_on(dev);
 +              } else {
 +                      if (netif_carrier_ok(dev))
 +                              netif_carrier_off(dev);
 +              }
        }
 -      switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) {
 +
 +      if (netif_carrier_ok(dev)) {
 +              switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) {
                case NVREG_LINKSPEED_10:
                        ecmd->speed = SPEED_10;
                        break;
                case NVREG_LINKSPEED_1000:
                        ecmd->speed = SPEED_1000;
                        break;
 +              }
 +              ecmd->duplex = DUPLEX_HALF;
 +              if (np->duplex)
 +                      ecmd->duplex = DUPLEX_FULL;
 +      } else {
 +              ecmd->speed = -1;
 +              ecmd->duplex = -1;
        }
 -      ecmd->duplex = DUPLEX_HALF;
 -      if (np->duplex)
 -              ecmd->duplex = DUPLEX_FULL;
  
        ecmd->autoneg = np->autoneg;
  
        if (np->autoneg) {
                ecmd->advertising |= ADVERTISED_Autoneg;
                adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
 -      } else {
 -              adv = np->fixed_mode;
 -      }
 -      if (adv & ADVERTISE_10HALF)
 -              ecmd->advertising |= ADVERTISED_10baseT_Half;
 -      if (adv & ADVERTISE_10FULL)
 -              ecmd->advertising |= ADVERTISED_10baseT_Full;
 -      if (adv & ADVERTISE_100HALF)
 -              ecmd->advertising |= ADVERTISED_100baseT_Half;
 -      if (adv & ADVERTISE_100FULL)
 -              ecmd->advertising |= ADVERTISED_100baseT_Full;
 -      if (np->autoneg && np->gigabit == PHY_GIGABIT) {
 -              adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
 -              if (adv & ADVERTISE_1000FULL)
 -                      ecmd->advertising |= ADVERTISED_1000baseT_Full;
 +              if (adv & ADVERTISE_10HALF)
 +                      ecmd->advertising |= ADVERTISED_10baseT_Half;
 +              if (adv & ADVERTISE_10FULL)
 +                      ecmd->advertising |= ADVERTISED_10baseT_Full;
 +              if (adv & ADVERTISE_100HALF)
 +                      ecmd->advertising |= ADVERTISED_100baseT_Half;
 +              if (adv & ADVERTISE_100FULL)
 +                      ecmd->advertising |= ADVERTISED_100baseT_Full;
 +              if (np->gigabit == PHY_GIGABIT) {
 +                      adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
 +                      if (adv & ADVERTISE_1000FULL)
 +                              ecmd->advertising |= ADVERTISED_1000baseT_Full;
 +              }
        }
 -
        ecmd->supported = (SUPPORTED_Autoneg |
                SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
                SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
@@@ -2988,26 -2497,15 +2988,26 @@@ static int nv_set_settings(struct net_d
                return -EINVAL;
        }
  
 -      spin_lock_irq(&np->lock);
 -      if (ecmd->autoneg == AUTONEG_ENABLE) {
 +      netif_carrier_off(dev);
 +      if (netif_running(dev)) {
 +              nv_disable_irq(dev);
  +              netif_tx_lock_bh(dev);
  +              spin_lock(&np->lock);
  +              /* stop engines */
  +              nv_stop_rx(dev);
  +              nv_stop_tx(dev);
  +              spin_unlock(&np->lock);
  +              netif_tx_unlock_bh(dev);
 +      }
 +
 +      if (ecmd->autoneg == AUTONEG_ENABLE) {
                int adv, bmcr;
  
                np->autoneg = 1;
  
                /* advertise only what has been requested */
                adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
 -              adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
 +              adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
                if (ecmd->advertising & ADVERTISED_10baseT_Half)
                        adv |= ADVERTISE_10HALF;
                if (ecmd->advertising & ADVERTISED_10baseT_Full)
                        adv |= ADVERTISE_100HALF;
                if (ecmd->advertising & ADVERTISED_100baseT_Full)
                        adv |= ADVERTISE_100FULL;
 +              if (np->pause_flags & NV_PAUSEFRAME_RX_REQ)  /* for rx we set both advertisments but disable tx pause */
 +                      adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
 +              if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
 +                      adv |=  ADVERTISE_PAUSE_ASYM;
                mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
  
                if (np->gigabit == PHY_GIGABIT) {
 -                      adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
 +                      adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
                        adv &= ~ADVERTISE_1000FULL;
                        if (ecmd->advertising & ADVERTISED_1000baseT_Full)
                                adv |= ADVERTISE_1000FULL;
 -                      mii_rw(dev, np->phyaddr, MII_1000BT_CR, adv);
 +                      mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
                }
  
 +              if (netif_running(dev))
 +                      printk(KERN_INFO "%s: link down.\n", dev->name);
                bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
                bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
                mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
                np->autoneg = 0;
  
                adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
 -              adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
 +              adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
                if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
                        adv |= ADVERTISE_10HALF;
                if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
                        adv |= ADVERTISE_100HALF;
                if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
                        adv |= ADVERTISE_100FULL;
 +              np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
 +              if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisments but disable tx pause */
 +                      adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
 +                      np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
 +              }
 +              if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) {
 +                      adv |=  ADVERTISE_PAUSE_ASYM;
 +                      np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
 +              }
                mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
                np->fixed_mode = adv;
  
                if (np->gigabit == PHY_GIGABIT) {
 -                      adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
 +                      adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
                        adv &= ~ADVERTISE_1000FULL;
 -                      mii_rw(dev, np->phyaddr, MII_1000BT_CR, adv);
 +                      mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
                }
  
                bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
 -              bmcr |= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_FULLDPLX);
 -              if (adv & (ADVERTISE_10FULL|ADVERTISE_100FULL))
 +              bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
 +              if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
                        bmcr |= BMCR_FULLDPLX;
 -              if (adv & (ADVERTISE_100HALF|ADVERTISE_100FULL))
 +              if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
                        bmcr |= BMCR_SPEED100;
                mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
 -
 -              if (netif_running(dev)) {
 +              if (np->phy_oui == PHY_OUI_MARVELL) {
 +                      /* reset the phy */
 +                      if (phy_reset(dev)) {
 +                              printk(KERN_INFO "%s: phy reset failed\n", dev->name);
 +                              return -EINVAL;
 +                      }
 +              } else if (netif_running(dev)) {
                        /* Wait a bit and then reconfigure the nic. */
                        udelay(10);
                        nv_linkchange(dev);
                }
        }
 -      spin_unlock_irq(&np->lock);
 +
 +      if (netif_running(dev)) {
 +              nv_start_rx(dev);
 +              nv_start_tx(dev);
 +              nv_enable_irq(dev);
 +      }
  
        return 0;
  }
@@@ -3125,39 -2598,24 +3125,39 @@@ static int nv_nway_reset(struct net_dev
        struct fe_priv *np = netdev_priv(dev);
        int ret;
  
 -      spin_lock_irq(&np->lock);
        if (np->autoneg) {
                int bmcr;
  
 +              netif_carrier_off(dev);
 +              if (netif_running(dev)) {
 +                      nv_disable_irq(dev);
 +                      spin_lock_bh(&dev->xmit_lock);
 +                      spin_lock(&np->lock);
 +                      /* stop engines */
 +                      nv_stop_rx(dev);
 +                      nv_stop_tx(dev);
 +                      spin_unlock(&np->lock);
 +                      spin_unlock_bh(&dev->xmit_lock);
 +                      printk(KERN_INFO "%s: link down.\n", dev->name);
 +              }
 +
                bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
                bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
                mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
  
 +              if (netif_running(dev)) {
 +                      nv_start_rx(dev);
 +                      nv_start_tx(dev);
 +                      nv_enable_irq(dev);
 +              }
                ret = 0;
        } else {
                ret = -EINVAL;
        }
 -      spin_unlock_irq(&np->lock);
  
        return ret;
  }
  
 -#ifdef NETIF_F_TSO
  static int nv_set_tso(struct net_device *dev, u32 value)
  {
        struct fe_priv *np = netdev_priv(dev);
        if ((np->driver_data & DEV_HAS_CHECKSUM))
                return ethtool_op_set_tso(dev, value);
        else
 -              return value ? -EOPNOTSUPP : 0;
 +              return -EOPNOTSUPP;
  }
 -#endif
  
 -static struct ethtool_ops ops = {
 -      .get_drvinfo = nv_get_drvinfo,
 -      .get_link = ethtool_op_get_link,
 -      .get_wol = nv_get_wol,
 -      .set_wol = nv_set_wol,
 -      .get_settings = nv_get_settings,
 -      .set_settings = nv_set_settings,
 -      .get_regs_len = nv_get_regs_len,
 -      .get_regs = nv_get_regs,
 -      .nway_reset = nv_nway_reset,
 -      .get_perm_addr = ethtool_op_get_perm_addr,
 -#ifdef NETIF_F_TSO
 -      .get_tso = ethtool_op_get_tso,
 -      .set_tso = nv_set_tso
 -#endif
 -};
 +static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
 +{
 +      struct fe_priv *np = netdev_priv(dev);
  
 -static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
 +      ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
 +      ring->rx_mini_max_pending = 0;
 +      ring->rx_jumbo_max_pending = 0;
 +      ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
 +
 +      ring->rx_pending = np->rx_ring_size;
 +      ring->rx_mini_pending = 0;
 +      ring->rx_jumbo_pending = 0;
 +      ring->tx_pending = np->tx_ring_size;
 +}
 +
 +static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
  {
 -      struct fe_priv *np = get_nvpriv(dev);
 +      struct fe_priv *np = netdev_priv(dev);
 +      u8 __iomem *base = get_hwbase(dev);
 +      u8 *rxtx_ring, *rx_skbuff, *tx_skbuff, *rx_dma, *tx_dma, *tx_dma_len;
 +      dma_addr_t ring_addr;
  
 -      spin_lock_irq(&np->lock);
 +      if (ring->rx_pending < RX_RING_MIN ||
 +          ring->tx_pending < TX_RING_MIN ||
 +          ring->rx_mini_pending != 0 ||
 +          ring->rx_jumbo_pending != 0 ||
 +          (np->desc_ver == DESC_VER_1 &&
 +           (ring->rx_pending > RING_MAX_DESC_VER_1 ||
 +            ring->tx_pending > RING_MAX_DESC_VER_1)) ||
 +          (np->desc_ver != DESC_VER_1 &&
 +           (ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
 +            ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
 +              return -EINVAL;
 +      }
  
 -      /* save vlan group */
 -      np->vlangrp = grp;
 +      /* allocate new rings */
 +      if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
 +              rxtx_ring = pci_alloc_consistent(np->pci_dev,
 +                                          sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
 +                                          &ring_addr);
 +      } else {
 +              rxtx_ring = pci_alloc_consistent(np->pci_dev,
 +                                          sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
 +                                          &ring_addr);
 +      }
 +      rx_skbuff = kmalloc(sizeof(struct sk_buff*) * ring->rx_pending, GFP_KERNEL);
 +      rx_dma = kmalloc(sizeof(dma_addr_t) * ring->rx_pending, GFP_KERNEL);
 +      tx_skbuff = kmalloc(sizeof(struct sk_buff*) * ring->tx_pending, GFP_KERNEL);
 +      tx_dma = kmalloc(sizeof(dma_addr_t) * ring->tx_pending, GFP_KERNEL);
 +      tx_dma_len = kmalloc(sizeof(unsigned int) * ring->tx_pending, GFP_KERNEL);
 +      if (!rxtx_ring || !rx_skbuff || !rx_dma || !tx_skbuff || !tx_dma || !tx_dma_len) {
 +              /* fall back to old rings */
 +              if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
 +                      if(rxtx_ring)
 +                              pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
 +                                                  rxtx_ring, ring_addr);
 +              } else {
 +                      if (rxtx_ring)
 +                              pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
 +                                                  rxtx_ring, ring_addr);
 +              }
 +              if (rx_skbuff)
 +                      kfree(rx_skbuff);
 +              if (rx_dma)
 +                      kfree(rx_dma);
 +              if (tx_skbuff)
 +                      kfree(tx_skbuff);
 +              if (tx_dma)
 +                      kfree(tx_dma);
 +              if (tx_dma_len)
 +                      kfree(tx_dma_len);
 +              goto exit;
 +      }
  
 -      if (grp) {
 -              /* enable vlan on MAC */
 -              np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS;
 +      if (netif_running(dev)) {
 +              nv_disable_irq(dev);
 +              spin_lock_bh(&dev->xmit_lock);
 +              spin_lock(&np->lock);
 +              /* stop engines */
 +              nv_stop_rx(dev);
 +              nv_stop_tx(dev);
 +              nv_txrx_reset(dev);
 +              /* drain queues */
 +              nv_drain_rx(dev);
 +              nv_drain_tx(dev);
 +              /* delete queues */
 +              free_rings(dev);
 +      }
 +
 +      /* set new values */
 +      np->rx_ring_size = ring->rx_pending;
 +      np->tx_ring_size = ring->tx_pending;
 +      np->tx_limit_stop = ring->tx_pending - TX_LIMIT_DIFFERENCE;
 +      np->tx_limit_start = ring->tx_pending - TX_LIMIT_DIFFERENCE - 1;
 +      if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
 +              np->rx_ring.orig = (struct ring_desc*)rxtx_ring;
 +              np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
        } else {
 -              /* disable vlan on MAC */
 -              np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
 -              np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
 +              np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring;
 +              np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
        }
 +      np->rx_skbuff = (struct sk_buff**)rx_skbuff;
 +      np->rx_dma = (dma_addr_t*)rx_dma;
 +      np->tx_skbuff = (struct sk_buff**)tx_skbuff;
 +      np->tx_dma = (dma_addr_t*)tx_dma;
 +      np->tx_dma_len = (unsigned int*)tx_dma_len;
 +      np->ring_addr = ring_addr;
 +
 +      memset(np->rx_skbuff, 0, sizeof(struct sk_buff*) * np->rx_ring_size);
 +      memset(np->rx_dma, 0, sizeof(dma_addr_t) * np->rx_ring_size);
 +      memset(np->tx_skbuff, 0, sizeof(struct sk_buff*) * np->tx_ring_size);
 +      memset(np->tx_dma, 0, sizeof(dma_addr_t) * np->tx_ring_size);
 +      memset(np->tx_dma_len, 0, sizeof(unsigned int) * np->tx_ring_size);
  
 -      writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
 +      if (netif_running(dev)) {
 +              /* reinit driver view of the queues */
 +              set_bufsize(dev);
 +              if (nv_init_ring(dev)) {
 +                      if (!np->in_shutdown)
 +                              mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
 +              }
  
 -      spin_unlock_irq(&np->lock);
 -};
 +              /* reinit nic view of the queues */
 +              writel(np->rx_buf_sz, base + NvRegOffloadConfig);
 +              setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
 +              writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
 +                      base + NvRegRingSizes);
 +              pci_push(base);
 +              writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
 +              pci_push(base);
  
 -static void nv_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 +              /* restart engines */
 +              nv_start_rx(dev);
 +              nv_start_tx(dev);
 +              spin_unlock(&np->lock);
 +              spin_unlock_bh(&dev->xmit_lock);
 +              nv_enable_irq(dev);
 +      }
 +      return 0;
 +exit:
 +      return -ENOMEM;
 +}
 +
 +static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
  {
 -      /* nothing to do */
 -};
 +      struct fe_priv *np = netdev_priv(dev);
  
 -static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
 +      pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0;
 +      pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0;
 +      pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0;
 +}
 +
 +static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
 +{
 +      struct fe_priv *np = netdev_priv(dev);
 +      int adv, bmcr;
 +
 +      if ((!np->autoneg && np->duplex == 0) ||
 +          (np->autoneg && !pause->autoneg && np->duplex == 0)) {
 +              printk(KERN_INFO "%s: can not set pause settings when forced link is in half duplex.\n",
 +                     dev->name);
 +              return -EINVAL;
 +      }
 +      if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
 +              printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name);
 +              return -EINVAL;
 +      }
 +
 +      netif_carrier_off(dev);
 +      if (netif_running(dev)) {
 +              nv_disable_irq(dev);
 +              spin_lock_bh(&dev->xmit_lock);
 +              spin_lock(&np->lock);
 +              /* stop engines */
 +              nv_stop_rx(dev);
 +              nv_stop_tx(dev);
 +              spin_unlock(&np->lock);
 +              spin_unlock_bh(&dev->xmit_lock);
 +      }
 +
 +      np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
 +      if (pause->rx_pause)
 +              np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
 +      if (pause->tx_pause)
 +              np->pause_flags |= NV_PAUSEFRAME_TX_REQ;
 +
 +      if (np->autoneg && pause->autoneg) {
 +              np->pause_flags |= NV_PAUSEFRAME_AUTONEG;
 +
 +              adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
 +              adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
  +              if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
 +                      adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
 +              if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
 +                      adv |=  ADVERTISE_PAUSE_ASYM;
 +              mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
 +
 +              if (netif_running(dev))
 +                      printk(KERN_INFO "%s: link down.\n", dev->name);
 +              bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
 +              bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
 +              mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
 +      } else {
 +              np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
 +              if (pause->rx_pause)
 +                      np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
 +              if (pause->tx_pause)
 +                      np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
 +
 +              if (!netif_running(dev))
 +                      nv_update_linkspeed(dev);
 +              else
 +                      nv_update_pause(dev, np->pause_flags);
 +      }
 +
 +      if (netif_running(dev)) {
 +              nv_start_rx(dev);
 +              nv_start_tx(dev);
 +              nv_enable_irq(dev);
 +      }
 +      return 0;
 +}
 +
 +static u32 nv_get_rx_csum(struct net_device *dev)
  {
 +      struct fe_priv *np = netdev_priv(dev);
 +      return (np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) != 0;
 +}
 +
 +static int nv_set_rx_csum(struct net_device *dev, u32 data)
 +{
 +      struct fe_priv *np = netdev_priv(dev);
        u8 __iomem *base = get_hwbase(dev);
 -      int i;
 -      u32 msixmap = 0;
 +      int retcode = 0;
  
 -      /* Each interrupt bit can be mapped to a MSIX vector (4 bits).
 -       * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
 -       * the remaining 8 interrupts.
 -       */
 -      for (i = 0; i < 8; i++) {
 -              if ((irqmask >> i) & 0x1) {
 -                      msixmap |= vector << (i << 2);
 +      if (np->driver_data & DEV_HAS_CHECKSUM) {
 +
 +              if (((np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) && data) ||
 +                  (!(np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) && !data)) {
 +                      /* already set or unset */
 +                      return 0;
                }
 -      }
 -      writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
  
 -      msixmap = 0;
 -      for (i = 0; i < 8; i++) {
 -              if ((irqmask >> (i + 8)) & 0x1) {
 -                      msixmap |= vector << (i << 2);
 +              if (data) {
 +                      np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
 +              } else if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE)) {
 +                      np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
 +              } else {
 +                      printk(KERN_INFO "Can not disable rx checksum if vlan is enabled\n");
 +                      return -EINVAL;
 +              }
 +
 +              if (netif_running(dev)) {
 +                      spin_lock_irq(&np->lock);
 +                      writel(np->txrxctl_bits, base + NvRegTxRxControl);
 +                      spin_unlock_irq(&np->lock);
                }
 +      } else {
 +              return -EINVAL;
        }
 -      writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
 +
 +      return retcode;
  }
  
 -static int nv_request_irq(struct net_device *dev)
 +static int nv_set_tx_csum(struct net_device *dev, u32 data)
  {
 -      struct fe_priv *np = get_nvpriv(dev);
 +      struct fe_priv *np = netdev_priv(dev);
 +
 +      if (np->driver_data & DEV_HAS_CHECKSUM)
 +              return ethtool_op_set_tx_hw_csum(dev, data);
 +      else
 +              return -EOPNOTSUPP;
 +}
 +
 +static int nv_set_sg(struct net_device *dev, u32 data)
 +{
 +      struct fe_priv *np = netdev_priv(dev);
 +
 +      if (np->driver_data & DEV_HAS_CHECKSUM)
 +              return ethtool_op_set_sg(dev, data);
 +      else
 +              return -EOPNOTSUPP;
 +}
 +
 +static int nv_get_stats_count(struct net_device *dev)
 +{
 +      struct fe_priv *np = netdev_priv(dev);
 +
 +      if (np->driver_data & DEV_HAS_STATISTICS)
 +              return (sizeof(struct nv_ethtool_stats)/sizeof(u64));
 +      else
 +              return 0;
 +}
 +
 +static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
 +{
 +      struct fe_priv *np = netdev_priv(dev);
 +
 +      /* update stats */
 +      nv_do_stats_poll((unsigned long)dev);
 +
 +      memcpy(buffer, &np->estats, nv_get_stats_count(dev)*sizeof(u64));
 +}
 +
 +static int nv_self_test_count(struct net_device *dev)
 +{
 +      struct fe_priv *np = netdev_priv(dev);
 +
 +      if (np->driver_data & DEV_HAS_TEST_EXTENDED)
 +              return NV_TEST_COUNT_EXTENDED;
 +      else
 +              return NV_TEST_COUNT_BASE;
 +}
 +
 +static int nv_link_test(struct net_device *dev)
 +{
 +      struct fe_priv *np = netdev_priv(dev);
 +      int mii_status;
 +
 +      mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
 +      mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
 +
 +      /* check phy link status */
 +      if (!(mii_status & BMSR_LSTATUS))
 +              return 0;
 +      else
 +              return 1;
 +}
 +
 +static int nv_register_test(struct net_device *dev)
 +{
 +      u8 __iomem *base = get_hwbase(dev);
 +      int i = 0;
 +      u32 orig_read, new_read;
 +
 +      do {
 +              orig_read = readl(base + nv_registers_test[i].reg);
 +
 +              /* xor with mask to toggle bits */
 +              orig_read ^= nv_registers_test[i].mask;
 +
 +              writel(orig_read, base + nv_registers_test[i].reg);
 +
 +              new_read = readl(base + nv_registers_test[i].reg);
 +
 +              if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
 +                      return 0;
 +
 +              /* restore original value */
 +              orig_read ^= nv_registers_test[i].mask;
 +              writel(orig_read, base + nv_registers_test[i].reg);
 +
 +      } while (nv_registers_test[++i].reg != 0);
 +
 +      return 1;
 +}
 +
 +static int nv_interrupt_test(struct net_device *dev)
 +{
 +      struct fe_priv *np = netdev_priv(dev);
        u8 __iomem *base = get_hwbase(dev);
        int ret = 1;
 -      int i;
 +      int testcnt;
 +      u32 save_msi_flags, save_poll_interval = 0;
  
 -      if (np->msi_flags & NV_MSI_X_CAPABLE) {
 -              for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
 -                      np->msi_x_entry[i].entry = i;
 +      if (netif_running(dev)) {
 +              /* free current irq */
 +              nv_free_irq(dev);
 +              save_poll_interval = readl(base+NvRegPollingInterval);
 +      }
 +
 +      /* flag to test interrupt handler */
 +      np->intr_test = 0;
 +
 +      /* setup test irq */
 +      save_msi_flags = np->msi_flags;
 +      np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
 +      np->msi_flags |= 0x001; /* setup 1 vector */
 +      if (nv_request_irq(dev, 1))
 +              return 0;
 +
 +      /* setup timer interrupt */
 +      writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
 +      writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
 +
 +      nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);
 +
 +      /* wait for at least one interrupt */
 +      msleep(100);
 +
 +      spin_lock_irq(&np->lock);
 +
 +      /* flag should be set within ISR */
 +      testcnt = np->intr_test;
 +      if (!testcnt)
 +              ret = 2;
 +
 +      nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
 +      if (!(np->msi_flags & NV_MSI_X_ENABLED))
 +              writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
 +      else
 +              writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
 +
 +      spin_unlock_irq(&np->lock);
 +
 +      nv_free_irq(dev);
 +
 +      np->msi_flags = save_msi_flags;
 +
 +      if (netif_running(dev)) {
 +              writel(save_poll_interval, base + NvRegPollingInterval);
 +              writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
 +              /* restore original irq */
 +              if (nv_request_irq(dev, 0))
 +                      return 0;
 +      }
 +
 +      return ret;
 +}
 +
 +static int nv_loopback_test(struct net_device *dev)
 +{
 +      struct fe_priv *np = netdev_priv(dev);
 +      u8 __iomem *base = get_hwbase(dev);
 +      struct sk_buff *tx_skb, *rx_skb;
 +      dma_addr_t test_dma_addr;
 +      u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
 +      u32 Flags;
 +      int len, i, pkt_len;
 +      u8 *pkt_data;
 +      u32 filter_flags = 0;
 +      u32 misc1_flags = 0;
 +      int ret = 1;
 +
 +      if (netif_running(dev)) {
 +              nv_disable_irq(dev);
 +              filter_flags = readl(base + NvRegPacketFilterFlags);
 +              misc1_flags = readl(base + NvRegMisc1);
 +      } else {
 +              nv_txrx_reset(dev);
 +      }
 +
 +      /* reinit driver view of the rx queue */
 +      set_bufsize(dev);
 +      nv_init_ring(dev);
 +
 +      /* setup hardware for loopback */
 +      writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
 +      writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);
 +
 +      /* reinit nic view of the rx queue */
 +      writel(np->rx_buf_sz, base + NvRegOffloadConfig);
 +      setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
 +      writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
 +              base + NvRegRingSizes);
 +      pci_push(base);
 +
 +      /* restart rx engine */
 +      nv_start_rx(dev);
 +      nv_start_tx(dev);
 +
 +      /* setup packet for tx */
 +      pkt_len = ETH_DATA_LEN;
 +      tx_skb = dev_alloc_skb(pkt_len);
 +      pkt_data = skb_put(tx_skb, pkt_len);
 +      for (i = 0; i < pkt_len; i++)
 +              pkt_data[i] = (u8)(i & 0xff);
 +      test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
 +                                     tx_skb->end-tx_skb->data, PCI_DMA_FROMDEVICE);
 +
 +      if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
 +              np->tx_ring.orig[0].PacketBuffer = cpu_to_le32(test_dma_addr);
 +              np->tx_ring.orig[0].FlagLen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
 +      } else {
 +              np->tx_ring.ex[0].PacketBufferHigh = cpu_to_le64(test_dma_addr) >> 32;
 +              np->tx_ring.ex[0].PacketBufferLow = cpu_to_le64(test_dma_addr) & 0x0FFFFFFFF;
 +              np->tx_ring.ex[0].FlagLen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
 +      }
 +      writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
 +      pci_push(get_hwbase(dev));
 +
 +      msleep(500);
 +
 +      /* check for rx of the packet */
 +      if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
 +              Flags = le32_to_cpu(np->rx_ring.orig[0].FlagLen);
 +              len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
 +
 +      } else {
 +              Flags = le32_to_cpu(np->rx_ring.ex[0].FlagLen);
 +              len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
 +      }
 +
 +      if (Flags & NV_RX_AVAIL) {
 +              ret = 0;
 +      } else if (np->desc_ver == DESC_VER_1) {
 +              if (Flags & NV_RX_ERROR)
 +                      ret = 0;
 +      } else {
 +              if (Flags & NV_RX2_ERROR) {
 +                      ret = 0;
                }
 -              if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
 -                      np->msi_flags |= NV_MSI_X_ENABLED;
 -                      if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
 -                              /* Request irq for rx handling */
 -                              if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) {
 -                                      printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
 -                                      pci_disable_msix(np->pci_dev);
 -                                      np->msi_flags &= ~NV_MSI_X_ENABLED;
 -                                      goto out_err;
 -                              }
 -                              /* Request irq for tx handling */
 -                              if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) {
 -                                      printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
 -                                      pci_disable_msix(np->pci_dev);
 -                                      np->msi_flags &= ~NV_MSI_X_ENABLED;
 -                                      goto out_free_rx;
 -                              }
 -                              /* Request irq for link and timer handling */
 -                              if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) {
 -                                      printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
 -                                      pci_disable_msix(np->pci_dev);
 -                                      np->msi_flags &= ~NV_MSI_X_ENABLED;
 -                                      goto out_free_tx;
 -                              }
 -                              /* map interrupts to their respective vector */
 -                              writel(0, base + NvRegMSIXMap0);
 -                              writel(0, base + NvRegMSIXMap1);
 -                              set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
 -                              set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
 -                              set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
 -                      } else {
 -                              /* Request irq for all interrupts */
 -                              if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
 -                                      printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
 -                                      pci_disable_msix(np->pci_dev);
 -                                      np->msi_flags &= ~NV_MSI_X_ENABLED;
 -                                      goto out_err;
 -                              }
 +      }
  
 -                              /* map interrupts to vector 0 */
 -                              writel(0, base + NvRegMSIXMap0);
 -                              writel(0, base + NvRegMSIXMap1);
 +      if (ret) {
 +              if (len != pkt_len) {
 +                      ret = 0;
 +                      dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n",
 +                              dev->name, len, pkt_len);
 +              } else {
 +                      rx_skb = np->rx_skbuff[0];
 +                      for (i = 0; i < pkt_len; i++) {
 +                              if (rx_skb->data[i] != (u8)(i & 0xff)) {
 +                                      ret = 0;
 +                                      dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n",
 +                                              dev->name, i);
 +                                      break;
 +                              }
                        }
                }
 +      } else {
 +              dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name);
        }
 -      if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
 -              if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
 -                      np->msi_flags |= NV_MSI_ENABLED;
 -                      if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
 -                              printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
 -                              pci_disable_msi(np->pci_dev);
 -                              np->msi_flags &= ~NV_MSI_ENABLED;
 -                              goto out_err;
 +
 +      pci_unmap_page(np->pci_dev, test_dma_addr,
 +                     tx_skb->end-tx_skb->data,
 +                     PCI_DMA_TODEVICE);
 +      dev_kfree_skb_any(tx_skb);
 +
 +      /* stop engines */
 +      nv_stop_rx(dev);
 +      nv_stop_tx(dev);
 +      nv_txrx_reset(dev);
 +      /* drain rx queue */
 +      nv_drain_rx(dev);
 +      nv_drain_tx(dev);
 +
 +      if (netif_running(dev)) {
 +              writel(misc1_flags, base + NvRegMisc1);
 +              writel(filter_flags, base + NvRegPacketFilterFlags);
 +              nv_enable_irq(dev);
 +      }
 +
 +      return ret;
 +}
 +
 +static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
 +{
 +      struct fe_priv *np = netdev_priv(dev);
 +      u8 __iomem *base = get_hwbase(dev);
 +      int result;
 +      memset(buffer, 0, nv_self_test_count(dev)*sizeof(u64));
 +
 +      if (!nv_link_test(dev)) {
 +              test->flags |= ETH_TEST_FL_FAILED;
 +              buffer[0] = 1;
 +      }
 +
 +      if (test->flags & ETH_TEST_FL_OFFLINE) {
 +              if (netif_running(dev)) {
 +                      netif_stop_queue(dev);
 +                      spin_lock_bh(&dev->xmit_lock);
 +                      spin_lock_irq(&np->lock);
 +                      nv_disable_hw_interrupts(dev, np->irqmask);
 +                      if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
 +                              writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
 +                      } else {
 +                              writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
                        }
 +                      /* stop engines */
 +                      nv_stop_rx(dev);
 +                      nv_stop_tx(dev);
 +                      nv_txrx_reset(dev);
 +                      /* drain rx queue */
 +                      nv_drain_rx(dev);
 +                      nv_drain_tx(dev);
 +                      spin_unlock_irq(&np->lock);
 +                      spin_unlock_bh(&dev->xmit_lock);
 +              }
  
 -                      /* map interrupts to vector 0 */
 -                      writel(0, base + NvRegMSIMap0);
 -                      writel(0, base + NvRegMSIMap1);
 -                      /* enable msi vector 0 */
 -                      writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
 +              if (!nv_register_test(dev)) {
 +                      test->flags |= ETH_TEST_FL_FAILED;
 +                      buffer[1] = 1;
 +              }
 +
 +              result = nv_interrupt_test(dev);
 +              if (result != 1) {
 +                      test->flags |= ETH_TEST_FL_FAILED;
 +                      buffer[2] = 1;
 +              }
 +              if (result == 0) {
 +                      /* bail out */
 +                      return;
 +              }
 +
 +              if (!nv_loopback_test(dev)) {
 +                      test->flags |= ETH_TEST_FL_FAILED;
 +                      buffer[3] = 1;
 +              }
 +
 +              if (netif_running(dev)) {
 +                      /* reinit driver view of the rx queue */
 +                      set_bufsize(dev);
 +                      if (nv_init_ring(dev)) {
 +                              if (!np->in_shutdown)
 +                                      mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
 +                      }
 +                      /* reinit nic view of the rx queue */
 +                      writel(np->rx_buf_sz, base + NvRegOffloadConfig);
 +                      setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
 +                      writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
 +                              base + NvRegRingSizes);
 +                      pci_push(base);
 +                      writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
 +                      pci_push(base);
 +                      /* restart rx engine */
 +                      nv_start_rx(dev);
 +                      nv_start_tx(dev);
 +                      netif_start_queue(dev);
 +                      nv_enable_hw_interrupts(dev, np->irqmask);
                }
        }
 -      if (ret != 0) {
 -              if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0)
 -                      goto out_err;
 -      }
 +}
  
 -      return 0;
 -out_free_tx:
 -      free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
 -out_free_rx:
 -      free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
 -out_err:
 -      return 1;
 +static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
 +{
 +      switch (stringset) {
 +      case ETH_SS_STATS:
 +              memcpy(buffer, &nv_estats_str, nv_get_stats_count(dev)*sizeof(struct nv_ethtool_str));
 +              break;
 +      case ETH_SS_TEST:
 +              memcpy(buffer, &nv_etests_str, nv_self_test_count(dev)*sizeof(struct nv_ethtool_str));
 +              break;
 +      }
  }
  
 -static void nv_free_irq(struct net_device *dev)
 +static struct ethtool_ops ops = {
 +      .get_drvinfo = nv_get_drvinfo,
 +      .get_link = ethtool_op_get_link,
 +      .get_wol = nv_get_wol,
 +      .set_wol = nv_set_wol,
 +      .get_settings = nv_get_settings,
 +      .set_settings = nv_set_settings,
 +      .get_regs_len = nv_get_regs_len,
 +      .get_regs = nv_get_regs,
 +      .nway_reset = nv_nway_reset,
 +      .get_perm_addr = ethtool_op_get_perm_addr,
 +      .get_tso = ethtool_op_get_tso,
 +      .set_tso = nv_set_tso,
 +      .get_ringparam = nv_get_ringparam,
 +      .set_ringparam = nv_set_ringparam,
 +      .get_pauseparam = nv_get_pauseparam,
 +      .set_pauseparam = nv_set_pauseparam,
 +      .get_rx_csum = nv_get_rx_csum,
 +      .set_rx_csum = nv_set_rx_csum,
 +      .get_tx_csum = ethtool_op_get_tx_csum,
 +      .set_tx_csum = nv_set_tx_csum,
 +      .get_sg = ethtool_op_get_sg,
 +      .set_sg = nv_set_sg,
 +      .get_strings = nv_get_strings,
 +      .get_stats_count = nv_get_stats_count,
 +      .get_ethtool_stats = nv_get_ethtool_stats,
 +      .self_test_count = nv_self_test_count,
 +      .self_test = nv_self_test,
 +};
 +
 +static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
  {
        struct fe_priv *np = get_nvpriv(dev);
 -      int i;
  
 -      if (np->msi_flags & NV_MSI_X_ENABLED) {
 -              for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
 -                      free_irq(np->msi_x_entry[i].vector, dev);
 -              }
 -              pci_disable_msix(np->pci_dev);
 -              np->msi_flags &= ~NV_MSI_X_ENABLED;
 +      spin_lock_irq(&np->lock);
 +
 +      /* save vlan group */
 +      np->vlangrp = grp;
 +
 +      if (grp) {
 +              /* enable vlan on MAC */
 +              np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS;
        } else {
 -              free_irq(np->pci_dev->irq, dev);
 -              if (np->msi_flags & NV_MSI_ENABLED) {
 -                      pci_disable_msi(np->pci_dev);
 -                      np->msi_flags &= ~NV_MSI_ENABLED;
 -              }
 +              /* disable vlan on MAC */
 +              np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
 +              np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
        }
 -}
 +
 +      writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
 +
 +      spin_unlock_irq(&np->lock);
 +};
 +
 +static void nv_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 +{
 +      /* nothing to do */
 +};
  
  static int nv_open(struct net_device *dev)
  {
  
        writel(0, base + NvRegAdapterControl);
  
 +      if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
 +              writel(NVREG_TX_PAUSEFRAME_DISABLE,  base + NvRegTxPauseFrame);
 +
        /* 2) initialize descriptor rings */
        set_bufsize(dev);
        oom = nv_init_ring(dev);
  
        /* 4) give hw rings */
        setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
 -      writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
 +      writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
                base + NvRegRingSizes);
  
        /* 5) continue setup */
                        base + NvRegAdapterControl);
        writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
        writel(NVREG_UNKSETUP4_VAL, base + NvRegUnknownSetupReg4);
 -      writel(NVREG_WAKEUPFLAGS_VAL, base + NvRegWakeUpFlags);
 +      if (np->wolenabled)
 +              writel(NVREG_WAKEUPFLAGS_ENABLE , base + NvRegWakeUpFlags);
  
        i = readl(base + NvRegPowerState);
        if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0)
        writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
        pci_push(base);
  
 -      if (nv_request_irq(dev)) {
 +      if (nv_request_irq(dev, 0)) {
                goto out_drain;
        }
  
        }
        if (oom)
                mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
 +
 +      /* start statistics timer */
 +      if (np->driver_data & DEV_HAS_STATISTICS)
 +              mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);
 +
        spin_unlock_irq(&np->lock);
  
        return 0;
@@@ -4026,7 -2960,6 +4026,7 @@@ static int nv_close(struct net_device *
  
        del_timer_sync(&np->oom_kick);
        del_timer_sync(&np->nic_poll);
 +      del_timer_sync(&np->stats_poll);
  
        netif_stop_queue(dev);
        spin_lock_irq(&np->lock);
@@@ -4086,9 -3019,6 +4086,9 @@@ static int __devinit nv_probe(struct pc
        init_timer(&np->nic_poll);
        np->nic_poll.data = (unsigned long) dev;
        np->nic_poll.function = &nv_do_nic_poll;        /* timer handler */
 +      init_timer(&np->stats_poll);
 +      np->stats_poll.data = (unsigned long) dev;
 +      np->stats_poll.function = &nv_do_stats_poll;    /* timer handler */
  
        err = pci_enable_device(pci_dev);
        if (err) {
        if (err < 0)
                goto out_disable;
  
 -      if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL))
 +      if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS))
                np->register_size = NV_PCI_REGSZ_VER2;
        else
                np->register_size = NV_PCI_REGSZ_VER1;
                /* packet format 3: supports 40-bit addressing */
                np->desc_ver = DESC_VER_3;
                np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
 -              if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) {
 -                      printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
 -                                      pci_name(pci_dev));
 -              } else {
 -                      dev->features |= NETIF_F_HIGHDMA;
 -                      printk(KERN_INFO "forcedeth: using HIGHDMA\n");
 -              }
 -              if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
 -                      printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n",
 -                             pci_name(pci_dev));
 +              if (dma_64bit) {
 +                      if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) {
 +                              printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
 +                                     pci_name(pci_dev));
 +                      } else {
 +                              dev->features |= NETIF_F_HIGHDMA;
 +                              printk(KERN_INFO "forcedeth: using HIGHDMA\n");
 +                      }
 +                      if (pci_set_consistent_dma_mask(pci_dev, DMA_39BIT_MASK)) {
 +                              printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed, using 32-bit ring buffers for device %s.\n",
 +                                     pci_name(pci_dev));
 +                      }
                }
        } else if (id->driver_data & DEV_HAS_LARGEDESC) {
                /* packet format 2: supports jumbo frames */
        }
  
        np->msi_flags = 0;
 -      if ((id->driver_data & DEV_HAS_MSI) && !disable_msi) {
 +      if ((id->driver_data & DEV_HAS_MSI) && msi) {
                np->msi_flags |= NV_MSI_CAPABLE;
        }
 -      if ((id->driver_data & DEV_HAS_MSI_X) && !disable_msix) {
 +      if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
                np->msi_flags |= NV_MSI_X_CAPABLE;
        }
  
 +      np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
 +      if (id->driver_data & DEV_HAS_PAUSEFRAME_TX) {
 +              np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
 +      }
 +
 +
        err = -ENOMEM;
        np->base = ioremap(addr, np->register_size);
        if (!np->base)
  
        dev->irq = pci_dev->irq;
  
 +      np->rx_ring_size = RX_RING_DEFAULT;
 +      np->tx_ring_size = TX_RING_DEFAULT;
 +      np->tx_limit_stop = np->tx_ring_size - TX_LIMIT_DIFFERENCE;
 +      np->tx_limit_start = np->tx_ring_size - TX_LIMIT_DIFFERENCE - 1;
 +
        if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
                np->rx_ring.orig = pci_alloc_consistent(pci_dev,
 -                                      sizeof(struct ring_desc) * (RX_RING + TX_RING),
 +                                      sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
                                        &np->ring_addr);
                if (!np->rx_ring.orig)
                        goto out_unmap;
 -              np->tx_ring.orig = &np->rx_ring.orig[RX_RING];
 +              np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
        } else {
                np->rx_ring.ex = pci_alloc_consistent(pci_dev,
 -                                      sizeof(struct ring_desc_ex) * (RX_RING + TX_RING),
 +                                      sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
                                        &np->ring_addr);
                if (!np->rx_ring.ex)
                        goto out_unmap;
 -              np->tx_ring.ex = &np->rx_ring.ex[RX_RING];
 +              np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
        }
 +      np->rx_skbuff = kmalloc(sizeof(struct sk_buff*) * np->rx_ring_size, GFP_KERNEL);
 +      np->rx_dma = kmalloc(sizeof(dma_addr_t) * np->rx_ring_size, GFP_KERNEL);
 +      np->tx_skbuff = kmalloc(sizeof(struct sk_buff*) * np->tx_ring_size, GFP_KERNEL);
 +      np->tx_dma = kmalloc(sizeof(dma_addr_t) * np->tx_ring_size, GFP_KERNEL);
 +      np->tx_dma_len = kmalloc(sizeof(unsigned int) * np->tx_ring_size, GFP_KERNEL);
 +      if (!np->rx_skbuff || !np->rx_dma || !np->tx_skbuff || !np->tx_dma || !np->tx_dma_len)
 +              goto out_freering;
 +      memset(np->rx_skbuff, 0, sizeof(struct sk_buff*) * np->rx_ring_size);
 +      memset(np->rx_dma, 0, sizeof(dma_addr_t) * np->rx_ring_size);
 +      memset(np->tx_skbuff, 0, sizeof(struct sk_buff*) * np->tx_ring_size);
 +      memset(np->tx_dma, 0, sizeof(dma_addr_t) * np->tx_ring_size);
 +      memset(np->tx_dma_len, 0, sizeof(unsigned int) * np->tx_ring_size);
  
        dev->open = nv_open;
        dev->stop = nv_close;
        if (i == 33) {
                printk(KERN_INFO "%s: open: Could not find a valid PHY.\n",
                       pci_name(pci_dev));
 -              goto out_freering;
 +              goto out_error;
        }
 -      
 +
        /* reset it */
        phy_init(dev);
  
        err = register_netdev(dev);
        if (err) {
                printk(KERN_INFO "forcedeth: unable to register netdev: %d\n", err);
 -              goto out_freering;
 +              goto out_error;
        }
        printk(KERN_INFO "%s: forcedeth.c: subsystem: %05x:%04x bound to %s\n",
                        dev->name, pci_dev->subsystem_vendor, pci_dev->subsystem_device,
  
        return 0;
  
 -out_freering:
 -      if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
 -              pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING),
 -                                  np->rx_ring.orig, np->ring_addr);
 -      else
 -              pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (RX_RING + TX_RING),
 -                                  np->rx_ring.ex, np->ring_addr);
 +out_error:
        pci_set_drvdata(pci_dev, NULL);
 +out_freering:
 +      free_rings(dev);
  out_unmap:
        iounmap(get_hwbase(dev));
  out_relreg:
  static void __devexit nv_remove(struct pci_dev *pci_dev)
  {
        struct net_device *dev = pci_get_drvdata(pci_dev);
 -      struct fe_priv *np = netdev_priv(dev);
  
        unregister_netdev(dev);
  
        /* free all structures */
 -      if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
 -              pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING), np->rx_ring.orig, np->ring_addr);
 -      else
 -              pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (RX_RING + TX_RING), np->rx_ring.ex, np->ring_addr);
 +      free_rings(dev);
        iounmap(get_hwbase(dev));
        pci_release_regions(pci_dev);
        pci_disable_device(pci_dev);
@@@ -4461,43 -3374,11 +4461,43 @@@ static struct pci_device_id pci_tbl[] 
        },
        {       /* MCP55 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
 -              .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL,
 +              .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
        },
        {       /* MCP55 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
 -              .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL,
 +              .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
 +      },
 +      {       /* MCP61 Ethernet Controller */
 +              PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
 +              .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
 +      },
 +      {       /* MCP61 Ethernet Controller */
 +              PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17),
 +              .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
 +      },
 +      {       /* MCP61 Ethernet Controller */
 +              PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18),
 +              .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
 +      },
 +      {       /* MCP61 Ethernet Controller */
 +              PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19),
 +              .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
 +      },
 +      {       /* MCP65 Ethernet Controller */
 +              PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
 +              .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
 +      },
 +      {       /* MCP65 Ethernet Controller */
 +              PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
 +              .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
 +      },
 +      {       /* MCP65 Ethernet Controller */
 +              PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
 +              .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
 +      },
 +      {       /* MCP65 Ethernet Controller */
 +              PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
 +              .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
        },
        {0,},
  };
@@@ -4527,12 -3408,10 +4527,12 @@@ module_param(optimization_mode, int, 0)
  MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer.");
  module_param(poll_interval, int, 0);
  MODULE_PARM_DESC(poll_interval, "Interval determines how frequent timer interrupt is generated by [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535.");
 -module_param(disable_msi, int, 0);
 -MODULE_PARM_DESC(disable_msi, "Disable MSI interrupts by setting to 1.");
 -module_param(disable_msix, int, 0);
 -MODULE_PARM_DESC(disable_msix, "Disable MSIX interrupts by setting to 1.");
 +module_param(msi, int, 0);
 +MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
 +module_param(msix, int, 0);
 +MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0.");
 +module_param(dma_64bit, int, 0);
 +MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
  
  MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
  MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
index 64ecf929d2acfcbb81c4de21b0a5ceaddd74a593,56d86c7c03862348d52a5ead0dd7c554168d649b..8fea2aa455d4690a10763f7a4dc070c755a9c3d0
                        Copyright (C) 2001 Manfred Spraul
        * ethtool support (jgarzik)
        * Replace some MII-related magic numbers with constants (jgarzik)
 -  
 +
        TODO:
        * enable pci_power_off
        * Wake-On-LAN
  */
 -  
 +
  #define DRV_NAME      "winbond-840"
  #define DRV_VERSION   "1.01-d"
  #define DRV_RELDATE   "Nov-17-2001"
@@@ -57,7 -57,7 +57,7 @@@ c-help-name: Winbond W89c840 PCI Ethern
  c-help-symbol: CONFIG_WINBOND_840
  c-help: This driver is for the Winbond W89c840 chip.  It also works with
  c-help: the TX9882 chip on the Compex RL100-ATX board.
 -c-help: More specific information and updates are available from 
 +c-help: More specific information and updates are available from
  c-help: http://www.scyld.com/network/drivers.html
  */
  
@@@ -207,7 -207,7 +207,7 @@@ Test with 'ping -s 10000' on a fast com
  
  */
  
 -\f
 +
  
  /*
    PCI probe table.
@@@ -374,7 -374,7 +374,7 @@@ static int netdev_ioctl(struct net_devi
  static struct ethtool_ops netdev_ethtool_ops;
  static int  netdev_close(struct net_device *dev);
  
 -\f
 +
  
  static int __devinit w840_probe1 (struct pci_dev *pdev,
                                  const struct pci_device_id *ent)
        np->mii_if.mdio_read = mdio_read;
        np->mii_if.mdio_write = mdio_write;
        np->base_addr = ioaddr;
 -      
 +
        pci_set_drvdata(pdev, dev);
  
        if (dev->mem_start)
@@@ -510,7 -510,7 +510,7 @@@ err_out_netdev
        return -ENODEV;
  }
  
 -\f
 +
  /* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.  These are
     often serial bit streams generated by the host processor.
     The example below is for the common 93c46 EEPROM, 64 16 bit words. */
@@@ -660,7 -660,7 +660,7 @@@ static void mdio_write(struct net_devic
        return;
  }
  
 -\f
 +
  static int netdev_open(struct net_device *dev)
  {
        struct netdev_private *np = netdev_priv(dev);
@@@ -731,7 -731,7 +731,7 @@@ static int update_link(struct net_devic
                                dev->name, np->phys[0]);
                netif_carrier_on(dev);
        }
 -      
 +
        if ((np->mii & ~0xf) == MII_DAVICOM_DM9101) {
                /* If the link partner doesn't support autonegotiation
                 * the MII detects it's abilities with the "parallel detection".
                result |= 0x20000000;
        if (result != np->csr6 && debug)
                printk(KERN_INFO "%s: Setting %dMBit-%s-duplex based on MII#%d\n",
 -                               dev->name, fasteth ? 100 : 10, 
 +                               dev->name, fasteth ? 100 : 10,
                                duplex ? "full" : "half", np->phys[0]);
        return result;
  }
@@@ -947,7 -947,7 +947,7 @@@ static void init_registers(struct net_d
        iowrite32(i, ioaddr + PCIBusCfg);
  
        np->csr6 = 0;
 -      /* 128 byte Tx threshold; 
 +      /* 128 byte Tx threshold;
                Transmit on; Receive on; */
        update_csr6(dev, 0x00022002 | update_link(dev) | __set_rx_mode(dev));
  
@@@ -1584,7 -1584,7 +1584,7 @@@ static int netdev_close(struct net_devi
  static void __devexit w840_remove1 (struct pci_dev *pdev)
  {
        struct net_device *dev = pci_get_drvdata(pdev);
 -      
 +
        if (dev) {
                struct netdev_private *np = netdev_priv(dev);
                unregister_netdev(dev);
   * - get_stats:
   *    spin_lock_irq(np->lock), doesn't touch hw if not present
   * - hard_start_xmit:
-  *    netif_stop_queue + spin_unlock_wait(&dev->xmit_lock);
+  *    synchronize_irq + netif_tx_disable;
   * - tx_timeout:
-  *    netif_device_detach + spin_unlock_wait(&dev->xmit_lock);
+  *    netif_device_detach + netif_tx_disable;
   * - set_multicast_list
-  *    netif_device_detach + spin_unlock_wait(&dev->xmit_lock);
+  *    netif_device_detach + netif_tx_disable;
   * - interrupt handler
   *    doesn't touch hw if not present, synchronize_irq waits for
   *    running instances of the interrupt handler.
@@@ -1635,12 -1635,11 +1635,11 @@@ static int w840_suspend (struct pci_de
                netif_device_detach(dev);
                update_csr6(dev, 0);
                iowrite32(0, ioaddr + IntrEnable);
-               netif_stop_queue(dev);
                spin_unlock_irq(&np->lock);
  
-               spin_unlock_wait(&dev->xmit_lock);
                synchronize_irq(dev->irq);
+               netif_tx_disable(dev);
+       
                np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
  
                /* no more hardware accesses behind this line. */
index b563decf599e69182975ccf78f5b180a94b026f3,a5fcfcde63d18c2ebea07136e7f4c074ecb543c6..8a31b591a901198f36b67eab7b4ab1612aa76992
@@@ -201,12 -201,41 +201,12 @@@ static struct 
  /* Data types                                                       */
  /********************************************************************/
  
 -/* Used in Event handling.
 - * We avoid nested structures as they break on ARM -- Moustafa */
 -struct hermes_tx_descriptor_802_11 {
 -      /* hermes_tx_descriptor */
 -      __le16 status;
 -      __le16 reserved1;
 -      __le16 reserved2;
 -      __le32 sw_support;
 -      u8 retry_count;
 -      u8 tx_rate;
 -      __le16 tx_control;
 -
 -      /* ieee80211_hdr */
 +/* Beginning of the Tx descriptor, used in TxExc handling */
 +struct hermes_txexc_data {
 +      struct hermes_tx_descriptor desc;
        __le16 frame_ctl;
        __le16 duration_id;
        u8 addr1[ETH_ALEN];
 -      u8 addr2[ETH_ALEN];
 -      u8 addr3[ETH_ALEN];
 -      __le16 seq_ctl;
 -      u8 addr4[ETH_ALEN];
 -
 -      __le16 data_len;
 -
 -      /* ethhdr */
 -      u8 h_dest[ETH_ALEN];    /* destination eth addr */
 -      u8 h_source[ETH_ALEN];  /* source ether addr    */
 -      __be16 h_proto;         /* packet type ID field */
 -
 -      /* p8022_hdr */
 -      u8 dsap;
 -      u8 ssap;
 -      u8 ctrl;
 -      u8 oui[3];
 -
 -      __be16 ethertype;
  } __attribute__ ((packed));
  
  /* Rx frame header except compatibility 802.3 header */
@@@ -421,39 -450,53 +421,39 @@@ static int orinoco_xmit(struct sk_buff 
        hermes_t *hw = &priv->hw;
        int err = 0;
        u16 txfid = priv->txfid;
 -      char *p;
        struct ethhdr *eh;
 -      int len, data_len, data_off;
 +      int data_off;
        struct hermes_tx_descriptor desc;
        unsigned long flags;
  
 -      TRACE_ENTER(dev->name);
 -
        if (! netif_running(dev)) {
                printk(KERN_ERR "%s: Tx on stopped device!\n",
                       dev->name);
 -              TRACE_EXIT(dev->name);
 -              return 1;
 +              return NETDEV_TX_BUSY;
        }
        
        if (netif_queue_stopped(dev)) {
                printk(KERN_DEBUG "%s: Tx while transmitter busy!\n", 
                       dev->name);
 -              TRACE_EXIT(dev->name);
 -              return 1;
 +              return NETDEV_TX_BUSY;
        }
        
        if (orinoco_lock(priv, &flags) != 0) {
                printk(KERN_ERR "%s: orinoco_xmit() called while hw_unavailable\n",
                       dev->name);
 -              TRACE_EXIT(dev->name);
 -              return 1;
 +              return NETDEV_TX_BUSY;
        }
  
        if (! netif_carrier_ok(dev) || (priv->iw_mode == IW_MODE_MONITOR)) {
                /* Oops, the firmware hasn't established a connection,
                     silently drop the packet (this seems to be the
                     safest approach). */
 -              stats->tx_errors++;
 -              orinoco_unlock(priv, &flags);
 -              dev_kfree_skb(skb);
 -              TRACE_EXIT(dev->name);
 -              return 0;
 +              goto drop;
        }
  
 -      /* Length of the packet body */
 -      /* FIXME: what if the skb is smaller than this? */
 -      len = max_t(int, ALIGN(skb->len, 2), ETH_ZLEN);
 -      skb = skb_padto(skb, len);
 -      if (skb == NULL)
 -              goto fail;
 -      len -= ETH_HLEN;
 +      /* Check packet length */
 +      if (skb->len < ETH_HLEN)
 +              goto drop;
  
        eh = (struct ethhdr *)skb->data;
  
                if (net_ratelimit())
                        printk(KERN_ERR "%s: Error %d writing Tx descriptor "
                               "to BAP\n", dev->name, err);
 -              stats->tx_errors++;
 -              goto fail;
 +              goto busy;
        }
  
        /* Clear the 802.11 header and data length fields - some
  
        /* Encapsulate Ethernet-II frames */
        if (ntohs(eh->h_proto) > ETH_DATA_LEN) { /* Ethernet-II frame */
 -              struct header_struct hdr;
 -              data_len = len;
 -              data_off = HERMES_802_3_OFFSET + sizeof(hdr);
 -              p = skb->data + ETH_HLEN;
 -
 -              /* 802.3 header */
 -              memcpy(hdr.dest, eh->h_dest, ETH_ALEN);
 -              memcpy(hdr.src, eh->h_source, ETH_ALEN);
 -              hdr.len = htons(data_len + ENCAPS_OVERHEAD);
 -              
 -              /* 802.2 header */
 -              memcpy(&hdr.dsap, &encaps_hdr, sizeof(encaps_hdr));
 -                      
 -              hdr.ethertype = eh->h_proto;
 -              err  = hermes_bap_pwrite(hw, USER_BAP, &hdr, sizeof(hdr),
 -                                       txfid, HERMES_802_3_OFFSET);
 +              struct header_struct {
 +                      struct ethhdr eth;      /* 802.3 header */
 +                      u8 encap[6];            /* 802.2 header */
 +              } __attribute__ ((packed)) hdr;
 +
 +              /* Strip destination and source from the data */
 +              skb_pull(skb, 2 * ETH_ALEN);
 +              data_off = HERMES_802_2_OFFSET + sizeof(encaps_hdr);
 +
 +              /* And move them to a separate header */
 +              memcpy(&hdr.eth, eh, 2 * ETH_ALEN);
 +              hdr.eth.h_proto = htons(sizeof(encaps_hdr) + skb->len);
 +              memcpy(hdr.encap, encaps_hdr, sizeof(encaps_hdr));
 +
 +              err = hermes_bap_pwrite(hw, USER_BAP, &hdr, sizeof(hdr),
 +                                      txfid, HERMES_802_3_OFFSET);
                if (err) {
                        if (net_ratelimit())
                                printk(KERN_ERR "%s: Error %d writing packet "
                                       "header to BAP\n", dev->name, err);
 -                      stats->tx_errors++;
 -                      goto fail;
 +                      goto busy;
                }
 -              /* Actual xfer length - allow for padding */
 -              len = ALIGN(data_len, 2);
 -              if (len < ETH_ZLEN - ETH_HLEN)
 -                      len = ETH_ZLEN - ETH_HLEN;
        } else { /* IEEE 802.3 frame */
 -              data_len = len + ETH_HLEN;
                data_off = HERMES_802_3_OFFSET;
 -              p = skb->data;
 -              /* Actual xfer length - round up for odd length packets */
 -              len = ALIGN(data_len, 2);
 -              if (len < ETH_ZLEN)
 -                      len = ETH_ZLEN;
        }
  
 -      err = hermes_bap_pwrite_pad(hw, USER_BAP, p, data_len, len,
 +      err = hermes_bap_pwrite(hw, USER_BAP, skb->data, skb->len,
                                txfid, data_off);
        if (err) {
                printk(KERN_ERR "%s: Error %d writing packet to BAP\n",
                       dev->name, err);
 -              stats->tx_errors++;
 -              goto fail;
 +              goto busy;
        }
  
        /* Finally, we actually initiate the send */
                if (net_ratelimit())
                        printk(KERN_ERR "%s: Error %d transmitting packet\n",
                                dev->name, err);
 -              stats->tx_errors++;
 -              goto fail;
 +              goto busy;
        }
  
        dev->trans_start = jiffies;
 -      stats->tx_bytes += data_off + data_len;
 +      stats->tx_bytes += data_off + skb->len;
 +      goto ok;
  
 -      orinoco_unlock(priv, &flags);
 + drop:
 +      stats->tx_errors++;
 +      stats->tx_dropped++;
  
 + ok:
 +      orinoco_unlock(priv, &flags);
        dev_kfree_skb(skb);
 +      return NETDEV_TX_OK;
  
 -      TRACE_EXIT(dev->name);
 -
 -      return 0;
 - fail:
 -      TRACE_EXIT(dev->name);
 -
 + busy:
 +      if (err == -EIO)
 +              schedule_work(&priv->reset_work);
        orinoco_unlock(priv, &flags);
 -      return err;
 +      return NETDEV_TX_BUSY;
  }
  
  static void __orinoco_ev_alloc(struct net_device *dev, hermes_t *hw)
@@@ -575,7 -629,7 +575,7 @@@ static void __orinoco_ev_txexc(struct n
        struct net_device_stats *stats = &priv->stats;
        u16 fid = hermes_read_regn(hw, TXCOMPLFID);
        u16 status;
 -      struct hermes_tx_descriptor_802_11 hdr;
 +      struct hermes_txexc_data hdr;
        int err = 0;
  
        if (fid == DUMMY_FID)
  
        /* Read part of the frame header - we need status and addr1 */
        err = hermes_bap_pread(hw, IRQ_BAP, &hdr,
 -                             offsetof(struct hermes_tx_descriptor_802_11,
 -                                      addr2),
 +                             sizeof(struct hermes_txexc_data),
                               fid, 0);
  
        hermes_write_regn(hw, TXCOMPLFID, DUMMY_FID);
         * exceeded, because that's the only status that really mean
         * that this particular node went away.
         * Other errors means that *we* screwed up. - Jean II */
 -      status = le16_to_cpu(hdr.status);
 +      status = le16_to_cpu(hdr.desc.status);
        if (status & (HERMES_TXSTAT_RETRYERR | HERMES_TXSTAT_AGEDERR)) {
                union iwreq_data        wrqu;
  
@@@ -1343,12 -1398,16 +1343,12 @@@ int __orinoco_down(struct net_device *d
        return 0;
  }
  
 -int orinoco_reinit_firmware(struct net_device *dev)
 +static int orinoco_allocate_fid(struct net_device *dev)
  {
        struct orinoco_private *priv = netdev_priv(dev);
        struct hermes *hw = &priv->hw;
        int err;
  
 -      err = hermes_init(hw);
 -      if (err)
 -              return err;
 -
        err = hermes_allocate(hw, priv->nicbuf_size, &priv->txfid);
        if (err == -EIO && priv->nicbuf_size > TX_NICBUF_SIZE_BUG) {
                /* Try workaround for old Symbol firmware bug */
        return err;
  }
  
 +int orinoco_reinit_firmware(struct net_device *dev)
 +{
 +      struct orinoco_private *priv = netdev_priv(dev);
 +      struct hermes *hw = &priv->hw;
 +      int err;
 +
 +      err = hermes_init(hw);
 +      if (!err)
 +              err = orinoco_allocate_fid(dev);
 +
 +      return err;
 +}
 +
  static int __orinoco_hw_set_bitrate(struct orinoco_private *priv)
  {
        hermes_t *hw = &priv->hw;
@@@ -1787,7 -1833,9 +1787,9 @@@ static int __orinoco_program_rids(struc
        /* Set promiscuity / multicast*/
        priv->promiscuous = 0;
        priv->mc_count = 0;
-       __orinoco_set_multicast_list(dev); /* FIXME: what about the xmit_lock */
+       /* FIXME: what about netif_tx_lock */
+       __orinoco_set_multicast_list(dev);
  
        return 0;
  }
@@@ -2226,12 -2274,14 +2228,12 @@@ static int orinoco_init(struct net_devi
        u16 reclen;
        int len;
  
 -      TRACE_ENTER(dev->name);
 -
        /* No need to lock, the hw_unavailable flag is already set in
         * alloc_orinocodev() */
        priv->nicbuf_size = IEEE80211_FRAME_LEN + ETH_HLEN;
  
        /* Initialize the firmware */
 -      err = orinoco_reinit_firmware(dev);
 +      err = hermes_init(hw);
        if (err != 0) {
                printk(KERN_ERR "%s: failed to initialize firmware (err = %d)\n",
                       dev->name, err);
  
        printk(KERN_DEBUG "%s: Station name \"%s\"\n", dev->name, priv->nick);
  
 +      err = orinoco_allocate_fid(dev);
 +      if (err) {
 +              printk(KERN_ERR "%s: failed to allocate NIC buffer!\n",
 +                     dev->name);
 +              goto out;
 +      }
 +
        /* Get allowed channels */
        err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CHANNELLIST,
                                  &priv->channel_mask);
        printk(KERN_DEBUG "%s: ready\n", dev->name);
  
   out:
 -      TRACE_EXIT(dev->name);
        return err;
  }
  
@@@ -2753,6 -2797,8 +2755,6 @@@ static int orinoco_ioctl_getiwrange(str
        int numrates;
        int i, k;
  
 -      TRACE_ENTER(dev->name);
 -
        rrq->length = sizeof(struct iw_range);
        memset(range, 0, sizeof(struct iw_range));
  
        IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN);
        IW_EVENT_CAPA_SET(range->event_capa, IWEVTXDROP);
  
 -      TRACE_EXIT(dev->name);
 -
        return 0;
  }
  
@@@ -3023,6 -3071,8 +3025,6 @@@ static int orinoco_ioctl_getessid(struc
        int err = 0;
        unsigned long flags;
  
 -      TRACE_ENTER(dev->name);
 -
        if (netif_running(dev)) {
                err = orinoco_hw_get_essid(priv, &active, essidbuf);
                if (err)
        erq->flags = 1;
        erq->length = strlen(essidbuf) + 1;
  
 -      TRACE_EXIT(dev->name);
 -      
        return 0;
  }
  
@@@ -4296,6 -4348,69 +4298,6 @@@ static struct ethtool_ops orinoco_ethto
        .get_link = ethtool_op_get_link,
  };
  
 -/********************************************************************/
 -/* Debugging                                                        */
 -/********************************************************************/
 -
 -#if 0
 -static void show_rx_frame(struct orinoco_rxframe_hdr *frame)
 -{
 -      printk(KERN_DEBUG "RX descriptor:\n");
 -      printk(KERN_DEBUG "  status      = 0x%04x\n", frame->desc.status);
 -      printk(KERN_DEBUG "  time        = 0x%08x\n", frame->desc.time);
 -      printk(KERN_DEBUG "  silence     = 0x%02x\n", frame->desc.silence);
 -      printk(KERN_DEBUG "  signal      = 0x%02x\n", frame->desc.signal);
 -      printk(KERN_DEBUG "  rate        = 0x%02x\n", frame->desc.rate);
 -      printk(KERN_DEBUG "  rxflow      = 0x%02x\n", frame->desc.rxflow);
 -      printk(KERN_DEBUG "  reserved    = 0x%08x\n", frame->desc.reserved);
 -
 -      printk(KERN_DEBUG "IEEE 802.11 header:\n");
 -      printk(KERN_DEBUG "  frame_ctl   = 0x%04x\n",
 -             frame->p80211.frame_ctl);
 -      printk(KERN_DEBUG "  duration_id = 0x%04x\n",
 -             frame->p80211.duration_id);
 -      printk(KERN_DEBUG "  addr1       = %02x:%02x:%02x:%02x:%02x:%02x\n",
 -             frame->p80211.addr1[0], frame->p80211.addr1[1],
 -             frame->p80211.addr1[2], frame->p80211.addr1[3],
 -             frame->p80211.addr1[4], frame->p80211.addr1[5]);
 -      printk(KERN_DEBUG "  addr2       = %02x:%02x:%02x:%02x:%02x:%02x\n",
 -             frame->p80211.addr2[0], frame->p80211.addr2[1],
 -             frame->p80211.addr2[2], frame->p80211.addr2[3],
 -             frame->p80211.addr2[4], frame->p80211.addr2[5]);
 -      printk(KERN_DEBUG "  addr3       = %02x:%02x:%02x:%02x:%02x:%02x\n",
 -             frame->p80211.addr3[0], frame->p80211.addr3[1],
 -             frame->p80211.addr3[2], frame->p80211.addr3[3],
 -             frame->p80211.addr3[4], frame->p80211.addr3[5]);
 -      printk(KERN_DEBUG "  seq_ctl     = 0x%04x\n",
 -             frame->p80211.seq_ctl);
 -      printk(KERN_DEBUG "  addr4       = %02x:%02x:%02x:%02x:%02x:%02x\n",
 -             frame->p80211.addr4[0], frame->p80211.addr4[1],
 -             frame->p80211.addr4[2], frame->p80211.addr4[3],
 -             frame->p80211.addr4[4], frame->p80211.addr4[5]);
 -      printk(KERN_DEBUG "  data_len    = 0x%04x\n",
 -             frame->p80211.data_len);
 -
 -      printk(KERN_DEBUG "IEEE 802.3 header:\n");
 -      printk(KERN_DEBUG "  dest        = %02x:%02x:%02x:%02x:%02x:%02x\n",
 -             frame->p8023.h_dest[0], frame->p8023.h_dest[1],
 -             frame->p8023.h_dest[2], frame->p8023.h_dest[3],
 -             frame->p8023.h_dest[4], frame->p8023.h_dest[5]);
 -      printk(KERN_DEBUG "  src         = %02x:%02x:%02x:%02x:%02x:%02x\n",
 -             frame->p8023.h_source[0], frame->p8023.h_source[1],
 -             frame->p8023.h_source[2], frame->p8023.h_source[3],
 -             frame->p8023.h_source[4], frame->p8023.h_source[5]);
 -      printk(KERN_DEBUG "  len         = 0x%04x\n", frame->p8023.h_proto);
 -
 -      printk(KERN_DEBUG "IEEE 802.2 LLC/SNAP header:\n");
 -      printk(KERN_DEBUG "  DSAP        = 0x%02x\n", frame->p8022.dsap);
 -      printk(KERN_DEBUG "  SSAP        = 0x%02x\n", frame->p8022.ssap);
 -      printk(KERN_DEBUG "  ctrl        = 0x%02x\n", frame->p8022.ctrl);
 -      printk(KERN_DEBUG "  OUI         = %02x:%02x:%02x\n",
 -             frame->p8022.oui[0], frame->p8022.oui[1], frame->p8022.oui[2]);
 -      printk(KERN_DEBUG "  ethertype  = 0x%04x\n", frame->ethertype);
 -}
 -#endif /* 0 */
 -
  /********************************************************************/
  /* Module initialization                                            */
  /********************************************************************/
diff --combined include/linux/pci_ids.h
index c3fe769c9129fde52e77da6f90dc495b3dd02e44,cf45e8bb69a8a736315421ba00f29f4f4e687c91..bcfe9d4f56aea1ba730f329aaf6f7af084c05f50
  #define PCI_DEVICE_ID_PLX_DJINN_ITOO  0x1151
  #define PCI_DEVICE_ID_PLX_R753                0x1152
  #define PCI_DEVICE_ID_PLX_OLITEC      0x1187
 +#define PCI_DEVICE_ID_PLX_PCI200SYN   0x3196
  #define PCI_DEVICE_ID_PLX_9050                0x9050
  #define PCI_DEVICE_ID_PLX_9080                0x9080
  #define PCI_DEVICE_ID_PLX_GTEK_SERIAL2        0xa001
  #define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_1100         0x034E
  #define PCI_DEVICE_ID_NVIDIA_NVENET_14              0x0372
  #define PCI_DEVICE_ID_NVIDIA_NVENET_15              0x0373
 +#define PCI_DEVICE_ID_NVIDIA_NVENET_16              0x03E5
 +#define PCI_DEVICE_ID_NVIDIA_NVENET_17              0x03E6
 +#define PCI_DEVICE_ID_NVIDIA_NVENET_18              0x03EE
 +#define PCI_DEVICE_ID_NVIDIA_NVENET_19              0x03EF
 +#define PCI_DEVICE_ID_NVIDIA_NVENET_20              0x0450
 +#define PCI_DEVICE_ID_NVIDIA_NVENET_21              0x0451
 +#define PCI_DEVICE_ID_NVIDIA_NVENET_22              0x0452
 +#define PCI_DEVICE_ID_NVIDIA_NVENET_23              0x0453
  
  #define PCI_VENDOR_ID_IMS             0x10e0
  #define PCI_DEVICE_ID_IMS_TT128               0x9128
  
  #define PCI_VENDOR_ID_SAMSUNG         0x144d
  
 +#define PCI_VENDOR_ID_MYRICOM         0x14c1
  
  #define PCI_VENDOR_ID_TITAN           0x14D2
  #define PCI_DEVICE_ID_TITAN_010L      0x8001
  #define PCI_DEVICE_ID_TIGON3_5751F    0x167e
  #define PCI_DEVICE_ID_TIGON3_5787M    0x1693
  #define PCI_DEVICE_ID_TIGON3_5782     0x1696
+ #define PCI_DEVICE_ID_TIGON3_5786     0x169a
  #define PCI_DEVICE_ID_TIGON3_5787     0x169b
  #define PCI_DEVICE_ID_TIGON3_5788     0x169c
  #define PCI_DEVICE_ID_TIGON3_5789     0x169d
  #define PCI_DEVICE_ID_INTEL_80960_RP  0x1960
  #define PCI_DEVICE_ID_INTEL_82840_HB  0x1a21
  #define PCI_DEVICE_ID_INTEL_82845_HB  0x1a30
+ #define PCI_DEVICE_ID_INTEL_IOAT      0x1a38
  #define PCI_DEVICE_ID_INTEL_82801AA_0 0x2410
  #define PCI_DEVICE_ID_INTEL_82801AA_1 0x2411
  #define PCI_DEVICE_ID_INTEL_82801AA_3 0x2413