net: 8139cp: modernize RealTek RTL-8139C+ driver (NAPI conversion, net_device stats, dma_* API, hardware VLAN offload via ndo_set_features)
[linux-2.6.git] / drivers / net / 8139cp.c
index f5223ec..cc4c210 100644 (file)
@@ -46,6 +46,8 @@
 
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #define DRV_NAME               "8139cp"
 #define DRV_VERSION            "1.3"
 #define DRV_RELDATE            "Mar 22, 2004"
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/pci.h>
 #include <linux/dma-mapping.h>
 #include <linux/delay.h>
 #include <linux/ethtool.h>
+#include <linux/gfp.h>
 #include <linux/mii.h>
 #include <linux/if_vlan.h>
 #include <linux/crc32.h>
 #include <asm/irq.h>
 #include <asm/uaccess.h>
 
-/* VLAN tagging feature enable/disable */
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
-#define CP_VLAN_TAG_USED 1
-#define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
-       do { (tx_desc)->opts2 = (vlan_tag_value); } while (0)
-#else
-#define CP_VLAN_TAG_USED 0
-#define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
-       do { (tx_desc)->opts2 = 0; } while (0)
-#endif
-
 /* These identify the driver base version and may not be removed. */
 static char version[] =
-KERN_INFO DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
+DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
 
 MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
 MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
@@ -104,8 +97,6 @@ static int multicast_filter_limit = 32;
 module_param(multicast_filter_limit, int, 0);
 MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");
 
-#define PFX                    DRV_NAME ": "
-
 #define CP_DEF_MSG_ENABLE      (NETIF_MSG_DRV          | \
                                 NETIF_MSG_PROBE        | \
                                 NETIF_MSG_LINK)
@@ -127,7 +118,6 @@ MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered mu
          (CP)->tx_tail - (CP)->tx_head - 1)
 
 #define PKT_BUF_SZ             1536    /* Size of each temporary Rx buffer.*/
-#define RX_OFFSET              2
 #define CP_INTERNAL_PHY                32
 
 /* The following settings are log_2(bytes)-4:  0 == 16 bytes .. 6==1024, 7==end of packet. */
@@ -303,26 +293,26 @@ static const unsigned int cp_rx_config =
          (RX_DMA_BURST << RxCfgDMAShift);
 
 struct cp_desc {
-       u32             opts1;
-       u32             opts2;
-       u64             addr;
+       __le32          opts1;
+       __le32          opts2;
+       __le64          addr;
 };
 
 struct cp_dma_stats {
-       u64                     tx_ok;
-       u64                     rx_ok;
-       u64                     tx_err;
-       u32                     rx_err;
-       u16                     rx_fifo;
-       u16                     frame_align;
-       u32                     tx_ok_1col;
-       u32                     tx_ok_mcol;
-       u64                     rx_ok_phys;
-       u64                     rx_ok_bcast;
-       u32                     rx_ok_mcast;
-       u16                     tx_abort;
-       u16                     tx_underrun;
-} __attribute__((packed));
+       __le64                  tx_ok;
+       __le64                  rx_ok;
+       __le64                  tx_err;
+       __le32                  rx_err;
+       __le16                  rx_fifo;
+       __le16                  frame_align;
+       __le32                  tx_ok_1col;
+       __le32                  tx_ok_mcol;
+       __le64                  rx_ok_phys;
+       __le64                  rx_ok_bcast;
+       __le32                  rx_ok_mcast;
+       __le16                  tx_abort;
+       __le16                  tx_underrun;
+} __packed;
 
 struct cp_extra_stats {
        unsigned long           rx_frags;
@@ -334,11 +324,12 @@ struct cp_private {
        spinlock_t              lock;
        u32                     msg_enable;
 
+       struct napi_struct      napi;
+
        struct pci_dev          *pdev;
        u32                     rx_config;
        u16                     cpcmd;
 
-       struct net_device_stats net_stats;
        struct cp_extra_stats   cp_stats;
 
        unsigned                rx_head         ____cacheline_aligned;
@@ -354,9 +345,6 @@ struct cp_private {
        unsigned                rx_buf_sz;
        unsigned                wol_enabled : 1; /* Is Wake-on-LAN enabled? */
 
-#if CP_VLAN_TAG_USED
-       struct vlan_group       *vlgrp;
-#endif
        dma_addr_t              ring_dma;
 
        struct mii_if_info      mii_if;
@@ -394,7 +382,7 @@ static int cp_get_eeprom(struct net_device *dev,
 static int cp_set_eeprom(struct net_device *dev,
                         struct ethtool_eeprom *eeprom, u8 *data);
 
-static struct pci_device_id cp_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(cp_pci_tbl) = {
        { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     PCI_DEVICE_ID_REALTEK_8139), },
        { PCI_DEVICE(PCI_VENDOR_ID_TTTECH,      PCI_DEVICE_ID_TTTECH_MC322), },
        { },
@@ -421,24 +409,6 @@ static struct {
 };
 
 
-#if CP_VLAN_TAG_USED
-static void cp_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
-{
-       struct cp_private *cp = netdev_priv(dev);
-       unsigned long flags;
-
-       spin_lock_irqsave(&cp->lock, flags);
-       cp->vlgrp = grp;
-       if (grp)
-               cp->cpcmd |= RxVlanOn;
-       else
-               cp->cpcmd &= ~RxVlanOn;
-
-       cpw16(CpCmd, cp->cpcmd);
-       spin_unlock_irqrestore(&cp->lock, flags);
-}
-#endif /* CP_VLAN_TAG_USED */
-
 static inline void cp_set_rxbufsize (struct cp_private *cp)
 {
        unsigned int mtu = cp->dev->mtu;
@@ -453,60 +423,54 @@ static inline void cp_set_rxbufsize (struct cp_private *cp)
 static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
                              struct cp_desc *desc)
 {
+       u32 opts2 = le32_to_cpu(desc->opts2);
+
        skb->protocol = eth_type_trans (skb, cp->dev);
 
-       cp->net_stats.rx_packets++;
-       cp->net_stats.rx_bytes += skb->len;
-       cp->dev->last_rx = jiffies;
+       cp->dev->stats.rx_packets++;
+       cp->dev->stats.rx_bytes += skb->len;
 
-#if CP_VLAN_TAG_USED
-       if (cp->vlgrp && (desc->opts2 & RxVlanTagged)) {
-               vlan_hwaccel_receive_skb(skb, cp->vlgrp,
-                                        be16_to_cpu(desc->opts2 & 0xffff));
-       } else
-#endif
-               netif_receive_skb(skb);
+       if (opts2 & RxVlanTagged)
+               __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
+
+       napi_gro_receive(&cp->napi, skb);
 }
 
 static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
                            u32 status, u32 len)
 {
-       if (netif_msg_rx_err (cp))
-               printk (KERN_DEBUG
-                       "%s: rx err, slot %d status 0x%x len %d\n",
-                       cp->dev->name, rx_tail, status, len);
-       cp->net_stats.rx_errors++;
+       netif_dbg(cp, rx_err, cp->dev, "rx err, slot %d status 0x%x len %d\n",
+                 rx_tail, status, len);
+       cp->dev->stats.rx_errors++;
        if (status & RxErrFrame)
-               cp->net_stats.rx_frame_errors++;
+               cp->dev->stats.rx_frame_errors++;
        if (status & RxErrCRC)
-               cp->net_stats.rx_crc_errors++;
+               cp->dev->stats.rx_crc_errors++;
        if ((status & RxErrRunt) || (status & RxErrLong))
-               cp->net_stats.rx_length_errors++;
+               cp->dev->stats.rx_length_errors++;
        if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag))
-               cp->net_stats.rx_length_errors++;
+               cp->dev->stats.rx_length_errors++;
        if (status & RxErrFIFO)
-               cp->net_stats.rx_fifo_errors++;
+               cp->dev->stats.rx_fifo_errors++;
 }
 
 static inline unsigned int cp_rx_csum_ok (u32 status)
 {
        unsigned int protocol = (status >> 16) & 0x3;
 
-       if (likely((protocol == RxProtoTCP) && (!(status & TCPFail))))
-               return 1;
-       else if ((protocol == RxProtoUDP) && (!(status & UDPFail)))
-               return 1;
-       else if ((protocol == RxProtoIP) && (!(status & IPFail)))
+       if (((protocol == RxProtoTCP) && !(status & TCPFail)) ||
+           ((protocol == RxProtoUDP) && !(status & UDPFail)))
                return 1;
-       return 0;
+       else
+               return 0;
 }
 
-static int cp_rx_poll (struct net_device *dev, int *budget)
+static int cp_rx_poll(struct napi_struct *napi, int budget)
 {
-       struct cp_private *cp = netdev_priv(dev);
-       unsigned rx_tail = cp->rx_tail;
-       unsigned rx_work = dev->quota;
-       unsigned rx;
+       struct cp_private *cp = container_of(napi, struct cp_private, napi);
+       struct net_device *dev = cp->dev;
+       unsigned int rx_tail = cp->rx_tail;
+       int rx;
 
 rx_status_loop:
        rx = 0;
@@ -517,7 +481,7 @@ rx_status_loop:
                dma_addr_t mapping;
                struct sk_buff *skb, *new_skb;
                struct cp_desc *desc;
-               unsigned buflen;
+               const unsigned buflen = cp->rx_buf_sz;
 
                skb = cp->rx_skb[rx_tail];
                BUG_ON(!skb);
@@ -537,7 +501,7 @@ rx_status_loop:
                         * that RX fragments are never encountered
                         */
                        cp_rx_err_acct(cp, rx_tail, status, len);
-                       cp->net_stats.rx_dropped++;
+                       dev->stats.rx_dropped++;
                        cp->cp_stats.rx_frags++;
                        goto rx_next;
                }
@@ -547,31 +511,27 @@ rx_status_loop:
                        goto rx_next;
                }
 
-               if (netif_msg_rx_status(cp))
-                       printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d\n",
-                              dev->name, rx_tail, status, len);
+               netif_dbg(cp, rx_status, dev, "rx slot %d status 0x%x len %d\n",
+                         rx_tail, status, len);
 
-               buflen = cp->rx_buf_sz + RX_OFFSET;
-               new_skb = dev_alloc_skb (buflen);
+               new_skb = netdev_alloc_skb_ip_align(dev, buflen);
                if (!new_skb) {
-                       cp->net_stats.rx_dropped++;
+                       dev->stats.rx_dropped++;
                        goto rx_next;
                }
 
-               skb_reserve(new_skb, RX_OFFSET);
-
-               pci_unmap_single(cp->pdev, mapping,
+               dma_unmap_single(&cp->pdev->dev, mapping,
                                 buflen, PCI_DMA_FROMDEVICE);
 
                /* Handle checksum offloading for incoming packets. */
                if (cp_rx_csum_ok(status))
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                else
-                       skb->ip_summed = CHECKSUM_NONE;
+                       skb_checksum_none_assert(skb);
 
                skb_put(skb, len);
 
-               mapping = pci_map_single(cp->pdev, new_skb->data, buflen,
+               mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
                                         PCI_DMA_FROMDEVICE);
                cp->rx_skb[rx_tail] = new_skb;
 
@@ -588,33 +548,28 @@ rx_next:
                        desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
                rx_tail = NEXT_RX(rx_tail);
 
-               if (!rx_work--)
+               if (rx >= budget)
                        break;
        }
 
        cp->rx_tail = rx_tail;
 
-       dev->quota -= rx;
-       *budget -= rx;
-
        /* if we did not reach work limit, then we're done with
         * this round of polling
         */
-       if (rx_work) {
+       if (rx < budget) {
                unsigned long flags;
 
                if (cpr16(IntrStatus) & cp_rx_intr_mask)
                        goto rx_status_loop;
 
-               local_irq_save(flags);
+               spin_lock_irqsave(&cp->lock, flags);
+               __napi_complete(napi);
                cpw16_f(IntrMask, cp_intr_mask);
-               __netif_rx_complete(dev);
-               local_irq_restore(flags);
-
-               return 0;       /* done */
+               spin_unlock_irqrestore(&cp->lock, flags);
        }
 
-       return 1;               /* not done */
+       return rx;
 }
 
 static irqreturn_t cp_interrupt (int irq, void *dev_instance)
@@ -631,9 +586,8 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
        if (!status || (status == 0xFFFF))
                return IRQ_NONE;
 
-       if (netif_msg_intr(cp))
-               printk(KERN_DEBUG "%s: intr, status %04x cmd %02x cpcmd %04x\n",
-                       dev->name, status, cpr8(Cmd), cpr16(CpCmd));
+       netif_dbg(cp, intr, dev, "intr, status %04x cmd %02x cpcmd %04x\n",
+                 status, cpr8(Cmd), cpr16(CpCmd));
 
        cpw16(IntrStatus, status & ~cp_rx_intr_mask);
 
@@ -647,9 +601,9 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
        }
 
        if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
-               if (netif_rx_schedule_prep(dev)) {
+               if (napi_schedule_prep(&cp->napi)) {
                        cpw16_f(IntrMask, cp_norx_intr_mask);
-                       __netif_rx_schedule(dev);
+                       __napi_schedule(&cp->napi);
                }
 
        if (status & (TxOK | TxErr | TxEmpty | SWInt))
@@ -664,8 +618,8 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
 
                pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
                pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
-               printk(KERN_ERR "%s: PCI bus error, status=%04x, PCI status=%04x\n",
-                      dev->name, status, pci_status);
+               netdev_err(dev, "PCI bus error, status=%04x, PCI status=%04x\n",
+                          status, pci_status);
 
                /* TODO: reset hardware */
        }
@@ -704,31 +658,30 @@ static void cp_tx (struct cp_private *cp)
                skb = cp->tx_skb[tx_tail];
                BUG_ON(!skb);
 
-               pci_unmap_single(cp->pdev, le64_to_cpu(txd->addr),
+               dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
                                 le32_to_cpu(txd->opts1) & 0xffff,
                                 PCI_DMA_TODEVICE);
 
                if (status & LastFrag) {
                        if (status & (TxError | TxFIFOUnder)) {
-                               if (netif_msg_tx_err(cp))
-                                       printk(KERN_DEBUG "%s: tx err, status 0x%x\n",
-                                              cp->dev->name, status);
-                               cp->net_stats.tx_errors++;
+                               netif_dbg(cp, tx_err, cp->dev,
+                                         "tx err, status 0x%x\n", status);
+                               cp->dev->stats.tx_errors++;
                                if (status & TxOWC)
-                                       cp->net_stats.tx_window_errors++;
+                                       cp->dev->stats.tx_window_errors++;
                                if (status & TxMaxCol)
-                                       cp->net_stats.tx_aborted_errors++;
+                                       cp->dev->stats.tx_aborted_errors++;
                                if (status & TxLinkFail)
-                                       cp->net_stats.tx_carrier_errors++;
+                                       cp->dev->stats.tx_carrier_errors++;
                                if (status & TxFIFOUnder)
-                                       cp->net_stats.tx_fifo_errors++;
+                                       cp->dev->stats.tx_fifo_errors++;
                        } else {
-                               cp->net_stats.collisions +=
+                               cp->dev->stats.collisions +=
                                        ((status >> TxColCntShift) & TxColCntMask);
-                               cp->net_stats.tx_packets++;
-                               cp->net_stats.tx_bytes += skb->len;
-                               if (netif_msg_tx_done(cp))
-                                       printk(KERN_DEBUG "%s: tx done, slot %d\n", cp->dev->name, tx_tail);
+                               cp->dev->stats.tx_packets++;
+                               cp->dev->stats.tx_bytes += skb->len;
+                               netif_dbg(cp, tx_done, cp->dev,
+                                         "tx done, slot %d\n", tx_tail);
                        }
                        dev_kfree_skb_irq(skb);
                }
@@ -744,15 +697,20 @@ static void cp_tx (struct cp_private *cp)
                netif_wake_queue(cp->dev);
 }
 
-static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
+static inline u32 cp_tx_vlan_tag(struct sk_buff *skb)
+{
+       return vlan_tx_tag_present(skb) ?
+               TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
+}
+
+static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
+                                       struct net_device *dev)
 {
        struct cp_private *cp = netdev_priv(dev);
        unsigned entry;
        u32 eor, flags;
        unsigned long intr_flags;
-#if CP_VLAN_TAG_USED
-       u32 vlan_tag = 0;
-#endif
+       __le32 opts2;
        int mss = 0;
 
        spin_lock_irqsave(&cp->lock, intr_flags);
@@ -761,20 +719,15 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
        if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
                netif_stop_queue(dev);
                spin_unlock_irqrestore(&cp->lock, intr_flags);
-               printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
-                      dev->name);
-               return 1;
+               netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
+               return NETDEV_TX_BUSY;
        }
 
-#if CP_VLAN_TAG_USED
-       if (cp->vlgrp && vlan_tx_tag_present(skb))
-               vlan_tag = TxVlanTag | cpu_to_be16(vlan_tx_tag_get(skb));
-#endif
-
        entry = cp->tx_head;
        eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
-       if (dev->features & NETIF_F_TSO)
-               mss = skb_shinfo(skb)->gso_size;
+       mss = skb_shinfo(skb)->gso_size;
+
+       opts2 = cpu_to_le32(cp_tx_vlan_tag(skb));
 
        if (skb_shinfo(skb)->nr_frags == 0) {
                struct cp_desc *txd = &cp->tx_ring[entry];
@@ -782,8 +735,8 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
                dma_addr_t mapping;
 
                len = skb->len;
-               mapping = pci_map_single(cp->pdev, skb->data, len, PCI_DMA_TODEVICE);
-               CP_VLAN_TX_TAG(txd, vlan_tag);
+               mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
+               txd->opts2 = opts2;
                txd->addr = cpu_to_le64(mapping);
                wmb();
 
@@ -818,7 +771,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
                 */
                first_eor = eor;
                first_len = skb_headlen(skb);
-               first_mapping = pci_map_single(cp->pdev, skb->data,
+               first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
                                               first_len, PCI_DMA_TODEVICE);
                cp->tx_skb[entry] = skb;
                entry = NEXT_TX(entry);
@@ -830,7 +783,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
                        dma_addr_t mapping;
 
                        len = this_frag->size;
-                       mapping = pci_map_single(cp->pdev,
+                       mapping = dma_map_single(&cp->pdev->dev,
                                                 ((void *) page_address(this_frag->page) +
                                                  this_frag->page_offset),
                                                 len, PCI_DMA_TODEVICE);
@@ -854,7 +807,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
                                ctrl |= LastFrag;
 
                        txd = &cp->tx_ring[entry];
-                       CP_VLAN_TX_TAG(txd, vlan_tag);
+                       txd->opts2 = opts2;
                        txd->addr = cpu_to_le64(mapping);
                        wmb();
 
@@ -866,7 +819,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
                }
 
                txd = &cp->tx_ring[first_entry];
-               CP_VLAN_TX_TAG(txd, vlan_tag);
+               txd->opts2 = opts2;
                txd->addr = cpu_to_le64(first_mapping);
                wmb();
 
@@ -887,18 +840,16 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
                wmb();
        }
        cp->tx_head = entry;
-       if (netif_msg_tx_queued(cp))
-               printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
-                      dev->name, entry, skb->len);
+       netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
+                 entry, skb->len);
        if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
                netif_stop_queue(dev);
 
        spin_unlock_irqrestore(&cp->lock, intr_flags);
 
        cpw8(TxPoll, NormalTxPoll);
-       dev->trans_start = jiffies;
 
-       return 0;
+       return NETDEV_TX_OK;
 }
 
 /* Set or clear the multicast filter for this adaptor.
@@ -908,7 +859,7 @@ static void __cp_set_rx_mode (struct net_device *dev)
 {
        struct cp_private *cp = netdev_priv(dev);
        u32 mc_filter[2];       /* Multicast hash filter */
-       int i, rx_mode;
+       int rx_mode;
        u32 tmp;
 
        /* Note: do not reorder, GCC is clever about common statements. */
@@ -918,18 +869,17 @@ static void __cp_set_rx_mode (struct net_device *dev)
                    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
                    AcceptAllPhys;
                mc_filter[1] = mc_filter[0] = 0xffffffff;
-       } else if ((dev->mc_count > multicast_filter_limit)
-                  || (dev->flags & IFF_ALLMULTI)) {
+       } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
+                  (dev->flags & IFF_ALLMULTI)) {
                /* Too many to filter perfectly -- accept all multicasts. */
                rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
                mc_filter[1] = mc_filter[0] = 0xffffffff;
        } else {
-               struct dev_mc_list *mclist;
+               struct netdev_hw_addr *ha;
                rx_mode = AcceptBroadcast | AcceptMyPhys;
                mc_filter[1] = mc_filter[0] = 0;
-               for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
-                    i++, mclist = mclist->next) {
-                       int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+               netdev_for_each_mc_addr(ha, dev) {
+                       int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
 
                        mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
                        rx_mode |= AcceptMulticast;
@@ -959,7 +909,7 @@ static void cp_set_rx_mode (struct net_device *dev)
 static void __cp_get_stats(struct cp_private *cp)
 {
        /* only lower 24 bits valid; write any value to clear */
-       cp->net_stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
+       cp->dev->stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
        cpw32 (RxMissed, 0);
 }
 
@@ -974,7 +924,7 @@ static struct net_device_stats *cp_get_stats(struct net_device *dev)
                __cp_get_stats(cp);
        spin_unlock_irqrestore(&cp->lock, flags);
 
-       return &cp->net_stats;
+       return &dev->stats;
 }
 
 static void cp_stop_hw (struct cp_private *cp)
@@ -1002,7 +952,7 @@ static void cp_reset_hw (struct cp_private *cp)
                schedule_timeout_uninterruptible(10);
        }
 
-       printk(KERN_ERR "%s: hardware reset timeout\n", cp->dev->name);
+       netdev_err(cp->dev, "hardware reset timeout\n");
 }
 
 static inline void cp_start_hw (struct cp_private *cp)
@@ -1021,8 +971,8 @@ static void cp_init_hw (struct cp_private *cp)
        cpw8_f (Cfg9346, Cfg9346_Unlock);
 
        /* Restore our idea of the MAC address. */
-       cpw32_f (MAC0 + 0, cpu_to_le32 (*(u32 *) (dev->dev_addr + 0)));
-       cpw32_f (MAC0 + 4, cpu_to_le32 (*(u32 *) (dev->dev_addr + 4)));
+       cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
+       cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
 
        cp_start_hw(cp);
        cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */
@@ -1055,22 +1005,21 @@ static void cp_init_hw (struct cp_private *cp)
        cpw8_f(Cfg9346, Cfg9346_Lock);
 }
 
-static int cp_refill_rx (struct cp_private *cp)
+static int cp_refill_rx(struct cp_private *cp)
 {
+       struct net_device *dev = cp->dev;
        unsigned i;
 
        for (i = 0; i < CP_RX_RING_SIZE; i++) {
                struct sk_buff *skb;
                dma_addr_t mapping;
 
-               skb = dev_alloc_skb(cp->rx_buf_sz + RX_OFFSET);
+               skb = netdev_alloc_skb_ip_align(dev, cp->rx_buf_sz);
                if (!skb)
                        goto err_out;
 
-               skb_reserve(skb, RX_OFFSET);
-
-               mapping = pci_map_single(cp->pdev, skb->data, cp->rx_buf_sz,
-                                        PCI_DMA_FROMDEVICE);
+               mapping = dma_map_single(&cp->pdev->dev, skb->data,
+                                        cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
                cp->rx_skb[i] = skb;
 
                cp->rx_ring[i].opts2 = 0;
@@ -1110,7 +1059,8 @@ static int cp_alloc_rings (struct cp_private *cp)
 {
        void *mem;
 
-       mem = pci_alloc_consistent(cp->pdev, CP_RING_BYTES, &cp->ring_dma);
+       mem = dma_alloc_coherent(&cp->pdev->dev, CP_RING_BYTES,
+                                &cp->ring_dma, GFP_KERNEL);
        if (!mem)
                return -ENOMEM;
 
@@ -1128,7 +1078,7 @@ static void cp_clean_rings (struct cp_private *cp)
        for (i = 0; i < CP_RX_RING_SIZE; i++) {
                if (cp->rx_skb[i]) {
                        desc = cp->rx_ring + i;
-                       pci_unmap_single(cp->pdev, le64_to_cpu(desc->addr),
+                       dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
                                         cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
                        dev_kfree_skb(cp->rx_skb[i]);
                }
@@ -1139,12 +1089,12 @@ static void cp_clean_rings (struct cp_private *cp)
                        struct sk_buff *skb = cp->tx_skb[i];
 
                        desc = cp->tx_ring + i;
-                       pci_unmap_single(cp->pdev, le64_to_cpu(desc->addr),
+                       dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
                                         le32_to_cpu(desc->opts1) & 0xffff,
                                         PCI_DMA_TODEVICE);
                        if (le32_to_cpu(desc->opts1) & LastFrag)
                                dev_kfree_skb(skb);
-                       cp->net_stats.tx_dropped++;
+                       cp->dev->stats.tx_dropped++;
                }
        }
 
@@ -1158,7 +1108,8 @@ static void cp_clean_rings (struct cp_private *cp)
 static void cp_free_rings (struct cp_private *cp)
 {
        cp_clean_rings(cp);
-       pci_free_consistent(cp->pdev, CP_RING_BYTES, cp->rx_ring, cp->ring_dma);
+       dma_free_coherent(&cp->pdev->dev, CP_RING_BYTES, cp->rx_ring,
+                         cp->ring_dma);
        cp->rx_ring = NULL;
        cp->tx_ring = NULL;
 }
@@ -1168,13 +1119,14 @@ static int cp_open (struct net_device *dev)
        struct cp_private *cp = netdev_priv(dev);
        int rc;
 
-       if (netif_msg_ifup(cp))
-               printk(KERN_DEBUG "%s: enabling interface\n", dev->name);
+       netif_dbg(cp, ifup, dev, "enabling interface\n");
 
        rc = cp_alloc_rings(cp);
        if (rc)
                return rc;
 
+       napi_enable(&cp->napi);
+
        cp_init_hw(cp);
 
        rc = request_irq(dev->irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
@@ -1188,6 +1140,7 @@ static int cp_open (struct net_device *dev)
        return 0;
 
 err_out_hw:
+       napi_disable(&cp->napi);
        cp_stop_hw(cp);
        cp_free_rings(cp);
        return rc;
@@ -1198,8 +1151,9 @@ static int cp_close (struct net_device *dev)
        struct cp_private *cp = netdev_priv(dev);
        unsigned long flags;
 
-       if (netif_msg_ifdown(cp))
-               printk(KERN_DEBUG "%s: disabling interface\n", dev->name);
+       napi_disable(&cp->napi);
+
+       netif_dbg(cp, ifdown, dev, "disabling interface\n");
 
        spin_lock_irqsave(&cp->lock, flags);
 
@@ -1210,7 +1164,6 @@ static int cp_close (struct net_device *dev)
 
        spin_unlock_irqrestore(&cp->lock, flags);
 
-       synchronize_irq(dev->irq);
        free_irq(dev->irq, dev);
 
        cp_free_rings(cp);
@@ -1223,9 +1176,9 @@ static void cp_tx_timeout(struct net_device *dev)
        unsigned long flags;
        int rc;
 
-       printk(KERN_WARNING "%s: Transmit timeout, status %2x %4x %4x %4x\n",
-              dev->name, cpr8(Cmd), cpr16(CpCmd),
-              cpr16(IntrStatus), cpr16(IntrMask));
+       netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n",
+                   cpr8(Cmd), cpr16(CpCmd),
+                   cpr16(IntrStatus), cpr16(IntrMask));
 
        spin_lock_irqsave(&cp->lock, flags);
 
@@ -1237,8 +1190,6 @@ static void cp_tx_timeout(struct net_device *dev)
        netif_wake_queue(dev);
 
        spin_unlock_irqrestore(&cp->lock, flags);
-
-       return;
 }
 
 #ifdef BROKEN
@@ -1379,9 +1330,14 @@ static int cp_get_regs_len(struct net_device *dev)
        return CP_REGS_SIZE;
 }
 
-static int cp_get_stats_count (struct net_device *dev)
+static int cp_get_sset_count (struct net_device *dev, int sset)
 {
-       return CP_NUM_STATS;
+       switch (sset) {
+       case ETH_SS_STATS:
+               return CP_NUM_STATS;
+       default:
+               return -EOPNOTSUPP;
+       }
 }
 
 static int cp_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -1428,32 +1384,28 @@ static void cp_set_msglevel(struct net_device *dev, u32 value)
        cp->msg_enable = value;
 }
 
-static u32 cp_get_rx_csum(struct net_device *dev)
+static int cp_set_features(struct net_device *dev, u32 features)
 {
        struct cp_private *cp = netdev_priv(dev);
-       return (cpr16(CpCmd) & RxChkSum) ? 1 : 0;
-}
+       unsigned long flags;
 
-static int cp_set_rx_csum(struct net_device *dev, u32 data)
-{
-       struct cp_private *cp = netdev_priv(dev);
-       u16 cmd = cp->cpcmd, newcmd;
+       if (!((dev->features ^ features) & NETIF_F_RXCSUM))
+               return 0;
 
-       newcmd = cmd;
+       spin_lock_irqsave(&cp->lock, flags);
 
-       if (data)
-               newcmd |= RxChkSum;
+       if (features & NETIF_F_RXCSUM)
+               cp->cpcmd |= RxChkSum;
        else
-               newcmd &= ~RxChkSum;
+               cp->cpcmd &= ~RxChkSum;
 
-       if (newcmd != cmd) {
-               unsigned long flags;
+       if (features & NETIF_F_HW_VLAN_RX)
+               cp->cpcmd |= RxVlanOn;
+       else
+               cp->cpcmd &= ~RxVlanOn;
 
-               spin_lock_irqsave(&cp->lock, flags);
-               cp->cpcmd = newcmd;
-               cpw16_f(CpCmd, newcmd);
-               spin_unlock_irqrestore(&cp->lock, flags);
-       }
+       cpw16_f(CpCmd, cp->cpcmd);
+       spin_unlock_irqrestore(&cp->lock, flags);
 
        return 0;
 }
@@ -1517,13 +1469,14 @@ static void cp_get_ethtool_stats (struct net_device *dev,
        dma_addr_t dma;
        int i;
 
-       nic_stats = pci_alloc_consistent(cp->pdev, sizeof(*nic_stats), &dma);
+       nic_stats = dma_alloc_coherent(&cp->pdev->dev, sizeof(*nic_stats),
+                                      &dma, GFP_KERNEL);
        if (!nic_stats)
                return;
 
        /* begin NIC statistics dump */
        cpw32(StatsAddr + 4, (u64)dma >> 32);
-       cpw32(StatsAddr, ((u64)dma & DMA_32BIT_MASK) | DumpStats);
+       cpw32(StatsAddr, ((u64)dma & DMA_BIT_MASK(32)) | DumpStats);
        cpr32(StatsAddr);
 
        for (i = 0; i < 1000; i++) {
@@ -1552,33 +1505,24 @@ static void cp_get_ethtool_stats (struct net_device *dev,
        tmp_stats[i++] = cp->cp_stats.rx_frags;
        BUG_ON(i != CP_NUM_STATS);
 
-       pci_free_consistent(cp->pdev, sizeof(*nic_stats), nic_stats, dma);
+       dma_free_coherent(&cp->pdev->dev, sizeof(*nic_stats), nic_stats, dma);
 }
 
 static const struct ethtool_ops cp_ethtool_ops = {
        .get_drvinfo            = cp_get_drvinfo,
        .get_regs_len           = cp_get_regs_len,
-       .get_stats_count        = cp_get_stats_count,
+       .get_sset_count         = cp_get_sset_count,
        .get_settings           = cp_get_settings,
        .set_settings           = cp_set_settings,
        .nway_reset             = cp_nway_reset,
        .get_link               = ethtool_op_get_link,
        .get_msglevel           = cp_get_msglevel,
        .set_msglevel           = cp_set_msglevel,
-       .get_rx_csum            = cp_get_rx_csum,
-       .set_rx_csum            = cp_set_rx_csum,
-       .get_tx_csum            = ethtool_op_get_tx_csum,
-       .set_tx_csum            = ethtool_op_set_tx_csum, /* local! */
-       .get_sg                 = ethtool_op_get_sg,
-       .set_sg                 = ethtool_op_set_sg,
-       .get_tso                = ethtool_op_get_tso,
-       .set_tso                = ethtool_op_set_tso,
        .get_regs               = cp_get_regs,
        .get_wol                = cp_get_wol,
        .set_wol                = cp_set_wol,
        .get_strings            = cp_get_strings,
        .get_ethtool_stats      = cp_get_ethtool_stats,
-       .get_perm_addr          = ethtool_op_get_perm_addr,
        .get_eeprom_len         = cp_get_eeprom_len,
        .get_eeprom             = cp_get_eeprom,
        .set_eeprom             = cp_set_eeprom,
@@ -1599,6 +1543,28 @@ static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
        return rc;
 }
 
+static int cp_set_mac_address(struct net_device *dev, void *p)
+{
+       struct cp_private *cp = netdev_priv(dev);
+       struct sockaddr *addr = p;
+
+       if (!is_valid_ether_addr(addr->sa_data))
+               return -EADDRNOTAVAIL;
+
+       memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+       spin_lock_irq(&cp->lock);
+
+       cpw8_f(Cfg9346, Cfg9346_Unlock);
+       cpw32_f(MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
+       cpw32_f(MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
+       cpw8_f(Cfg9346, Cfg9346_Lock);
+
+       spin_unlock_irq(&cp->lock);
+
+       return 0;
+}
+
 /* Serial EEPROM section. */
 
 /*  EEPROM_Ctrl bits. */
@@ -1814,6 +1780,26 @@ static void cp_set_d3_state (struct cp_private *cp)
        pci_set_power_state (cp->pdev, PCI_D3hot);
 }
 
+static const struct net_device_ops cp_netdev_ops = {
+       .ndo_open               = cp_open,
+       .ndo_stop               = cp_close,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_set_mac_address    = cp_set_mac_address,
+       .ndo_set_multicast_list = cp_set_rx_mode,
+       .ndo_get_stats          = cp_get_stats,
+       .ndo_do_ioctl           = cp_ioctl,
+       .ndo_start_xmit         = cp_start_xmit,
+       .ndo_tx_timeout         = cp_tx_timeout,
+       .ndo_set_features       = cp_set_features,
+#ifdef BROKEN
+       .ndo_change_mtu         = cp_change_mtu,
+#endif
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller    = cp_poll_controller,
+#endif
+};
+
 static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 {
        struct net_device *dev;
@@ -1822,29 +1808,24 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
        void __iomem *regs;
        resource_size_t pciaddr;
        unsigned int addr_len, i, pci_using_dac;
-       u8 pci_rev;
 
 #ifndef MODULE
        static int version_printed;
        if (version_printed++ == 0)
-               printk("%s", version);
+               pr_info("%s", version);
 #endif
 
-       pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev);
-
        if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
-           pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pci_rev < 0x20) {
-               dev_err(&pdev->dev,
-                          "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip\n",
-                          pdev->vendor, pdev->device, pci_rev);
-               dev_err(&pdev->dev, "Try the \"8139too\" driver instead.\n");
+           pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) {
+               dev_info(&pdev->dev,
+                        "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip, use 8139too\n",
+                        pdev->vendor, pdev->device, pdev->revision);
                return -ENODEV;
        }
 
        dev = alloc_etherdev(sizeof(struct cp_private));
        if (!dev)
                return -ENOMEM;
-       SET_MODULE_OWNER(dev);
        SET_NETDEV_DEV(dev, &pdev->dev);
 
        cp = netdev_priv(dev);
@@ -1887,23 +1868,22 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 
        /* Configure DMA attributes. */
        if ((sizeof(dma_addr_t) > 4) &&
-           !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) &&
-           !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
+           !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+           !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
                pci_using_dac = 1;
        } else {
                pci_using_dac = 0;
 
-               rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+               rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (rc) {
                        dev_err(&pdev->dev,
-                                  "No usable DMA configuration, aborting.\n");
+                               "No usable DMA configuration, aborting\n");
                        goto err_out_res;
                }
-               rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+               rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
                if (rc) {
                        dev_err(&pdev->dev,
-                                  "No usable consistent DMA configuration, "
-                                  "aborting.\n");
+                               "No usable consistent DMA configuration, aborting\n");
                        goto err_out_res;
                }
        }
@@ -1911,11 +1891,14 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
        cp->cpcmd = (pci_using_dac ? PCIDAC : 0) |
                    PCIMulRW | RxChkSum | CpRxOn | CpTxOn;
 
+       dev->features |= NETIF_F_RXCSUM;
+       dev->hw_features |= NETIF_F_RXCSUM;
+
        regs = ioremap(pciaddr, CP_REGS_SIZE);
        if (!regs) {
                rc = -EIO;
                dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n",
-                      (unsigned long long)pci_resource_len(pdev, 1),
+                       (unsigned long long)pci_resource_len(pdev, 1),
                       (unsigned long long)pciaddr);
                goto err_out_res;
        }
@@ -1927,39 +1910,25 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
        /* read MAC address from EEPROM */
        addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6;
        for (i = 0; i < 3; i++)
-               ((u16 *) (dev->dev_addr))[i] =
-                   le16_to_cpu (read_eeprom (regs, i + 7, addr_len));
+               ((__le16 *) (dev->dev_addr))[i] =
+                   cpu_to_le16(read_eeprom (regs, i + 7, addr_len));
        memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
 
-       dev->open = cp_open;
-       dev->stop = cp_close;
-       dev->set_multicast_list = cp_set_rx_mode;
-       dev->hard_start_xmit = cp_start_xmit;
-       dev->get_stats = cp_get_stats;
-       dev->do_ioctl = cp_ioctl;
-       dev->poll = cp_rx_poll;
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       dev->poll_controller = cp_poll_controller;
-#endif
-       dev->weight = 16;       /* arbitrary? from NAPI_HOWTO.txt. */
-#ifdef BROKEN
-       dev->change_mtu = cp_change_mtu;
-#endif
+       dev->netdev_ops = &cp_netdev_ops;
+       netif_napi_add(dev, &cp->napi, cp_rx_poll, 16);
        dev->ethtool_ops = &cp_ethtool_ops;
-       dev->tx_timeout = cp_tx_timeout;
        dev->watchdog_timeo = TX_TIMEOUT;
 
-#if CP_VLAN_TAG_USED
        dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
-       dev->vlan_rx_register = cp_vlan_rx_register;
-#endif
 
        if (pci_using_dac)
                dev->features |= NETIF_F_HIGHDMA;
 
-#if 0 /* disabled by default until verified */
-       dev->features |= NETIF_F_TSO;
-#endif
+       /* disabled by default until verified */
+       dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
+               NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+       dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
+               NETIF_F_HIGHDMA;
 
        dev->irq = pdev->irq;
 
@@ -1967,15 +1936,8 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
        if (rc)
                goto err_out_iomap;
 
-       printk (KERN_INFO "%s: RTL-8139C+ at 0x%lx, "
-               "%02x:%02x:%02x:%02x:%02x:%02x, "
-               "IRQ %d\n",
-               dev->name,
-               dev->base_addr,
-               dev->dev_addr[0], dev->dev_addr[1],
-               dev->dev_addr[2], dev->dev_addr[3],
-               dev->dev_addr[4], dev->dev_addr[5],
-               dev->irq);
+       netdev_info(dev, "RTL-8139C+ at 0x%lx, %pM, IRQ %d\n",
+                   dev->base_addr, dev->dev_addr, dev->irq);
 
        pci_set_drvdata(pdev, dev);
 
@@ -2088,7 +2050,7 @@ static struct pci_driver cp_driver = {
 static int __init cp_init (void)
 {
 #ifdef MODULE
-       printk("%s", version);
+       pr_info("%s", version);
 #endif
        return pci_register_driver(&cp_driver);
 }