bitops: rename for_each_bit() to for_each_set_bit()
[linux-2.6.git] / drivers / net / gianfar.c
index 197b358..61a7b43 100644 (file)
@@ -143,7 +143,6 @@ void gfar_start(struct net_device *dev);
 static void gfar_clear_exact_match(struct net_device *dev);
 static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
 static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb);
 
 MODULE_AUTHOR("Freescale Semiconductor, Inc");
 MODULE_DESCRIPTION("Gianfar Ethernet Driver");
@@ -357,8 +356,11 @@ static void gfar_init_mac(struct net_device *ndev)
        /* Configure the coalescing support */
        gfar_configure_coalescing(priv, 0xFF, 0xFF);
 
-       if (priv->rx_filer_enable)
+       if (priv->rx_filer_enable) {
                rctrl |= RCTRL_FILREN;
+               /* Program the RIR0 reg with the required distribution */
+               gfar_write(&regs->rir0, DEFAULT_RIR0);
+       }
 
        if (priv->rx_csum_enable)
                rctrl |= RCTRL_CHECKSUMMING;
@@ -414,6 +416,36 @@ static void gfar_init_mac(struct net_device *ndev)
        gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
 }
 
+static struct net_device_stats *gfar_get_stats(struct net_device *dev)
+{
+       struct gfar_private *priv = netdev_priv(dev);
+       struct netdev_queue *txq;
+       unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
+       unsigned long tx_packets = 0, tx_bytes = 0;
+       int i = 0;
+
+       for (i = 0; i < priv->num_rx_queues; i++) {
+               rx_packets += priv->rx_queue[i]->stats.rx_packets;
+               rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
+               rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
+       }
+
+       dev->stats.rx_packets = rx_packets;
+       dev->stats.rx_bytes = rx_bytes;
+       dev->stats.rx_dropped = rx_dropped;
+
+       for (i = 0; i < priv->num_tx_queues; i++) {
+               txq = netdev_get_tx_queue(dev, i);
+               tx_bytes += txq->tx_bytes;
+               tx_packets += txq->tx_packets;
+       }
+
+       dev->stats.tx_bytes = tx_bytes;
+       dev->stats.tx_packets = tx_packets;
+
+       return &dev->stats;
+}
+
 static const struct net_device_ops gfar_netdev_ops = {
        .ndo_open = gfar_enet_open,
        .ndo_start_xmit = gfar_start_xmit,
@@ -422,7 +454,7 @@ static const struct net_device_ops gfar_netdev_ops = {
        .ndo_set_multicast_list = gfar_set_multi,
        .ndo_tx_timeout = gfar_timeout,
        .ndo_do_ioctl = gfar_ioctl,
-       .ndo_select_queue = gfar_select_queue,
+       .ndo_get_stats = gfar_get_stats,
        .ndo_vlan_rx_register = gfar_vlan_rx_register,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_validate_addr = eth_validate_addr,
@@ -472,10 +504,6 @@ static inline int gfar_uses_fcb(struct gfar_private *priv)
        return priv->vlgrp || priv->rx_csum_enable;
 }
 
-u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb)
-{
-       return skb_get_queue_mapping(skb);
-}
 static void free_tx_pointers(struct gfar_private *priv)
 {
        int i = 0;
@@ -970,7 +998,7 @@ static int gfar_probe(struct of_device *ofdev,
        }
 
        /* Need to reverse the bit maps as  bit_map's MSB is q0
-        * but, for_each_bit parses from right to left, which
+        * but, for_each_set_bit parses from right to left, which
         * basically reverses the queue numbers */
        for (i = 0; i< priv->num_grps; i++) {
                priv->gfargrp[i].tx_bit_map = reverse_bitmap(
@@ -983,7 +1011,7 @@ static int gfar_probe(struct of_device *ofdev,
         * also assign queues to groups */
        for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
                priv->gfargrp[grp_idx].num_rx_queues = 0x0;
-               for_each_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
+               for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
                                priv->num_rx_queues) {
                        priv->gfargrp[grp_idx].num_rx_queues++;
                        priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
@@ -991,7 +1019,7 @@ static int gfar_probe(struct of_device *ofdev,
                        rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
                }
                priv->gfargrp[grp_idx].num_tx_queues = 0x0;
-               for_each_bit (i, &priv->gfargrp[grp_idx].tx_bit_map,
+               for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
                                priv->num_tx_queues) {
                        priv->gfargrp[grp_idx].num_tx_queues++;
                        priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
@@ -1022,6 +1050,9 @@ static int gfar_probe(struct of_device *ofdev,
                priv->rx_queue[i]->rxic = DEFAULT_RXIC;
        }
 
+       /* Enable filer if using multiple RX queues */
+       if (priv->num_rx_queues > 1)
+               priv->rx_filer_enable = 1;
        /* Enable most messages by default */
        priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
 
@@ -1246,7 +1277,7 @@ static int gfar_restore(struct device *dev)
                phy_start(priv->phydev);
 
        netif_device_attach(ndev);
-       napi_enable(&priv->gfargrp.napi);
+       enable_napi(priv);
 
        return 0;
 }
@@ -1678,7 +1709,7 @@ void gfar_configure_coalescing(struct gfar_private *priv,
 
        if (priv->mode == MQ_MG_MODE) {
                baddr = &regs->txic0;
-               for_each_bit (i, &tx_mask, priv->num_tx_queues) {
+               for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
                        if (likely(priv->tx_queue[i]->txcoalescing)) {
                                gfar_write(baddr + i, 0);
                                gfar_write(baddr + i, priv->tx_queue[i]->txic);
@@ -1686,7 +1717,7 @@ void gfar_configure_coalescing(struct gfar_private *priv,
                }
 
                baddr = &regs->rxic0;
-               for_each_bit (i, &rx_mask, priv->num_rx_queues) {
+               for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
                        if (likely(priv->rx_queue[i]->rxcoalescing)) {
                                gfar_write(baddr + i, 0);
                                gfar_write(baddr + i, priv->rx_queue[i]->rxic);
@@ -1928,19 +1959,17 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
        /* total number of fragments in the SKB */
        nr_frags = skb_shinfo(skb)->nr_frags;
 
-       spin_lock_irqsave(&tx_queue->txlock, flags);
-
        /* check if there is space to queue this packet */
        if ((nr_frags+1) > tx_queue->num_txbdfree) {
                /* no space, stop the queue */
                netif_tx_stop_queue(txq);
                dev->stats.tx_fifo_errors++;
-               spin_unlock_irqrestore(&tx_queue->txlock, flags);
                return NETDEV_TX_BUSY;
        }
 
        /* Update transmit stats */
-       dev->stats.tx_bytes += skb->len;
+       txq->tx_bytes += skb->len;
+       txq->tx_packets++;
 
        txbdp = txbdp_start = tx_queue->cur_tx;
 
@@ -1999,6 +2028,20 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
        lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
 
        /*
+        * We can work in parallel with gfar_clean_tx_ring(), except
+        * when modifying num_txbdfree. Note that we didn't grab the lock
+        * when we were reading the num_txbdfree and checking for available
+        * space, that's because outside of this function it can only grow,
+        * and once we've got needed space, it cannot suddenly disappear.
+        *
+        * The lock also protects us from gfar_error(), which can modify
+        * regs->tstat and thus retrigger the transfers, which is why we
+        * also must grab the lock before setting ready bit for the first
+        * to be transmitted BD.
+        */
+       spin_lock_irqsave(&tx_queue->txlock, flags);
+
+       /*
         * The powerpc-specific eieio() is used, as wmb() has too strong
         * semantics (it requires synchronization between cacheable and
         * uncacheable mappings, which eieio doesn't provide and which we
@@ -2225,6 +2268,8 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
        skb_dirtytx = tx_queue->skb_dirtytx;
 
        while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
+               unsigned long flags;
+
                frags = skb_shinfo(skb)->nr_frags;
                lbdp = skip_txbd(bdp, frags, base, tx_ring_size);
 
@@ -2269,7 +2314,9 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
                        TX_RING_MOD_MASK(tx_ring_size);
 
                howmany++;
+               spin_lock_irqsave(&tx_queue->txlock, flags);
                tx_queue->num_txbdfree += frags + 1;
+               spin_unlock_irqrestore(&tx_queue->txlock, flags);
        }
 
        /* If we freed a buffer, we can restart transmission, if necessary */
@@ -2280,8 +2327,6 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
        tx_queue->skb_dirtytx = skb_dirtytx;
        tx_queue->dirty_tx = bdp;
 
-       dev->stats.tx_packets += howmany;
-
        return howmany;
 }
 
@@ -2419,10 +2464,11 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
        fcb = (struct rxfcb *)skb->data;
 
        /* Remove the FCB from the skb */
-       skb_set_queue_mapping(skb, fcb->rq);
        /* Remove the padded bytes, if there are any */
-       if (amount_pull)
+       if (amount_pull) {
+               skb_record_rx_queue(skb, fcb->rq);
                skb_pull(skb, amount_pull);
+       }
 
        if (priv->rx_csum_enable)
                gfar_rx_checksum(skb, fcb);
@@ -2495,24 +2541,22 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
                        }
                } else {
                        /* Increment the number of packets */
-                       dev->stats.rx_packets++;
+                       rx_queue->stats.rx_packets++;
                        howmany++;
 
                        if (likely(skb)) {
                                pkt_len = bdp->length - ETH_FCS_LEN;
                                /* Remove the FCS from the packet length */
                                skb_put(skb, pkt_len);
-                               dev->stats.rx_bytes += pkt_len;
-
-                               if (in_irq() || irqs_disabled())
-                                       printk("Interrupt problem!\n");
+                               rx_queue->stats.rx_bytes += pkt_len;
+                               skb_record_rx_queue(skb, rx_queue->qindex);
                                gfar_process_frame(dev, skb, amount_pull);
 
                        } else {
                                if (netif_msg_rx_err(priv))
                                        printk(KERN_WARNING
                                               "%s: Missing skb!\n", dev->name);
-                               dev->stats.rx_dropped++;
+                               rx_queue->stats.rx_dropped++;
                                priv->extra_stats.rx_skbmissing++;
                        }
 
@@ -2550,7 +2594,6 @@ static int gfar_poll(struct napi_struct *napi, int budget)
        int tx_cleaned = 0, i, left_over_budget = budget;
        unsigned long serviced_queues = 0;
        int num_queues = 0;
-       unsigned long flags;
 
        num_queues = gfargrp->num_rx_queues;
        budget_per_queue = budget/num_queues;
@@ -2564,20 +2607,13 @@ static int gfar_poll(struct napi_struct *napi, int budget)
                budget_per_queue = left_over_budget/num_queues;
                left_over_budget = 0;
 
-               for_each_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
+               for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
                        if (test_bit(i, &serviced_queues))
                                continue;
                        rx_queue = priv->rx_queue[i];
                        tx_queue = priv->tx_queue[rx_queue->qindex];
 
-                       /* If we fail to get the lock,
-                        * don't bother with the TX BDs */
-                       if (spin_trylock_irqsave(&tx_queue->txlock, flags)) {
-                               tx_cleaned += gfar_clean_tx_ring(tx_queue);
-                               spin_unlock_irqrestore(&tx_queue->txlock,
-                                                       flags);
-                       }
-
+                       tx_cleaned += gfar_clean_tx_ring(tx_queue);
                        rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
                                                        budget_per_queue);
                        rx_cleaned += rx_cleaned_per_queue;
@@ -2639,6 +2675,7 @@ static void gfar_netpoll(struct net_device *dev)
                        gfar_interrupt(priv->gfargrp[i].interruptTransmit,
                                                &priv->gfargrp[i]);
                        enable_irq(priv->gfargrp[i].interruptTransmit);
+               }
        }
 }
 #endif
@@ -2826,11 +2863,11 @@ static void gfar_set_multi(struct net_device *dev)
                        em_num = 0;
                }
 
-               if (dev->mc_count == 0)
+               if (netdev_mc_empty(dev))
                        return;
 
                /* Parse the list, and set the appropriate bits */
-               for(mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
+               netdev_for_each_mc_addr(mc_ptr, dev) {
                        if (idx < em_num) {
                                gfar_set_mac_for_addr(dev, idx,
                                                mc_ptr->dmi_addr);
@@ -2945,14 +2982,22 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
                if (events & IEVENT_CRL)
                        dev->stats.tx_aborted_errors++;
                if (events & IEVENT_XFUN) {
+                       unsigned long flags;
+
                        if (netif_msg_tx_err(priv))
                                printk(KERN_DEBUG "%s: TX FIFO underrun, "
                                       "packet dropped.\n", dev->name);
                        dev->stats.tx_dropped++;
                        priv->extra_stats.tx_underrun++;
 
+                       local_irq_save(flags);
+                       lock_tx_qs(priv);
+
                        /* Reactivate the Tx Queues */
                        gfar_write(&regs->tstat, gfargrp->tstat);
+
+                       unlock_tx_qs(priv);
+                       local_irq_restore(flags);
                }
                if (netif_msg_tx_err(priv))
                        printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);