diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 7de6dca..06be243 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -35,7 +35,6 @@
 
 #define MAX_UDP_CHUNK 1460
 #define MAX_SKBS 32
-#define MAX_QUEUE_DEPTH (MAX_SKBS / 2)
 
 static struct sk_buff_head skb_pool;
 
@@ -49,6 +48,7 @@ static atomic_t trapped;
                (MAX_UDP_CHUNK + sizeof(struct udphdr) + \
                                sizeof(struct iphdr) + sizeof(struct ethhdr))
 
+static void zap_completion_queue(void);
 static void arp_reply(struct sk_buff *skb);
 
 static unsigned int carrier_timeout = 4;
@@ -75,8 +75,7 @@ static void queue_process(struct work_struct *work)
 
                local_irq_save(flags);
                __netif_tx_lock(txq, smp_processor_id());
-               if (netif_tx_queue_stopped(txq) ||
-                   netif_tx_queue_frozen(txq) ||
+               if (netif_tx_queue_frozen_or_stopped(txq) ||
                    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
                        skb_queue_head(&npinfo->txq, skb);
                        __netif_tx_unlock(txq);
@@ -194,14 +193,28 @@ void netpoll_poll_dev(struct net_device *dev)
 
        poll_napi(dev);
 
+       if (dev->priv_flags & IFF_SLAVE) {
+               if (dev->npinfo) {
+                       struct net_device *bond_dev = dev->master;
+                       struct sk_buff *skb;
+                       while ((skb = skb_dequeue(&dev->npinfo->arp_tx))) {
+                               skb->dev = bond_dev;
+                               skb_queue_tail(&bond_dev->npinfo->arp_tx, skb);
+                       }
+               }
+       }
+
        service_arp_queue(dev->npinfo);
 
+       zap_completion_queue();
 }
+EXPORT_SYMBOL(netpoll_poll_dev);
 
 void netpoll_poll(struct netpoll *np)
 {
        netpoll_poll_dev(np->dev);
 }
+EXPORT_SYMBOL(netpoll_poll);
 
 static void refill_skbs(void)
 {
@@ -219,11 +232,40 @@ static void refill_skbs(void)
        spin_unlock_irqrestore(&skb_pool.lock, flags);
 }
 
+static void zap_completion_queue(void)
+{
+       unsigned long flags;
+       struct softnet_data *sd = &get_cpu_var(softnet_data);
+
+       if (sd->completion_queue) {
+               struct sk_buff *clist;
+
+               local_irq_save(flags);
+               clist = sd->completion_queue;
+               sd->completion_queue = NULL;
+               local_irq_restore(flags);
+
+               while (clist != NULL) {
+                       struct sk_buff *skb = clist;
+                       clist = clist->next;
+                       if (skb->destructor) {
+                               atomic_inc(&skb->users);
+                               dev_kfree_skb_any(skb); /* put this one back */
+                       } else {
+                               __kfree_skb(skb);
+                       }
+               }
+       }
+
+       put_cpu_var(softnet_data);
+}
+
 static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
 {
        int count = 0;
        struct sk_buff *skb;
 
+       zap_completion_queue();
        refill_skbs();
 repeat:
 
@@ -255,11 +297,11 @@ static int netpoll_owner_active(struct net_device *dev)
        return 0;
 }
 
-void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
+void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
+                            struct net_device *dev)
 {
        int status = NETDEV_TX_BUSY;
        unsigned long tries;
-       struct net_device *dev = np->dev;
        const struct net_device_ops *ops = dev->netdev_ops;
        /* It is up to the caller to keep npinfo alive. */
        struct netpoll_info *npinfo = np->dev->npinfo;
@@ -282,9 +324,7 @@ void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
                     tries > 0; --tries) {
                        if (__netif_tx_trylock(txq)) {
                                if (!netif_tx_queue_stopped(txq)) {
-                                       dev->priv_flags |= IFF_IN_NETPOLL;
                                        status = ops->ndo_start_xmit(skb, dev);
-                                       dev->priv_flags &= ~IFF_IN_NETPOLL;
                                        if (status == NETDEV_TX_OK)
                                                txq_trans_update(txq);
                                }
@@ -313,6 +353,7 @@ void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
                schedule_delayed_work(&npinfo->tx_work,0);
        }
 }
+EXPORT_SYMBOL(netpoll_send_skb_on_dev);
 
 void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
 {
@@ -374,6 +415,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
 
        netpoll_send_skb(np, skb);
 }
+EXPORT_SYMBOL(netpoll_send_udp);
 
 static void arp_reply(struct sk_buff *skb)
 {
@@ -600,6 +642,7 @@ void netpoll_print_options(struct netpoll *np)
        printk(KERN_INFO "%s: remote ethernet address %pM\n",
                         np->name, np->remote_mac);
 }
+EXPORT_SYMBOL(netpoll_print_options);
 
 int netpoll_parse_options(struct netpoll *np, char *opt)
 {
@@ -692,16 +735,79 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
               np->name, cur);
        return -1;
 }
+EXPORT_SYMBOL(netpoll_parse_options);
 
-int netpoll_setup(struct netpoll *np)
+int __netpoll_setup(struct netpoll *np)
 {
-       struct net_device *ndev = NULL;
-       struct in_device *in_dev;
+       struct net_device *ndev = np->dev;
        struct netpoll_info *npinfo;
        const struct net_device_ops *ops;
        unsigned long flags;
        int err;
 
+       if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
+           !ndev->netdev_ops->ndo_poll_controller) {
+               printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
+                      np->name, np->dev_name);
+               err = -ENOTSUPP;
+               goto out;
+       }
+
+       if (!ndev->npinfo) {
+               npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
+               if (!npinfo) {
+                       err = -ENOMEM;
+                       goto out;
+               }
+
+               npinfo->rx_flags = 0;
+               INIT_LIST_HEAD(&npinfo->rx_np);
+
+               spin_lock_init(&npinfo->rx_lock);
+               skb_queue_head_init(&npinfo->arp_tx);
+               skb_queue_head_init(&npinfo->txq);
+               INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
+
+               atomic_set(&npinfo->refcnt, 1);
+
+               ops = np->dev->netdev_ops;
+               if (ops->ndo_netpoll_setup) {
+                       err = ops->ndo_netpoll_setup(ndev, npinfo);
+                       if (err)
+                               goto free_npinfo;
+               }
+       } else {
+               npinfo = ndev->npinfo;
+               atomic_inc(&npinfo->refcnt);
+       }
+
+       npinfo->netpoll = np;
+
+       if (np->rx_hook) {
+               spin_lock_irqsave(&npinfo->rx_lock, flags);
+               npinfo->rx_flags |= NETPOLL_RX_ENABLED;
+               list_add_tail(&np->rx, &npinfo->rx_np);
+               spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+       }
+
+       /* last thing to do is link it to the net device structure */
+       rcu_assign_pointer(ndev->npinfo, npinfo);
+
+       return 0;
+
+free_npinfo:
+       kfree(npinfo);
+out:
+       return err;
+}
+EXPORT_SYMBOL_GPL(__netpoll_setup);
+
+int netpoll_setup(struct netpoll *np)
+{
+       struct net_device *ndev = NULL;
+       struct in_device *in_dev;
+       int err;
+
        if (np->dev_name)
                ndev = dev_get_by_name(&init_net, np->dev_name);
        if (!ndev) {
@@ -774,65 +880,19 @@ int netpoll_setup(struct netpoll *np)
        refill_skbs();
 
        rtnl_lock();
-       if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
-           !ndev->netdev_ops->ndo_poll_controller) {
-               printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
-                      np->name, np->dev_name);
-               err = -ENOTSUPP;
-               goto unlock;
-       }
-
-       if (!ndev->npinfo) {
-               npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
-               if (!npinfo) {
-                       err = -ENOMEM;
-                       goto unlock;
-               }
-
-               npinfo->rx_flags = 0;
-               INIT_LIST_HEAD(&npinfo->rx_np);
-
-               spin_lock_init(&npinfo->rx_lock);
-               skb_queue_head_init(&npinfo->arp_tx);
-               skb_queue_head_init(&npinfo->txq);
-               INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
-
-               atomic_set(&npinfo->refcnt, 1);
-
-               ops = np->dev->netdev_ops;
-               if (ops->ndo_netpoll_setup) {
-                       err = ops->ndo_netpoll_setup(ndev, npinfo);
-                       if (err)
-                               goto free_npinfo;
-               }
-       } else {
-               npinfo = ndev->npinfo;
-               atomic_inc(&npinfo->refcnt);
-       }
-
-       npinfo->netpoll = np;
-
-       if (np->rx_hook) {
-               spin_lock_irqsave(&npinfo->rx_lock, flags);
-               npinfo->rx_flags |= NETPOLL_RX_ENABLED;
-               list_add_tail(&np->rx, &npinfo->rx_np);
-               spin_unlock_irqrestore(&npinfo->rx_lock, flags);
-       }
-
-       /* last thing to do is link it to the net device structure */
-       rcu_assign_pointer(ndev->npinfo, npinfo);
+       err = __netpoll_setup(np);
        rtnl_unlock();
 
+       if (err)
+               goto put;
+
        return 0;
 
-free_npinfo:
-       kfree(npinfo);
-unlock:
-       rtnl_unlock();
 put:
        dev_put(ndev);
        return err;
 }
+EXPORT_SYMBOL(netpoll_setup);
 
 static int __init netpoll_init(void)
 {
@@ -841,61 +901,65 @@ static int __init netpoll_init(void)
 }
 core_initcall(netpoll_init);
 
-void netpoll_cleanup(struct netpoll *np)
+void __netpoll_cleanup(struct netpoll *np)
 {
        struct netpoll_info *npinfo;
        unsigned long flags;
-       int free = 0;
 
-       if (!np->dev)
+       npinfo = np->dev->npinfo;
+       if (!npinfo)
                return;
 
-       rtnl_lock();
-       npinfo = np->dev->npinfo;
-       if (npinfo) {
-               if (!list_empty(&npinfo->rx_np)) {
-                       spin_lock_irqsave(&npinfo->rx_lock, flags);
-                       list_del(&np->rx);
-                       if (list_empty(&npinfo->rx_np))
-                               npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
-                       spin_unlock_irqrestore(&npinfo->rx_lock, flags);
-               }
+       if (!list_empty(&npinfo->rx_np)) {
+               spin_lock_irqsave(&npinfo->rx_lock, flags);
+               list_del(&np->rx);
+               if (list_empty(&npinfo->rx_np))
+                       npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
+               spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+       }
 
-               free = atomic_dec_and_test(&npinfo->refcnt);
-               if (free) {
-                       const struct net_device_ops *ops;
+       if (atomic_dec_and_test(&npinfo->refcnt)) {
+               const struct net_device_ops *ops;
 
-                       ops = np->dev->netdev_ops;
-                       if (ops->ndo_netpoll_cleanup)
-                               ops->ndo_netpoll_cleanup(np->dev);
+               ops = np->dev->netdev_ops;
+               if (ops->ndo_netpoll_cleanup)
+                       ops->ndo_netpoll_cleanup(np->dev);
 
-                       rcu_assign_pointer(np->dev->npinfo, NULL);
-               }
-       }
-       rtnl_unlock();
+               rcu_assign_pointer(np->dev->npinfo, NULL);
 
-       if (free) {
                /* avoid racing with NAPI reading npinfo */
                synchronize_rcu_bh();
 
                skb_queue_purge(&npinfo->arp_tx);
                skb_queue_purge(&npinfo->txq);
-               cancel_rearming_delayed_work(&npinfo->tx_work);
+               cancel_delayed_work_sync(&npinfo->tx_work);
 
                /* clean after last, unfinished work */
                __skb_queue_purge(&npinfo->txq);
                kfree(npinfo);
        }
+}
+EXPORT_SYMBOL_GPL(__netpoll_cleanup);
 
-       dev_put(np->dev);
+void netpoll_cleanup(struct netpoll *np)
+{
+       if (!np->dev)
+               return;
 
+       rtnl_lock();
+       __netpoll_cleanup(np);
+       rtnl_unlock();
+
+       dev_put(np->dev);
        np->dev = NULL;
 }
+EXPORT_SYMBOL(netpoll_cleanup);
 
 int netpoll_trap(void)
 {
        return atomic_read(&trapped);
 }
+EXPORT_SYMBOL(netpoll_trap);
 
 void netpoll_set_trap(int trap)
 {
@@ -904,14 +968,4 @@ void netpoll_set_trap(int trap)
        else
                atomic_dec(&trapped);
 }
-
-EXPORT_SYMBOL(netpoll_send_skb);
 EXPORT_SYMBOL(netpoll_set_trap);
-EXPORT_SYMBOL(netpoll_trap);
-EXPORT_SYMBOL(netpoll_print_options);
-EXPORT_SYMBOL(netpoll_parse_options);
-EXPORT_SYMBOL(netpoll_setup);
-EXPORT_SYMBOL(netpoll_cleanup);
-EXPORT_SYMBOL(netpoll_send_udp);
-EXPORT_SYMBOL(netpoll_poll_dev);
-EXPORT_SYMBOL(netpoll_poll);
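
Note on the refactor above: netpoll_setup() keeps only the dev_get_by_name()
lookup and skb pool refill, and the actual attach work moves into the new
__netpoll_setup(), which expects np->dev to already be set and rtnl_lock to be
held. Together with the ndo_netpoll_setup() hook and npinfo refcounting shown
in the diff, this lets a stacked driver (bonding is the motivating case, given
the IFF_SLAVE hunk in netpoll_poll_dev()) attach netpoll to a lower device
from its own callback. Below is a minimal sketch of such a caller; the names
my_stacked_setup() and my_lower_dev() are hypothetical, invented for
illustration — only __netpoll_setup()/__netpoll_cleanup() and their locking
rules come from this diff.

/*
 * Hypothetical caller of the new helper. Assumes rtnl_lock is already
 * held, as it is when the core invokes ndo_netpoll_setup() from inside
 * __netpoll_setup(), and as netpoll_setup() takes it around its own call.
 */
static int my_stacked_setup(struct net_device *dev,
			    struct netpoll_info *npinfo)
{
	struct netpoll *np;
	int err;

	np = kzalloc(sizeof(*np), GFP_KERNEL);
	if (!np)
		return -ENOMEM;

	np->dev = my_lower_dev(dev);	/* hypothetical accessor */
	err = __netpoll_setup(np);	/* creates or refcounts npinfo */
	if (err)
		kfree(np);
	return err;
}

The matching teardown would call __netpoll_cleanup() (again under rtnl_lock)
and then free the struct netpoll, mirroring how netpoll_cleanup() wraps
__netpoll_cleanup() in the diff.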