bonding: properly stop queuing work when requested
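The periodic work items in bond_main.c (the IGMP rejoin work and the MII and ARP link monitors) now check bond->kill_timers before queuing themselves again, so a bond that is being closed actually stops its delayed work instead of re-arming it after bond_close() has requested shutdown. The diff between these two blobs also includes other bond_main.c changes: VLAN handling without struct vlan_group, a new min_links module parameter, save/restore of skb->queue_mapping around slave selection, and related cleanups.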
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 088fd84..6d79b78 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -98,6 +98,7 @@ static char *mode;
 static char *primary;
 static char *primary_reselect;
 static char *lacp_rate;
+static int min_links;
 static char *ad_select;
 static char *xmit_hash_policy;
 static int arp_interval = BOND_LINK_ARP_INTERV;
@@ -113,9 +114,11 @@ MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
 module_param(tx_queues, int, 0);
 MODULE_PARM_DESC(tx_queues, "Max number of transmit queues (default = 16)");
 module_param_named(num_grat_arp, num_peer_notif, int, 0644);
-MODULE_PARM_DESC(num_grat_arp, "Number of peer notifications to send on failover event (alias of num_unsol_na)");
+MODULE_PARM_DESC(num_grat_arp, "Number of peer notifications to send on "
+                              "failover event (alias of num_unsol_na)");
 module_param_named(num_unsol_na, num_peer_notif, int, 0644);
-MODULE_PARM_DESC(num_unsol_na, "Number of peer notifications to send on failover event (alias of num_grat_arp)");
+MODULE_PARM_DESC(num_unsol_na, "Number of peer notifications to send on "
+                              "failover event (alias of num_grat_arp)");
 module_param(miimon, int, 0);
 MODULE_PARM_DESC(miimon, "Link check interval in milliseconds");
 module_param(updelay, int, 0);
@@ -127,7 +130,7 @@ module_param(use_carrier, int, 0);
 MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; "
                              "0 for off, 1 for on (default)");
 module_param(mode, charp, 0);
-MODULE_PARM_DESC(mode, "Mode of operation : 0 for balance-rr, "
+MODULE_PARM_DESC(mode, "Mode of operation; 0 for balance-rr, "
                       "1 for active-backup, 2 for balance-xor, "
                       "3 for broadcast, 4 for 802.3ad, 5 for balance-tlb, "
                       "6 for balance-alb");
@@ -142,27 +145,38 @@ MODULE_PARM_DESC(primary_reselect, "Reselect primary slave "
                                   "2 for only on active slave "
                                   "failure");
 module_param(lacp_rate, charp, 0);
-MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner "
-                           "(slow/fast)");
+MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; "
+                           "0 for slow, 1 for fast");
 module_param(ad_select, charp, 0);
-MODULE_PARM_DESC(ad_select, "803.ad aggregation selection logic: stable (0, default), bandwidth (1), count (2)");
+MODULE_PARM_DESC(ad_select, "803.ad aggregation selection logic; "
+                           "0 for stable (default), 1 for bandwidth, "
+                           "2 for count");
+module_param(min_links, int, 0);
+MODULE_PARM_DESC(min_links, "Minimum number of available links before turning on carrier");
+
 module_param(xmit_hash_policy, charp, 0);
-MODULE_PARM_DESC(xmit_hash_policy, "XOR hashing method: 0 for layer 2 (default)"
-                                  ", 1 for layer 3+4");
+MODULE_PARM_DESC(xmit_hash_policy, "balance-xor and 802.3ad hashing method; "
+                                  "0 for layer 2 (default), 1 for layer 3+4, "
+                                  "2 for layer 2+3");
 module_param(arp_interval, int, 0);
 MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds");
 module_param_array(arp_ip_target, charp, NULL, 0);
 MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form");
 module_param(arp_validate, charp, 0);
-MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes: none (default), active, backup or all");
+MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes; "
+                              "0 for none (default), 1 for active, "
+                              "2 for backup, 3 for all");
 module_param(fail_over_mac, charp, 0);
-MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to the same MAC.  none (default), active or follow");
+MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to "
+                               "the same MAC; 0 for none (default), "
+                               "1 for active, 2 for follow");
 module_param(all_slaves_active, int, 0);
 MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface"
-                                    "by setting active flag for all slaves.  "
+                                    "by setting active flag for all slaves; "
                                     "0 for never (default), 1 for always.");
 module_param(resend_igmp, int, 0);
-MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on link failure");
+MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on "
+                             "link failure");
 
 /*----------------------------- Global variables ----------------------------*/
 
@@ -319,16 +333,6 @@ static int bond_del_vlan(struct bonding *bond, unsigned short vlan_id)
 
                        kfree(vlan);
 
-                       if (list_empty(&bond->vlan_list) &&
-                           (bond->slave_cnt == 0)) {
-                               /* Last VLAN removed and no slaves, so
-                                * restore block on adding VLANs. This will
-                                * be removed once new slaves that are not
-                                * VLAN challenged will be added.
-                                */
-                               bond->dev->features |= NETIF_F_VLAN_CHALLENGED;
-                       }
-
                        res = 0;
                        goto out;
                }
@@ -378,6 +382,8 @@ struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr)
        return next;
 }
 
+#define bond_queue_mapping(skb) (*(u16 *)((skb)->cb))
+
 /**
  * bond_dev_queue_xmit - Prepare skb for xmit.
  *
@@ -390,6 +396,9 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
 {
        skb->dev = slave_dev;
        skb->priority = 1;
+
+       skb->queue_mapping = bond_queue_mapping(skb);
+
        if (unlikely(netpoll_tx_running(slave_dev)))
                bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
        else
@@ -399,9 +408,8 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
 }
 
 /*
- * In the following 3 functions, bond_vlan_rx_register(), bond_vlan_rx_add_vid
- * and bond_vlan_rx_kill_vid, We don't protect the slave list iteration with a
- * lock because:
+ * In the following 2 functions, bond_vlan_rx_add_vid and bond_vlan_rx_kill_vid,
+ * We don't protect the slave list iteration with a lock because:
  * a. This operation is performed in IOCTL context,
  * b. The operation is protected by the RTNL semaphore in the 8021q code,
  * c. Holding a lock with BH disabled while directly calling a base driver
@@ -417,33 +425,6 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
 */
 
 /**
- * bond_vlan_rx_register - Propagates registration to slaves
- * @bond_dev: bonding net device that got called
- * @grp: vlan group being registered
- */
-static void bond_vlan_rx_register(struct net_device *bond_dev,
-                                 struct vlan_group *grp)
-{
-       struct bonding *bond = netdev_priv(bond_dev);
-       struct slave *slave;
-       int i;
-
-       write_lock_bh(&bond->lock);
-       bond->vlgrp = grp;
-       write_unlock_bh(&bond->lock);
-
-       bond_for_each_slave(bond, slave, i) {
-               struct net_device *slave_dev = slave->dev;
-               const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
-
-               if ((slave_dev->features & NETIF_F_HW_VLAN_RX) &&
-                   slave_ops->ndo_vlan_rx_register) {
-                       slave_ops->ndo_vlan_rx_register(slave_dev, grp);
-               }
-       }
-}
-
-/**
  * bond_vlan_rx_add_vid - Propagates adding an id to slaves
  * @bond_dev: bonding net device that got called
  * @vid: vlan id being added
@@ -480,7 +461,6 @@ static void bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid)
 {
        struct bonding *bond = netdev_priv(bond_dev);
        struct slave *slave;
-       struct net_device *vlan_dev;
        int i, res;
 
        bond_for_each_slave(bond, slave, i) {
@@ -489,12 +469,7 @@ static void bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid)
 
                if ((slave_dev->features & NETIF_F_HW_VLAN_FILTER) &&
                    slave_ops->ndo_vlan_rx_kill_vid) {
-                       /* Save and then restore vlan_dev in the grp array,
-                        * since the slave's driver might clear it.
-                        */
-                       vlan_dev = vlan_group_get_device(bond->vlgrp, vid);
                        slave_ops->ndo_vlan_rx_kill_vid(slave_dev, vid);
-                       vlan_group_set_device(bond->vlgrp, vid, vlan_dev);
                }
        }
 
@@ -510,13 +485,6 @@ static void bond_add_vlans_on_slave(struct bonding *bond, struct net_device *sla
        struct vlan_entry *vlan;
        const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
 
-       if (!bond->vlgrp)
-               return;
-
-       if ((slave_dev->features & NETIF_F_HW_VLAN_RX) &&
-           slave_ops->ndo_vlan_rx_register)
-               slave_ops->ndo_vlan_rx_register(slave_dev, bond->vlgrp);
-
        if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) ||
            !(slave_ops->ndo_vlan_rx_add_vid))
                return;
@@ -530,30 +498,16 @@ static void bond_del_vlans_from_slave(struct bonding *bond,
 {
        const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
        struct vlan_entry *vlan;
-       struct net_device *vlan_dev;
-
-       if (!bond->vlgrp)
-               return;
 
        if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) ||
            !(slave_ops->ndo_vlan_rx_kill_vid))
-               goto unreg;
+               return;
 
        list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
                if (!vlan->vlan_id)
                        continue;
-               /* Save and then restore vlan_dev in the grp array,
-                * since the slave's driver might clear it.
-                */
-               vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
                slave_ops->ndo_vlan_rx_kill_vid(slave_dev, vlan->vlan_id);
-               vlan_group_set_device(bond->vlgrp, vlan->vlan_id, vlan_dev);
        }
-
-unreg:
-       if ((slave_dev->features & NETIF_F_HW_VLAN_RX) &&
-           slave_ops->ndo_vlan_rx_register)
-               slave_ops->ndo_vlan_rx_register(slave_dev, NULL);
 }
 
 /*------------------------------- Link status -------------------------------*/
@@ -619,15 +573,8 @@ static int bond_update_speed_duplex(struct slave *slave)
                return -1;
 
        slave_speed = ethtool_cmd_speed(&etool);
-       switch (slave_speed) {
-       case SPEED_10:
-       case SPEED_100:
-       case SPEED_1000:
-       case SPEED_10000:
-               break;
-       default:
+       if (slave_speed == 0 || slave_speed == ((__u32) -1))
                return -1;
-       }
 
        switch (etool.duplex) {
        case DUPLEX_FULL:
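The hunk above drops the whitelist of known link speeds: any speed reported by ethtool is now accepted except the two "unknown" encodings, presumably so slaves with less common rates need no further driver changes. A minimal sketch of the new acceptance test; the helper name is made up for illustration and is not part of the patch:

/* Accept whatever speed ethtool reports, rejecting only the two
 * "don't know" values: 0 and the all-ones encoding drivers use for
 * an unknown speed.  Hypothetical helper, not from the patch.
 */
static inline bool example_speed_is_valid(__u32 speed)
{
	return speed != 0 && speed != (__u32)-1;
}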
@@ -830,29 +777,32 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
 
        read_lock(&bond->lock);
 
+       if (bond->kill_timers)
+               goto out;
+
        /* rejoin all groups on bond device */
        __bond_resend_igmp_join_requests(bond->dev);
 
        /* rejoin all groups on vlan devices */
-       if (bond->vlgrp) {
-               list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
-                       vlan_dev = vlan_group_get_device(bond->vlgrp,
-                                                        vlan->vlan_id);
-                       if (vlan_dev)
-                               __bond_resend_igmp_join_requests(vlan_dev);
-               }
+       list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
+               rcu_read_lock();
+               vlan_dev = __vlan_find_dev_deep(bond->dev,
+                                               vlan->vlan_id);
+               rcu_read_unlock();
+               if (vlan_dev)
+                       __bond_resend_igmp_join_requests(vlan_dev);
        }
 
-       if (--bond->igmp_retrans > 0)
+       if ((--bond->igmp_retrans > 0) && !bond->kill_timers)
                queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
-
+out:
        read_unlock(&bond->lock);
 }
 
 static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
 {
        struct bonding *bond = container_of(work, struct bonding,
-                                                       mcast_work.work);
+                                           mcast_work.work);
        bond_resend_igmp_join_requests(bond);
 }
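This is the pattern the subject line refers to, and the same guard is added to the MII and ARP monitors further down: check bond->kill_timers after taking the bond lock, and check it again before re-queuing the delayed work, since bond_close() may set the flag while the work item is already running on another CPU. A condensed sketch of the shape; the field names follow the driver, the monitor body is elided, and the function name is made up:

static void example_monitor(struct work_struct *work)
{
	struct bonding *bond = container_of(work, struct bonding,
					    mcast_work.work);

	read_lock(&bond->lock);
	if (bond->kill_timers)		/* bond_close() asked us to stop */
		goto out;

	/* ... periodic work ... */

	if (!bond->kill_timers)		/* re-arm only while still wanted */
		queue_delayed_work(bond->wq, &bond->mcast_work, HZ / 5);
out:
	read_unlock(&bond->lock);
}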
 
@@ -1172,10 +1122,12 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
        }
 
        /* resend IGMP joins since active slave has changed or
-        * all were sent on curr_active_slave */
-       if (((USES_PRIMARY(bond->params.mode) && new_active) ||
-            bond->params.mode == BOND_MODE_ROUNDROBIN) &&
-           netif_running(bond->dev)) {
+        * all were sent on curr_active_slave.
+        * resend only if bond is brought up with the affected
+        * bonding modes and the retransmission is enabled */
+       if (netif_running(bond->dev) && (bond->params.resend_igmp > 0) &&
+           ((USES_PRIMARY(bond->params.mode) && new_active) ||
+            bond->params.mode == BOND_MODE_ROUNDROBIN)) {
                bond->igmp_retrans = bond->params.resend_igmp;
                queue_delayed_work(bond->wq, &bond->mcast_work, 0);
        }
@@ -1280,6 +1232,7 @@ static inline int slave_enable_netpoll(struct slave *slave)
                goto out;
 
        np->dev = slave->dev;
+       strlcpy(np->dev_name, slave->dev->name, IFNAMSIZ);
        err = __netpoll_setup(np);
        if (err) {
                kfree(np);
@@ -1410,9 +1363,9 @@ out:
        return features;
 }
 
-#define BOND_VLAN_FEATURES     (NETIF_F_ALL_TX_OFFLOADS | \
-                                NETIF_F_SOFT_FEATURES | \
-                                NETIF_F_LRO)
+#define BOND_VLAN_FEATURES     (NETIF_F_ALL_CSUM | NETIF_F_SG | \
+                                NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
+                                NETIF_F_HIGHDMA | NETIF_F_LRO)
 
 static void bond_compute_features(struct bonding *bond)
 {
@@ -1542,12 +1495,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                           bond_dev->name, slave_dev->name);
        }
 
-       /* bond must be initialized by bond_open() before enslaving */
-       if (!(bond_dev->flags & IFF_UP)) {
-               pr_warning("%s: master_dev is not up in bond_enslave\n",
-                          bond_dev->name);
-       }
-
        /* already enslaved */
        if (slave_dev->flags & IFF_SLAVE) {
                pr_debug("Error, Device was already enslaved\n");
@@ -1558,7 +1505,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
        /* no need to lock since we're protected by rtnl_lock */
        if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) {
                pr_debug("%s: NETIF_F_VLAN_CHALLENGED\n", slave_dev->name);
-               if (bond->vlgrp) {
+               if (bond_vlan_used(bond)) {
                        pr_err("%s: Error: cannot enslave VLAN challenged slave %s on VLAN enabled bond %s\n",
                               bond_dev->name, slave_dev->name, bond_dev->name);
                        return -EPERM;
@@ -1613,8 +1560,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 
                        if (slave_dev->type != ARPHRD_ETHER)
                                bond_setup_by_slave(bond_dev, slave_dev);
-                       else
+                       else {
                                ether_setup(bond_dev);
+                               bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+                       }
 
                        netdev_bonding_change(bond_dev,
                                              NETDEV_POST_TYPE_CHANGE);
@@ -1640,6 +1589,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                }
        }
 
+       call_netdevice_notifiers(NETDEV_JOIN, slave_dev);
+
        /* If this is the first slave, then we need to set the master's hardware
         * address to be the same as the slave's. */
        if (is_zero_ether_addr(bond->dev->dev_addr))
@@ -1842,8 +1793,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                        /* Initialize AD with the number of times that the AD timer is called in 1 second
                         * can be called only after the mac address of the bond is set
                         */
-                       bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL,
-                                           bond->params.lacp_fast);
+                       bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL);
                } else {
                        SLAVE_AD_INFO(new_slave).id =
                                SLAVE_AD_INFO(new_slave->prev).id + 1;
@@ -1972,7 +1922,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
        }
 
        block_netpoll_tx();
-       netdev_bonding_change(bond_dev, NETDEV_BONDING_DESLAVE);
+       netdev_bonding_change(bond_dev, NETDEV_RELEASE);
        write_lock_bh(&bond->lock);
 
        slave = bond_get_slave_by_dev(bond, slave_dev);
@@ -2065,7 +2015,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
                 */
                memset(bond_dev->dev_addr, 0, bond_dev->addr_len);
 
-               if (bond->vlgrp) {
+               if (bond_vlan_used(bond)) {
                        pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n",
                                   bond_dev->name, bond_dev->name);
                        pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n",
@@ -2247,7 +2197,7 @@ static int bond_release_all(struct net_device *bond_dev)
         */
        memset(bond_dev->dev_addr, 0, bond_dev->addr_len);
 
-       if (bond->vlgrp) {
+       if (bond_vlan_used(bond)) {
                pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n",
                           bond_dev->name, bond_dev->name);
                pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n",
@@ -2591,7 +2541,7 @@ void bond_mii_monitor(struct work_struct *work)
        }
 
 re_arm:
-       if (bond->params.miimon)
+       if (bond->params.miimon && !bond->kill_timers)
                queue_delayed_work(bond->wq, &bond->mii_work,
                                   msecs_to_jiffies(bond->params.miimon));
 out:
@@ -2685,7 +2635,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
                if (!targets[i])
                        break;
                pr_debug("basa: target %x\n", targets[i]);
-               if (!bond->vlgrp) {
+               if (!bond_vlan_used(bond)) {
                        pr_debug("basa: empty vlan: arp_send\n");
                        bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
                                      bond->master_ip, 0);
@@ -2720,7 +2670,10 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
 
                vlan_id = 0;
                list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
-                       vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
+                       rcu_read_lock();
+                       vlan_dev = __vlan_find_dev_deep(bond->dev,
+                                                       vlan->vlan_id);
+                       rcu_read_unlock();
                        if (vlan_dev == rt->dst.dev) {
                                vlan_id = vlan->vlan_id;
                                pr_debug("basa: vlan match on %s %d\n",
@@ -2936,7 +2889,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
        }
 
 re_arm:
-       if (bond->params.arp_interval)
+       if (bond->params.arp_interval && !bond->kill_timers)
                queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
 out:
        read_unlock(&bond->lock);
@@ -3204,7 +3157,7 @@ void bond_activebackup_arp_mon(struct work_struct *work)
        bond_ab_arp_probe(bond);
 
 re_arm:
-       if (bond->params.arp_interval)
+       if (bond->params.arp_interval && !bond->kill_timers)
                queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
 out:
        read_unlock(&bond->lock);
@@ -3381,9 +3334,8 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
                }
 
                list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
-                       if (!bond->vlgrp)
-                               continue;
-                       vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
+                       vlan_dev = __vlan_find_dev_deep(bond->dev,
+                                                       vlan->vlan_id);
                        if (vlan_dev == event_dev) {
                                switch (event) {
                                case NETDEV_UP:
@@ -3442,7 +3394,7 @@ static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count)
        int layer4_xor = 0;
 
        if (skb->protocol == htons(ETH_P_IP)) {
-               if (!(iph->frag_off & htons(IP_MF|IP_OFFSET)) &&
+               if (!ip_is_fragment(iph) &&
                    (iph->protocol == IPPROTO_TCP ||
                     iph->protocol == IPPROTO_UDP)) {
                        layer4_xor = ntohs((*layer4hdr ^ *(layer4hdr + 1)));
@@ -3470,9 +3422,27 @@ static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count)
 static int bond_open(struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
+       struct slave *slave;
+       int i;
 
        bond->kill_timers = 0;
 
+       /* reset slave->backup and slave->inactive */
+       read_lock(&bond->lock);
+       if (bond->slave_cnt > 0) {
+               read_lock(&bond->curr_slave_lock);
+               bond_for_each_slave(bond, slave, i) {
+                       if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP)
+                               && (slave != bond->curr_active_slave)) {
+                               bond_set_slave_inactive_flags(slave);
+                       } else {
+                               bond_set_slave_active_flags(slave);
+                       }
+               }
+               read_unlock(&bond->curr_slave_lock);
+       }
+       read_unlock(&bond->lock);
+
        INIT_DELAYED_WORK(&bond->mcast_work, bond_resend_igmp_join_requests_delayed);
 
        if (bond_is_lb(bond)) {
@@ -4198,6 +4168,7 @@ static inline int bond_slave_override(struct bonding *bond,
        return res;
 }
 
+
 static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
 {
        /*
@@ -4208,6 +4179,11 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
         */
        u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
 
+       /*
+        * Save the original txq to restore before passing to the driver
+        */
+       bond_queue_mapping(skb) = skb->queue_mapping;
+
        if (unlikely(txq >= dev->real_num_tx_queues)) {
                do {
                        txq -= dev->real_num_tx_queues;
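Bonding may rewrite skb->queue_mapping to steer a frame to a particular slave, so the mapping seen at select-queue time is parked in skb->cb here and written back in bond_dev_queue_xmit() just before the skb is handed to the slave's driver, keeping a queue id that only made sense to bonding from reaching the slave. A condensed sketch of the pair added by this diff; the wrapper names are hypothetical, the macro is the one defined earlier in the file:

/* Sketch of the save/restore added by this diff: the first two bytes of
 * skb->cb hold the queue_mapping seen when the bond selected a queue,
 * and it is restored before the skb goes down to the slave device.
 */
#define bond_queue_mapping(skb) (*(u16 *)((skb)->cb))

static inline void example_save_queue_mapping(struct sk_buff *skb)
{
	bond_queue_mapping(skb) = skb->queue_mapping;	/* bond_select_queue() */
}

static inline void example_restore_queue_mapping(struct sk_buff *skb)
{
	skb->queue_mapping = bond_queue_mapping(skb);	/* bond_dev_queue_xmit() */
}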
@@ -4329,10 +4305,9 @@ static const struct net_device_ops bond_netdev_ops = {
        .ndo_do_ioctl           = bond_do_ioctl,
        .ndo_set_multicast_list = bond_set_multicast_list,
        .ndo_change_mtu         = bond_change_mtu,
-       .ndo_set_mac_address    = bond_set_mac_address,
+       .ndo_set_mac_address    = bond_set_mac_address,
        .ndo_neigh_setup        = bond_neigh_setup,
-       .ndo_vlan_rx_register   = bond_vlan_rx_register,
-       .ndo_vlan_rx_add_vid    = bond_vlan_rx_add_vid,
+       .ndo_vlan_rx_add_vid    = bond_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = bond_vlan_rx_kill_vid,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_netpoll_setup      = bond_netpoll_setup,
@@ -4378,7 +4353,7 @@ static void bond_setup(struct net_device *bond_dev)
        bond_dev->tx_queue_len = 0;
        bond_dev->flags |= IFF_MASTER|IFF_MULTICAST;
        bond_dev->priv_flags |= IFF_BONDING;
-       bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+       bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
 
        /* At first, we block adding VLANs. That's the only way to
         * prevent problems that occur when adding VLANs over an
@@ -4739,7 +4714,7 @@ static int bond_check_params(struct bond_params *params)
                /* miimon and arp_interval not set, we need one so things
                 * work as expected, see bonding.txt for details
                 */
-               pr_warning("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details.\n");
+               pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details.\n");
        }
 
        if (primary && !USES_PRIMARY(bond_mode)) {
@@ -4796,6 +4771,7 @@ static int bond_check_params(struct bond_params *params)
        params->tx_queues = tx_queues;
        params->all_slaves_active = all_slaves_active;
        params->resend_igmp = resend_igmp;
+       params->min_links = min_links;
 
        if (primary) {
                strncpy(params->primary, primary, IFNAMSIZ);
@@ -4832,9 +4808,19 @@ static int bond_init(struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
        struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
+       struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
 
        pr_debug("Begin bond_init for %s\n", bond_dev->name);
 
+       /*
+        * Initialize locks that may be required during
+        * en/deslave operations.  All of the bond_open work
+        * (of which this is part) should really be moved to
+        * a phase prior to dev_open
+        */
+       spin_lock_init(&(bond_info->tx_hashtbl_lock));
+       spin_lock_init(&(bond_info->rx_hashtbl_lock));
+
        bond->wq = create_singlethread_workqueue(bond_dev->name);
        if (!bond->wq)
                return -ENOMEM;