return res;
}
-/**
- * bond_has_challenged_slaves
- * @bond: the bond we're working on
- *
- * Searches the slave list. Returns 1 if a vlan challenged slave
- * was found, 0 otherwise.
- *
- * Assumes bond->lock is held.
- */
-static int bond_has_challenged_slaves(struct bonding *bond)
-{
- struct slave *slave;
- int i;
-
- bond_for_each_slave(bond, slave, i) {
- if (slave->dev->features & NETIF_F_VLAN_CHALLENGED) {
- pr_debug("found VLAN challenged slave - %s\n",
- slave->dev->name);
- return 1;
- }
- }
-
- pr_debug("no VLAN challenged slaves found\n");
- return 0;
-}
-
/**
* bond_next_vlan - safely skip to the next item in the vlans list.
* @bond: the bond we're working on
static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
{
struct bonding *bond = container_of(work, struct bonding,
- mcast_work.work);
+ mcast_work.work);
bond_resend_igmp_join_requests(bond);
}
}
/* resend IGMP joins since active slave has changed or
- * all were sent on curr_active_slave */
- if (((USES_PRIMARY(bond->params.mode) && new_active) ||
- bond->params.mode == BOND_MODE_ROUNDROBIN) &&
- netif_running(bond->dev)) {
+	 * all were sent on curr_active_slave.
+	 * Resend only if the bond is up, running one of the
+	 * affected modes, and retransmission is enabled. */
+ if (netif_running(bond->dev) && (bond->params.resend_igmp > 0) &&
+ ((USES_PRIMARY(bond->params.mode) && new_active) ||
+ bond->params.mode == BOND_MODE_ROUNDROBIN)) {
bond->igmp_retrans = bond->params.resend_igmp;
queue_delayed_work(bond->wq, &bond->mcast_work, 0);
}
return 0;
}
-#define BOND_VLAN_FEATURES \
- (NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX | \
- NETIF_F_HW_VLAN_FILTER)
-
-/*
- * Compute the common dev->feature set available to all slaves. Some
- * feature bits are managed elsewhere, so preserve those feature bits
- * on the master device.
- */
-static int bond_compute_features(struct bonding *bond)
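+/*
+ * Adjust the bond's requested features to what its slaves can support.
+ * Called by the networking core via ndo_fix_features whenever
+ * dev->features is about to change.
+ */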
+static u32 bond_fix_features(struct net_device *dev, u32 features)
{
struct slave *slave;
- struct net_device *bond_dev = bond->dev;
- u32 features = bond_dev->features;
- u32 vlan_features = 0;
- unsigned short max_hard_header_len = max((u16)ETH_HLEN,
- bond_dev->hard_header_len);
+ struct bonding *bond = netdev_priv(dev);
+ u32 mask;
int i;
- features &= ~(NETIF_F_ALL_CSUM | BOND_VLAN_FEATURES);
- features |= NETIF_F_GSO_MASK | NETIF_F_NO_CSUM | NETIF_F_NOCACHE_COPY;
+ read_lock(&bond->lock);
- if (!bond->first_slave)
- goto done;
+ if (!bond->first_slave) {
+ /* Disable adding VLANs to empty bond. But why? --mq */
+ features |= NETIF_F_VLAN_CHALLENGED;
+ goto out;
+ }
+ mask = features;
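+	/* ONE_FOR_ALL bits are ORed in from the slaves below, so start
+	 * from zero; ALL_FOR_ALL bits are ANDed down, so start from all. */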
features &= ~NETIF_F_ONE_FOR_ALL;
+ features |= NETIF_F_ALL_FOR_ALL;
- vlan_features = bond->first_slave->dev->vlan_features;
bond_for_each_slave(bond, slave, i) {
features = netdev_increment_features(features,
slave->dev->features,
- NETIF_F_ONE_FOR_ALL);
+ mask);
+ }
+
+out:
+ read_unlock(&bond->lock);
+ return features;
+}
+
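+/* feature bits that may be propagated to devices stacked on the bond */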
+#define BOND_VLAN_FEATURES (NETIF_F_ALL_TX_OFFLOADS | \
+ NETIF_F_SOFT_FEATURES | \
+ NETIF_F_LRO)
+
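+/*
+ * Recompute vlan_features and hard_header_len from the slaves, then
+ * have the core re-evaluate the bond's own features.
+ */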
+static void bond_compute_features(struct bonding *bond)
+{
+ struct slave *slave;
+ struct net_device *bond_dev = bond->dev;
+ u32 vlan_features = BOND_VLAN_FEATURES;
+ unsigned short max_hard_header_len = ETH_HLEN;
+ int i;
+
+ read_lock(&bond->lock);
+
+ if (!bond->first_slave)
+ goto done;
+
+ bond_for_each_slave(bond, slave, i) {
vlan_features = netdev_increment_features(vlan_features,
- slave->dev->vlan_features,
- NETIF_F_ONE_FOR_ALL);
+ slave->dev->vlan_features, BOND_VLAN_FEATURES);
+
if (slave->dev->hard_header_len > max_hard_header_len)
max_hard_header_len = slave->dev->hard_header_len;
}
done:
- features |= (bond_dev->features & BOND_VLAN_FEATURES);
- bond_dev->features = netdev_fix_features(bond_dev, features);
- bond_dev->vlan_features = netdev_fix_features(bond_dev, vlan_features);
+ bond_dev->vlan_features = vlan_features;
bond_dev->hard_header_len = max_hard_header_len;
- return 0;
+ read_unlock(&bond->lock);
+
+ netdev_change_features(bond_dev);
}
static void bond_setup_by_slave(struct net_device *bond_dev,
struct netdev_hw_addr *ha;
struct sockaddr addr;
int link_reporting;
- int old_features = bond_dev->features;
int res = 0;
if (!bond->params.use_carrier && slave_dev->ethtool_ops == NULL &&
bond_dev->name, slave_dev->name);
}
- /* bond must be initialized by bond_open() before enslaving */
- if (!(bond_dev->flags & IFF_UP)) {
- pr_warning("%s: master_dev is not up in bond_enslave\n",
- bond_dev->name);
- }
-
/* already enslaved */
if (slave_dev->flags & IFF_SLAVE) {
pr_debug("Error, Device was already enslaved\n");
pr_warning("%s: Warning: enslaved VLAN challenged slave %s. Adding VLANs will be blocked as long as %s is part of bond %s\n",
bond_dev->name, slave_dev->name,
slave_dev->name, bond_dev->name);
- bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
}
} else {
pr_debug("%s: ! NETIF_F_VLAN_CHALLENGED\n", slave_dev->name);
- if (bond->slave_cnt == 0) {
- /* First slave, and it is not VLAN challenged,
- * so remove the block of adding VLANs over the bond.
- */
- bond_dev->features &= ~NETIF_F_VLAN_CHALLENGED;
- }
}
/*
}
}
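+	/* notify interested subsystems that slave_dev is about to join a bond */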
+ call_netdevice_notifiers(NETDEV_JOIN, slave_dev);
+
/* If this is the first slave, then we need to set the master's hardware
* address to be the same as the slave's. */
if (is_zero_ether_addr(bond->dev->dev_addr))
new_slave->delay = 0;
new_slave->link_failure_count = 0;
- bond_compute_features(bond);
-
write_unlock_bh(&bond->lock);
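+	/* bond_compute_features() takes bond->lock for reading, so it
+	 * must run after the write lock is dropped */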
+ bond_compute_features(bond);
+
read_lock(&bond->lock);
new_slave->last_arp_rx = jiffies;
kfree(new_slave);
err_undo_flags:
- bond_dev->features = old_features;
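+	/* recompute features from whatever slaves are left */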
+ bond_compute_features(bond);
return res;
}
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave, *oldcurrent;
struct sockaddr addr;
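+	/* snapshot features to detect the VLAN-challenged transition below */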
+ u32 old_features = bond_dev->features;
/* slave is not a slave or master is not master of this slave */
if (!(slave_dev->flags & IFF_SLAVE) ||
}
block_netpoll_tx();
- netdev_bonding_change(bond_dev, NETDEV_BONDING_DESLAVE);
+ netdev_bonding_change(bond_dev, NETDEV_RELEASE);
write_lock_bh(&bond->lock);
slave = bond_get_slave_by_dev(bond, slave_dev);
/* release the slave from its bond */
bond_detach_slave(bond, slave);
- bond_compute_features(bond);
-
if (bond->primary_slave == slave)
bond->primary_slave = NULL;
*/
memset(bond_dev->dev_addr, 0, bond_dev->addr_len);
- if (!bond->vlgrp) {
- bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
- } else {
+ if (bond->vlgrp) {
pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n",
bond_dev->name, bond_dev->name);
pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n",
bond_dev->name);
}
- } else if ((bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
- !bond_has_challenged_slaves(bond)) {
- pr_info("%s: last VLAN challenged slave %s left bond %s. VLAN blocking is removed\n",
- bond_dev->name, slave_dev->name, bond_dev->name);
- bond_dev->features &= ~NETIF_F_VLAN_CHALLENGED;
}
write_unlock_bh(&bond->lock);
unblock_netpoll_tx();
+ bond_compute_features(bond);
+ if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
+ (old_features & NETIF_F_VLAN_CHALLENGED))
+ pr_info("%s: last VLAN challenged slave %s left bond %s. VLAN blocking is removed\n",
+ bond_dev->name, slave_dev->name, bond_dev->name);
+
/* must do this from outside any spinlocks */
bond_destroy_slave_symlinks(bond_dev, slave_dev);
bond_alb_deinit_slave(bond, slave);
}
- bond_compute_features(bond);
-
bond_destroy_slave_symlinks(bond_dev, slave_dev);
bond_del_vlans_from_slave(bond, slave_dev);
*/
memset(bond_dev->dev_addr, 0, bond_dev->addr_len);
- if (!bond->vlgrp) {
- bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
- } else {
+ if (bond->vlgrp) {
pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n",
bond_dev->name, bond_dev->name);
pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n",
out:
write_unlock_bh(&bond->lock);
+
+ bond_compute_features(bond);
+
return 0;
}
int i, slave_no, res = 1;
struct iphdr *iph = ip_hdr(skb);
- read_lock(&bond->lock);
-
- if (!BOND_IS_OK(bond))
- goto out;
/*
* Start with the curr_active_slave that joined the bond as the
* default for sending IGMP traffic. For failover purposes one
/* no suitable interface, frame not sent */
dev_kfree_skb(skb);
}
- read_unlock(&bond->lock);
+
return NETDEV_TX_OK;
}
struct bonding *bond = netdev_priv(bond_dev);
int res = 1;
- read_lock(&bond->lock);
read_lock(&bond->curr_slave_lock);
- if (!BOND_IS_OK(bond))
- goto out;
+ if (bond->curr_active_slave)
+ res = bond_dev_queue_xmit(bond, skb,
+ bond->curr_active_slave->dev);
- if (!bond->curr_active_slave)
- goto out;
-
- res = bond_dev_queue_xmit(bond, skb, bond->curr_active_slave->dev);
-
-out:
if (res)
/* no suitable interface, frame not sent */
dev_kfree_skb(skb);
read_unlock(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
+
return NETDEV_TX_OK;
}
int i;
int res = 1;
- read_lock(&bond->lock);
-
- if (!BOND_IS_OK(bond))
- goto out;
-
slave_no = bond->xmit_hash_policy(skb, bond->slave_cnt);
bond_for_each_slave(bond, slave, i) {
}
}
-out:
if (res) {
/* no suitable interface, frame not sent */
dev_kfree_skb(skb);
}
- read_unlock(&bond->lock);
+
return NETDEV_TX_OK;
}
int i;
int res = 1;
- read_lock(&bond->lock);
-
- if (!BOND_IS_OK(bond))
- goto out;
-
read_lock(&bond->curr_slave_lock);
start_at = bond->curr_active_slave;
read_unlock(&bond->curr_slave_lock);
dev_kfree_skb(skb);
/* frame sent to all suitable interfaces */
- read_unlock(&bond->lock);
return NETDEV_TX_OK;
}
struct slave *slave = NULL;
struct slave *check_slave;
- read_lock(&bond->lock);
-
- if (!BOND_IS_OK(bond) || !skb->queue_mapping)
- goto out;
+ if (!skb->queue_mapping)
+ return 1;
/* Find out if any slaves have the same mapping as this skb. */
bond_for_each_slave(bond, check_slave, i) {
res = bond_dev_queue_xmit(bond, skb, slave->dev);
}
-out:
- read_unlock(&bond->lock);
return res;
}
return txq;
}
-static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bonding *bond = netdev_priv(dev);
- /*
- * If we risk deadlock from transmitting this in the
- * netpoll path, tell netpoll to queue the frame for later tx
- */
- if (is_netpoll_tx_blocked(dev))
- return NETDEV_TX_BUSY;
-
if (TX_QUEUE_OVERRIDE(bond->params.mode)) {
if (!bond_slave_override(bond, skb))
return NETDEV_TX_OK;
}
}
+static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct bonding *bond = netdev_priv(dev);
+ netdev_tx_t ret = NETDEV_TX_OK;
+
+ /*
+ * If we risk deadlock from transmitting this in the
+ * netpoll path, tell netpoll to queue the frame for later tx
+ */
+ if (is_netpoll_tx_blocked(dev))
+ return NETDEV_TX_BUSY;
+
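+	/* hold bond->lock so the slave list is stable across the xmit */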
+ read_lock(&bond->lock);
+
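+	/* with no slaves there is nothing to transmit on; drop the frame */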
+ if (bond->slave_cnt)
+ ret = __bond_start_xmit(skb, dev);
+ else
+ dev_kfree_skb(skb);
+
+ read_unlock(&bond->lock);
+
+ return ret;
+}
/*
* set bond mode specific net device operations
static const struct ethtool_ops bond_ethtool_ops = {
.get_drvinfo = bond_ethtool_get_drvinfo,
.get_link = ethtool_op_get_link,
- .get_tx_csum = ethtool_op_get_tx_csum,
- .get_sg = ethtool_op_get_sg,
- .get_tso = ethtool_op_get_tso,
- .get_ufo = ethtool_op_get_ufo,
- .get_flags = ethtool_op_get_flags,
};
static const struct net_device_ops bond_netdev_ops = {
#endif
.ndo_add_slave = bond_enslave,
.ndo_del_slave = bond_release,
+ .ndo_fix_features = bond_fix_features,
};
static void bond_destructor(struct net_device *bond_dev)
* when there are slaves that are not hw accel
* capable
*/
- bond_dev->features |= (NETIF_F_HW_VLAN_TX |
- NETIF_F_HW_VLAN_RX |
- NETIF_F_HW_VLAN_FILTER);
- /* By default, we enable GRO on bonding devices.
- * Actual support requires lowlevel drivers are GRO ready.
- */
- bond_dev->features |= NETIF_F_GRO;
+ bond_dev->hw_features = BOND_VLAN_FEATURES |
+ NETIF_F_HW_VLAN_TX |
+ NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_FILTER;
+
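+	/* drop the specific checksum offload bits, keeping only
+	 * NETIF_F_NO_CSUM user-toggleable */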
+ bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_NO_CSUM);
+ bond_dev->features |= bond_dev->hw_features;
}
static void bond_work_cancel_all(struct bonding *bond)
{
struct bonding *bond = netdev_priv(bond_dev);
struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
+ struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
pr_debug("Begin bond_init for %s\n", bond_dev->name);
+	/*
+	 * Initialize locks that may be required during
+	 * enslave/release operations. All of the bond_open work
+	 * (of which this is part) should really be moved to
+	 * a phase prior to dev_open.
+	 */
+ spin_lock_init(&(bond_info->tx_hashtbl_lock));
+ spin_lock_init(&(bond_info->rx_hashtbl_lock));
+
bond->wq = create_singlethread_workqueue(bond_dev->name);
if (!bond->wq)
return -ENOMEM;
rtnl_lock();
- bond_dev = alloc_netdev_mq(sizeof(struct bonding), name ? name : "",
- bond_setup, tx_queues);
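+	/* the "bond%d" template makes register_netdevice() assign the
+	 * next free name when none was given; a duplicate explicit name
+	 * now fails there with -EEXIST instead of being checked by hand */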
+ bond_dev = alloc_netdev_mq(sizeof(struct bonding),
+ name ? name : "bond%d",
+ bond_setup, tx_queues);
if (!bond_dev) {
pr_err("%s: eek! can't alloc netdev!\n", name);
rtnl_unlock();
dev_net_set(bond_dev, net);
bond_dev->rtnl_link_ops = &bond_link_ops;
- if (!name) {
- res = dev_alloc_name(bond_dev, "bond%d");
- if (res < 0)
- goto out;
- } else {
- /*
- * If we're given a name to register
- * we need to ensure that its not already
- * registered
- */
- res = -EEXIST;
- if (__dev_get_by_name(net, name) != NULL)
- goto out;
- }
-
res = register_netdevice(bond_dev);
netif_carrier_off(bond_dev);
-out:
rtnl_unlock();
if (res < 0)
bond_destructor(bond_dev);