Lines Matching refs:bond

276 * bond_add_vlan - add a new vlan id on bond
277 * @bond: bond that got the notification
282 static int bond_add_vlan(struct bonding *bond, unsigned short vlan_id)
286 pr_debug("bond: %s, vlan id %d\n",
287 (bond ? bond->dev->name : "None"), vlan_id);
296 write_lock_bh(&bond->lock);
298 list_add_tail(&vlan->vlan_list, &bond->vlan_list);
300 write_unlock_bh(&bond->lock);
302 pr_debug("added VLAN ID %d on bond %s\n", vlan_id, bond->dev->name);
308 * bond_del_vlan - delete a vlan id from bond
309 * @bond: bond that got the notification
312 * returns -ENODEV if @vlan_id was not found in @bond.
314 static int bond_del_vlan(struct bonding *bond, unsigned short vlan_id)
319 pr_debug("bond: %s, vlan id %d\n", bond->dev->name, vlan_id);
322 write_lock_bh(&bond->lock);
324 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
328 if (bond_is_lb(bond))
329 bond_alb_clear_vlan(bond, vlan_id);
331 pr_debug("removed VLAN ID %d from bond %s\n",
332 vlan_id, bond->dev->name);
341 pr_debug("couldn't find VLAN ID %d in bond %s\n",
342 vlan_id, bond->dev->name);
345 write_unlock_bh(&bond->lock);
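
The matches above cover bond_add_vlan()/bond_del_vlan(): both take bond->lock for writing, add_vlan appends the new entry to bond->vlan_list, and del_vlan returns -ENODEV when the id is not on the list. A minimal user-space sketch of that bookkeeping, assuming a NULL-terminated singly linked list and a pthread rwlock as stand-ins for the kernel's list_head and write_lock_bh():

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

struct vlan_entry {
    unsigned short vlan_id;
    struct vlan_entry *next;
};

struct bonding {
    pthread_rwlock_t lock;          /* ~ bond->lock */
    struct vlan_entry *vlan_list;   /* NULL-terminated stand-in for the kernel list_head */
};

static int bond_add_vlan(struct bonding *bond, unsigned short vlan_id)
{
    struct vlan_entry *vlan = malloc(sizeof(*vlan));
    struct vlan_entry **pp;

    if (!vlan)
        return -ENOMEM;
    vlan->vlan_id = vlan_id;
    vlan->next = NULL;

    pthread_rwlock_wrlock(&bond->lock);         /* ~ write_lock_bh(&bond->lock) */
    for (pp = &bond->vlan_list; *pp; pp = &(*pp)->next)
        ;                                       /* walk to the tail, as list_add_tail() does */
    *pp = vlan;
    pthread_rwlock_unlock(&bond->lock);
    return 0;
}

static int bond_del_vlan(struct bonding *bond, unsigned short vlan_id)
{
    struct vlan_entry **pp;
    int res = -ENODEV;                          /* "not found", as the kerneldoc above documents */

    pthread_rwlock_wrlock(&bond->lock);
    for (pp = &bond->vlan_list; *pp; pp = &(*pp)->next) {
        if ((*pp)->vlan_id == vlan_id) {
            struct vlan_entry *dead = *pp;
            *pp = dead->next;
            free(dead);
            res = 0;
            break;
        }
    }
    pthread_rwlock_unlock(&bond->lock);
    return res;
}
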
352 * @bond: the bond we're working on
355 * Returns %NULL if list is empty, bond->next_vlan if @curr is %NULL,
358 * Caller must hold bond->lock
360 struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr)
364 if (list_empty(&bond->vlan_list))
368 next = list_entry(bond->vlan_list.next,
371 last = list_entry(bond->vlan_list.prev,
374 next = list_entry(bond->vlan_list.next,
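
bond_next_vlan(), per its kerneldoc above, returns NULL for an empty list, the first entry when @curr is NULL, and otherwise the entry after @curr, wrapping back to the head after the last one; the caller must hold bond->lock. A hedged sketch of those semantics on the same kind of NULL-terminated stand-in list (the driver itself walks a circular list_head):

#include <stddef.h>

struct vlan_entry {
    unsigned short vlan_id;
    struct vlan_entry *next;
};

struct bonding {
    struct vlan_entry *vlan_list;   /* NULL-terminated stand-in */
};

/* NULL if the list is empty; the head if curr is NULL; otherwise the entry
 * after curr, wrapping back to the head after the last entry. */
static struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr)
{
    if (!bond->vlan_list)
        return NULL;
    if (!curr || !curr->next)
        return bond->vlan_list;
    return curr->next;
}
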
388 * @bond: bond device that got this skb for tx.
392 int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
402 bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
433 struct bonding *bond = netdev_priv(bond_dev);
437 bond_for_each_slave(bond, slave, i) {
443 res = bond_add_vlan(bond, vid);
455 bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at)
468 struct bonding *bond = netdev_priv(bond_dev);
472 bond_for_each_slave(bond, slave, i)
475 res = bond_del_vlan(bond, vid);
485 static void bond_add_vlans_on_slave(struct bonding *bond, struct net_device *slave_dev)
490 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
494 bond->dev->name, vlan->vlan_id,
499 static void bond_del_vlans_from_slave(struct bonding *bond,
504 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
520 static int bond_set_carrier(struct bonding *bond)
525 if (bond->slave_cnt == 0)
528 if (bond->params.mode == BOND_MODE_8023AD)
529 return bond_3ad_set_carrier(bond);
531 bond_for_each_slave(bond, slave, i) {
533 if (!netif_carrier_ok(bond->dev)) {
534 netif_carrier_on(bond->dev);
542 if (netif_carrier_ok(bond->dev)) {
543 netif_carrier_off(bond->dev);
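
bond_set_carrier(), as the matches above outline, delegates to bond_3ad_set_carrier() in 802.3ad mode and otherwise turns the master's carrier on as soon as one slave reports link up, and off when none do or when there are no slaves. An illustrative user-space sketch of that any-slave-up rule; the slave array and the boolean in place of netif_carrier_on/off() are stand-ins, and it returns the resulting state rather than the kernel's changed/unchanged flag:

#include <stdbool.h>

struct slave {
    bool link_up;
};

struct bonding {
    int slave_cnt;
    struct slave *slaves;       /* stand-in for the driver's slave list */
    bool carrier_on;            /* stand-in for netif_carrier_ok(bond->dev) */
};

static bool bond_set_carrier(struct bonding *bond)
{
    for (int i = 0; i < bond->slave_cnt; i++) {
        if (bond->slaves[i].link_up) {
            bond->carrier_on = true;    /* ~ netif_carrier_on(bond->dev) */
            return true;
        }
    }
    bond->carrier_on = false;           /* ~ netif_carrier_off(bond->dev) */
    return false;
}
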
603 static int bond_check_dev_link(struct bonding *bond,
614 if (bond->params.use_carrier)
667 static int bond_set_promiscuity(struct bonding *bond, int inc)
670 if (USES_PRIMARY(bond->params.mode)) {
672 if (bond->curr_active_slave) {
673 err = dev_set_promiscuity(bond->curr_active_slave->dev,
679 bond_for_each_slave(bond, slave, i) {
691 static int bond_set_allmulti(struct bonding *bond, int inc)
694 if (USES_PRIMARY(bond->params.mode)) {
696 if (bond->curr_active_slave) {
697 err = dev_set_allmulti(bond->curr_active_slave->dev,
703 bond_for_each_slave(bond, slave, i) {
716 static void bond_mc_add(struct bonding *bond, void *addr)
718 if (USES_PRIMARY(bond->params.mode)) {
720 if (bond->curr_active_slave)
721 dev_mc_add(bond->curr_active_slave->dev, addr);
726 bond_for_each_slave(bond, slave, i)
735 static void bond_mc_del(struct bonding *bond, void *addr)
737 if (USES_PRIMARY(bond->params.mode)) {
739 if (bond->curr_active_slave)
740 dev_mc_del(bond->curr_active_slave->dev, addr);
744 bond_for_each_slave(bond, slave, i) {
767 static void bond_resend_igmp_join_requests(struct bonding *bond)
772 read_lock(&bond->lock);
774 bond_dev = bond->dev;
776 /* rejoin all groups on bond device */
780 * if bond is enslaved to a bridge,
790 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
799 if (--bond->igmp_retrans > 0)
800 queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
802 read_unlock(&bond->lock);
807 struct bonding *bond = container_of(work, struct bonding,
809 bond_resend_igmp_join_requests(bond);
818 struct bonding *bond = netdev_priv(bond_dev);
824 if (bond->params.mode == BOND_MODE_8023AD) {
839 static void bond_mc_swap(struct bonding *bond, struct slave *new_active,
844 if (!USES_PRIMARY(bond->params.mode))
851 if (bond->dev->flags & IFF_PROMISC)
854 if (bond->dev->flags & IFF_ALLMULTI)
857 netdev_for_each_mc_addr(ha, bond->dev)
863 if (bond->dev->flags & IFF_PROMISC)
866 if (bond->dev->flags & IFF_ALLMULTI)
869 netdev_for_each_mc_addr(ha, bond->dev)
879 * Called with RTNL, bond->lock for read, curr_slave_lock for write_bh.
881 static void bond_do_fail_over_mac(struct bonding *bond,
884 __releases(&bond->curr_slave_lock)
885 __releases(&bond->lock)
886 __acquires(&bond->lock)
887 __acquires(&bond->curr_slave_lock)
893 switch (bond->params.fail_over_mac) {
896 memcpy(bond->dev->dev_addr, new_active->dev->dev_addr,
898 write_unlock_bh(&bond->curr_slave_lock);
899 read_unlock(&bond->lock);
900 call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
901 read_lock(&bond->lock);
902 write_lock_bh(&bond->curr_slave_lock);
909 * if just new_active, set new_active to bond's MAC
914 write_unlock_bh(&bond->curr_slave_lock);
915 read_unlock(&bond->lock);
923 memcpy(saddr.sa_data, bond->dev->dev_addr, ETH_ALEN);
924 saddr.sa_family = bond->dev->type;
930 bond->dev->name, -rv, new_active->dev->name);
943 bond->dev->name, -rv, new_active->dev->name);
945 read_lock(&bond->lock);
946 write_lock_bh(&bond->curr_slave_lock);
950 bond->dev->name, bond->params.fail_over_mac);
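
The __releases/__acquires annotations and the unlock/notify/relock sequence above show the shape of bond_do_fail_over_mac() in the BOND_FOM_ACTIVE case: copy the new active slave's MAC into the bond device while curr_slave_lock is write-held, then drop both locks around call_netdevice_notifiers() and re-take them afterwards. A hedged pthread sketch of that drop-locks-around-a-blocking-callback pattern (the lock types and the notifier stub are stand-ins, not the kernel primitives):

#include <pthread.h>
#include <string.h>

#define ETH_ALEN 6

struct bonding {
    pthread_rwlock_t lock;              /* ~ bond->lock, read-held on entry */
    pthread_mutex_t curr_slave_lock;    /* ~ bond->curr_slave_lock, held on entry */
    unsigned char dev_addr[ETH_ALEN];
};

/* Stand-in for call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev). */
static void notify_addr_change(struct bonding *bond)
{
    (void)bond;
}

/* Caller holds both locks, as the locking comment for the real function says. */
static void fail_over_mac_active(struct bonding *bond, const unsigned char *new_mac)
{
    memcpy(bond->dev_addr, new_mac, ETH_ALEN);

    /* Drop both locks before the potentially blocking notifier ... */
    pthread_mutex_unlock(&bond->curr_slave_lock);
    pthread_rwlock_unlock(&bond->lock);

    notify_addr_change(bond);

    /* ... and re-take them in the original order before returning. */
    pthread_rwlock_rdlock(&bond->lock);
    pthread_mutex_lock(&bond->curr_slave_lock);
}
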
956 static bool bond_should_change_active(struct bonding *bond)
958 struct slave *prim = bond->primary_slave;
959 struct slave *curr = bond->curr_active_slave;
963 if (bond->force_primary) {
964 bond->force_primary = false;
967 if (bond->params.primary_reselect == BOND_PRI_RESELECT_BETTER &&
971 if (bond->params.primary_reselect == BOND_PRI_RESELECT_FAILURE)
978 * @bond: our bonding struct
982 static struct slave *bond_find_best_slave(struct bonding *bond)
986 int mintime = bond->params.updelay;
989 new_active = bond->curr_active_slave;
992 if (bond->slave_cnt > 0) /* found one slave */
993 new_active = bond->first_slave;
998 if ((bond->primary_slave) &&
999 bond->primary_slave->link == BOND_LINK_UP &&
1000 bond_should_change_active(bond)) {
1001 new_active = bond->primary_slave;
1007 bond_for_each_slave_from(bond, new_active, i, old_active) {
1023 static bool bond_should_notify_peers(struct bonding *bond)
1025 struct slave *slave = bond->curr_active_slave;
1027 pr_debug("bond_should_notify_peers: bond %s slave %s\n",
1028 bond->dev->name, slave ? slave->dev->name : "NULL");
1030 if (!slave || !bond->send_peer_notif ||
1034 bond->send_peer_notif--;
1040 * @bond: our bonding struct
1043 * Set the new slave to the bond's settings and unset them on the old
1051 * If new_active is not NULL, caller must hold bond->lock for read and
1054 void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
1056 struct slave *old_active = bond->curr_active_slave;
1065 if (USES_PRIMARY(bond->params.mode)) {
1067 bond->dev->name, new_active->dev->name,
1068 (bond->params.updelay - new_active->delay) * bond->params.miimon);
1074 if (bond->params.mode == BOND_MODE_8023AD)
1077 if (bond_is_lb(bond))
1078 bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
1080 if (USES_PRIMARY(bond->params.mode)) {
1082 bond->dev->name, new_active->dev->name);
1087 if (USES_PRIMARY(bond->params.mode))
1088 bond_mc_swap(bond, new_active, old_active);
1090 if (bond_is_lb(bond)) {
1091 bond_alb_handle_active_change(bond, new_active);
1097 bond->curr_active_slave = new_active;
1100 if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
1109 if (bond->params.fail_over_mac)
1110 bond_do_fail_over_mac(bond, new_active,
1113 if (netif_running(bond->dev)) {
1114 bond->send_peer_notif =
1115 bond->params.num_peer_notif;
1117 bond_should_notify_peers(bond);
1120 write_unlock_bh(&bond->curr_slave_lock);
1121 read_unlock(&bond->lock);
1123 netdev_bonding_change(bond->dev, NETDEV_BONDING_FAILOVER);
1125 netdev_bonding_change(bond->dev,
1128 read_lock(&bond->lock);
1129 write_lock_bh(&bond->curr_slave_lock);
1135 * resend only if bond is brought up with the affected
1137 if (netif_running(bond->dev) && (bond->params.resend_igmp > 0) &&
1138 ((USES_PRIMARY(bond->params.mode) && new_active) ||
1139 bond->params.mode == BOND_MODE_ROUNDROBIN)) {
1140 bond->igmp_retrans = bond->params.resend_igmp;
1141 queue_delayed_work(bond->wq, &bond->mcast_work, 0);
1147 * @bond: our bonding struct
1154 * Caller must hold bond->lock for read and curr_slave_lock for write_bh.
1156 void bond_select_active_slave(struct bonding *bond)
1161 best_slave = bond_find_best_slave(bond);
1162 if (best_slave != bond->curr_active_slave) {
1163 bond_change_active_slave(bond, best_slave);
1164 rv = bond_set_carrier(bond);
1168 if (netif_carrier_ok(bond->dev)) {
1170 bond->dev->name);
1173 bond->dev->name);
1183 * bond->lock held for writing by caller.
1185 static void bond_attach_slave(struct bonding *bond, struct slave *new_slave)
1187 if (bond->first_slave == NULL) { /* attaching the first slave */
1190 bond->first_slave = new_slave;
1192 new_slave->next = bond->first_slave;
1193 new_slave->prev = bond->first_slave->prev;
1198 bond->slave_cnt++;
1204 * belongs to <bond>.
1206 * If any slave pointer in bond was pointing to <slave>,
1209 * bond->lock held for writing by caller.
1211 static void bond_detach_slave(struct bonding *bond, struct slave *slave)
1219 if (bond->first_slave == slave) { /* slave is the first slave */
1220 if (bond->slave_cnt > 1) { /* there are more slaves */
1221 bond->first_slave = slave->next;
1223 bond->first_slave = NULL; /* slave was the last one */
1229 bond->slave_cnt--;
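
bond_attach_slave()/bond_detach_slave() keep the slaves on a hand-rolled circular doubly linked list anchored at bond->first_slave, adjusting slave_cnt, with bond->lock held for writing by the caller. A user-space sketch of the same attach/detach bookkeeping, with the structures trimmed to just the list fields (illustrative, not the driver code):

#include <stddef.h>

struct slave {
    struct slave *next;
    struct slave *prev;
};

struct bonding {
    struct slave *first_slave;
    int slave_cnt;
};

/* Caller is assumed to hold the bond's writer lock, as in the driver. */
static void bond_attach_slave(struct bonding *bond, struct slave *new_slave)
{
    if (bond->first_slave == NULL) {    /* attaching the first slave */
        new_slave->next = new_slave;
        new_slave->prev = new_slave;
        bond->first_slave = new_slave;
    } else {                            /* insert before first_slave, i.e. at the tail */
        new_slave->next = bond->first_slave;
        new_slave->prev = bond->first_slave->prev;
        new_slave->prev->next = new_slave;
        bond->first_slave->prev = new_slave;
    }
    bond->slave_cnt++;
}

static void bond_detach_slave(struct bonding *bond, struct slave *slave)
{
    slave->prev->next = slave->next;
    slave->next->prev = slave->prev;
    if (bond->first_slave == slave)
        bond->first_slave = (bond->slave_cnt > 1) ? slave->next : NULL;
    slave->next = slave->prev = NULL;
    bond->slave_cnt--;
}
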
1279 static void __bond_netpoll_cleanup(struct bonding *bond)
1284 bond_for_each_slave(bond, slave, i)
1290 struct bonding *bond = netdev_priv(bond_dev);
1292 read_lock(&bond->lock);
1293 __bond_netpoll_cleanup(bond);
1294 read_unlock(&bond->lock);
1299 struct bonding *bond = netdev_priv(dev);
1303 read_lock(&bond->lock);
1304 bond_for_each_slave(bond, slave, i) {
1307 __bond_netpoll_cleanup(bond);
1311 read_unlock(&bond->lock);
1315 static struct netpoll_info *bond_netpoll_info(struct bonding *bond)
1317 return bond->dev->npinfo;
1349 struct bonding *bond = netdev_priv(dev);
1353 read_lock(&bond->lock);
1355 if (!bond->first_slave) {
1356 /* Disable adding VLANs to empty bond. But why? --mq */
1365 bond_for_each_slave(bond, slave, i) {
1372 read_unlock(&bond->lock);
1380 static void bond_compute_features(struct bonding *bond)
1383 struct net_device *bond_dev = bond->dev;
1388 read_lock(&bond->lock);
1390 if (!bond->first_slave)
1393 bond_for_each_slave(bond, slave, i) {
1405 read_unlock(&bond->lock);
1413 struct bonding *bond = netdev_priv(bond_dev);
1423 bond->setup_by_slave = 1;
1431 struct bonding *bond)
1434 if (bond->params.mode == BOND_MODE_ALB &&
1447 struct bonding *bond;
1459 bond = slave->bond;
1461 if (bond->params.arp_interval)
1464 recv_probe = ACCESS_ONCE(bond->recv_probe);
1469 ret = recv_probe(nskb, bond, slave);
1478 if (bond_should_deliver_exact_match(skb, slave, bond)) {
1482 skb->dev = bond->dev;
1484 if (bond->params.mode == BOND_MODE_ALB &&
1485 bond->dev->priv_flags & IFF_BRIDGE_PORT &&
1493 memcpy(eth_hdr(skb)->h_dest, bond->dev->dev_addr, ETH_ALEN);
1499 /* enslave device <slave> to bond device <master> */
1502 struct bonding *bond = netdev_priv(bond_dev);
1510 if (!bond->params.use_carrier && slave_dev->ethtool_ops == NULL &&
1526 if (bond_vlan_used(bond)) {
1527 pr_err("%s: Error: cannot enslave VLAN challenged slave %s on VLAN enabled bond %s\n",
1531 pr_warning("%s: Warning: enslaved VLAN challenged slave %s. Adding VLANs will be blocked as long as %s is part of bond %s\n",
1556 * bond ether type mutual exclusion - don't allow slaves of dissimilar
1557 * ether type (e.g. ARPHRD_ETHER and ARPHRD_INFINIBAND) to share the same bond
1559 if (bond->slave_cnt == 0) {
1598 if (bond->slave_cnt == 0) {
1601 bond->params.fail_over_mac = BOND_FOM_ACTIVE;
1602 } else if (bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
1614 if (is_zero_ether_addr(bond->dev->dev_addr))
1615 memcpy(bond->dev->dev_addr, slave_dev->dev_addr,
1631 /* Save slave's original mtu and then set it to match the bond */
1633 res = dev_set_mtu(slave_dev, bond->dev->mtu);
1646 if (!bond->params.fail_over_mac) {
1673 new_slave->bond = bond;
1677 if (bond_is_lb(bond)) {
1681 res = bond_alb_init_slave(bond, new_slave);
1691 if (!USES_PRIMARY(bond->params.mode)) {
1713 if (bond->params.mode == BOND_MODE_8023AD) {
1720 bond_add_vlans_on_slave(bond, slave_dev);
1722 write_lock_bh(&bond->lock);
1724 bond_attach_slave(bond, new_slave);
1729 write_unlock_bh(&bond->lock);
1731 bond_compute_features(bond);
1733 read_lock(&bond->lock);
1737 if (bond->params.miimon && !bond->params.use_carrier) {
1738 link_reporting = bond_check_dev_link(bond, slave_dev, 1);
1740 if ((link_reporting == -1) && !bond->params.arp_interval) {
1760 if (!bond->params.miimon ||
1761 (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS)) {
1762 if (bond->params.updelay) {
1765 new_slave->delay = bond->params.updelay;
1778 if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) {
1780 if (strcmp(bond->params.primary, new_slave->dev->name) == 0) {
1781 bond->primary_slave = new_slave;
1782 bond->force_primary = true;
1786 write_lock_bh(&bond->curr_slave_lock);
1788 switch (bond->params.mode) {
1791 bond_select_active_slave(bond);
1800 if (bond->slave_cnt == 1) {
1803 * can be called only after the mac address of the bond is set
1805 bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL);
1817 bond_select_active_slave(bond);
1826 * anyway (it holds no special properties of the bond device),
1829 if (!bond->curr_active_slave)
1830 bond->curr_active_slave = new_slave;
1835 write_unlock_bh(&bond->curr_slave_lock);
1837 bond_set_carrier(bond);
1840 slave_dev->npinfo = bond_netpoll_info(bond);
1843 read_unlock(&bond->lock);
1853 read_unlock(&bond->lock);
1879 write_lock_bh(&bond->lock);
1880 bond_detach_slave(bond, new_slave);
1881 write_unlock_bh(&bond->lock);
1890 if (!bond->params.fail_over_mac) {
1892 * MAC if this slave's MAC is in use by the bond, or at
1907 bond_compute_features(bond);
1913 * Try to release the slave device <slave> from the bond device <master>
1925 struct bonding *bond = netdev_priv(bond_dev);
1940 write_lock_bh(&bond->lock);
1942 slave = bond_get_slave_by_dev(bond, slave_dev);
1944 /* not a slave of this bond */
1947 write_unlock_bh(&bond->lock);
1956 write_unlock_bh(&bond->lock);
1958 write_lock_bh(&bond->lock);
1960 if (!bond->params.fail_over_mac) {
1962 bond->slave_cnt > 1)
1970 if (bond->params.mode == BOND_MODE_8023AD) {
1982 oldcurrent = bond->curr_active_slave;
1984 bond->current_arp_slave = NULL;
1986 /* release the slave from its bond */
1987 bond_detach_slave(bond, slave);
1989 if (bond->primary_slave == slave)
1990 bond->primary_slave = NULL;
1993 bond_change_active_slave(bond, NULL);
1995 if (bond_is_lb(bond)) {
2001 write_unlock_bh(&bond->lock);
2002 bond_alb_deinit_slave(bond, slave);
2003 write_lock_bh(&bond->lock);
2012 write_unlock_bh(&bond->lock);
2013 read_lock(&bond->lock);
2014 write_lock_bh(&bond->curr_slave_lock);
2016 bond_select_active_slave(bond);
2018 write_unlock_bh(&bond->curr_slave_lock);
2019 read_unlock(&bond->lock);
2020 write_lock_bh(&bond->lock);
2023 if (bond->slave_cnt == 0) {
2024 bond_set_carrier(bond);
2032 if (bond_vlan_used(bond)) {
2035 pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n",
2040 write_unlock_bh(&bond->lock);
2043 if (bond->slave_cnt == 0)
2044 call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
2046 bond_compute_features(bond);
2049 pr_info("%s: last VLAN challenged slave %s left bond %s. VLAN blocking is removed\n",
2055 bond_del_vlans_from_slave(bond, slave_dev);
2061 if (!USES_PRIMARY(bond->params.mode)) {
2083 if (bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
2100 * First release a slave and then destroy the bond if no more slaves are left.
2106 struct bonding *bond = netdev_priv(bond_dev);
2110 if ((ret == 0) && (bond->slave_cnt == 0)) {
2112 pr_info("%s: destroying bond %s.\n",
2124 struct bonding *bond = netdev_priv(bond_dev);
2129 write_lock_bh(&bond->lock);
2133 if (bond->slave_cnt == 0)
2136 bond->current_arp_slave = NULL;
2137 bond->primary_slave = NULL;
2138 bond_change_active_slave(bond, NULL);
2140 while ((slave = bond->first_slave) != NULL) {
2144 if (bond->params.mode == BOND_MODE_8023AD)
2148 bond_detach_slave(bond, slave);
2154 write_unlock_bh(&bond->lock);
2162 if (bond_is_lb(bond)) {
2166 bond_alb_deinit_slave(bond, slave);
2170 bond_del_vlans_from_slave(bond, slave_dev);
2176 if (!USES_PRIMARY(bond->params.mode)) {
2198 if (!bond->params.fail_over_mac) {
2208 write_lock_bh(&bond->lock);
2217 if (bond_vlan_used(bond)) {
2220 pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n",
2227 write_unlock_bh(&bond->lock);
2229 bond_compute_features(bond);
2247 struct bonding *bond = netdev_priv(bond_dev);
2252 if (!USES_PRIMARY(bond->params.mode))
2259 read_lock(&bond->lock);
2261 read_lock(&bond->curr_slave_lock);
2262 old_active = bond->curr_active_slave;
2263 read_unlock(&bond->curr_slave_lock);
2265 new_active = bond_get_slave_by_dev(bond, slave_dev);
2271 read_unlock(&bond->lock);
2280 write_lock_bh(&bond->curr_slave_lock);
2281 bond_change_active_slave(bond, new_active);
2282 write_unlock_bh(&bond->curr_slave_lock);
2287 read_unlock(&bond->lock);
2294 struct bonding *bond = netdev_priv(bond_dev);
2296 info->bond_mode = bond->params.mode;
2297 info->miimon = bond->params.miimon;
2299 read_lock(&bond->lock);
2300 info->num_slaves = bond->slave_cnt;
2301 read_unlock(&bond->lock);
2308 struct bonding *bond = netdev_priv(bond_dev);
2312 read_lock(&bond->lock);
2314 bond_for_each_slave(bond, slave, i) {
2325 read_unlock(&bond->lock);
2333 static int bond_miimon_inspect(struct bonding *bond)
2339 ignore_updelay = !bond->curr_active_slave ? true : false;
2341 bond_for_each_slave(bond, slave, i) {
2344 link_state = bond_check_dev_link(bond, slave->dev, 0);
2352 slave->delay = bond->params.downdelay;
2355 bond->dev->name,
2356 (bond->params.mode ==
2361 bond->params.downdelay * bond->params.miimon);
2372 bond->dev->name,
2373 (bond->params.downdelay - slave->delay) *
2374 bond->params.miimon,
2393 slave->delay = bond->params.updelay;
2397 bond->dev->name, slave->dev->name,
2399 bond->params.updelay *
2400 bond->params.miimon);
2407 bond->dev->name,
2408 (bond->params.updelay - slave->delay) *
2409 bond->params.miimon,
2433 static void bond_miimon_commit(struct bonding *bond)
2438 bond_for_each_slave(bond, slave, i) {
2447 if (bond->params.mode == BOND_MODE_8023AD) {
2450 } else if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
2453 } else if (slave != bond->primary_slave) {
2461 bond->dev->name, slave->dev->name,
2465 if (bond->params.mode == BOND_MODE_8023AD)
2468 if (bond_is_lb(bond))
2469 bond_alb_handle_link_change(bond, slave,
2472 if (!bond->curr_active_slave ||
2473 (slave == bond->primary_slave))
2484 if (bond->params.mode == BOND_MODE_ACTIVEBACKUP ||
2485 bond->params.mode == BOND_MODE_8023AD)
2489 bond->dev->name, slave->dev->name);
2491 if (bond->params.mode == BOND_MODE_8023AD)
2495 if (bond_is_lb(bond))
2496 bond_alb_handle_link_change(bond, slave,
2499 if (slave == bond->curr_active_slave)
2506 bond->dev->name, slave->new_link,
2516 write_lock_bh(&bond->curr_slave_lock);
2517 bond_select_active_slave(bond);
2518 write_unlock_bh(&bond->curr_slave_lock);
2522 bond_set_carrier(bond);
2535 struct bonding *bond = container_of(work, struct bonding,
2540 read_lock(&bond->lock);
2542 delay = msecs_to_jiffies(bond->params.miimon);
2544 if (bond->slave_cnt == 0)
2547 should_notify_peers = bond_should_notify_peers(bond);
2549 if (bond_miimon_inspect(bond)) {
2550 read_unlock(&bond->lock);
2554 read_lock(&bond->lock);
2560 read_lock(&bond->lock);
2562 bond_miimon_commit(bond);
2564 read_unlock(&bond->lock);
2566 read_lock(&bond->lock);
2570 if (bond->params.miimon)
2571 queue_delayed_work(bond->wq, &bond->mii_work, delay);
2573 read_unlock(&bond->lock);
2577 read_lock(&bond->lock);
2578 bond->send_peer_notif++;
2579 read_unlock(&bond->lock);
2582 netdev_bonding_change(bond->dev, NETDEV_NOTIFY_PEERS);
2587 static int bond_has_this_ip(struct bonding *bond, __be32 ip)
2592 if (ip == bond_confirm_addr(bond->dev, 0, ip))
2595 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
2597 vlan_dev = __vlan_find_dev_deep(bond->dev, vlan->vlan_id);
2636 static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2639 __be32 *targets = bond->params.arp_targets;
2649 if (!bond_vlan_used(bond)) {
2651 addr = bond_confirm_addr(bond->dev, targets[i], 0);
2662 rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
2667 bond->dev->name, &targets[i]);
2675 if (rt->dst.dev == bond->dev) {
2677 pr_debug("basa: rtdev == bond->dev: arp_send\n");
2678 addr = bond_confirm_addr(bond->dev, targets[i], 0);
2685 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
2687 vlan_dev = __vlan_find_dev_deep(bond->dev,
2708 bond->dev->name, &targets[i],
2715 static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 sip, __be32 tip)
2718 __be32 *targets = bond->params.arp_targets;
2723 bond_has_this_ip(bond, tip));
2725 if (bond_has_this_ip(bond, tip))
2732 static int bond_arp_rcv(struct sk_buff *skb, struct bonding *bond,
2742 read_lock(&bond->lock);
2744 pr_debug("bond_arp_rcv: bond %s skb->dev %s\n",
2745 bond->dev->name, skb->dev->name);
2747 if (!pskb_may_pull(skb, arp_hdr_len(bond->dev)))
2751 if (arp->ar_hln != bond->dev->addr_len ||
2760 arp_ptr += bond->dev->addr_len;
2762 arp_ptr += 4 + bond->dev->addr_len;
2766 bond->dev->name, slave->dev->name, bond_slave_state(slave),
2767 bond->params.arp_validate, slave_do_arp_validate(bond, slave),
2779 bond_validate_arp(bond, slave, sip, tip);
2781 bond_validate_arp(bond, slave, tip, sip);
2784 read_unlock(&bond->lock);
2797 struct bonding *bond = container_of(work, struct bonding,
2804 read_lock(&bond->lock);
2806 delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
2808 if (bond->slave_cnt == 0)
2811 read_lock(&bond->curr_slave_lock);
2812 oldcurrent = bond->curr_active_slave;
2813 read_unlock(&bond->curr_slave_lock);
2823 bond_for_each_slave(bond, slave, i) {
2844 bond->dev->name,
2849 bond->dev->name,
2874 bond->dev->name,
2890 bond_arp_send_all(bond, slave);
2895 write_lock_bh(&bond->curr_slave_lock);
2897 bond_select_active_slave(bond);
2899 write_unlock_bh(&bond->curr_slave_lock);
2904 if (bond->params.arp_interval)
2905 queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
2907 read_unlock(&bond->lock);
2916 * Called with bond->lock held for read.
2918 static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
2924 bond_for_each_slave(bond, slave, i) {
2929 slave_last_rx(bond, slave) - delta_in_ticks,
2930 slave_last_rx(bond, slave) + delta_in_ticks)) {
2953 * - the bond has an IP address
2963 !bond->current_arp_slave &&
2965 slave_last_rx(bond, slave) - delta_in_ticks,
2966 slave_last_rx(bond, slave) + 3 * delta_in_ticks)) {
2976 * the bond has an IP address)
2984 slave_last_rx(bond, slave) - delta_in_ticks,
2985 slave_last_rx(bond, slave) + 2 * delta_in_ticks))) {
2999 * Called with RTNL and bond->lock for read.
3001 static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
3007 bond_for_each_slave(bond, slave, i) {
3014 if ((!bond->curr_active_slave &&
3018 bond->curr_active_slave != slave) {
3020 if (bond->current_arp_slave) {
3022 bond->current_arp_slave);
3023 bond->current_arp_slave = NULL;
3027 bond->dev->name, slave->dev->name);
3029 if (!bond->curr_active_slave ||
3030 (slave == bond->primary_slave))
3045 bond->dev->name, slave->dev->name);
3047 if (slave == bond->curr_active_slave) {
3048 bond->current_arp_slave = NULL;
3056 bond->dev->name, slave->new_link,
3064 write_lock_bh(&bond->curr_slave_lock);
3065 bond_select_active_slave(bond);
3066 write_unlock_bh(&bond->curr_slave_lock);
3070 bond_set_carrier(bond);
3076 * Called with bond->lock held for read.
3078 static void bond_ab_arp_probe(struct bonding *bond)
3083 read_lock(&bond->curr_slave_lock);
3085 if (bond->current_arp_slave && bond->curr_active_slave)
3087 bond->current_arp_slave->dev->name,
3088 bond->curr_active_slave->dev->name);
3090 if (bond->curr_active_slave) {
3091 bond_arp_send_all(bond, bond->curr_active_slave);
3092 read_unlock(&bond->curr_slave_lock);
3096 read_unlock(&bond->curr_slave_lock);
3103 if (!bond->current_arp_slave) {
3104 bond->current_arp_slave = bond->first_slave;
3105 if (!bond->current_arp_slave)
3109 bond_set_slave_inactive_flags(bond->current_arp_slave);
3112 bond_for_each_slave_from(bond, slave, i, bond->current_arp_slave->next) {
3116 bond_arp_send_all(bond, slave);
3118 bond->current_arp_slave = slave;
3137 bond->dev->name, slave->dev->name);
3144 struct bonding *bond = container_of(work, struct bonding,
3149 read_lock(&bond->lock);
3151 delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
3153 if (bond->slave_cnt == 0)
3156 should_notify_peers = bond_should_notify_peers(bond);
3158 if (bond_ab_arp_inspect(bond, delta_in_ticks)) {
3159 read_unlock(&bond->lock);
3163 read_lock(&bond->lock);
3169 read_lock(&bond->lock);
3171 bond_ab_arp_commit(bond, delta_in_ticks);
3173 read_unlock(&bond->lock);
3175 read_lock(&bond->lock);
3178 bond_ab_arp_probe(bond);
3181 if (bond->params.arp_interval)
3182 queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
3184 read_unlock(&bond->lock);
3188 read_lock(&bond->lock);
3189 bond->send_peer_notif++;
3190 read_unlock(&bond->lock);
3193 netdev_bonding_change(bond->dev, NETDEV_NOTIFY_PEERS);
3203 static int bond_event_changename(struct bonding *bond)
3205 bond_remove_proc_entry(bond);
3206 bond_create_proc_entry(bond);
3208 bond_debug_reregister(bond);
3232 struct bonding *bond = netdev_priv(bond_dev);
3238 if (bond->setup_by_slave)
3246 slave = bond_get_slave_by_dev(bond, slave_dev);
3253 if (bond->params.mode == BOND_MODE_8023AD) {
3271 * an active-backup bond, slaves need
3287 bond_compute_features(bond);
3392 struct bonding *bond = netdev_priv(bond_dev);
3397 read_lock(&bond->lock);
3398 if (bond->slave_cnt > 0) {
3399 read_lock(&bond->curr_slave_lock);
3400 bond_for_each_slave(bond, slave, i) {
3401 if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP)
3402 && (slave != bond->curr_active_slave)) {
3408 read_unlock(&bond->curr_slave_lock);
3410 read_unlock(&bond->lock);
3412 INIT_DELAYED_WORK(&bond->mcast_work, bond_resend_igmp_join_requests_delayed);
3414 if (bond_is_lb(bond)) {
3418 if (bond_alb_initialize(bond, (bond->params.mode == BOND_MODE_ALB))) {
3423 INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
3424 queue_delayed_work(bond->wq, &bond->alb_work, 0);
3427 if (bond->params.miimon) { /* link check interval, in milliseconds. */
3428 INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
3429 queue_delayed_work(bond->wq, &bond->mii_work, 0);
3432 if (bond->params.arp_interval) { /* arp interval, in milliseconds. */
3433 if (bond->params.mode == BOND_MODE_ACTIVEBACKUP)
3434 INIT_DELAYED_WORK(&bond->arp_work,
3437 INIT_DELAYED_WORK(&bond->arp_work,
3440 queue_delayed_work(bond->wq, &bond->arp_work, 0);
3441 if (bond->params.arp_validate)
3442 bond->recv_probe = bond_arp_rcv;
3445 if (bond->params.mode == BOND_MODE_8023AD) {
3446 INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
3447 queue_delayed_work(bond->wq, &bond->ad_work, 0);
3449 bond->recv_probe = bond_3ad_lacpdu_recv;
3450 bond_3ad_initiate_agg_selection(bond, 1);
3458 struct bonding *bond = netdev_priv(bond_dev);
3460 write_lock_bh(&bond->lock);
3462 bond->send_peer_notif = 0;
3464 write_unlock_bh(&bond->lock);
3466 if (bond->params.miimon) { /* link check interval, in milliseconds. */
3467 cancel_delayed_work_sync(&bond->mii_work);
3470 if (bond->params.arp_interval) { /* arp interval, in milliseconds. */
3471 cancel_delayed_work_sync(&bond->arp_work);
3474 switch (bond->params.mode) {
3476 cancel_delayed_work_sync(&bond->ad_work);
3480 cancel_delayed_work_sync(&bond->alb_work);
3486 if (delayed_work_pending(&bond->mcast_work))
3487 cancel_delayed_work_sync(&bond->mcast_work);
3489 if (bond_is_lb(bond)) {
3493 bond_alb_deinitialize(bond);
3495 bond->recv_probe = NULL;
3503 struct bonding *bond = netdev_priv(bond_dev);
3510 read_lock_bh(&bond->lock);
3512 bond_for_each_slave(bond, slave, i) {
3543 read_unlock_bh(&bond->lock);
3579 struct bonding *bond = netdev_priv(bond_dev);
3581 read_lock(&bond->lock);
3582 read_lock(&bond->curr_slave_lock);
3583 if (netif_carrier_ok(bond->dev))
3586 read_unlock(&bond->curr_slave_lock);
3587 read_unlock(&bond->lock);
3675 struct bonding *bond = netdev_priv(bond_dev);
3678 bond_set_promiscuity(bond,
3682 bond_set_allmulti(bond,
3688 struct bonding *bond = netdev_priv(bond_dev);
3692 read_lock(&bond->lock);
3696 found = bond_addr_in_mc_list(ha->addr, &bond->mc_list,
3699 bond_mc_add(bond, ha->addr);
3703 netdev_hw_addr_list_for_each(ha, &bond->mc_list) {
3707 bond_mc_del(bond, ha->addr);
3711 __hw_addr_flush(&bond->mc_list);
3712 __hw_addr_add_multiple(&bond->mc_list, &bond_dev->mc,
3715 read_unlock(&bond->lock);
3720 struct bonding *bond = netdev_priv(n->dev);
3721 struct slave *slave = bond->first_slave;
3772 struct bonding *bond = netdev_priv(bond_dev);
3777 pr_debug("bond=%p, name=%s, new_mtu=%d\n", bond,
3780 /* Can't hold bond->lock with bh disabled here since
3782 * hold bond->lock without bh disabled because we'll
3795 bond_for_each_slave(bond, slave, i) {
3824 bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at) {
3846 struct bonding *bond = netdev_priv(bond_dev);
3852 if (bond->params.mode == BOND_MODE_ALB)
3856 pr_debug("bond=%p, name=%s\n",
3857 bond, bond_dev ? bond_dev->name : "None");
3863 if (bond->params.fail_over_mac == BOND_FOM_ACTIVE)
3869 /* Can't hold bond->lock with bh disabled here since
3871 * hold bond->lock without bh disabled because we'll
3884 bond_for_each_slave(bond, slave, i) {
3917 bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at) {
3932 struct bonding *bond = netdev_priv(bond_dev);
3938 * Start with the curr_active_slave that joined the bond as the
3947 read_lock(&bond->curr_slave_lock);
3948 slave = bond->curr_active_slave;
3949 read_unlock(&bond->curr_slave_lock);
3959 slave_no = bond->rr_tx_counter++ % bond->slave_cnt;
3961 bond_for_each_slave(bond, slave, i) {
3969 bond_for_each_slave_from(bond, slave, i, start_at) {
3973 res = bond_dev_queue_xmit(bond, skb, slave->dev);
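
The round-robin transmit matches above pick a starting offset with bond->rr_tx_counter++ % bond->slave_cnt and then walk the slaves from there until one can take the frame. A hedged, array-based sketch of that selection step; the real code walks its circular slave list and also checks IFF_UP, link state and active flags:

struct slave {
    int usable;                 /* stand-in for the IFF_UP + link-up + active checks */
};

struct bonding {
    unsigned int rr_tx_counter;
    int slave_cnt;
    struct slave *slaves;
};

/* Returns the index of the slave to transmit on, or -1 if none is usable. */
static int rr_pick_slave(struct bonding *bond)
{
    if (bond->slave_cnt == 0)
        return -1;

    int start = bond->rr_tx_counter++ % bond->slave_cnt;

    for (int i = 0; i < bond->slave_cnt; i++) {
        int idx = (start + i) % bond->slave_cnt;
        if (bond->slaves[idx].usable)
            return idx;
    }
    return -1;
}
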
3989 * in active-backup mode, we know that bond->curr_active_slave is always valid if
3990 * the bond has a usable interface.
3994 struct bonding *bond = netdev_priv(bond_dev);
3997 read_lock(&bond->curr_slave_lock);
3999 if (bond->curr_active_slave)
4000 res = bond_dev_queue_xmit(bond, skb,
4001 bond->curr_active_slave->dev);
4007 read_unlock(&bond->curr_slave_lock);
4019 struct bonding *bond = netdev_priv(bond_dev);
4025 slave_no = bond->xmit_hash_policy(skb, bond->slave_cnt);
4027 bond_for_each_slave(bond, slave, i) {
4035 bond_for_each_slave_from(bond, slave, i, start_at) {
4039 res = bond_dev_queue_xmit(bond, skb, slave->dev);
4057 struct bonding *bond = netdev_priv(bond_dev);
4063 read_lock(&bond->curr_slave_lock);
4064 start_at = bond->curr_active_slave;
4065 read_unlock(&bond->curr_slave_lock);
4070 bond_for_each_slave_from(bond, slave, i, start_at) {
4082 res = bond_dev_queue_xmit(bond, skb2, tx_dev);
4093 res = bond_dev_queue_xmit(bond, skb, tx_dev);
4106 static void bond_set_xmit_hash_policy(struct bonding *bond)
4108 switch (bond->params.xmit_policy) {
4110 bond->xmit_hash_policy = bond_xmit_hash_policy_l23;
4113 bond->xmit_hash_policy = bond_xmit_hash_policy_l34;
4117 bond->xmit_hash_policy = bond_xmit_hash_policy_l2;
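
bond_set_xmit_hash_policy() installs one of the l2/l23/l34 hash callbacks into bond->xmit_hash_policy, which the transmit path later calls as bond->xmit_hash_policy(skb, bond->slave_cnt). A sketch of that function-pointer dispatch; the layer-2 hash body below (XOR of the last destination and source MAC bytes, modulo the slave count) illustrates the idea and is not necessarily the driver's exact formula:

struct frame {                  /* stand-in for the sk_buff fields the hash uses */
    unsigned char dst_mac[6];
    unsigned char src_mac[6];
};

struct bonding {
    int (*xmit_hash_policy)(const struct frame *f, int count);
};

/* count is the slave count; the transmit path only hashes when it is > 0. */
static int hash_l2(const struct frame *f, int count)
{
    return (f->dst_mac[5] ^ f->src_mac[5]) % count;
}

static void set_xmit_hash_policy(struct bonding *bond)
{
    bond->xmit_hash_policy = hash_l2;   /* the kernel switches on bond->params.xmit_policy */
}
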
4125 static inline int bond_slave_override(struct bonding *bond,
4136 bond_for_each_slave(bond, check_slave, i) {
4146 res = bond_dev_queue_xmit(bond, skb, slave->dev);
4178 struct bonding *bond = netdev_priv(dev);
4180 if (TX_QUEUE_OVERRIDE(bond->params.mode)) {
4181 if (!bond_slave_override(bond, skb))
4185 switch (bond->params.mode) {
4202 dev->name, bond->params.mode);
4211 struct bonding *bond = netdev_priv(dev);
4221 read_lock(&bond->lock);
4223 if (bond->slave_cnt)
4228 read_unlock(&bond->lock);
4234 * set bond mode specific net device operations
4236 void bond_set_mode_ops(struct bonding *bond, int mode)
4238 struct net_device *bond_dev = bond->dev;
4246 bond_set_xmit_hash_policy(bond);
4251 bond_set_xmit_hash_policy(bond);
4306 struct bonding *bond = netdev_priv(bond_dev);
4307 if (bond->wq)
4308 destroy_workqueue(bond->wq);
4314 struct bonding *bond = netdev_priv(bond_dev);
4317 rwlock_init(&bond->lock);
4318 rwlock_init(&bond->curr_slave_lock);
4320 bond->params = bonding_defaults;
4323 bond->dev = bond_dev;
4324 INIT_LIST_HEAD(&bond->vlan_list);
4330 bond_set_mode_ops(bond, bond->params.mode);
4342 * empty bond. The block will be removed once non-challenged
4347 /* don't acquire bond device's netif_tx_lock when
4351 /* By default, we declare the bond to be fully
4367 static void bond_work_cancel_all(struct bonding *bond)
4369 if (bond->params.miimon && delayed_work_pending(&bond->mii_work))
4370 cancel_delayed_work_sync(&bond->mii_work);
4372 if (bond->params.arp_interval && delayed_work_pending(&bond->arp_work))
4373 cancel_delayed_work_sync(&bond->arp_work);
4375 if (bond->params.mode == BOND_MODE_ALB &&
4376 delayed_work_pending(&bond->alb_work))
4377 cancel_delayed_work_sync(&bond->alb_work);
4379 if (bond->params.mode == BOND_MODE_8023AD &&
4380 delayed_work_pending(&bond->ad_work))
4381 cancel_delayed_work_sync(&bond->ad_work);
4383 if (delayed_work_pending(&bond->mcast_work))
4384 cancel_delayed_work_sync(&bond->mcast_work);
4393 struct bonding *bond = netdev_priv(bond_dev);
4401 list_del(&bond->bond_list);
4403 bond_work_cancel_all(bond);
4405 bond_remove_proc_entry(bond);
4407 bond_debug_unregister(bond);
4409 __hw_addr_flush(&bond->mc_list);
4411 list_for_each_entry_safe(vlan, tmp, &bond->vlan_list, vlan_list) {
4787 struct bonding *bond = netdev_priv(bond_dev);
4789 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
4802 bond->wq = create_singlethread_workqueue(bond_dev->name);
4803 if (!bond->wq)
4808 bond_create_proc_entry(bond);
4809 list_add_tail(&bond->bond_list, &bn->dev_list);
4811 bond_prepare_sysfs_group(bond);
4813 bond_debug_register(bond);
4815 __hw_addr_init(&bond->mc_list);
4839 .kind = "bond",
4846 /* Create a new bond based on the specified name and bonding parameters.
4847 * If name is NULL, obtain a suitable "bond%d" name for us.
4859 name ? name : "bond%d",
4969 MODULE_ALIAS_RTNL_LINK("bond");