Make the addrs_lock per port, not per ipvlan dev.

This is a very minor problem: it is highly unlikely that
ipvlan_add_addr() will be called on two CPUs simultaneously.
Nevertheless, the per-device locks may cause:

1. A false negative from ipvlan_addr_busy(): one interface iterates
   through all port->ipvlans and their ipvlan->addrs under its own
   ipvlan spinlock while another adds an IP under a different lock.
   This is only possible for IPv6, since apparently only
   ipvlan_addr6_event() can be called without rtnl_lock.

2. A race on the port hash table, since ipvlan_ht_addr_add(port) is
   called under different ipvlan->addrs_lock locks.

(A small userspace sketch of this race is appended after the diff.)

This should not affect performance, since adding/removing an IP
address is rare and the spinlock is not taken on fast paths. It is
also convenient to have addrs_lock on ipvl_port, to dynamically
prevent IP conflicts with addresses on the main port.

CC: Paolo Abeni
Signed-off-by: Dmitry Skorodumov
---
 drivers/net/ipvlan/ipvlan.h      |  2 +-
 drivers/net/ipvlan/ipvlan_core.c |  4 ++--
 drivers/net/ipvlan/ipvlan_main.c | 20 ++++++++++----------
 3 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h
index c690e313ef6b..0ab1797c6128 100644
--- a/drivers/net/ipvlan/ipvlan.h
+++ b/drivers/net/ipvlan/ipvlan.h
@@ -71,7 +71,6 @@ struct ipvl_dev {
 	DECLARE_BITMAP(mac_filters, IPVLAN_MAC_FILTER_SIZE);
 	netdev_features_t	sfeatures;
 	u32			msg_enable;
-	spinlock_t		addrs_lock;
 };
 
 struct ipvl_addr {
@@ -94,6 +93,7 @@ struct ipvl_port {
 	struct net_device	*dev;
 	possible_net_t		pnet;
 	struct hlist_head	hlhead[IPVLAN_HASH_SIZE];
+	spinlock_t		addrs_lock; /* guards hash-table and addrs */
 	struct list_head	ipvlans;
 	struct packet_type	ipvl_ptype;
 	u16			mode;
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index f3f34581339c..8a39b1c170b3 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -625,7 +625,7 @@ static void __ipvlan_macnat_addr_learn(struct ipvl_dev *ipvlan,
 	struct ipvl_addr *ipvladdr, *oldest = NULL;
 	unsigned int naddrs = 0;
 
-	spin_lock_bh(&ipvlan->addrs_lock);
+	spin_lock_bh(&ipvlan->port->addrs_lock);
 
 	if (ipvlan_addr_busy(ipvlan->port, addr, is_v6))
 		goto out_unlock;
@@ -648,7 +648,7 @@ static void __ipvlan_macnat_addr_learn(struct ipvl_dev *ipvlan,
 	ipvlan_add_addr(ipvlan, addr, is_v6, hwaddr);
 
 out_unlock:
-	spin_unlock_bh(&ipvlan->addrs_lock);
+	spin_unlock_bh(&ipvlan->port->addrs_lock);
 	if (oldest)
 		kfree_rcu(oldest, rcu);
 }
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 288b4d103b97..c1df97a88a40 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -148,6 +148,7 @@ static int ipvlan_port_create(struct net_device *dev)
 	for (idx = 0; idx < IPVLAN_HASH_SIZE; idx++)
 		INIT_HLIST_HEAD(&port->hlhead[idx]);
 
+	spin_lock_init(&port->addrs_lock);
 	skb_queue_head_init(&port->backlog);
 	INIT_WORK(&port->wq, ipvlan_process_multicast);
 	ida_init(&port->ida);
@@ -662,7 +663,6 @@ int ipvlan_link_new(struct net_device *dev, struct rtnl_newlink_params *params,
 	if (!tb[IFLA_MTU])
 		ipvlan_adjust_mtu(ipvlan, phy_dev);
 	INIT_LIST_HEAD(&ipvlan->addrs);
-	spin_lock_init(&ipvlan->addrs_lock);
 
 	/* Flags are per port and latest update overrides. User has
 	 * to be consistent in setting it just like the mode attribute.
@@ -746,13 +746,13 @@ static void ipvlan_addrs_forget_all(struct ipvl_dev *ipvlan)
 {
 	struct ipvl_addr *addr, *next;
 
-	spin_lock_bh(&ipvlan->addrs_lock);
+	spin_lock_bh(&ipvlan->port->addrs_lock);
 	list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) {
 		ipvlan_ht_addr_del(addr);
 		list_del_rcu(&addr->anode);
 		kfree_rcu(addr, rcu);
 	}
-	spin_unlock_bh(&ipvlan->addrs_lock);
+	spin_unlock_bh(&ipvlan->port->addrs_lock);
 }
 
 void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
@@ -973,16 +973,16 @@ void ipvlan_del_addr(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6)
 {
 	struct ipvl_addr *addr;
 
-	spin_lock_bh(&ipvlan->addrs_lock);
+	spin_lock_bh(&ipvlan->port->addrs_lock);
 	addr = ipvlan_find_addr(ipvlan, iaddr, is_v6);
 	if (!addr) {
-		spin_unlock_bh(&ipvlan->addrs_lock);
+		spin_unlock_bh(&ipvlan->port->addrs_lock);
 		return;
 	}
 
 	ipvlan_ht_addr_del(addr);
 	list_del_rcu(&addr->anode);
-	spin_unlock_bh(&ipvlan->addrs_lock);
+	spin_unlock_bh(&ipvlan->port->addrs_lock);
 	kfree_rcu(addr, rcu);
 }
 
@@ -991,14 +991,14 @@ static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
 {
 	int ret = -EINVAL;
 
-	spin_lock_bh(&ipvlan->addrs_lock);
+	spin_lock_bh(&ipvlan->port->addrs_lock);
 	if (ipvlan_addr_busy(ipvlan->port, ip6_addr, true))
 		netif_err(ipvlan, ifup, ipvlan->dev,
 			  "Failed to add IPv6=%pI6c addr for %s intf\n",
 			  ip6_addr, ipvlan->dev->name);
 	else
 		ret = ipvlan_add_addr(ipvlan, ip6_addr, true, NULL);
-	spin_unlock_bh(&ipvlan->addrs_lock);
+	spin_unlock_bh(&ipvlan->port->addrs_lock);
 	return ret;
 }
 
@@ -1062,14 +1062,14 @@ static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
 {
 	int ret = -EINVAL;
 
-	spin_lock_bh(&ipvlan->addrs_lock);
+	spin_lock_bh(&ipvlan->port->addrs_lock);
 	if (ipvlan_addr_busy(ipvlan->port, ip4_addr, false))
 		netif_err(ipvlan, ifup, ipvlan->dev,
 			  "Failed to add IPv4=%pI4 on %s intf.\n",
 			  ip4_addr, ipvlan->dev->name);
 	else
 		ret = ipvlan_add_addr(ipvlan, ip4_addr, false, NULL);
-	spin_unlock_bh(&ipvlan->addrs_lock);
+	spin_unlock_bh(&ipvlan->port->addrs_lock);
 	return ret;
 }
-- 
2.25.1
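
To make the locking argument in the commit message concrete, here is a
minimal userspace sketch of the race, not driver code: the names
port_model, dev_model and add_addr_locked, and the pthread-based model,
are purely illustrative assumptions. Two devices sharing one port each
serialize with their own lock, so the busy check on one device cannot
exclude a concurrent insert on the other; sharing the per-port lock, as
this patch does, closes that window.

/*
 * Userspace model of the race described in the commit message.  Names
 * and structure are illustrative only (port_model/dev_model are not the
 * driver's types).  Build: gcc -O2 -pthread race_model.c -o race_model
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct port_model {
	pthread_mutex_t addrs_lock;	/* per-port lock (new scheme) */
	int addrs[16];			/* shared per-port address table */
	int naddrs;
};

struct dev_model {
	struct port_model *port;
	pthread_mutex_t addrs_lock;	/* per-device lock (old scheme) */
};

/* Busy check + insert; correct only if @lock is shared by every device. */
static bool add_addr_locked(struct dev_model *dev, pthread_mutex_t *lock,
			    int addr)
{
	bool added = false;
	int i;

	pthread_mutex_lock(lock);
	for (i = 0; i < dev->port->naddrs; i++)
		if (dev->port->addrs[i] == addr)
			goto out;	/* address busy */
	dev->port->addrs[dev->port->naddrs++] = addr;
	added = true;
out:
	pthread_mutex_unlock(lock);
	return added;
}

static struct port_model port = { .addrs_lock = PTHREAD_MUTEX_INITIALIZER };
static struct dev_model dev_a = { .port = &port,
				  .addrs_lock = PTHREAD_MUTEX_INITIALIZER };
static struct dev_model dev_b = { .port = &port,
				  .addrs_lock = PTHREAD_MUTEX_INITIALIZER };

static void *racer(void *arg)
{
	struct dev_model *dev = arg;

	/* Old scheme: each thread takes only its own device lock, so both
	 * can pass the busy check before either inserts.  The fixed scheme
	 * would pass &dev->port->addrs_lock instead. */
	add_addr_locked(dev, &dev->addrs_lock, 42);
	return NULL;
}

int main(void)
{
	pthread_t ta, tb;
	int i, hits = 0;

	pthread_create(&ta, NULL, racer, &dev_a);
	pthread_create(&tb, NULL, racer, &dev_b);
	pthread_join(ta, NULL);
	pthread_join(tb, NULL);

	for (i = 0; i < port.naddrs; i++)
		if (port.addrs[i] == 42)
			hits++;
	/* Per-device locks allow this to be 2 (duplicate address); the
	 * per-port lock makes it always 1. */
	printf("entries for addr 42: %d\n", hits);
	return 0;
}

The actual fix is only the lock-placement change in the diff above; the
sketch just shows why the busy check in ipvlan_addr_busy() and the
hash/list insert in ipvlan_add_addr() must run under one lock shared by
every ipvlan on the port, which is what the per-port addrs_lock provides.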