/*
 * vrf.c: device driver to encapsulate a VRF space
 *
 * Copyright (c) 2015 Cumulus Networks. All rights reserved.
 * Copyright (c) 2015 Shrijeet Mukherjee <shm@cumulusnetworks.com>
 * Copyright (c) 2015 David Ahern <dsa@cumulusnetworks.com>
 *
 * Based on dummy, team and ipvlan drivers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/netfilter.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <linux/u64_stats_sync.h>
#include <linux/hashtable.h>

#include <linux/inetdevice.h>
#include <net/arp.h>
#include <net/ip.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/route.h>
#include <net/addrconf.h>
#include <net/l3mdev.h>

#define RT_FL_TOS(oldflp4) \
	((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))

#define DRV_NAME	"vrf"
#define DRV_VERSION	"1.0"

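/* per-VRF private data: preallocated dst entries used to steer output
 * traffic through the driver (IPv4 unicast, IPv4 local and IPv6), plus
 * the id of the FIB table bound to this VRF
 */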
struct net_vrf {
	struct rtable __rcu	*rth;
	struct rtable __rcu	*rth_local;
	struct rt6_info	__rcu	*rt6;
	u32			tb_id;
};

struct pcpu_dstats {
	u64			tx_pkts;
	u64			tx_bytes;
	u64			tx_drps;
	u64			rx_pkts;
	u64			rx_bytes;
	u64			rx_drps;
	struct u64_stats_sync	syncp;
};

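/* update the per-cpu Rx counters for a packet of @len bytes delivered
 * back into the stack through the VRF device
 */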
static void vrf_rx_stats(struct net_device *dev, int len)
{
	struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

	u64_stats_update_begin(&dstats->syncp);
	dstats->rx_pkts++;
	dstats->rx_bytes += len;
	u64_stats_update_end(&dstats->syncp);
}

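/* count a Tx error against the VRF device and drop the skb */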
static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
{
	vrf_dev->stats.tx_errors++;
	kfree_skb(skb);
}

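/* aggregate the per-cpu counters into rtnl_link_stats64; the
 * u64_stats fetch/retry loop yields a consistent snapshot per cpu
 */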
static struct rtnl_link_stats64 *vrf_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_dstats *dstats;
		u64 tbytes, tpkts, tdrops, rbytes, rpkts;
		unsigned int start;

		dstats = per_cpu_ptr(dev->dstats, i);
		do {
			start = u64_stats_fetch_begin_irq(&dstats->syncp);
			tbytes = dstats->tx_bytes;
			tpkts = dstats->tx_pkts;
			tdrops = dstats->tx_drps;
			rbytes = dstats->rx_bytes;
			rpkts = dstats->rx_pkts;
		} while (u64_stats_fetch_retry_irq(&dstats->syncp, start));
		stats->tx_bytes += tbytes;
		stats->tx_packets += tpkts;
		stats->tx_dropped += tdrops;
		stats->rx_bytes += rbytes;
		stats->rx_packets += rpkts;
	}
	return stats;
}

/* Local traffic destined to a local address. Reinsert the packet into
 * the Rx path, similar to loopback handling.
 */
static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
			  struct dst_entry *dst)
{
	int len = skb->len;

	skb_orphan(skb);

	skb_dst_set(skb, dst);
	skb_dst_force(skb);

	/* set pkt_type to avoid the skb hitting packet taps twice -
	 * once on Tx and again in Rx processing
	 */
	skb->pkt_type = PACKET_LOOPBACK;

	skb->protocol = eth_type_trans(skb, dev);

	if (likely(netif_rx(skb) == NET_RX_SUCCESS))
		vrf_rx_stats(dev, len);
	else
		this_cpu_inc(dev->dstats->rx_drps);

	return NETDEV_TX_OK;
}

#if IS_ENABLED(CONFIG_IPV6)
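/* send an IPv6 packet out of the VRF device: route it in the VRF's
 * context, attach the resulting dst and hand it to ip6_local_out()
 */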
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
					   struct net_device *dev)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct net *net = dev_net(skb->dev);
	struct flowi6 fl6 = {
		/* needed to match OIF rule */
		.flowi6_oif = dev->ifindex,
		.flowi6_iif = LOOPBACK_IFINDEX,
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
		.flowi6_mark = skb->mark,
		.flowi6_proto = iph->nexthdr,
		.flowi6_flags = FLOWI_FLAG_L3MDEV_SRC | FLOWI_FLAG_SKIP_NH_OIF,
	};
	int ret = NET_XMIT_DROP;
	struct dst_entry *dst;
	struct dst_entry *dst_null = &net->ipv6.ip6_null_entry->dst;

	dst = ip6_route_output(net, NULL, &fl6);
	if (dst == dst_null)
		goto err;

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	/* strip the ethernet header added for pass through the VRF device */
	__skb_pull(skb, skb_network_offset(skb));

	ret = ip6_local_out(net, skb->sk, skb);
	if (unlikely(net_xmit_eval(ret)))
		dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

	return ret;
err:
	vrf_tx_error(dev, skb);
	return NET_XMIT_DROP;
}
#else
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
					   struct net_device *dev)
{
	vrf_tx_error(dev, skb);
	return NET_XMIT_DROP;
}
#endif

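/* IPv4 analog of vrf_process_v6_outbound(): look up the route in the
 * VRF's context; locally destined traffic is short-circuited back into
 * the Rx path, everything else goes out via ip_local_out()
 */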
static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
					   struct net_device *vrf_dev)
{
	struct iphdr *ip4h = ip_hdr(skb);
	int ret = NET_XMIT_DROP;
	struct flowi4 fl4 = {
		/* needed to match OIF rule */
		.flowi4_oif = vrf_dev->ifindex,
		.flowi4_iif = LOOPBACK_IFINDEX,
		.flowi4_tos = RT_TOS(ip4h->tos),
		.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_L3MDEV_SRC |
				FLOWI_FLAG_SKIP_NH_OIF,
		.daddr = ip4h->daddr,
	};
	struct net *net = dev_net(vrf_dev);
	struct rtable *rt;

	rt = ip_route_output_flow(net, &fl4, NULL);
	if (IS_ERR(rt))
		goto err;

	if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
		ip_rt_put(rt);
		goto err;
	}

	skb_dst_drop(skb);

	/* if dst.dev is the loopback or the VRF device again, this is
	 * locally originated traffic destined to a local address. Short
	 * circuit to the Rx path using our local dst
	 */
	if (rt->dst.dev == net->loopback_dev || rt->dst.dev == vrf_dev) {
		struct net_vrf *vrf = netdev_priv(vrf_dev);
		struct rtable *rth_local;
		struct dst_entry *dst = NULL;

		ip_rt_put(rt);

		rcu_read_lock();

		rth_local = rcu_dereference(vrf->rth_local);
		if (likely(rth_local)) {
			dst = &rth_local->dst;
			dst_hold(dst);
		}

		rcu_read_unlock();

		if (unlikely(!dst))
			goto err;

		return vrf_local_xmit(skb, vrf_dev, dst);
	}

	skb_dst_set(skb, &rt->dst);

	/* strip the ethernet header added for pass through the VRF device */
	__skb_pull(skb, skb_network_offset(skb));

	if (!ip4h->saddr) {
		ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0,
					       RT_SCOPE_LINK);
	}

	ret = ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
	if (unlikely(net_xmit_eval(ret)))
		vrf_dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

out:
	return ret;
err:
	vrf_tx_error(vrf_dev, skb);
	goto out;
}

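/* dispatch an outgoing frame to the v4 or v6 handler by ethertype */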
static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return vrf_process_v4_outbound(skb, dev);
	case htons(ETH_P_IPV6):
		return vrf_process_v6_outbound(skb, dev);
	default:
		vrf_tx_error(dev, skb);
		return NET_XMIT_DROP;
	}
}

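/* ndo_start_xmit handler: transmit the frame and update the per-cpu
 * Tx counters based on the result
 */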
static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
{
	netdev_tx_t ret = is_ip_tx_frame(skb, dev);

	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

		u64_stats_update_begin(&dstats->syncp);
		dstats->tx_pkts++;
		dstats->tx_bytes += skb->len;
		u64_stats_update_end(&dstats->syncp);
	} else {
		this_cpu_inc(dev->dstats->tx_drps);
	}

	return ret;
}

#if IS_ENABLED(CONFIG_IPV6)
/* modelled after ip6_finish_output2 */
static int vrf_finish_output6(struct net *net, struct sock *sk,
			      struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;
	struct neighbour *neigh;
	struct in6_addr *nexthop;
	int ret;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	rcu_read_lock_bh();
	nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
	if (!IS_ERR(neigh)) {
		ret = dst_neigh_output(dst, neigh, skb);
		rcu_read_unlock_bh();
		return ret;
	}
	rcu_read_unlock_bh();

	IP6_INC_STATS(dev_net(dst->dev),
		      ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}

/* modelled after ip6_output */
static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb_dst(skb)->dev,
			    vrf_finish_output6,
			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}

/* holding rtnl */
static void vrf_rt6_release(struct net_vrf *vrf)
{
	struct rt6_info *rt6 = rtnl_dereference(vrf->rt6);

	rcu_assign_pointer(vrf->rt6, NULL);

	if (rt6)
		dst_release(&rt6->dst);
}

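/* allocate the dst cached in net_vrf for IPv6 Tx through this VRF,
 * making sure the VRF's FIB table exists first
 */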
static int vrf_rt6_create(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct fib6_table *rt6i_table;
	struct rt6_info *rt6;
	int rc = -ENOMEM;

	rt6i_table = fib6_new_table(net, vrf->tb_id);
	if (!rt6i_table)
		goto out;

	rt6 = ip6_dst_alloc(net, dev,
			    DST_HOST | DST_NOPOLICY | DST_NOXFRM | DST_NOCACHE);
	if (!rt6)
		goto out;

	dst_hold(&rt6->dst);

	rt6->rt6i_table = rt6i_table;
	rt6->dst.output	= vrf_output6;
	rcu_assign_pointer(vrf->rt6, rt6);

	rc = 0;
out:
	return rc;
}
#else
static void vrf_rt6_release(struct net_vrf *vrf)
{
}

static int vrf_rt6_create(struct net_device *dev)
{
	return 0;
}
#endif

/* modelled after ip_finish_output2 */
static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	struct neighbour *neigh;
	u32 nexthop;
	int ret = -EINVAL;

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (!skb2) {
			ret = -ENOMEM;
			goto err;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		consume_skb(skb);
		skb = skb2;
	}

	rcu_read_lock_bh();

	nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
	if (!IS_ERR(neigh))
		ret = dst_neigh_output(dst, neigh, skb);

	rcu_read_unlock_bh();
err:
	if (unlikely(ret < 0))
		vrf_tx_error(skb->dev, skb);
	return ret;
}

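/* modelled after ip_output */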
static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;

	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, dev,
			    vrf_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

/* holding rtnl */
static void vrf_rtable_release(struct net_vrf *vrf)
{
	struct rtable *rth = rtnl_dereference(vrf->rth);
	struct rtable *rth_local = rtnl_dereference(vrf->rth_local);

	RCU_INIT_POINTER(vrf->rth, NULL);
	RCU_INIT_POINTER(vrf->rth_local, NULL);
	synchronize_rcu();

	if (rth)
		dst_release(&rth->dst);

	if (rth_local)
		dst_release(&rth_local->dst);
}

static int vrf_rtable_create(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct rtable *rth, *rth_local;

	if (!fib_new_table(dev_net(dev), vrf->tb_id))
		return -ENOMEM;

	/* create a dst for routing packets out through a VRF device */
	rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0);
	if (!rth)
		return -ENOMEM;

	/* create a dst for local ingress routing - packets sent locally
	 * to a local address go via the VRF device, looped back like lo
	 */
	rth_local = rt_dst_alloc(dev, RTCF_LOCAL, RTN_LOCAL, 1, 1, 0);
	if (!rth_local) {
		dst_release(&rth->dst);
		return -ENOMEM;
	}

	rth->dst.output	= vrf_output;
	rth->rt_table_id = vrf->tb_id;

	rth_local->rt_table_id = vrf->tb_id;

	rcu_assign_pointer(vrf->rth, rth);
	rcu_assign_pointer(vrf->rth_local, rth_local);

	return 0;
}

/**************************** device handling ********************/

/* cycle interface to flush neighbor cache and move routes across tables */
static void cycle_netdev(struct net_device *dev)
{
	unsigned int flags = dev->flags;
	int ret;

	if (!netif_running(dev))
		return;

	ret = dev_change_flags(dev, flags & ~IFF_UP);
	if (ret >= 0)
		ret = dev_change_flags(dev, flags);

	if (ret < 0) {
		netdev_err(dev,
			   "Failed to cycle device %s; route tables might be wrong!\n",
			   dev->name);
	}
}

static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
{
	int ret;

	ret = netdev_master_upper_dev_link(port_dev, dev, NULL, NULL);
	if (ret < 0)
		return ret;

	port_dev->priv_flags |= IFF_L3MDEV_SLAVE;
	cycle_netdev(port_dev);

	return 0;
}

static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
{
	if (netif_is_l3_master(port_dev) || netif_is_l3_slave(port_dev))
		return -EINVAL;

	return do_vrf_add_slave(dev, port_dev);
}

/* inverse of do_vrf_add_slave */
static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	netdev_upper_dev_unlink(port_dev, dev);
	port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;

	cycle_netdev(port_dev);

	return 0;
}

static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	return do_vrf_del_slave(dev, port_dev);
}

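/* release the cached dst entries, unenslave all ports and free the
 * per-cpu stats
 */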
static void vrf_dev_uninit(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct net_device *port_dev;
	struct list_head *iter;

	vrf_rtable_release(vrf);
	vrf_rt6_release(vrf);

	netdev_for_each_lower_dev(dev, port_dev, iter)
		vrf_del_slave(dev, port_dev);

	free_percpu(dev->dstats);
	dev->dstats = NULL;
}

static int vrf_dev_init(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
	if (!dev->dstats)
		goto out_nomem;

	/* create the default dst which points back to us */
	if (vrf_rtable_create(dev) != 0)
		goto out_stats;

	if (vrf_rt6_create(dev) != 0)
		goto out_rth;

	dev->flags = IFF_MASTER | IFF_NOARP;

	/* MTU is irrelevant for VRF device; set to 64k similar to lo */
	dev->mtu = 64 * 1024;

	/* similarly, oper state is irrelevant; set to up to avoid confusion */
	dev->operstate = IF_OPER_UP;

	return 0;

out_rth:
	vrf_rtable_release(vrf);
out_stats:
	free_percpu(dev->dstats);
	dev->dstats = NULL;
out_nomem:
	return -ENOMEM;
}

static const struct net_device_ops vrf_netdev_ops = {
	.ndo_init		= vrf_dev_init,
	.ndo_uninit		= vrf_dev_uninit,
	.ndo_start_xmit		= vrf_xmit,
	.ndo_get_stats64	= vrf_get_stats64,
	.ndo_add_slave		= vrf_add_slave,
	.ndo_del_slave		= vrf_del_slave,
};

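/* l3mdev hook: return the FIB table id associated with this VRF */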
static u32 vrf_fib_table(const struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return vrf->tb_id;
}

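/* l3mdev hook: return a reference on the cached IPv4 dst so output
 * traffic is steered through vrf_output()
 */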
static struct rtable *vrf_get_rtable(const struct net_device *dev,
				     const struct flowi4 *fl4)
{
	struct rtable *rth = NULL;

	if (!(fl4->flowi4_flags & FLOWI_FLAG_L3MDEV_SRC)) {
		struct net_vrf *vrf = netdev_priv(dev);

		rcu_read_lock();

		rth = rcu_dereference(vrf->rth);
		if (likely(rth))
			dst_hold(&rth->dst);

		rcu_read_unlock();
	}

	return rth;
}

/* called under rcu_read_lock */
static int vrf_get_saddr(struct net_device *dev, struct flowi4 *fl4)
{
	struct fib_result res = { .tclassid = 0 };
	struct net *net = dev_net(dev);
	u32 orig_tos = fl4->flowi4_tos;
	u8 flags = fl4->flowi4_flags;
	u8 scope = fl4->flowi4_scope;
	u8 tos = RT_FL_TOS(fl4);
	int rc;

	if (unlikely(!fl4->daddr))
		return 0;

	fl4->flowi4_flags |= FLOWI_FLAG_SKIP_NH_OIF;
	fl4->flowi4_iif = LOOPBACK_IFINDEX;
	/* make sure oif is set to VRF device for lookup */
	fl4->flowi4_oif = dev->ifindex;
	fl4->flowi4_tos = tos & IPTOS_RT_MASK;
	fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
			     RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);

	rc = fib_lookup(net, fl4, &res, 0);
	if (!rc) {
		if (res.type == RTN_LOCAL)
			fl4->saddr = res.fi->fib_prefsrc ? : fl4->daddr;
		else
			fib_select_path(net, &res, fl4, -1);
	}

	fl4->flowi4_flags = flags;
	fl4->flowi4_tos = orig_tos;
	fl4->flowi4_scope = scope;

	return rc;
}

#if IS_ENABLED(CONFIG_IPV6)
/* neighbor handling is done with the actual device; we do not want
 * to flip skb->dev for those ndisc packets. This check really fails
 * for multiple next protocols (e.g., NEXTHDR_HOP), but it is a start.
 */
static bool ipv6_ndisc_frame(const struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	bool rc = false;

	if (iph->nexthdr == NEXTHDR_ICMP) {
		const struct icmp6hdr *icmph;
		struct icmp6hdr _icmph;

		icmph = skb_header_pointer(skb, sizeof(*iph),
					   sizeof(_icmph), &_icmph);
		if (!icmph)
			goto out;

		switch (icmph->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			rc = true;
			break;
		}
	}

out:
	return rc;
}

static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
				   struct sk_buff *skb)
{
	/* if packet is NDISC keep the ingress interface */
	if (!ipv6_ndisc_frame(skb)) {
		skb->dev = vrf_dev;
		skb->skb_iif = vrf_dev->ifindex;

		skb_push(skb, skb->mac_len);
		dev_queue_xmit_nit(skb, vrf_dev);
		skb_pull(skb, skb->mac_len);

		IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
	}

	return skb;
}

#else
static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
				   struct sk_buff *skb)
{
	return skb;
}
#endif

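/* reparent an ingress IPv4 packet to the VRF device and make it
 * visible to packet taps on the VRF; looped-back local traffic has
 * already hit the taps on Tx, so it skips them here
 */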
static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
				  struct sk_buff *skb)
{
	skb->dev = vrf_dev;
	skb->skb_iif = vrf_dev->ifindex;

	/* loopback traffic; do not push through packet taps again.
	 * Reset pkt_type for upper layers to process skb
	 */
	if (skb->pkt_type == PACKET_LOOPBACK) {
		skb->pkt_type = PACKET_HOST;
		goto out;
	}

	skb_push(skb, skb->mac_len);
	dev_queue_xmit_nit(skb, vrf_dev);
	skb_pull(skb, skb->mac_len);

out:
	return skb;
}

/* called with rcu lock held */
static struct sk_buff *vrf_l3_rcv(struct net_device *vrf_dev,
				  struct sk_buff *skb,
				  u16 proto)
{
	switch (proto) {
	case AF_INET:
		return vrf_ip_rcv(vrf_dev, skb);
	case AF_INET6:
		return vrf_ip6_rcv(vrf_dev, skb);
	}

	return skb;
}

#if IS_ENABLED(CONFIG_IPV6)
static struct dst_entry *vrf_get_rt6_dst(const struct net_device *dev,
					 const struct flowi6 *fl6)
{
	struct dst_entry *dst = NULL;

	if (!(fl6->flowi6_flags & FLOWI_FLAG_L3MDEV_SRC)) {
		struct net_vrf *vrf = netdev_priv(dev);
		struct rt6_info *rt;

		rcu_read_lock();

		rt = rcu_dereference(vrf->rt6);
		if (likely(rt)) {
			dst = &rt->dst;
			dst_hold(dst);
		}

		rcu_read_unlock();
	}

	return dst;
}
#endif

static const struct l3mdev_ops vrf_l3mdev_ops = {
	.l3mdev_fib_table	= vrf_fib_table,
	.l3mdev_get_rtable	= vrf_get_rtable,
	.l3mdev_get_saddr	= vrf_get_saddr,
	.l3mdev_l3_rcv		= vrf_l3_rcv,
#if IS_ENABLED(CONFIG_IPV6)
	.l3mdev_get_rt6_dst	= vrf_get_rt6_dst,
#endif
};

static void vrf_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}

static const struct ethtool_ops vrf_ethtool_ops = {
	.get_drvinfo	= vrf_get_drvinfo,
};

static void vrf_setup(struct net_device *dev)
{
	ether_setup(dev);

	/* Initialize the device structure. */
	dev->netdev_ops = &vrf_netdev_ops;
	dev->l3mdev_ops = &vrf_l3mdev_ops;
	dev->ethtool_ops = &vrf_ethtool_ops;
	dev->destructor = free_netdev;

	/* Fill in device structure with ethernet-generic values. */
	eth_hw_addr_random(dev);

	/* don't acquire vrf device's netif_tx_lock when transmitting */
	dev->features |= NETIF_F_LLTX;

	/* don't allow vrf devices to change network namespaces. */
	dev->features |= NETIF_F_NETNS_LOCAL;
}

static int vrf_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	return 0;
}

static void vrf_dellink(struct net_device *dev, struct list_head *head)
{
	unregister_netdevice_queue(dev, head);
}

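/* rtnl newlink handler: record the table id from IFLA_VRF_TABLE, mark
 * the device as an L3 master and register it; created from user space
 * with, e.g., "ip link add vrf-blue type vrf table 10"
 */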
static int vrf_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[])
{
	struct net_vrf *vrf = netdev_priv(dev);

	if (!data || !data[IFLA_VRF_TABLE])
		return -EINVAL;

	vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]);

	dev->priv_flags |= IFF_L3MDEV_MASTER;

	return register_netdevice(dev);
}

static size_t vrf_nl_getsize(const struct net_device *dev)
{
	return nla_total_size(sizeof(u32));  /* IFLA_VRF_TABLE */
}

static int vrf_fillinfo(struct sk_buff *skb,
			const struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return nla_put_u32(skb, IFLA_VRF_TABLE, vrf->tb_id);
}

static size_t vrf_get_slave_size(const struct net_device *bond_dev,
				 const struct net_device *slave_dev)
{
	return nla_total_size(sizeof(u32));  /* IFLA_VRF_PORT_TABLE */
}

static int vrf_fill_slave_info(struct sk_buff *skb,
			       const struct net_device *vrf_dev,
			       const struct net_device *slave_dev)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);

	if (nla_put_u32(skb, IFLA_VRF_PORT_TABLE, vrf->tb_id))
		return -EMSGSIZE;

	return 0;
}

static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = {
	[IFLA_VRF_TABLE] = { .type = NLA_U32 },
};

static struct rtnl_link_ops vrf_link_ops __read_mostly = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct net_vrf),

	.get_size	= vrf_nl_getsize,
	.policy		= vrf_nl_policy,
	.validate	= vrf_validate,
	.fill_info	= vrf_fillinfo,

	.get_slave_size  = vrf_get_slave_size,
	.fill_slave_info = vrf_fill_slave_info,

	.newlink	= vrf_newlink,
	.dellink	= vrf_dellink,
	.setup		= vrf_setup,
	.maxtype	= IFLA_VRF_MAX,
};

static int vrf_device_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	/* only care about unregister events to drop slave references */
	if (event == NETDEV_UNREGISTER) {
		struct net_device *vrf_dev;

		if (!netif_is_l3_slave(dev))
			goto out;

		vrf_dev = netdev_master_upper_dev_get(dev);
		vrf_del_slave(vrf_dev, dev);
	}
out:
	return NOTIFY_DONE;
}

static struct notifier_block vrf_notifier_block __read_mostly = {
	.notifier_call = vrf_device_event,
};

static int __init vrf_init_module(void)
{
	int rc;

	register_netdevice_notifier(&vrf_notifier_block);

	rc = rtnl_link_register(&vrf_link_ops);
	if (rc < 0)
		goto error;

	return 0;

error:
	unregister_netdevice_notifier(&vrf_notifier_block);
	return rc;
}

module_init(vrf_init_module);
MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
MODULE_VERSION(DRV_VERSION);