blob: d960ea6657b50407f094d78800cfb085868fafe4 [file] [log] [blame]
YOSHIFUJI Hideakia716c112007-02-09 23:25:29 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 * xfrm_policy.c
3 *
4 * Changes:
5 * Mitsuru KANDA @USAGI
6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
8 * IPv6 support
9 * Kazunori MIYAZAWA @USAGI
10 * YOSHIFUJI Hideaki
11 * Split up af-specific portion
12 * Derek Atkins <derek@ihtfp.com> Add the post_input processor
Trent Jaegerdf718372005-12-13 23:12:27 -080013 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070014 */
15
Herbert Xu66cdb3c2007-11-13 21:37:28 -080016#include <linux/err.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070017#include <linux/slab.h>
18#include <linux/kmod.h>
19#include <linux/list.h>
20#include <linux/spinlock.h>
21#include <linux/workqueue.h>
22#include <linux/notifier.h>
23#include <linux/netdevice.h>
Patrick McHardyeb9c7eb2006-01-06 23:06:30 -080024#include <linux/netfilter.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070025#include <linux/module.h>
David S. Miller2518c7c2006-08-24 04:45:07 -070026#include <linux/cache.h>
Florian Westphalec30d782017-07-17 13:57:27 +020027#include <linux/cpu.h>
Paul Moore68277ac2007-12-20 20:49:33 -080028#include <linux/audit.h>
Herbert Xu25ee3282007-12-11 09:32:34 -080029#include <net/dst.h>
Eric Paris6ce74ec2012-02-16 15:08:39 -050030#include <net/flow.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070031#include <net/xfrm.h>
32#include <net/ip.h>
Masahide NAKAMURA558f82e2007-12-20 20:42:57 -080033#ifdef CONFIG_XFRM_STATISTICS
34#include <net/snmp.h>
35#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -070036
David S. Miller44e36b42006-08-24 04:50:50 -070037#include "xfrm_hash.h"
38
Steffen Klasserta0073fe2013-02-05 12:52:55 +010039#define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
40#define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
41#define XFRM_MAX_QUEUE_LEN 100
42
Steffen Klassertb8c203b2014-09-16 10:08:49 +020043struct xfrm_flo {
44 struct dst_entry *dst_orig;
45 u8 flags;
46};
47
Florian Westphalec30d782017-07-17 13:57:27 +020048static DEFINE_PER_CPU(struct xfrm_dst *, xfrm_last_dst);
49static struct work_struct *xfrm_pcpu_work __read_mostly;
Steffen Klassertf203b762018-06-12 14:07:12 +020050static DEFINE_SPINLOCK(xfrm_if_cb_lock);
51static struct xfrm_if_cb const __rcu *xfrm_if_cb __read_mostly;
52
Priyanka Jain418a99a2012-08-12 21:22:29 +000053static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
Florian Westphal37b10382017-02-07 15:00:19 +010054static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
Priyanka Jain418a99a2012-08-12 21:22:29 +000055 __read_mostly;
Linus Torvalds1da177e2005-04-16 15:20:36 -070056
Alexey Dobriyanf8c3d0d2018-02-24 21:21:38 +030057static struct kmem_cache *xfrm_dst_cache __ro_after_init;
Florian Westphal30846092016-08-11 15:17:54 +020058static __read_mostly seqcount_t xfrm_policy_hash_generation;
Linus Torvalds1da177e2005-04-16 15:20:36 -070059
David Miller54920932017-11-28 15:41:01 -050060static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr);
Timo Teräs80c802f2010-04-07 00:30:05 +000061static int stale_bundle(struct dst_entry *dst);
Steffen Klassert12fdb4d2011-06-29 23:18:20 +000062static int xfrm_bundle_ok(struct xfrm_dst *xdst);
Kees Cookc3aed702017-10-16 17:28:56 -070063static void xfrm_policy_queue_process(struct timer_list *t);
Linus Torvalds1da177e2005-04-16 15:20:36 -070064
Herbert Xu12bfa8b2014-11-13 17:09:50 +080065static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
Wei Yongjun29fa0b302008-12-03 00:33:09 -080066static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
67 int dir);
68
Florian Westphale37cc8a2016-08-11 15:17:55 +020069static inline bool xfrm_pol_hold_rcu(struct xfrm_policy *policy)
70{
Reshetova, Elena850a6212017-07-04 15:53:22 +030071 return refcount_inc_not_zero(&policy->refcnt);
Florian Westphale37cc8a2016-08-11 15:17:55 +020072}
73
David S. Millerbc9b35a2012-05-15 15:04:57 -040074static inline bool
David S. Miller200ce962011-02-24 00:12:25 -050075__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
Andrew Morton77681022006-11-08 22:46:26 -080076{
David S. Miller7e1dc7b2011-03-12 02:42:11 -050077 const struct flowi4 *fl4 = &fl->u.ip4;
78
Alexey Dobriyan26bff942011-11-22 06:46:02 +000079 return addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
80 addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
David S. Miller7e1dc7b2011-03-12 02:42:11 -050081 !((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
82 !((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
83 (fl4->flowi4_proto == sel->proto || !sel->proto) &&
84 (fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
Andrew Morton77681022006-11-08 22:46:26 -080085}
86
David S. Millerbc9b35a2012-05-15 15:04:57 -040087static inline bool
David S. Miller200ce962011-02-24 00:12:25 -050088__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
Andrew Morton77681022006-11-08 22:46:26 -080089{
David S. Miller7e1dc7b2011-03-12 02:42:11 -050090 const struct flowi6 *fl6 = &fl->u.ip6;
91
92 return addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
93 addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
94 !((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
95 !((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
96 (fl6->flowi6_proto == sel->proto || !sel->proto) &&
97 (fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
Andrew Morton77681022006-11-08 22:46:26 -080098}
99
David S. Millerbc9b35a2012-05-15 15:04:57 -0400100bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
101 unsigned short family)
Andrew Morton77681022006-11-08 22:46:26 -0800102{
103 switch (family) {
104 case AF_INET:
105 return __xfrm4_selector_match(sel, fl);
106 case AF_INET6:
107 return __xfrm6_selector_match(sel, fl);
108 }
David S. Millerbc9b35a2012-05-15 15:04:57 -0400109 return false;
Andrew Morton77681022006-11-08 22:46:26 -0800110}
111
Florian Westphala2817d82017-02-07 15:00:17 +0100112static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
Eric Dumazetef8531b2012-08-19 12:31:48 +0200113{
Florian Westphala2817d82017-02-07 15:00:17 +0100114 const struct xfrm_policy_afinfo *afinfo;
Eric Dumazetef8531b2012-08-19 12:31:48 +0200115
Florian Westphala2817d82017-02-07 15:00:17 +0100116 if (unlikely(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
Eric Dumazetef8531b2012-08-19 12:31:48 +0200117 return NULL;
118 rcu_read_lock();
119 afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
120 if (unlikely(!afinfo))
121 rcu_read_unlock();
122 return afinfo;
123}
124
Steffen Klassertf203b762018-06-12 14:07:12 +0200125/* Called with rcu_read_lock(). */
126static const struct xfrm_if_cb *xfrm_if_get_cb(void)
127{
128 return rcu_dereference(xfrm_if_cb);
129}
130
Steffen Klassertd77e38e2017-04-14 10:06:10 +0200131struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
132 const xfrm_address_t *saddr,
133 const xfrm_address_t *daddr,
Lorenzo Colitti077fbac2017-08-11 02:11:33 +0900134 int family, u32 mark)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700135{
Florian Westphal37b10382017-02-07 15:00:19 +0100136 const struct xfrm_policy_afinfo *afinfo;
Herbert Xu66cdb3c2007-11-13 21:37:28 -0800137 struct dst_entry *dst;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700138
Herbert Xu25ee3282007-12-11 09:32:34 -0800139 afinfo = xfrm_policy_get_afinfo(family);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700140 if (unlikely(afinfo == NULL))
Herbert Xu66cdb3c2007-11-13 21:37:28 -0800141 return ERR_PTR(-EAFNOSUPPORT);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700142
Lorenzo Colitti077fbac2017-08-11 02:11:33 +0900143 dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr, mark);
YOSHIFUJI Hideaki9bb182a2008-02-22 14:48:22 +0900144
Florian Westphalbdba9fe2017-02-07 15:00:18 +0100145 rcu_read_unlock();
YOSHIFUJI Hideaki9bb182a2008-02-22 14:48:22 +0900146
147 return dst;
148}
Steffen Klassertd77e38e2017-04-14 10:06:10 +0200149EXPORT_SYMBOL(__xfrm_dst_lookup);
YOSHIFUJI Hideaki9bb182a2008-02-22 14:48:22 +0900150
David Ahern42a7b322015-08-10 16:58:11 -0600151static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
152 int tos, int oif,
YOSHIFUJI Hideaki9bb182a2008-02-22 14:48:22 +0900153 xfrm_address_t *prev_saddr,
154 xfrm_address_t *prev_daddr,
Lorenzo Colitti077fbac2017-08-11 02:11:33 +0900155 int family, u32 mark)
YOSHIFUJI Hideaki9bb182a2008-02-22 14:48:22 +0900156{
Alexey Dobriyanc5b3cf42008-11-25 17:51:25 -0800157 struct net *net = xs_net(x);
YOSHIFUJI Hideaki9bb182a2008-02-22 14:48:22 +0900158 xfrm_address_t *saddr = &x->props.saddr;
159 xfrm_address_t *daddr = &x->id.daddr;
160 struct dst_entry *dst;
161
162 if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
163 saddr = x->coaddr;
164 daddr = prev_daddr;
165 }
166 if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
167 saddr = prev_saddr;
168 daddr = x->coaddr;
169 }
170
Lorenzo Colitti077fbac2017-08-11 02:11:33 +0900171 dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family, mark);
YOSHIFUJI Hideaki9bb182a2008-02-22 14:48:22 +0900172
173 if (!IS_ERR(dst)) {
174 if (prev_saddr != saddr)
175 memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
176 if (prev_daddr != daddr)
177 memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
178 }
179
Herbert Xu66cdb3c2007-11-13 21:37:28 -0800180 return dst;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700181}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700182
Linus Torvalds1da177e2005-04-16 15:20:36 -0700183static inline unsigned long make_jiffies(long secs)
184{
185 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
186 return MAX_SCHEDULE_TIMEOUT-1;
187 else
YOSHIFUJI Hideakia716c112007-02-09 23:25:29 +0900188 return secs*HZ;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700189}
190
Kees Cookc3aed702017-10-16 17:28:56 -0700191static void xfrm_policy_timer(struct timer_list *t)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700192{
Kees Cookc3aed702017-10-16 17:28:56 -0700193 struct xfrm_policy *xp = from_timer(xp, t, timer);
James Morris9d729f72007-03-04 16:12:44 -0800194 unsigned long now = get_seconds();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700195 long next = LONG_MAX;
196 int warn = 0;
197 int dir;
198
199 read_lock(&xp->lock);
200
Timo Teräsea2dea92010-03-31 00:17:05 +0000201 if (unlikely(xp->walk.dead))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700202 goto out;
203
Herbert Xu77d8d7a2005-10-05 12:15:12 -0700204 dir = xfrm_policy_id2dir(xp->index);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700205
206 if (xp->lft.hard_add_expires_seconds) {
207 long tmo = xp->lft.hard_add_expires_seconds +
208 xp->curlft.add_time - now;
209 if (tmo <= 0)
210 goto expired;
211 if (tmo < next)
212 next = tmo;
213 }
214 if (xp->lft.hard_use_expires_seconds) {
215 long tmo = xp->lft.hard_use_expires_seconds +
216 (xp->curlft.use_time ? : xp->curlft.add_time) - now;
217 if (tmo <= 0)
218 goto expired;
219 if (tmo < next)
220 next = tmo;
221 }
222 if (xp->lft.soft_add_expires_seconds) {
223 long tmo = xp->lft.soft_add_expires_seconds +
224 xp->curlft.add_time - now;
225 if (tmo <= 0) {
226 warn = 1;
227 tmo = XFRM_KM_TIMEOUT;
228 }
229 if (tmo < next)
230 next = tmo;
231 }
232 if (xp->lft.soft_use_expires_seconds) {
233 long tmo = xp->lft.soft_use_expires_seconds +
234 (xp->curlft.use_time ? : xp->curlft.add_time) - now;
235 if (tmo <= 0) {
236 warn = 1;
237 tmo = XFRM_KM_TIMEOUT;
238 }
239 if (tmo < next)
240 next = tmo;
241 }
242
243 if (warn)
Jamal Hadi Salim6c5c8ca2006-03-20 19:17:25 -0800244 km_policy_expired(xp, dir, 0, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700245 if (next != LONG_MAX &&
246 !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
247 xfrm_pol_hold(xp);
248
249out:
250 read_unlock(&xp->lock);
251 xfrm_pol_put(xp);
252 return;
253
254expired:
255 read_unlock(&xp->lock);
Herbert Xu4666faa2005-06-18 22:43:22 -0700256 if (!xfrm_policy_delete(xp, dir))
Jamal Hadi Salim6c5c8ca2006-03-20 19:17:25 -0800257 km_policy_expired(xp, dir, 1, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700258 xfrm_pol_put(xp);
259}
260
Linus Torvalds1da177e2005-04-16 15:20:36 -0700261/* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
262 * SPD calls.
263 */
264
Alexey Dobriyan0331b1f2008-11-25 17:21:45 -0800265struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700266{
267 struct xfrm_policy *policy;
268
Panagiotis Issaris0da974f2006-07-21 14:51:30 -0700269 policy = kzalloc(sizeof(struct xfrm_policy), gfp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700270
271 if (policy) {
Alexey Dobriyan0331b1f2008-11-25 17:21:45 -0800272 write_pnet(&policy->xp_net, net);
Herbert Xu12a169e2008-10-01 07:03:24 -0700273 INIT_LIST_HEAD(&policy->walk.all);
David S. Miller2518c7c2006-08-24 04:45:07 -0700274 INIT_HLIST_NODE(&policy->bydst);
275 INIT_HLIST_NODE(&policy->byidx);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700276 rwlock_init(&policy->lock);
Reshetova, Elena850a6212017-07-04 15:53:22 +0300277 refcount_set(&policy->refcnt, 1);
Steffen Klasserta0073fe2013-02-05 12:52:55 +0100278 skb_queue_head_init(&policy->polq.hold_queue);
Kees Cookc3aed702017-10-16 17:28:56 -0700279 timer_setup(&policy->timer, xfrm_policy_timer, 0);
280 timer_setup(&policy->polq.hold_timer,
281 xfrm_policy_queue_process, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700282 }
283 return policy;
284}
285EXPORT_SYMBOL(xfrm_policy_alloc);
286
Eric Dumazet56f04732015-12-08 07:22:01 -0800287static void xfrm_policy_destroy_rcu(struct rcu_head *head)
288{
289 struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu);
290
291 security_xfrm_policy_free(policy->security);
292 kfree(policy);
293}
294
Linus Torvalds1da177e2005-04-16 15:20:36 -0700295/* Destroy xfrm_policy: descendant resources must be released to this moment. */
296
WANG Cong64c31b32008-01-07 22:34:29 -0800297void xfrm_policy_destroy(struct xfrm_policy *policy)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700298{
Herbert Xu12a169e2008-10-01 07:03:24 -0700299 BUG_ON(!policy->walk.dead);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700300
Fan Du0659eea2013-08-01 18:08:36 +0800301 if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700302 BUG();
303
Eric Dumazet56f04732015-12-08 07:22:01 -0800304 call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700305}
WANG Cong64c31b32008-01-07 22:34:29 -0800306EXPORT_SYMBOL(xfrm_policy_destroy);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700307
Alexander Alemayhu1365e547c2017-01-03 17:13:20 +0100308/* Rule must be locked. Release descendant resources, announce
Linus Torvalds1da177e2005-04-16 15:20:36 -0700309 * entry dead. The rule must be unlinked from lists to the moment.
310 */
311
312static void xfrm_policy_kill(struct xfrm_policy *policy)
313{
Herbert Xu12a169e2008-10-01 07:03:24 -0700314 policy->walk.dead = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700315
Timo Teräs285ead12010-04-07 00:30:06 +0000316 atomic_inc(&policy->genid);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700317
Steffen Klasserte7d8f6c2013-10-08 10:49:45 +0200318 if (del_timer(&policy->polq.hold_timer))
319 xfrm_pol_put(policy);
Li RongQing1ee5e662015-04-22 15:51:16 +0800320 skb_queue_purge(&policy->polq.hold_queue);
Steffen Klasserta0073fe2013-02-05 12:52:55 +0100321
Timo Teräs285ead12010-04-07 00:30:06 +0000322 if (del_timer(&policy->timer))
323 xfrm_pol_put(policy);
324
325 xfrm_pol_put(policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700326}
327
David S. Miller2518c7c2006-08-24 04:45:07 -0700328static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;
329
Alexey Dobriyane92303f2008-11-25 17:32:41 -0800330static inline unsigned int idx_hash(struct net *net, u32 index)
David S. Miller2518c7c2006-08-24 04:45:07 -0700331{
Alexey Dobriyane92303f2008-11-25 17:32:41 -0800332 return __idx_hash(index, net->xfrm.policy_idx_hmask);
David S. Miller2518c7c2006-08-24 04:45:07 -0700333}
334
Christophe Gouaultb58555f2014-08-29 16:16:04 +0200335/* calculate policy hash thresholds */
336static void __get_hash_thresh(struct net *net,
337 unsigned short family, int dir,
338 u8 *dbits, u8 *sbits)
339{
340 switch (family) {
341 case AF_INET:
342 *dbits = net->xfrm.policy_bydst[dir].dbits4;
343 *sbits = net->xfrm.policy_bydst[dir].sbits4;
344 break;
345
346 case AF_INET6:
347 *dbits = net->xfrm.policy_bydst[dir].dbits6;
348 *sbits = net->xfrm.policy_bydst[dir].sbits6;
349 break;
350
351 default:
352 *dbits = 0;
353 *sbits = 0;
354 }
355}
356
David S. Miller5f803b52011-02-24 00:33:19 -0500357static struct hlist_head *policy_hash_bysel(struct net *net,
358 const struct xfrm_selector *sel,
359 unsigned short family, int dir)
David S. Miller2518c7c2006-08-24 04:45:07 -0700360{
Alexey Dobriyan11219942008-11-25 17:33:06 -0800361 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
Christophe Gouaultb58555f2014-08-29 16:16:04 +0200362 unsigned int hash;
363 u8 dbits;
364 u8 sbits;
365
366 __get_hash_thresh(net, family, dir, &dbits, &sbits);
367 hash = __sel_hash(sel, family, hmask, dbits, sbits);
David S. Miller2518c7c2006-08-24 04:45:07 -0700368
Florian Westphale1e551b2016-08-11 15:17:53 +0200369 if (hash == hmask + 1)
370 return &net->xfrm.policy_inexact[dir];
371
372 return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
373 lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
David S. Miller2518c7c2006-08-24 04:45:07 -0700374}
375
David S. Miller5f803b52011-02-24 00:33:19 -0500376static struct hlist_head *policy_hash_direct(struct net *net,
377 const xfrm_address_t *daddr,
378 const xfrm_address_t *saddr,
379 unsigned short family, int dir)
David S. Miller2518c7c2006-08-24 04:45:07 -0700380{
Alexey Dobriyan11219942008-11-25 17:33:06 -0800381 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
Christophe Gouaultb58555f2014-08-29 16:16:04 +0200382 unsigned int hash;
383 u8 dbits;
384 u8 sbits;
385
386 __get_hash_thresh(net, family, dir, &dbits, &sbits);
387 hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits);
David S. Miller2518c7c2006-08-24 04:45:07 -0700388
Florian Westphale1e551b2016-08-11 15:17:53 +0200389 return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
390 lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
David S. Miller2518c7c2006-08-24 04:45:07 -0700391}
392
Christophe Gouaultb58555f2014-08-29 16:16:04 +0200393static void xfrm_dst_hash_transfer(struct net *net,
394 struct hlist_head *list,
David S. Miller2518c7c2006-08-24 04:45:07 -0700395 struct hlist_head *ndsttable,
Christophe Gouaultb58555f2014-08-29 16:16:04 +0200396 unsigned int nhashmask,
397 int dir)
David S. Miller2518c7c2006-08-24 04:45:07 -0700398{
Sasha Levinb67bfe02013-02-27 17:06:00 -0800399 struct hlist_node *tmp, *entry0 = NULL;
David S. Miller2518c7c2006-08-24 04:45:07 -0700400 struct xfrm_policy *pol;
YOSHIFUJI Hideakib7911602008-02-17 23:29:30 -0800401 unsigned int h0 = 0;
Christophe Gouaultb58555f2014-08-29 16:16:04 +0200402 u8 dbits;
403 u8 sbits;
David S. Miller2518c7c2006-08-24 04:45:07 -0700404
YOSHIFUJI Hideakib7911602008-02-17 23:29:30 -0800405redo:
Sasha Levinb67bfe02013-02-27 17:06:00 -0800406 hlist_for_each_entry_safe(pol, tmp, list, bydst) {
David S. Miller2518c7c2006-08-24 04:45:07 -0700407 unsigned int h;
408
Christophe Gouaultb58555f2014-08-29 16:16:04 +0200409 __get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
David S. Miller2518c7c2006-08-24 04:45:07 -0700410 h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
Christophe Gouaultb58555f2014-08-29 16:16:04 +0200411 pol->family, nhashmask, dbits, sbits);
YOSHIFUJI Hideakib7911602008-02-17 23:29:30 -0800412 if (!entry0) {
Florian Westphala5eefc12016-08-11 15:17:52 +0200413 hlist_del_rcu(&pol->bydst);
414 hlist_add_head_rcu(&pol->bydst, ndsttable + h);
YOSHIFUJI Hideakib7911602008-02-17 23:29:30 -0800415 h0 = h;
416 } else {
417 if (h != h0)
418 continue;
Florian Westphala5eefc12016-08-11 15:17:52 +0200419 hlist_del_rcu(&pol->bydst);
420 hlist_add_behind_rcu(&pol->bydst, entry0);
YOSHIFUJI Hideakib7911602008-02-17 23:29:30 -0800421 }
Sasha Levinb67bfe02013-02-27 17:06:00 -0800422 entry0 = &pol->bydst;
YOSHIFUJI Hideakib7911602008-02-17 23:29:30 -0800423 }
424 if (!hlist_empty(list)) {
425 entry0 = NULL;
426 goto redo;
David S. Miller2518c7c2006-08-24 04:45:07 -0700427 }
428}
429
430static void xfrm_idx_hash_transfer(struct hlist_head *list,
431 struct hlist_head *nidxtable,
432 unsigned int nhashmask)
433{
Sasha Levinb67bfe02013-02-27 17:06:00 -0800434 struct hlist_node *tmp;
David S. Miller2518c7c2006-08-24 04:45:07 -0700435 struct xfrm_policy *pol;
436
Sasha Levinb67bfe02013-02-27 17:06:00 -0800437 hlist_for_each_entry_safe(pol, tmp, list, byidx) {
David S. Miller2518c7c2006-08-24 04:45:07 -0700438 unsigned int h;
439
440 h = __idx_hash(pol->index, nhashmask);
441 hlist_add_head(&pol->byidx, nidxtable+h);
442 }
443}
444
445static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
446{
447 return ((old_hmask + 1) << 1) - 1;
448}
449
Alexey Dobriyan66caf622008-11-25 17:28:57 -0800450static void xfrm_bydst_resize(struct net *net, int dir)
David S. Miller2518c7c2006-08-24 04:45:07 -0700451{
Alexey Dobriyan66caf622008-11-25 17:28:57 -0800452 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
David S. Miller2518c7c2006-08-24 04:45:07 -0700453 unsigned int nhashmask = xfrm_new_hash_mask(hmask);
454 unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
David S. Miller44e36b42006-08-24 04:50:50 -0700455 struct hlist_head *ndst = xfrm_hash_alloc(nsize);
Florian Westphale1e551b2016-08-11 15:17:53 +0200456 struct hlist_head *odst;
David S. Miller2518c7c2006-08-24 04:45:07 -0700457 int i;
458
459 if (!ndst)
460 return;
461
Florian Westphal9d0380d2016-08-11 15:17:59 +0200462 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
Florian Westphal30846092016-08-11 15:17:54 +0200463 write_seqcount_begin(&xfrm_policy_hash_generation);
464
465 odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
466 lockdep_is_held(&net->xfrm.xfrm_policy_lock));
David S. Miller2518c7c2006-08-24 04:45:07 -0700467
Florian Westphale1e551b2016-08-11 15:17:53 +0200468 odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
469 lockdep_is_held(&net->xfrm.xfrm_policy_lock));
470
David S. Miller2518c7c2006-08-24 04:45:07 -0700471 for (i = hmask; i >= 0; i--)
Christophe Gouaultb58555f2014-08-29 16:16:04 +0200472 xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);
David S. Miller2518c7c2006-08-24 04:45:07 -0700473
Florian Westphale1e551b2016-08-11 15:17:53 +0200474 rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
Alexey Dobriyan66caf622008-11-25 17:28:57 -0800475 net->xfrm.policy_bydst[dir].hmask = nhashmask;
David S. Miller2518c7c2006-08-24 04:45:07 -0700476
Florian Westphal30846092016-08-11 15:17:54 +0200477 write_seqcount_end(&xfrm_policy_hash_generation);
Florian Westphal9d0380d2016-08-11 15:17:59 +0200478 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
David S. Miller2518c7c2006-08-24 04:45:07 -0700479
Florian Westphale1e551b2016-08-11 15:17:53 +0200480 synchronize_rcu();
481
David S. Miller44e36b42006-08-24 04:50:50 -0700482 xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
David S. Miller2518c7c2006-08-24 04:45:07 -0700483}
484
Alexey Dobriyan66caf622008-11-25 17:28:57 -0800485static void xfrm_byidx_resize(struct net *net, int total)
David S. Miller2518c7c2006-08-24 04:45:07 -0700486{
Alexey Dobriyan66caf622008-11-25 17:28:57 -0800487 unsigned int hmask = net->xfrm.policy_idx_hmask;
David S. Miller2518c7c2006-08-24 04:45:07 -0700488 unsigned int nhashmask = xfrm_new_hash_mask(hmask);
489 unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
Alexey Dobriyan66caf622008-11-25 17:28:57 -0800490 struct hlist_head *oidx = net->xfrm.policy_byidx;
David S. Miller44e36b42006-08-24 04:50:50 -0700491 struct hlist_head *nidx = xfrm_hash_alloc(nsize);
David S. Miller2518c7c2006-08-24 04:45:07 -0700492 int i;
493
494 if (!nidx)
495 return;
496
Florian Westphal9d0380d2016-08-11 15:17:59 +0200497 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
David S. Miller2518c7c2006-08-24 04:45:07 -0700498
499 for (i = hmask; i >= 0; i--)
500 xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);
501
Alexey Dobriyan66caf622008-11-25 17:28:57 -0800502 net->xfrm.policy_byidx = nidx;
503 net->xfrm.policy_idx_hmask = nhashmask;
David S. Miller2518c7c2006-08-24 04:45:07 -0700504
Florian Westphal9d0380d2016-08-11 15:17:59 +0200505 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
David S. Miller2518c7c2006-08-24 04:45:07 -0700506
David S. Miller44e36b42006-08-24 04:50:50 -0700507 xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
David S. Miller2518c7c2006-08-24 04:45:07 -0700508}
509
Alexey Dobriyan66caf622008-11-25 17:28:57 -0800510static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
David S. Miller2518c7c2006-08-24 04:45:07 -0700511{
Alexey Dobriyan66caf622008-11-25 17:28:57 -0800512 unsigned int cnt = net->xfrm.policy_count[dir];
513 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
David S. Miller2518c7c2006-08-24 04:45:07 -0700514
515 if (total)
516 *total += cnt;
517
518 if ((hmask + 1) < xfrm_policy_hashmax &&
519 cnt > hmask)
520 return 1;
521
522 return 0;
523}
524
Alexey Dobriyan66caf622008-11-25 17:28:57 -0800525static inline int xfrm_byidx_should_resize(struct net *net, int total)
David S. Miller2518c7c2006-08-24 04:45:07 -0700526{
Alexey Dobriyan66caf622008-11-25 17:28:57 -0800527 unsigned int hmask = net->xfrm.policy_idx_hmask;
David S. Miller2518c7c2006-08-24 04:45:07 -0700528
529 if ((hmask + 1) < xfrm_policy_hashmax &&
530 total > hmask)
531 return 1;
532
533 return 0;
534}
535
Alexey Dobriyane0710412010-01-23 13:37:10 +0000536void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
Jamal Hadi Salimecfd6b12007-04-28 21:20:32 -0700537{
Alexey Dobriyane0710412010-01-23 13:37:10 +0000538 si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
539 si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
540 si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
541 si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
542 si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
543 si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
544 si->spdhcnt = net->xfrm.policy_idx_hmask;
Jamal Hadi Salimecfd6b12007-04-28 21:20:32 -0700545 si->spdhmcnt = xfrm_policy_hashmax;
Jamal Hadi Salimecfd6b12007-04-28 21:20:32 -0700546}
547EXPORT_SYMBOL(xfrm_spd_getinfo);
David S. Miller2518c7c2006-08-24 04:45:07 -0700548
Jamal Hadi Salimecfd6b12007-04-28 21:20:32 -0700549static DEFINE_MUTEX(hash_resize_mutex);
Alexey Dobriyan66caf622008-11-25 17:28:57 -0800550static void xfrm_hash_resize(struct work_struct *work)
David S. Miller2518c7c2006-08-24 04:45:07 -0700551{
Alexey Dobriyan66caf622008-11-25 17:28:57 -0800552 struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
David S. Miller2518c7c2006-08-24 04:45:07 -0700553 int dir, total;
554
555 mutex_lock(&hash_resize_mutex);
556
557 total = 0;
Herbert Xu53c2e282014-11-13 17:09:49 +0800558 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
Alexey Dobriyan66caf622008-11-25 17:28:57 -0800559 if (xfrm_bydst_should_resize(net, dir, &total))
560 xfrm_bydst_resize(net, dir);
David S. Miller2518c7c2006-08-24 04:45:07 -0700561 }
Alexey Dobriyan66caf622008-11-25 17:28:57 -0800562 if (xfrm_byidx_should_resize(net, total))
563 xfrm_byidx_resize(net, total);
David S. Miller2518c7c2006-08-24 04:45:07 -0700564
565 mutex_unlock(&hash_resize_mutex);
566}
567
Christophe Gouault880a6fa2014-08-29 16:16:05 +0200568static void xfrm_hash_rebuild(struct work_struct *work)
569{
570 struct net *net = container_of(work, struct net,
571 xfrm.policy_hthresh.work);
572 unsigned int hmask;
573 struct xfrm_policy *pol;
574 struct xfrm_policy *policy;
575 struct hlist_head *chain;
576 struct hlist_head *odst;
577 struct hlist_node *newpos;
578 int i;
579 int dir;
580 unsigned seq;
581 u8 lbits4, rbits4, lbits6, rbits6;
582
583 mutex_lock(&hash_resize_mutex);
584
585 /* read selector prefixlen thresholds */
586 do {
587 seq = read_seqbegin(&net->xfrm.policy_hthresh.lock);
588
589 lbits4 = net->xfrm.policy_hthresh.lbits4;
590 rbits4 = net->xfrm.policy_hthresh.rbits4;
591 lbits6 = net->xfrm.policy_hthresh.lbits6;
592 rbits6 = net->xfrm.policy_hthresh.rbits6;
593 } while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));
594
Florian Westphal9d0380d2016-08-11 15:17:59 +0200595 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
Christophe Gouault880a6fa2014-08-29 16:16:05 +0200596
597 /* reset the bydst and inexact table in all directions */
Herbert Xu53c2e282014-11-13 17:09:49 +0800598 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
Christophe Gouault880a6fa2014-08-29 16:16:05 +0200599 INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
600 hmask = net->xfrm.policy_bydst[dir].hmask;
601 odst = net->xfrm.policy_bydst[dir].table;
602 for (i = hmask; i >= 0; i--)
603 INIT_HLIST_HEAD(odst + i);
604 if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
605 /* dir out => dst = remote, src = local */
606 net->xfrm.policy_bydst[dir].dbits4 = rbits4;
607 net->xfrm.policy_bydst[dir].sbits4 = lbits4;
608 net->xfrm.policy_bydst[dir].dbits6 = rbits6;
609 net->xfrm.policy_bydst[dir].sbits6 = lbits6;
610 } else {
611 /* dir in/fwd => dst = local, src = remote */
612 net->xfrm.policy_bydst[dir].dbits4 = lbits4;
613 net->xfrm.policy_bydst[dir].sbits4 = rbits4;
614 net->xfrm.policy_bydst[dir].dbits6 = lbits6;
615 net->xfrm.policy_bydst[dir].sbits6 = rbits6;
616 }
617 }
618
619 /* re-insert all policies by order of creation */
620 list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
Florian Westphal862591b2017-12-27 23:25:45 +0100621 if (policy->walk.dead ||
622 xfrm_policy_id2dir(policy->index) >= XFRM_POLICY_MAX) {
Tobias Brunner6916fb32016-07-29 09:57:32 +0200623 /* skip socket policies */
624 continue;
625 }
Christophe Gouault880a6fa2014-08-29 16:16:05 +0200626 newpos = NULL;
627 chain = policy_hash_bysel(net, &policy->selector,
628 policy->family,
629 xfrm_policy_id2dir(policy->index));
630 hlist_for_each_entry(pol, chain, bydst) {
631 if (policy->priority >= pol->priority)
632 newpos = &pol->bydst;
633 else
634 break;
635 }
636 if (newpos)
637 hlist_add_behind(&policy->bydst, newpos);
638 else
639 hlist_add_head(&policy->bydst, chain);
640 }
641
Florian Westphal9d0380d2016-08-11 15:17:59 +0200642 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
Christophe Gouault880a6fa2014-08-29 16:16:05 +0200643
644 mutex_unlock(&hash_resize_mutex);
645}
646
647void xfrm_policy_hash_rebuild(struct net *net)
648{
649 schedule_work(&net->xfrm.policy_hthresh.work);
650}
651EXPORT_SYMBOL(xfrm_policy_hash_rebuild);
652
/* Generate new index... KAME seems to generate them ordered by cost
 * of an absolute inpredictability of ordering of rules. This will not pass. */
static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
{
	/* Shared generator; callers serialize via xfrm_policy_lock, so no
	 * extra locking here.  NOTE(review): not protected by its own lock —
	 * confirm all callers hold net->xfrm.xfrm_policy_lock. */
	static u32 idx_generator;

	for (;;) {
		struct hlist_head *list;
		struct xfrm_policy *p;
		u32 idx;
		int found;

		if (!index) {
			/* Low 3 bits encode the direction; step by 8 so
			 * successive indices keep distinct dir fields. */
			idx = (idx_generator | dir);
			idx_generator += 8;
		} else {
			/* Caller supplied a preferred index; try it once,
			 * then fall back to generated values. */
			idx = index;
			index = 0;
		}

		if (idx == 0)
			idx = 8;
		/* Probe the byidx hash chain for a collision. */
		list = net->xfrm.policy_byidx + idx_hash(net, idx);
		found = 0;
		hlist_for_each_entry(p, list, byidx) {
			if (p->index == idx) {
				found = 1;
				break;
			}
		}
		/* First unused index wins. */
		if (!found)
			return idx;
	}
}
687
David S. Miller2518c7c2006-08-24 04:45:07 -0700688static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
689{
690 u32 *p1 = (u32 *) s1;
691 u32 *p2 = (u32 *) s2;
692 int len = sizeof(struct xfrm_selector) / sizeof(u32);
693 int i;
694
695 for (i = 0; i < len; i++) {
696 if (p1[i] != p2[i])
697 return 1;
698 }
699
700 return 0;
701}
702
/* Move any packets held on @old's policy queue over to @new and rearm
 * @new's hold timer so the queued packets get reprocessed under the
 * replacement policy. */
static void xfrm_policy_requeue(struct xfrm_policy *old,
				struct xfrm_policy *new)
{
	struct xfrm_policy_queue *pq = &old->polq;
	struct sk_buff_head list;

	/* Fast path: nothing queued on the old policy. */
	if (skb_queue_empty(&pq->hold_queue))
		return;

	__skb_queue_head_init(&list);

	spin_lock_bh(&pq->hold_queue.lock);
	skb_queue_splice_init(&pq->hold_queue, &list);
	/* A pending hold timer owns a reference on @old; del_timer()
	 * returning nonzero means we cancelled it, so drop that ref. */
	if (del_timer(&pq->hold_timer))
		xfrm_pol_put(old);
	spin_unlock_bh(&pq->hold_queue.lock);

	pq = &new->polq;

	spin_lock_bh(&pq->hold_queue.lock);
	skb_queue_splice(&list, &pq->hold_queue);
	pq->timeout = XFRM_QUEUE_TMO_MIN;
	/* mod_timer() returns 0 if the timer was not already pending;
	 * in that case the timer now holds a new reference on @new. */
	if (!mod_timer(&pq->hold_timer, jiffies))
		xfrm_pol_hold(new);
	spin_unlock_bh(&pq->hold_queue.lock);
}
729
Steffen Klassert7cb8a932013-02-11 07:02:36 +0100730static bool xfrm_policy_mark_match(struct xfrm_policy *policy,
731 struct xfrm_policy *pol)
732{
733 u32 mark = policy->mark.v & policy->mark.m;
734
735 if (policy->mark.v == pol->mark.v && policy->mark.m == pol->mark.m)
736 return true;
737
738 if ((mark & pol->mark.m) == pol->mark.v &&
739 policy->priority == pol->priority)
740 return true;
741
742 return false;
743}
744
/* Insert @policy into the per-direction hash chains for its selector.
 *
 * A policy with the same type/if_id/selector/mark/security context is a
 * duplicate: with @excl it causes -EEXIST, otherwise it is replaced
 * (its queued packets migrate to the new policy).  Returns 0 on success.
 */
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
	struct net *net = xp_net(policy);
	struct xfrm_policy *pol;
	struct xfrm_policy *delpol;
	struct hlist_head *chain;
	struct hlist_node *newpos;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
	delpol = NULL;
	newpos = NULL;
	/* Walk the chain to find (a) a duplicate to replace and (b) the
	 * insertion point keeping the chain sorted by ascending priority. */
	hlist_for_each_entry(pol, chain, bydst) {
		if (pol->type == policy->type &&
		    pol->if_id == policy->if_id &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    xfrm_policy_mark_match(policy, pol) &&
		    xfrm_sec_ctx_match(pol->security, policy->security) &&
		    !WARN_ON(delpol)) {
			if (excl) {
				spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
				return -EEXIST;
			}
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			/* Still before our slot; remember last position. */
			newpos = &pol->bydst;
			continue;
		}
		if (delpol)
			break;
	}
	if (newpos)
		hlist_add_behind(&policy->bydst, newpos);
	else
		hlist_add_head(&policy->bydst, chain);
	__xfrm_policy_link(policy, dir);

	/* After previous checking, family can either be AF_INET or AF_INET6 */
	if (policy->family == AF_INET)
		rt_genid_bump_ipv4(net);
	else
		rt_genid_bump_ipv6(net);

	if (delpol) {
		/* Hand over queued packets before unlinking the old one. */
		xfrm_policy_requeue(delpol, policy);
		__xfrm_policy_unlink(delpol, dir);
	}
	/* Reuse the replaced policy's index, or allocate a fresh one. */
	policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
	hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
	policy->curlft.add_time = get_seconds();
	policy->curlft.use_time = 0;
	/* Arm the lifetime timer; if it was not pending it now holds a ref. */
	if (!mod_timer(&policy->timer, jiffies + HZ))
		xfrm_pol_hold(policy);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	if (delpol)
		xfrm_policy_kill(delpol);
	else if (xfrm_bydst_should_resize(net, dir, NULL))
		schedule_work(&net->xfrm.policy_hash_work);

	return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);
810
/* Look up a policy by selector and security context, optionally deleting
 * it.  On success returns the policy with a reference held; with @delete
 * the policy is also unlinked and killed (unless the security hook vetoes
 * the delete, in which case *err is set and the still-linked policy is
 * returned).  Returns NULL when no policy matches. */
struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id,
					  u8 type, int dir,
					  struct xfrm_selector *sel,
					  struct xfrm_sec_ctx *ctx, int delete,
					  int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;

	*err = 0;
	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = policy_hash_bysel(net, sel, sel->family, dir);
	ret = NULL;
	hlist_for_each_entry(pol, chain, bydst) {
		if (pol->type == type &&
		    pol->if_id == if_id &&
		    (mark & pol->mark.m) == pol->mark.v &&
		    !selector_cmp(sel, &pol->selector) &&
		    xfrm_sec_ctx_match(ctx, pol->security)) {
			xfrm_pol_hold(pol);
			if (delete) {
				/* LSM may refuse the delete; report the
				 * error but still return the held policy. */
				*err = security_xfrm_policy_delete(
								pol->security);
				if (*err) {
					spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
					return pol;
				}
				__xfrm_policy_unlink(pol, dir);
			}
			ret = pol;
			break;
		}
	}
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	/* Kill outside the lock; caller's reference keeps ret valid. */
	if (ret && delete)
		xfrm_policy_kill(ret);
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700851
/* Look up a policy by its index, optionally deleting it.  Mirrors
 * xfrm_policy_bysel_ctx() but walks the byidx hash instead.  The
 * direction is encoded in the index, so a mismatched @dir fails fast
 * with -ENOENT. */
struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id,
				     u8 type, int dir, u32 id, int delete,
				     int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;

	*err = -ENOENT;
	if (xfrm_policy_id2dir(id) != dir)
		return NULL;

	*err = 0;
	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = net->xfrm.policy_byidx + idx_hash(net, id);
	ret = NULL;
	hlist_for_each_entry(pol, chain, byidx) {
		if (pol->type == type && pol->index == id &&
		    pol->if_id == if_id &&
		    (mark & pol->mark.m) == pol->mark.v) {
			xfrm_pol_hold(pol);
			if (delete) {
				/* LSM may refuse the delete; report the
				 * error but still return the held policy. */
				*err = security_xfrm_policy_delete(
								pol->security);
				if (*err) {
					spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
					return pol;
				}
				__xfrm_policy_unlink(pol, dir);
			}
			ret = pol;
			break;
		}
	}
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	/* Kill outside the lock; caller's reference keeps ret valid. */
	if (ret && delete)
		xfrm_policy_kill(ret);
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_byid);
892
#ifdef CONFIG_SECURITY_NETWORK_XFRM
/* Pre-flight check for xfrm_policy_flush(): ask the LSM whether every
 * policy of @type (inexact chains and all bydst hash buckets, in every
 * direction) may be deleted.  Returns 0 if all deletes are permitted,
 * or the first LSM error (after auditing the refused policy).
 * Called with net->xfrm.xfrm_policy_lock held by the flush path. */
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
{
	int dir, err = 0;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy *pol;
		int i;

		/* Inexact (non-hashable selector) policies first. */
		hlist_for_each_entry(pol,
				     &net->xfrm.policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			err = security_xfrm_policy_delete(pol->security);
			if (err) {
				xfrm_audit_policy_delete(pol, 0, task_valid);
				return err;
			}
		}
		/* Then every bucket of the bydst hash table. */
		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
			hlist_for_each_entry(pol,
					     net->xfrm.policy_bydst[dir].table + i,
					     bydst) {
				if (pol->type != type)
					continue;
				err = security_xfrm_policy_delete(
							pol->security);
				if (err) {
					xfrm_audit_policy_delete(pol, 0,
								 task_valid);
					return err;
				}
			}
		}
	}
	return err;
}
#else
/* No LSM support built in: flushing is always permitted. */
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
{
	return 0;
}
#endif
938
/* Delete every policy of @type in @net.  Each unlink drops the policy
 * lock around the (potentially sleeping/audit) kill and then restarts
 * the chain walk from scratch, since the chain may have changed while
 * unlocked.  Returns 0, -ESRCH if nothing was flushed, or an LSM error
 * from the pre-flight check. */
int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
{
	int dir, err = 0, cnt = 0;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);

	/* Ask the LSM up front whether all deletes are allowed. */
	err = xfrm_policy_flush_secctx_check(net, type, task_valid);
	if (err)
		goto out;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy *pol;
		int i;

	again1:
		hlist_for_each_entry(pol,
				     &net->xfrm.policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			__xfrm_policy_unlink(pol, dir);
			/* Drop the lock for audit + kill, then restart the
			 * walk: the chain is stale once we unlock. */
			spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
			cnt++;

			xfrm_audit_policy_delete(pol, 1, task_valid);

			xfrm_policy_kill(pol);

			spin_lock_bh(&net->xfrm.xfrm_policy_lock);
			goto again1;
		}

		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
	again2:
			hlist_for_each_entry(pol,
					     net->xfrm.policy_bydst[dir].table + i,
					     bydst) {
				if (pol->type != type)
					continue;
				__xfrm_policy_unlink(pol, dir);
				spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
				cnt++;

				xfrm_audit_policy_delete(pol, 1, task_valid);
				xfrm_policy_kill(pol);

				spin_lock_bh(&net->xfrm.xfrm_policy_lock);
				goto again2;
			}
		}

	}
	if (!cnt)
		err = -ESRCH;
out:
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_flush);
997
/* Resumable iteration over all policies in @net, invoking @func for each
 * matching policy.  @walk carries the cursor: a sentinel entry threaded
 * into policy_all so the walk can be suspended (when @func returns
 * nonzero) and resumed later.  Returns 0 on completed walk, -ENOENT if
 * nothing was visited on the first pass, -EINVAL for a bad walk type,
 * or the error from @func. */
int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
		     int (*func)(struct xfrm_policy *, int, int, void*),
		     void *data)
{
	struct xfrm_policy *pol;
	struct xfrm_policy_walk_entry *x;
	int error = 0;

	if (walk->type >= XFRM_POLICY_TYPE_MAX &&
	    walk->type != XFRM_POLICY_TYPE_ANY)
		return -EINVAL;

	/* seq != 0 with an unlinked cursor means a prior walk finished. */
	if (list_empty(&walk->walk.all) && walk->seq != 0)
		return 0;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	if (list_empty(&walk->walk.all))
		/* Fresh walk: start from the head of the global list. */
		x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
	else
		/* Resume: continue just after the parked cursor. */
		x = list_first_entry(&walk->walk.all,
				     struct xfrm_policy_walk_entry, all);

	list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
		/* Skip dead entries (including other walkers' cursors). */
		if (x->dead)
			continue;
		pol = container_of(x, struct xfrm_policy, walk);
		if (walk->type != XFRM_POLICY_TYPE_ANY &&
		    walk->type != pol->type)
			continue;
		error = func(pol, xfrm_policy_id2dir(pol->index),
			     walk->seq, data);
		if (error) {
			/* Park the cursor right after this entry so a
			 * later call resumes from here. */
			list_move_tail(&walk->walk.all, &x->all);
			goto out;
		}
		walk->seq++;
	}
	if (walk->seq == 0) {
		error = -ENOENT;
		goto out;
	}
	/* Walk complete: unhook the cursor. */
	list_del_init(&walk->walk.all);
out:
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
	return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);
1045
Herbert Xu12a169e2008-10-01 07:03:24 -07001046void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
1047{
1048 INIT_LIST_HEAD(&walk->walk.all);
1049 walk->walk.dead = 1;
1050 walk->type = type;
1051 walk->seq = 0;
1052}
1053EXPORT_SYMBOL(xfrm_policy_walk_init);
1054
/* Tear down a walk cursor, unhooking it from @net's policy_all list
 * if the walk was suspended mid-iteration. */
void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
{
	/* Cursor not linked: walk already completed or never started. */
	if (list_empty(&walk->walk.all))
		return;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	list_del(&walk->walk.all);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_policy_walk_done);
1065
/*
 * Find policy to apply to this flow.
 *
 * Returns 0 if policy found, else an -errno (-ESRCH: no match;
 * other negative values come from the LSM lookup hook).
 */
static int xfrm_policy_match(const struct xfrm_policy *pol,
			     const struct flowi *fl,
			     u8 type, u16 family, int dir)
{
	const struct xfrm_selector *sel = &pol->selector;
	int ret = -ESRCH;
	bool match;

	/* Cheap field comparisons first; selector match is the
	 * expensive part. */
	if (pol->family != family ||
	    pol->if_id != fl->flowi_xfrm.if_id ||
	    (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
	    pol->type != type)
		return ret;

	match = xfrm_selector_match(sel, fl, family);
	if (match)
		/* Final say belongs to the security module. */
		ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
						  dir);

	return ret;
}
1092
/* RCU lookup of the best-priority policy of @type matching flow @fl.
 *
 * Checks the direct (hashed-selector) chain first, then the inexact
 * chain, keeping the lower-priority-value (i.e. higher precedence)
 * winner.  The seqcount guards against a concurrent hash resize: if the
 * tables changed under us, or the winner's refcount hit zero before we
 * could grab it, the whole lookup is retried.  Returns a referenced
 * policy, NULL, or ERR_PTR() on LSM error. */
static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
						     const struct flowi *fl,
						     u16 family, u8 dir)
{
	int err;
	struct xfrm_policy *pol, *ret;
	const xfrm_address_t *daddr, *saddr;
	struct hlist_head *chain;
	unsigned int sequence;
	u32 priority;

	daddr = xfrm_flowi_daddr(fl, family);
	saddr = xfrm_flowi_saddr(fl, family);
	if (unlikely(!daddr || !saddr))
		return NULL;

	rcu_read_lock();
 retry:
	/* Snapshot a consistent view of the hash tables; resize bumps
	 * xfrm_policy_hash_generation. */
	do {
		sequence = read_seqcount_begin(&xfrm_policy_hash_generation);
		chain = policy_hash_direct(net, daddr, saddr, family, dir);
	} while (read_seqcount_retry(&xfrm_policy_hash_generation, sequence));

	priority = ~0U;
	ret = NULL;
	hlist_for_each_entry_rcu(pol, chain, bydst) {
		err = xfrm_policy_match(pol, fl, type, family, dir);
		if (err) {
			if (err == -ESRCH)
				continue;
			else {
				ret = ERR_PTR(err);
				goto fail;
			}
		} else {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	/* Inexact policies may still outrank the direct-chain winner. */
	chain = &net->xfrm.policy_inexact[dir];
	hlist_for_each_entry_rcu(pol, chain, bydst) {
		if ((pol->priority >= priority) && ret)
			break;

		err = xfrm_policy_match(pol, fl, type, family, dir);
		if (err) {
			if (err == -ESRCH)
				continue;
			else {
				ret = ERR_PTR(err);
				goto fail;
			}
		} else {
			ret = pol;
			break;
		}
	}

	/* Tables changed while we walked them: start over. */
	if (read_seqcount_retry(&xfrm_policy_hash_generation, sequence))
		goto retry;

	/* Winner is being freed (refcount zero): start over. */
	if (ret && !xfrm_pol_hold_rcu(ret))
		goto retry;
fail:
	rcu_read_unlock();

	return ret;
}
1162
/* Policy lookup honouring sub-policies: with XFRM_SUB_POLICY enabled,
 * a matching SUB-type policy takes precedence over MAIN. */
static struct xfrm_policy *
xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_policy *pol;

	pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir);
	if (pol != NULL)
		return pol;
#endif
	return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);
}
1175
/* Look up the per-socket policy for direction @dir under RCU.
 *
 * Returns a referenced policy if the socket has one matching the flow
 * (family, selector, mark, if_id and LSM all agree), NULL otherwise,
 * or ERR_PTR() on LSM error.  If the policy's refcount drops to zero
 * while we examine it, the dereference is retried. */
static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
						 const struct flowi *fl, u16 family)
{
	struct xfrm_policy *pol;

	rcu_read_lock();
 again:
	pol = rcu_dereference(sk->sk_policy[dir]);
	if (pol != NULL) {
		bool match;
		int err = 0;

		if (pol->family != family) {
			pol = NULL;
			goto out;
		}

		match = xfrm_selector_match(&pol->selector, fl, family);
		if (match) {
			/* Socket mark and interface id must also agree. */
			if ((sk->sk_mark & pol->mark.m) != pol->mark.v ||
			    pol->if_id != fl->flowi_xfrm.if_id) {
				pol = NULL;
				goto out;
			}
			err = security_xfrm_policy_lookup(pol->security,
							  fl->flowi_secid,
							  dir);
			if (!err) {
				/* Policy being freed: re-read the pointer. */
				if (!xfrm_pol_hold_rcu(pol))
					goto again;
			} else if (err == -ESRCH) {
				pol = NULL;
			} else {
				pol = ERR_PTR(err);
			}
		} else
			pol = NULL;
	}
out:
	rcu_read_unlock();
	return pol;
}
1218
/* Add @pol to the global policy list, bump the per-direction count and
 * take a reference for the list's ownership.  Caller holds
 * net->xfrm.xfrm_policy_lock. */
static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
{
	struct net *net = xp_net(pol);

	list_add(&pol->walk.all, &net->xfrm.policy_all);
	net->xfrm.policy_count[dir]++;
	xfrm_pol_hold(pol);
}
1227
/* Remove @pol from the global list and (if hashed) from the bydst and
 * byidx hash chains.  Returns @pol on success so the caller can drop
 * the list's reference via xfrm_policy_kill(), or NULL if the policy
 * was already unlinked.  Caller holds net->xfrm.xfrm_policy_lock. */
static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir)
{
	struct net *net = xp_net(pol);

	if (list_empty(&pol->walk.all))
		return NULL;

	/* Socket policies are not hashed. */
	if (!hlist_unhashed(&pol->bydst)) {
		hlist_del_rcu(&pol->bydst);
		hlist_del(&pol->byidx);
	}

	list_del_init(&pol->walk.all);
	net->xfrm.policy_count[dir]--;

	return pol;
}
1247
/* Socket policies live in the count slots past XFRM_POLICY_MAX. */
static void xfrm_sk_policy_link(struct xfrm_policy *pol, int dir)
{
	__xfrm_policy_link(pol, XFRM_POLICY_MAX + dir);
}
1252
/* Counterpart of xfrm_sk_policy_link() using the socket-policy slots. */
static void xfrm_sk_policy_unlink(struct xfrm_policy *pol, int dir)
{
	__xfrm_policy_unlink(pol, XFRM_POLICY_MAX + dir);
}
1257
/* Unlink and kill @pol.  Returns 0 on success, -ENOENT if the policy
 * was already unlinked (e.g. by a concurrent flush). */
int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
{
	struct net *net = xp_net(pol);

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	pol = __xfrm_policy_unlink(pol, dir);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
	if (pol) {
		/* Kill outside the lock. */
		xfrm_policy_kill(pol);
		return 0;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(xfrm_policy_delete);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001272
1273int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
1274{
Lorenzo Colittibe8f8282017-11-20 19:26:02 +09001275 struct net *net = sock_net(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001276 struct xfrm_policy *old_pol;
1277
Masahide NAKAMURA4e81bb82006-08-23 22:43:30 -07001278#ifdef CONFIG_XFRM_SUB_POLICY
1279 if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
1280 return -EINVAL;
1281#endif
1282
Florian Westphal9d0380d2016-08-11 15:17:59 +02001283 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
Eric Dumazetd188ba82015-12-08 07:22:02 -08001284 old_pol = rcu_dereference_protected(sk->sk_policy[dir],
1285 lockdep_is_held(&net->xfrm.xfrm_policy_lock));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001286 if (pol) {
James Morris9d729f72007-03-04 16:12:44 -08001287 pol->curlft.add_time = get_seconds();
Fan Due682adf02013-11-07 17:47:48 +08001288 pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
Herbert Xu53c2e282014-11-13 17:09:49 +08001289 xfrm_sk_policy_link(pol, dir);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001290 }
Eric Dumazetd188ba82015-12-08 07:22:02 -08001291 rcu_assign_pointer(sk->sk_policy[dir], pol);
Steffen Klasserta0073fe2013-02-05 12:52:55 +01001292 if (old_pol) {
1293 if (pol)
1294 xfrm_policy_requeue(old_pol, pol);
1295
Timo Teräsea2dea92010-03-31 00:17:05 +00001296 /* Unlinking succeeds always. This is the only function
1297 * allowed to delete or replace socket policy.
1298 */
Herbert Xu53c2e282014-11-13 17:09:49 +08001299 xfrm_sk_policy_unlink(old_pol, dir);
Steffen Klasserta0073fe2013-02-05 12:52:55 +01001300 }
Florian Westphal9d0380d2016-08-11 15:17:59 +02001301 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001302
1303 if (old_pol) {
1304 xfrm_policy_kill(old_pol);
1305 }
1306 return 0;
1307}
1308
/* Duplicate a socket policy for a newly cloned socket (GFP_ATOMIC, so
 * usable from softirq context).  The clone is linked as a socket policy
 * for @dir; the local reference is dropped immediately since the list
 * link holds its own.  Returns NULL on allocation or LSM failure. */
static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
{
	struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
	struct net *net = xp_net(old);

	if (newp) {
		newp->selector = old->selector;
		if (security_xfrm_policy_clone(old->security,
					       &newp->security)) {
			kfree(newp);
			return NULL; /* ENOMEM */
		}
		newp->lft = old->lft;
		newp->curlft = old->curlft;
		newp->mark = old->mark;
		newp->if_id = old->if_id;
		newp->action = old->action;
		newp->flags = old->flags;
		newp->xfrm_nr = old->xfrm_nr;
		newp->index = old->index;
		newp->type = old->type;
		newp->family = old->family;
		memcpy(newp->xfrm_vec, old->xfrm_vec,
		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
		spin_lock_bh(&net->xfrm.xfrm_policy_lock);
		xfrm_sk_policy_link(newp, dir);
		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
		/* The list link took a reference; drop ours. */
		xfrm_pol_put(newp);
	}
	return newp;
}
1340
/* Clone the parent socket's per-socket IPsec policies onto a newly
 * accepted child socket.  Walks both directions of osk->sk_policy[]
 * (index 0/1, the per-socket in/out slots) under RCU and publishes a
 * clone into the same slot of @sk with rcu_assign_pointer().
 *
 * Returns 0 on success or -ENOMEM if a clone failed; on failure the
 * slots cloned so far remain installed on @sk.
 */
int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
{
	const struct xfrm_policy *p;
	struct xfrm_policy *np;
	int i, ret = 0;

	rcu_read_lock();
	for (i = 0; i < 2; i++) {
		p = rcu_dereference(osk->sk_policy[i]);
		if (p) {
			np = clone_policy(p, i);
			if (unlikely(!np)) {
				ret = -ENOMEM;
				break;
			}
			rcu_assign_pointer(sk->sk_policy[i], np);
		}
	}
	rcu_read_unlock();
	return ret;
}
1362
Patrick McHardya1e59ab2006-09-19 12:57:34 -07001363static int
David Ahern42a7b322015-08-10 16:58:11 -06001364xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
Lorenzo Colitti077fbac2017-08-11 02:11:33 +09001365 xfrm_address_t *remote, unsigned short family, u32 mark)
Patrick McHardya1e59ab2006-09-19 12:57:34 -07001366{
1367 int err;
Florian Westphal37b10382017-02-07 15:00:19 +01001368 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
Patrick McHardya1e59ab2006-09-19 12:57:34 -07001369
1370 if (unlikely(afinfo == NULL))
1371 return -EINVAL;
Lorenzo Colitti077fbac2017-08-11 02:11:33 +09001372 err = afinfo->get_saddr(net, oif, local, remote, mark);
Florian Westphalbdba9fe2017-02-07 15:00:18 +01001373 rcu_read_unlock();
Patrick McHardya1e59ab2006-09-19 12:57:34 -07001374 return err;
1375}
1376
/* Resolve list of templates for the flow, given policy. */

/* Walk one policy's template vector and find a matching xfrm_state for
 * each template, filling @xfrm[].  For tunnel/BEET templates the
 * endpoint addresses come from the template itself (looking up a local
 * saddr if the template leaves it wildcarded); resolved endpoints then
 * become the flow addresses for the next (inner) template.
 *
 * Returns the number of states placed in @xfrm[], or a negative errno.
 * Missing states for a *required* template yield -EAGAIN (larval
 * resolution pending) or -EINVAL (state in error); optional templates
 * are simply skipped.  On failure all acquired state refs are dropped.
 */
static int
xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
		      struct xfrm_state **xfrm, unsigned short family)
{
	struct net *net = xp_net(policy);
	int nx;
	int i, error;
	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
	xfrm_address_t tmp;

	for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
		struct xfrm_state *x;
		xfrm_address_t *remote = daddr;
		xfrm_address_t *local = saddr;
		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];

		if (tmpl->mode == XFRM_MODE_TUNNEL ||
		    tmpl->mode == XFRM_MODE_BEET) {
			/* Tunnel endpoints override the flow addresses. */
			remote = &tmpl->id.daddr;
			local = &tmpl->saddr;
			if (xfrm_addr_any(local, tmpl->encap_family)) {
				/* Wildcard local address: route toward the
				 * tunnel peer to pick a source address. */
				error = xfrm_get_saddr(net, fl->flowi_oif,
						       &tmp, remote,
						       tmpl->encap_family, 0);
				if (error)
					goto fail;
				local = &tmp;
			}
		}

		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);

		if (x && x->km.state == XFRM_STATE_VALID) {
			xfrm[nx++] = x;
			/* Inner templates see this template's endpoints. */
			daddr = remote;
			saddr = local;
			continue;
		}
		if (x) {
			error = (x->km.state == XFRM_STATE_ERROR ?
				 -EINVAL : -EAGAIN);
			xfrm_state_put(x);
		} else if (error == -ESRCH) {
			/* No state at all: report "try again" so the caller
			 * can queue packets while the KM acquires one. */
			error = -EAGAIN;
		}

		if (!tmpl->optional)
			goto fail;
	}
	return nx;

fail:
	/* Drop the references taken on the states resolved so far. */
	for (nx--; nx >= 0; nx--)
		xfrm_state_put(xfrm[nx]);
	return error;
}
1436
/* Resolve templates across all @npols policies (main + sub policy when
 * CONFIG_XFRM_SUB_POLICY is enabled) into @xfrm[].  With more than one
 * policy, states are first collected into a scratch array and then
 * sorted into outbound processing order via xfrm_state_sort().
 *
 * Returns the total number of resolved states, or a negative errno
 * (-ENOBUFS when the combined templates exceed XFRM_MAX_DEPTH).
 * On failure every acquired state reference is released.
 */
static int
xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
		  struct xfrm_state **xfrm, unsigned short family)
{
	struct xfrm_state *tp[XFRM_MAX_DEPTH];
	/* Single policy: resolve straight into the caller's array and
	 * skip the sorting pass. */
	struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
	int cnx = 0;
	int error;
	int ret;
	int i;

	for (i = 0; i < npols; i++) {
		if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
			error = -ENOBUFS;
			goto fail;
		}

		ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
		if (ret < 0) {
			error = ret;
			goto fail;
		} else
			cnx += ret;
	}

	/* found states are sorted for outbound processing */
	if (npols > 1)
		xfrm_state_sort(xfrm, tpp, cnx, family);

	return cnx;

 fail:
	for (cnx--; cnx >= 0; cnx--)
		xfrm_state_put(tpp[cnx]);
	return error;

}
1474
Florian Westphalf5e2bb42017-02-07 15:00:14 +01001475static int xfrm_get_tos(const struct flowi *fl, int family)
Herbert Xu25ee3282007-12-11 09:32:34 -08001476{
Florian Westphal37b10382017-02-07 15:00:19 +01001477 const struct xfrm_policy_afinfo *afinfo;
Xin Long143a4452018-02-17 15:16:22 +08001478 int tos;
Herbert Xu25ee3282007-12-11 09:32:34 -08001479
Florian Westphalf5e2bb42017-02-07 15:00:14 +01001480 afinfo = xfrm_policy_get_afinfo(family);
Xin Long143a4452018-02-17 15:16:22 +08001481 if (!afinfo)
1482 return 0;
1483
1484 tos = afinfo->get_tos(fl);
Herbert Xu25ee3282007-12-11 09:32:34 -08001485
Florian Westphalbdba9fe2017-02-07 15:00:18 +01001486 rcu_read_unlock();
Herbert Xu25ee3282007-12-11 09:32:34 -08001487
1488 return tos;
1489}
1490
/* Allocate one xfrm_dst node for a bundle, using the per-netns dst_ops
 * of the given address family.  The xfrm-specific tail of the structure
 * (everything past the embedded dst_entry) is zeroed.
 *
 * Returns the new node with one reference held, or ERR_PTR(-EINVAL)
 * for an unregistered family / ERR_PTR(-ENOBUFS) on allocation failure.
 * BUG()s on a family other than AF_INET/AF_INET6 — callers must have
 * validated the family already.
 */
static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
{
	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct dst_ops *dst_ops;
	struct xfrm_dst *xdst;

	if (!afinfo)
		return ERR_PTR(-EINVAL);

	switch (family) {
	case AF_INET:
		dst_ops = &net->xfrm.xfrm4_dst_ops;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		dst_ops = &net->xfrm.xfrm6_dst_ops;
		break;
#endif
	default:
		BUG();
	}
	xdst = dst_alloc(dst_ops, NULL, 1, DST_OBSOLETE_NONE, 0);

	if (likely(xdst)) {
		struct dst_entry *dst = &xdst->u.dst;

		/* dst_alloc() initialized the dst_entry part; clear the
		 * xfrm_dst-specific fields that follow it. */
		memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));
	} else
		xdst = ERR_PTR(-ENOBUFS);

	/* Pairs with the RCU read lock held by xfrm_policy_get_afinfo(). */
	rcu_read_unlock();

	return xdst;
}
1525
/* Let the address-family backend initialize the path/route fields of a
 * freshly built bundle head (@path), accounting for @nfheader_len bytes
 * of non-fragmentable headers.  Returns -EINVAL when no afinfo is
 * registered for the dst's family, otherwise the backend's result.
 */
static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
				 int nfheader_len)
{
	const struct xfrm_policy_afinfo *afinfo =
		xfrm_policy_get_afinfo(dst->ops->family);
	int err;

	if (!afinfo)
		return -EINVAL;

	err = afinfo->init_path(path, dst, nfheader_len);

	/* Drops the RCU read lock taken by xfrm_policy_get_afinfo(). */
	rcu_read_unlock();

	return err;
}
1542
Herbert Xu87c1e122010-03-02 02:51:56 +00001543static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
David S. Miller0c7b3ee2011-02-22 17:48:57 -08001544 const struct flowi *fl)
Herbert Xu25ee3282007-12-11 09:32:34 -08001545{
Florian Westphal37b10382017-02-07 15:00:19 +01001546 const struct xfrm_policy_afinfo *afinfo =
Herbert Xu25ee3282007-12-11 09:32:34 -08001547 xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
1548 int err;
1549
1550 if (!afinfo)
1551 return -EINVAL;
1552
Herbert Xu87c1e122010-03-02 02:51:56 +00001553 err = afinfo->fill_dst(xdst, dev, fl);
Herbert Xu25ee3282007-12-11 09:32:34 -08001554
Florian Westphalbdba9fe2017-02-07 15:00:18 +01001555 rcu_read_unlock();
Herbert Xu25ee3282007-12-11 09:32:34 -08001556
1557 return err;
1558}
1559
Timo Teräs80c802f2010-04-07 00:30:05 +00001560
/* Allocate chain of dst_entry's, attach known xfrm's, calculate
 * all the metrics... Shortly, bundle a bundle.
 */

/* Build the dst bundle for @nx resolved states: one xfrm_dst per state,
 * chained outer-to-inner, terminated by the plain routing @dst.  Also
 * records each node into @bundle[] for later PMTU bookkeeping, then
 * fills per-node header/trailer reservations and family-specific data.
 *
 * On success returns the bundle head (&xdst0->u.dst) holding the only
 * reference the caller receives.  On failure returns ERR_PTR(): the
 * remaining state references (xfrm[i..nx-1]) are dropped and any
 * partially built chain is torn down via dst_release_immediate().
 * The caller's references for xfrm[0..i-1] are consumed by the chain.
 */
static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
					    struct xfrm_state **xfrm,
					    struct xfrm_dst **bundle,
					    int nx,
					    const struct flowi *fl,
					    struct dst_entry *dst)
{
	struct net *net = xp_net(policy);
	unsigned long now = jiffies;
	struct net_device *dev;
	struct xfrm_mode *inner_mode;
	struct xfrm_dst *xdst_prev = NULL;
	struct xfrm_dst *xdst0 = NULL;	/* head (outermost) of the chain */
	int i = 0;
	int err;
	int header_len = 0;
	int nfheader_len = 0;
	int trailer_len = 0;
	int tos;
	int family = policy->selector.family;
	xfrm_address_t saddr, daddr;

	xfrm_flowi_addr_get(fl, &saddr, &daddr, family);

	tos = xfrm_get_tos(fl, family);

	/* Extra hold on the incoming route; released on error paths and
	 * otherwise consumed as xdst->route / chain tail. */
	dst_hold(dst);

	for (; i < nx; i++) {
		struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
		struct dst_entry *dst1 = &xdst->u.dst;

		err = PTR_ERR(xdst);
		if (IS_ERR(xdst)) {
			dst_release(dst);
			goto put_states;
		}

		bundle[i] = xdst;
		if (!xdst_prev)
			xdst0 = xdst;
		else
			/* Ref count is taken during xfrm_alloc_dst()
			 * No need to do dst_clone() on dst1
			 */
			xfrm_dst_set_child(xdst_prev, &xdst->u.dst);

		if (xfrm[i]->sel.family == AF_UNSPEC) {
			/* Wildcard selector: derive the inner mode from the
			 * current outer family's protocol. */
			inner_mode = xfrm_ip2inner_mode(xfrm[i],
							xfrm_af2proto(family));
			if (!inner_mode) {
				err = -EAFNOSUPPORT;
				dst_release(dst);
				goto put_states;
			}
		} else
			inner_mode = xfrm[i]->inner_mode;

		xdst->route = dst;
		dst_copy_metrics(dst1, dst);

		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
			__u32 mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);

			/* Tunnel-style mode: re-route toward the tunnel
			 * endpoints in the state's own family. */
			family = xfrm[i]->props.family;
			dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
					      &saddr, &daddr, family, mark);
			err = PTR_ERR(dst);
			if (IS_ERR(dst))
				goto put_states;
		} else
			dst_hold(dst);

		dst1->xfrm = xfrm[i];
		xdst->xfrm_genid = xfrm[i]->genid;

		dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
		dst1->flags |= DST_HOST;
		dst1->lastuse = now;

		dst1->input = dst_discard;
		dst1->output = inner_mode->afinfo->output;

		xdst_prev = xdst;

		header_len += xfrm[i]->props.header_len;
		if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
			nfheader_len += xfrm[i]->props.header_len;
		trailer_len += xfrm[i]->props.trailer_len;
	}

	/* Terminate the chain with the plain route and expose it as the
	 * bundle's path. */
	xfrm_dst_set_child(xdst_prev, dst);
	xdst0->path = dst;

	err = -ENODEV;
	dev = dst->dev;
	if (!dev)
		goto free_dst;

	xfrm_init_path(xdst0, dst, nfheader_len);
	xfrm_init_pmtu(bundle, nx);

	/* Second pass: per-node header/trailer budgets shrink as we move
	 * inward, since inner nodes don't pay for outer headers. */
	for (xdst_prev = xdst0; xdst_prev != (struct xfrm_dst *)dst;
	     xdst_prev = (struct xfrm_dst *) xfrm_dst_child(&xdst_prev->u.dst)) {
		err = xfrm_fill_dst(xdst_prev, dev, fl);
		if (err)
			goto free_dst;

		xdst_prev->u.dst.header_len = header_len;
		xdst_prev->u.dst.trailer_len = trailer_len;
		header_len -= xdst_prev->u.dst.xfrm->props.header_len;
		trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len;
	}

	return &xdst0->u.dst;

put_states:
	/* States from index i onward were never attached to the chain. */
	for (; i < nx; i++)
		xfrm_state_put(xfrm[i]);
free_dst:
	if (xdst0)
		dst_release_immediate(&xdst0->u.dst);

	return ERR_PTR(err);
}
1690
/* Normalize the policy lookup result in pols[]/num_pols and compute the
 * total template count in *num_xfrms.  When sub-policies are enabled
 * and pols[0] is an ALLOW sub-policy, also look up and append the
 * corresponding MAIN policy.  A non-ALLOW policy anywhere in the set
 * forces *num_xfrms = -1 (blocked flow marker for the callers).
 *
 * Returns 0, or a negative errno from a failed lookup; on error the
 * policy references gathered so far are dropped.
 */
static int xfrm_expand_policies(const struct flowi *fl, u16 family,
				struct xfrm_policy **pols,
				int *num_pols, int *num_xfrms)
{
	int i;

	if (*num_pols == 0 || !pols[0]) {
		*num_pols = 0;
		*num_xfrms = 0;
		return 0;
	}
	if (IS_ERR(pols[0]))
		return PTR_ERR(pols[0]);

	*num_xfrms = pols[0]->xfrm_nr;

#ifdef CONFIG_XFRM_SUB_POLICY
	if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
	    pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
		/* A sub-policy match is combined with the MAIN policy. */
		pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
						    XFRM_POLICY_TYPE_MAIN,
						    fl, family,
						    XFRM_POLICY_OUT);
		if (pols[1]) {
			if (IS_ERR(pols[1])) {
				xfrm_pols_put(pols, *num_pols);
				return PTR_ERR(pols[1]);
			}
			(*num_pols)++;
			(*num_xfrms) += pols[1]->xfrm_nr;
		}
	}
#endif
	for (i = 0; i < *num_pols; i++) {
		if (pols[i]->action != XFRM_POLICY_ALLOW) {
			*num_xfrms = -1;
			break;
		}
	}

	return 0;

}
1734
/* Install @xdst as this CPU's cached bundle and drop the reference held
 * by the previous occupant (@old may be NULL).  Callers must have
 * preemption/BH disabled so the per-cpu slot is stable.
 */
static void xfrm_last_dst_update(struct xfrm_dst *xdst, struct xfrm_dst *old)
{
	this_cpu_write(xfrm_last_dst, xdst);
	if (old)
		dst_release(&old->u.dst);
}
1741
/* Drop this CPU's cached bundle if it has become stale (fails
 * xfrm_bundle_ok()).  Must run on the owning CPU with BHs disabled;
 * see xfrm_pcpu_work_fn()/xfrm_policy_cache_flush() for the callers.
 */
static void __xfrm_pcpu_work_fn(void)
{
	struct xfrm_dst *old;

	old = this_cpu_read(xfrm_last_dst);
	if (old && !xfrm_bundle_ok(old))
		xfrm_last_dst_update(NULL, old);
}
1750
/* Work item scheduled per-CPU by xfrm_policy_cache_flush(): evict the
 * local stale bundle.  BH-disable + RCU bracket matches the locking
 * context the per-cpu cache is accessed under elsewhere in this file.
 */
static void xfrm_pcpu_work_fn(struct work_struct *work)
{
	local_bh_disable();
	rcu_read_lock();
	__xfrm_pcpu_work_fn();
	rcu_read_unlock();
	local_bh_enable();
}
1759
/* Flush stale entries from the per-cpu xfrm_last_dst bundle cache.
 *
 * Fast path: scan all CPUs; a stale entry belonging to the current CPU
 * is dropped directly, and if no other CPU holds a stale entry we are
 * done.  Slow path (may sleep): under get_online_cpus(), schedule
 * xfrm_pcpu_work_fn() on each online CPU that still holds a stale
 * bundle, and clear offline CPUs' slots directly — offline CPUs cannot
 * race with us on their per-cpu variable.
 */
void xfrm_policy_cache_flush(void)
{
	struct xfrm_dst *old;
	bool found = false;
	int cpu;

	might_sleep();

	local_bh_disable();
	rcu_read_lock();
	for_each_possible_cpu(cpu) {
		old = per_cpu(xfrm_last_dst, cpu);
		if (old && !xfrm_bundle_ok(old)) {
			if (smp_processor_id() == cpu) {
				/* Our own slot: safe to evict in place. */
				__xfrm_pcpu_work_fn();
				continue;
			}
			found = true;
			break;
		}
	}

	rcu_read_unlock();
	local_bh_enable();

	if (!found)
		return;

	/* Stale entries on remote CPUs: pin the CPU set and dispatch. */
	get_online_cpus();

	for_each_possible_cpu(cpu) {
		bool bundle_release;

		rcu_read_lock();
		old = per_cpu(xfrm_last_dst, cpu);
		bundle_release = old && !xfrm_bundle_ok(old);
		rcu_read_unlock();

		if (!bundle_release)
			continue;

		if (cpu_online(cpu)) {
			/* Let the owning CPU drop its own reference. */
			schedule_work_on(cpu, &xfrm_pcpu_work[cpu]);
			continue;
		}

		/* Offline CPU: nobody else touches its slot, re-check and
		 * clear it from here. */
		rcu_read_lock();
		old = per_cpu(xfrm_last_dst, cpu);
		if (old && !xfrm_bundle_ok(old)) {
			per_cpu(xfrm_last_dst, cpu) = NULL;
			dst_release(&old->u.dst);
		}
		rcu_read_unlock();
	}

	put_online_cpus();
}
1817
/* Check whether the cached bundle @xdst can be reused for the freshly
 * resolved state list @xfrm[0..num-1]: same state count, the chain's
 * nodes reference exactly these states in order, and the bundle is
 * still valid per xfrm_bundle_ok().
 */
static bool xfrm_xdst_can_reuse(struct xfrm_dst *xdst,
				struct xfrm_state * const xfrm[],
				int num)
{
	const struct dst_entry *dst = &xdst->u.dst;
	int i;

	if (xdst->num_xfrms != num)
		return false;

	/* Walk the chain outer-to-inner, matching states positionally. */
	for (i = 0; i < num; i++) {
		if (!dst || dst->xfrm != xfrm[i])
			return false;
		dst = xfrm_dst_child(dst);
	}

	return xfrm_bundle_ok(xdst);
}
1836
/* Resolve states for @pols and hand back a usable bundle: either the
 * per-cpu cached bundle (when it matches the same device, policies and
 * states) or a newly created one, which then replaces the cache entry.
 *
 * Returns the bundle, NULL (err == 0 from template resolution, i.e. no
 * states needed), or ERR_PTR().  On the cache-hit path the caller's
 * policy and state references are dropped here; on the create path the
 * state refs are consumed by the bundle and the policy refs remain the
 * caller's.  Caller must have BHs disabled (per-cpu cache access).
 */
static struct xfrm_dst *
xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
			       const struct flowi *fl, u16 family,
			       struct dst_entry *dst_orig)
{
	struct net *net = xp_net(pols[0]);
	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
	struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
	struct xfrm_dst *xdst, *old;
	struct dst_entry *dst;
	int err;

	/* Try to instantiate a bundle */
	err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
	if (err <= 0) {
		if (err != 0 && err != -EAGAIN)
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
		return ERR_PTR(err);
	}

	/* Cache hit: take a ref on the cached bundle and release the
	 * now-redundant policy/state references. */
	xdst = this_cpu_read(xfrm_last_dst);
	if (xdst &&
	    xdst->u.dst.dev == dst_orig->dev &&
	    xdst->num_pols == num_pols &&
	    memcmp(xdst->pols, pols,
		   sizeof(struct xfrm_policy *) * num_pols) == 0 &&
	    xfrm_xdst_can_reuse(xdst, xfrm, err)) {
		dst_hold(&xdst->u.dst);
		xfrm_pols_put(pols, num_pols);
		while (err > 0)
			xfrm_state_put(xfrm[--err]);
		return xdst;
	}

	old = xdst;

	dst = xfrm_bundle_create(pols[0], xfrm, bundle, err, fl, dst_orig);
	if (IS_ERR(dst)) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
		return ERR_CAST(dst);
	}

	xdst = (struct xfrm_dst *)dst;
	xdst->num_xfrms = err;	/* err > 0 here: count of resolved states */
	xdst->num_pols = num_pols;
	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
	xdst->policy_genid = atomic_read(&pols[0]->genid);

	/* One ref for the caller, one for the per-cpu cache slot. */
	atomic_set(&xdst->u.dst.__refcnt, 2);
	xfrm_last_dst_update(xdst, old);

	return xdst;
}
1890
/* Timer callback for a policy's hold queue: packets queued while larval
 * states were being negotiated.  Re-runs xfrm_lookup() on the head
 * packet; if the flow still resolves to a queueing (DST_XFRM_QUEUE)
 * route, the timer is re-armed with exponential backoff up to
 * XFRM_QUEUE_TMO_MAX, after which the queue is purged.  Once a real
 * route exists, the whole queue is drained and each packet is re-routed
 * and transmitted via dst_output().
 *
 * Reference protocol: the armed timer holds one policy reference, which
 * is dropped at "out"/"purge_queue"; re-arming takes a new one only if
 * mod_timer() says the timer was inactive.
 */
static void xfrm_policy_queue_process(struct timer_list *t)
{
	struct sk_buff *skb;
	struct sock *sk;
	struct dst_entry *dst;
	struct xfrm_policy *pol = from_timer(pol, t, polq.hold_timer);
	struct net *net = xp_net(pol);
	struct xfrm_policy_queue *pq = &pol->polq;
	struct flowi fl;
	struct sk_buff_head list;

	spin_lock(&pq->hold_queue.lock);
	skb = skb_peek(&pq->hold_queue);
	if (!skb) {
		spin_unlock(&pq->hold_queue.lock);
		goto out;
	}
	dst = skb_dst(skb);
	sk = skb->sk;
	/* Decode under the lock: the skb stays on the queue and may be
	 * freed by a concurrent purge once we drop it. */
	xfrm_decode_session(skb, &fl, dst->ops->family);
	spin_unlock(&pq->hold_queue.lock);

	/* Probe whether the flow now resolves to a real bundle. */
	dst_hold(xfrm_dst_path(dst));
	dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, XFRM_LOOKUP_QUEUE);
	if (IS_ERR(dst))
		goto purge_queue;

	if (dst->flags & DST_XFRM_QUEUE) {
		/* Still larval: back off and retry later. */
		dst_release(dst);

		if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
			goto purge_queue;

		pq->timeout = pq->timeout << 1;
		if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
			xfrm_pol_hold(pol);
		goto out;
	}

	dst_release(dst);

	/* Route exists: splice the queue out under the lock, then drain
	 * it without holding the lock. */
	__skb_queue_head_init(&list);

	spin_lock(&pq->hold_queue.lock);
	pq->timeout = 0;
	skb_queue_splice_init(&pq->hold_queue, &list);
	spin_unlock(&pq->hold_queue.lock);

	while (!skb_queue_empty(&list)) {
		skb = __skb_dequeue(&list);

		/* Re-route each packet individually; drop on failure. */
		xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
		dst_hold(xfrm_dst_path(skb_dst(skb)));
		dst = xfrm_lookup(net, xfrm_dst_path(skb_dst(skb)), &fl, skb->sk, 0);
		if (IS_ERR(dst)) {
			kfree_skb(skb);
			continue;
		}

		nf_reset(skb);
		skb_dst_drop(skb);
		skb_dst_set(skb, dst);

		dst_output(net, skb->sk, skb);
	}

out:
	xfrm_pol_put(pol);
	return;

purge_queue:
	pq->timeout = 0;
	skb_queue_purge(&pq->hold_queue);
	xfrm_pol_put(pol);
}
1966
/* dst->output handler for dummy (queueing) bundles: instead of
 * transmitting, park the skb on the policy's hold queue until the
 * larval states resolve and xfrm_policy_queue_process() drains it.
 *
 * Drops the packet when its fclone is busy (retransmittable skb we must
 * not hold onto) or when the queue exceeds XFRM_MAX_QUEUE_LEN
 * (returns -EAGAIN).  Re-arms the hold timer, keeping the earliest
 * expiry; timer ownership of a policy reference follows the same
 * del_timer()/mod_timer() protocol as xfrm_policy_queue_process().
 */
static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	unsigned long sched_next;
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
	struct xfrm_policy *pol = xdst->pols[0];
	struct xfrm_policy_queue *pq = &pol->polq;

	if (unlikely(skb_fclone_busy(sk, skb))) {
		kfree_skb(skb);
		return 0;
	}

	if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
		kfree_skb(skb);
		return -EAGAIN;
	}

	/* The skb outlives this call path; take a real dst reference. */
	skb_dst_force(skb);

	spin_lock_bh(&pq->hold_queue.lock);

	if (!pq->timeout)
		pq->timeout = XFRM_QUEUE_TMO_MIN;

	sched_next = jiffies + pq->timeout;

	/* If the timer was pending, keep the sooner of the two expiries
	 * and drop the reference the pending timer held. */
	if (del_timer(&pq->hold_timer)) {
		if (time_before(pq->hold_timer.expires, sched_next))
			sched_next = pq->hold_timer.expires;
		xfrm_pol_put(pol);
	}

	__skb_queue_tail(&pq->hold_queue, skb);
	if (!mod_timer(&pq->hold_timer, sched_next))
		xfrm_pol_hold(pol);

	spin_unlock_bh(&pq->hold_queue.lock);

	return 0;
}
2008
/* Build a "dummy" bundle used while states are still larval.  If the
 * caller did not ask for queueing (no XFRM_LOOKUP_QUEUE), larval-drop
 * is enabled, or there are no templates, an otherwise uninitialized
 * xfrm_dst is returned and the caller treats it as a plain placeholder.
 * Otherwise the node is wired as a DST_XFRM_QUEUE route over
 * xflo->dst_orig whose output hook (xdst_queue_output) parks packets on
 * the policy hold queue.
 *
 * Returns the xfrm_dst or ERR_PTR() on failure.
 */
static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
						 struct xfrm_flo *xflo,
						 const struct flowi *fl,
						 int num_xfrms,
						 u16 family)
{
	int err;
	struct net_device *dev;
	struct dst_entry *dst;
	struct dst_entry *dst1;
	struct xfrm_dst *xdst;

	xdst = xfrm_alloc_dst(net, family);
	if (IS_ERR(xdst))
		return xdst;

	if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
	    net->xfrm.sysctl_larval_drop ||
	    num_xfrms <= 0)
		return xdst;

	dst = xflo->dst_orig;
	dst1 = &xdst->u.dst;
	dst_hold(dst);
	xdst->route = dst;

	dst_copy_metrics(dst1, dst);

	dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
	/* DST_XFRM_QUEUE marks this as a queueing placeholder route. */
	dst1->flags |= DST_HOST | DST_XFRM_QUEUE;
	dst1->lastuse = jiffies;

	dst1->input = dst_discard;
	dst1->output = xdst_queue_output;

	/* Second hold: dst serves as both route (above) and child/path. */
	dst_hold(dst);
	xfrm_dst_set_child(xdst, dst);
	xdst->path = dst;

	xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);

	err = -ENODEV;
	dev = dst->dev;
	if (!dev)
		goto free_dst;

	err = xfrm_fill_dst(xdst, dev, fl);
	if (err)
		goto free_dst;

out:
	return xdst;

free_dst:
	/* Releasing the bundle head tears down the child reference too. */
	dst_release(dst1);
	xdst = ERR_PTR(err);
	goto out;
}
2067
/* Top-level bundle resolution for a flow: look up policies, expand them
 * (main + sub), and either build/reuse a real bundle or fall back to a
 * dummy (queueing) bundle when no states exist yet or the policy has no
 * transformations.
 *
 * Returns the bundle, NULL (no policy applies, or -EREMOTE from bundle
 * creation — MIGRATE-style redirection handled by the caller), or
 * ERR_PTR().  Policy references are transferred into the dummy bundle's
 * pols[] on that path, consumed by xfrm_resolve_and_create_bundle() on
 * the cache-hit path, and dropped here on errors.
 */
static struct xfrm_dst *
xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir, struct xfrm_flo *xflo)
{
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	int num_pols = 0, num_xfrms = 0, err;
	struct xfrm_dst *xdst;

	/* Resolve policies to use if we couldn't get them from
	 * previous cache entry */
	num_pols = 1;
	pols[0] = xfrm_policy_lookup(net, fl, family, dir);
	err = xfrm_expand_policies(fl, family, pols,
				   &num_pols, &num_xfrms);
	if (err < 0)
		goto inc_error;
	if (num_pols == 0)
		return NULL;
	if (num_xfrms <= 0)
		goto make_dummy_bundle;

	/* BHs off: xfrm_resolve_and_create_bundle() touches the per-cpu
	 * bundle cache. */
	local_bh_disable();
	xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
					      xflo->dst_orig);
	local_bh_enable();

	if (IS_ERR(xdst)) {
		err = PTR_ERR(xdst);
		if (err == -EREMOTE) {
			xfrm_pols_put(pols, num_pols);
			return NULL;
		}

		if (err != -EAGAIN)
			goto error;
		goto make_dummy_bundle;
	} else if (xdst == NULL) {
		num_xfrms = 0;
		goto make_dummy_bundle;
	}

	return xdst;

make_dummy_bundle:
	/* We found policies, but there's no bundles to instantiate:
	 * either because the policy blocks, has no transformations or
	 * we could not build template (no xfrm_states).*/
	xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
	if (IS_ERR(xdst)) {
		xfrm_pols_put(pols, num_pols);
		return ERR_CAST(xdst);
	}
	/* The dummy bundle takes over the policy references. */
	xdst->num_pols = num_pols;
	xdst->num_xfrms = num_xfrms;
	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);

	return xdst;

inc_error:
	XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
error:
	xfrm_pols_put(pols, num_pols);
	return ERR_PTR(err);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002131
David S. Miller2774c132011-03-01 14:59:04 -08002132static struct dst_entry *make_blackhole(struct net *net, u16 family,
2133 struct dst_entry *dst_orig)
2134{
Florian Westphal37b10382017-02-07 15:00:19 +01002135 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
David S. Miller2774c132011-03-01 14:59:04 -08002136 struct dst_entry *ret;
2137
2138 if (!afinfo) {
2139 dst_release(dst_orig);
Li RongQing433a1952012-09-17 22:40:10 +00002140 return ERR_PTR(-EINVAL);
David S. Miller2774c132011-03-01 14:59:04 -08002141 } else {
2142 ret = afinfo->blackhole_route(net, dst_orig);
2143 }
Florian Westphalbdba9fe2017-02-07 15:00:18 +01002144 rcu_read_unlock();
David S. Miller2774c132011-03-01 14:59:04 -08002145
2146 return ret;
2147}
2148
/* Main function: finds/creates a bundle for given flow.
 *
 * At the moment we eat a raw IP route. Mostly to speed up lookups
 * on interfaces with disabled IPsec.
 *
 * Returns the dst to use for @fl: an xfrm bundle, or @dst_orig itself
 * when the flow passes untransformed.  On error an ERR_PTR() is
 * returned and, unless XFRM_LOOKUP_KEEP_DST_REF is set, the reference
 * on @dst_orig is consumed.
 */
struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
			      const struct flowi *fl,
			      const struct sock *sk, int flags)
{
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	struct xfrm_dst *xdst;
	struct dst_entry *dst, *route;
	u16 family = dst_orig->ops->family;
	u8 dir = XFRM_POLICY_OUT;
	int i, err, num_pols, num_xfrms = 0, drop_pols = 0;

	dst = NULL;
	xdst = NULL;
	route = NULL;

	/* A per-socket policy, if present, is consulted first */
	sk = sk_const_to_full_sk(sk);
	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
		num_pols = 1;
		pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family);
		err = xfrm_expand_policies(fl, family, pols,
					   &num_pols, &num_xfrms);
		if (err < 0)
			goto dropdst;

		if (num_pols) {
			if (num_xfrms <= 0) {
				/* policy present but no transformations:
				 * fall through to no_transform handling */
				drop_pols = num_pols;
				goto no_transform;
			}

			local_bh_disable();
			xdst = xfrm_resolve_and_create_bundle(
					pols, num_pols, fl,
					family, dst_orig);
			local_bh_enable();

			if (IS_ERR(xdst)) {
				xfrm_pols_put(pols, num_pols);
				err = PTR_ERR(xdst);
				if (err == -EREMOTE)
					goto nopol;

				goto dropdst;
			} else if (xdst == NULL) {
				num_xfrms = 0;
				drop_pols = num_pols;
				goto no_transform;
			}

			route = xdst->route;
		}
	}

	if (xdst == NULL) {
		struct xfrm_flo xflo;

		xflo.dst_orig = dst_orig;
		xflo.flags = flags;

		/* To accelerate a bit... */
		if ((dst_orig->flags & DST_NOXFRM) ||
		    !net->xfrm.policy_count[XFRM_POLICY_OUT])
			goto nopol;

		xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo);
		if (xdst == NULL)
			goto nopol;
		if (IS_ERR(xdst)) {
			err = PTR_ERR(xdst);
			goto dropdst;
		}

		num_pols = xdst->num_pols;
		num_xfrms = xdst->num_xfrms;
		memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
		route = xdst->route;
	}

	dst = &xdst->u.dst;
	if (route == NULL && num_xfrms > 0) {
		/* The only case when xfrm_bundle_lookup() returns a
		 * bundle with null route, is when the template could
		 * not be resolved. It means policies are there, but
		 * bundle could not be created, since we don't yet
		 * have the xfrm_state's. We need to wait for KM to
		 * negotiate new SA's or bail out with error.*/
		if (net->xfrm.sysctl_larval_drop) {
			/* -EREMOTE tells xfrm_lookup_route() to install
			 * a blackhole route instead of failing hard */
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
			err = -EREMOTE;
			goto error;
		}

		err = -EAGAIN;

		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
		goto error;
	}

no_transform:
	if (num_pols == 0)
		goto nopol;

	if ((flags & XFRM_LOOKUP_ICMP) &&
	    !(pols[0]->flags & XFRM_POLICY_ICMP)) {
		err = -ENOENT;
		goto error;
	}

	for (i = 0; i < num_pols; i++)
		pols[i]->curlft.use_time = get_seconds();

	if (num_xfrms < 0) {
		/* Prohibit the flow */
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
		err = -EPERM;
		goto error;
	} else if (num_xfrms > 0) {
		/* Flow transformed */
		dst_release(dst_orig);
	} else {
		/* Flow passes untransformed */
		dst_release(dst);
		dst = dst_orig;
	}
ok:
	xfrm_pols_put(pols, drop_pols);
	if (dst && dst->xfrm &&
	    dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
		dst->flags |= DST_XFRM_TUNNEL;
	return dst;

nopol:
	if (!(flags & XFRM_LOOKUP_ICMP)) {
		dst = dst_orig;
		goto ok;
	}
	err = -ENOENT;
error:
	dst_release(dst);
dropdst:
	if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
		dst_release(dst_orig);
	xfrm_pols_put(pols, drop_pols);
	return ERR_PTR(err);
}
EXPORT_SYMBOL(xfrm_lookup);
2300
Steffen Klassertf92ee612014-09-16 10:08:40 +02002301/* Callers of xfrm_lookup_route() must ensure a call to dst_output().
2302 * Otherwise we may send out blackholed packets.
2303 */
2304struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
2305 const struct flowi *fl,
Eric Dumazet6f9c9612015-09-25 07:39:10 -07002306 const struct sock *sk, int flags)
Steffen Klassertf92ee612014-09-16 10:08:40 +02002307{
Steffen Klassertb8c203b2014-09-16 10:08:49 +02002308 struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
huaibin Wangac37e252015-02-11 18:10:36 +01002309 flags | XFRM_LOOKUP_QUEUE |
2310 XFRM_LOOKUP_KEEP_DST_REF);
Steffen Klassertf92ee612014-09-16 10:08:40 +02002311
2312 if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE)
2313 return make_blackhole(net, dst_orig->ops->family, dst_orig);
2314
2315 return dst;
2316}
2317EXPORT_SYMBOL(xfrm_lookup_route);
2318
Masahide NAKAMURAdf0ba922006-08-23 20:41:00 -07002319static inline int
David S. Miller8f029de2011-02-22 17:59:59 -08002320xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
Masahide NAKAMURAdf0ba922006-08-23 20:41:00 -07002321{
2322 struct xfrm_state *x;
Masahide NAKAMURAdf0ba922006-08-23 20:41:00 -07002323
2324 if (!skb->sp || idx < 0 || idx >= skb->sp->len)
2325 return 0;
2326 x = skb->sp->xvec[idx];
2327 if (!x->type->reject)
2328 return 0;
Herbert Xu1ecafed2007-10-09 13:24:07 -07002329 return x->type->reject(x, skb, fl);
Masahide NAKAMURAdf0ba922006-08-23 20:41:00 -07002330}
2331
Linus Torvalds1da177e2005-04-16 15:20:36 -07002332/* When skb is transformed back to its "native" form, we have to
2333 * check policy restrictions. At the moment we make this in maximally
2334 * stupid way. Shame on me. :-) Of course, connected sockets must
2335 * have policy cached at them.
2336 */
2337
2338static inline int
David S. Miller7db454b2011-02-24 01:43:01 -05002339xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002340 unsigned short family)
2341{
2342 if (xfrm_state_kern(x))
Kazunori MIYAZAWA928ba412007-02-13 12:57:16 -08002343 return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002344 return x->id.proto == tmpl->id.proto &&
2345 (x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
2346 (x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
2347 x->props.mode == tmpl->mode &&
Herbert Xuc5d18e92008-04-22 00:46:42 -07002348 (tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
Masahide NAKAMURAf3bd4842006-08-23 18:00:48 -07002349 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
Masahide NAKAMURA7e49e6d2006-09-22 15:05:15 -07002350 !(x->props.mode != XFRM_MODE_TRANSPORT &&
2351 xfrm_state_addr_cmp(tmpl, x, family));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002352}
2353
/*
 * 0 or more than 0 is returned when validation is succeeded (either bypass
 * because of optional transport mode, or next index of the matched secpath
 * state with the template).
 * -1 is returned when no matching template is found.
 * Otherwise "-2 - errored_index" is returned.
 */
static inline int
xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
	       unsigned short family)
{
	int idx = start;

	if (tmpl->optional) {
		/* optional transport-mode templates may be skipped outright */
		if (tmpl->mode == XFRM_MODE_TRANSPORT)
			return start;
	} else
		start = -1;	/* mandatory: "no match" until proven otherwise */
	for (; idx < sp->len; idx++) {
		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
			return ++idx;	/* matched: resume scan after this state */
		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
			/* a non-transport state cannot be skipped over;
			 * encode its index as "-2 - idx" (see header) */
			if (start == -1)
				start = -2-idx;
			break;
		}
	}
	return start;
}
2383
/* Decode @skb into the flow key @fl for @family (reversed direction
 * when @reverse is set), also filling in the xfrm interface id and the
 * LSM security id.  Returns 0 or a negative errno.
 */
int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
			  unsigned int family, int reverse)
{
	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	const struct xfrm_if_cb *ifcb = xfrm_if_get_cb();
	struct xfrm_if *xi;
	int err;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	afinfo->decode_session(skb, fl, reverse);

	/* map the packet onto an xfrm interface id, if a callback is set */
	if (ifcb) {
		xi = ifcb->decode_session(skb);
		if (xi)
			fl->flowi_xfrm.if_id = xi->p.if_id;
	}

	err = security_xfrm_decode_session(skb, &fl->flowi_secid);
	/* NOTE(review): this pairs with an rcu_read_lock taken inside
	 * xfrm_policy_get_afinfo() on success — helper not visible in
	 * this chunk; the NULL branch above returns without unlocking,
	 * matching make_blackhole()'s convention. */
	rcu_read_unlock();
	return err;
}
EXPORT_SYMBOL(__xfrm_decode_session);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002407
David S. Miller9a7386e2011-02-24 01:44:12 -05002408static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002409{
2410 for (; k < sp->len; k++) {
Masahide NAKAMURAdf0ba922006-08-23 20:41:00 -07002411 if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
James Morrisd1d9fac2006-09-01 00:32:12 -07002412 *idxp = k;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002413 return 1;
Masahide NAKAMURAdf0ba922006-08-23 20:41:00 -07002414 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002415 }
2416
2417 return 0;
2418}
2419
/* Inbound policy check: verify that the states the packet actually
 * used (its secpath) satisfy the policies applicable in direction
 * @dir.  Returns 1 when the packet is acceptable, 0 to drop it.
 */
int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
			unsigned short family)
{
	struct net *net = dev_net(skb->dev);
	struct xfrm_policy *pol;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	int npols = 0;
	int xfrm_nr;
	int pi;
	int reverse;
	struct flowi fl;
	int xerr_idx = -1;

	/* @dir may carry the "reverse decode" flag above the dir bits */
	reverse = dir & ~XFRM_POLICY_MASK;
	dir &= XFRM_POLICY_MASK;

	if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
		return 0;
	}

	nf_nat_decode_session(skb, &fl, family);

	/* First, check used SA against their selectors. */
	if (skb->sp) {
		int i;

		for (i = skb->sp->len-1; i >= 0; i--) {
			struct xfrm_state *x = skb->sp->xvec[i];
			if (!xfrm_selector_match(&x->sel, &fl, family)) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
				return 0;
			}
		}
	}

	/* Per-socket policy takes precedence over the global lookup */
	pol = NULL;
	sk = sk_to_full_sk(sk);
	if (sk && sk->sk_policy[dir]) {
		pol = xfrm_sk_policy_lookup(sk, dir, &fl, family);
		if (IS_ERR(pol)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
			return 0;
		}
	}

	if (!pol)
		pol = xfrm_policy_lookup(net, &fl, family, dir);

	if (IS_ERR(pol)) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
		return 0;
	}

	if (!pol) {
		/* No policy: only acceptable when the packet used nothing
		 * stronger than transport-mode states */
		if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
			xfrm_secpath_reject(xerr_idx, skb, &fl);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
			return 0;
		}
		return 1;
	}

	pol->curlft.use_time = get_seconds();

	pols[0] = pol;
	npols++;
#ifdef CONFIG_XFRM_SUB_POLICY
	/* A sub-type policy is complemented by the matching main-type one */
	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
		pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
						    &fl, family,
						    XFRM_POLICY_IN);
		if (pols[1]) {
			if (IS_ERR(pols[1])) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
				return 0;
			}
			pols[1]->curlft.use_time = get_seconds();
			npols++;
		}
	}
#endif

	if (pol->action == XFRM_POLICY_ALLOW) {
		struct sec_path *sp;
		static struct sec_path dummy;
		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl **tpp = tp;
		int ti = 0;
		int i, k;

		if ((sp = skb->sp) == NULL)
			sp = &dummy;	/* empty secpath stand-in */

		/* Gather the templates of every applicable ALLOW policy */
		for (pi = 0; pi < npols; pi++) {
			if (pols[pi] != pol &&
			    pols[pi]->action != XFRM_POLICY_ALLOW) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
				goto reject;
			}
			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
				goto reject_error;
			}
			for (i = 0; i < pols[pi]->xfrm_nr; i++)
				tpp[ti++] = &pols[pi]->xfrm_vec[i];
		}
		xfrm_nr = ti;
		if (npols > 1) {
			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family, net);
			tpp = stp;
		}

		/* For each tunnel xfrm, find the first matching tmpl.
		 * For each tmpl before that, find corresponding xfrm.
		 * Order is _important_. Later we will implement
		 * some barriers, but at the moment barriers
		 * are implied between each two transformations.
		 */
		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
			k = xfrm_policy_ok(tpp[i], sp, k, family);
			if (k < 0) {
				if (k < -1)
					/* "-2 - errored_index" returned */
					xerr_idx = -(2+k);
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
				goto reject;
			}
		}

		/* Leftover non-transport states not covered by any
		 * template also fail the check */
		if (secpath_has_nontransport(sp, k, &xerr_idx)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
			goto reject;
		}

		xfrm_pols_put(pols, npols);
		return 1;
	}
	XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);

reject:
	xfrm_secpath_reject(xerr_idx, skb, &fl);
reject_error:
	xfrm_pols_put(pols, npols);
	return 0;
}
EXPORT_SYMBOL(__xfrm_policy_check);
2568
2569int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
2570{
Alexey Dobriyan99a66652008-11-25 17:36:13 -08002571 struct net *net = dev_net(skb->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002572 struct flowi fl;
Eric Dumazetadf30902009-06-02 05:19:30 +00002573 struct dst_entry *dst;
Eric Dumazet73137142011-03-15 15:26:43 -07002574 int res = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002575
Masahide NAKAMURA0aa64772007-12-20 20:43:36 -08002576 if (xfrm_decode_session(skb, &fl, family) < 0) {
jamal72032fd2010-02-18 03:35:07 +00002577 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002578 return 0;
Masahide NAKAMURA0aa64772007-12-20 20:43:36 -08002579 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002580
Eric Dumazetfafeeb62010-06-01 10:04:49 +00002581 skb_dst_force(skb);
Eric Dumazetadf30902009-06-02 05:19:30 +00002582
Steffen Klassertb8c203b2014-09-16 10:08:49 +02002583 dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
David S. Miller452edd52011-03-02 13:27:41 -08002584 if (IS_ERR(dst)) {
Eric Dumazet73137142011-03-15 15:26:43 -07002585 res = 0;
David S. Miller452edd52011-03-02 13:27:41 -08002586 dst = NULL;
2587 }
Eric Dumazetadf30902009-06-02 05:19:30 +00002588 skb_dst_set(skb, dst);
2589 return res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002590}
2591EXPORT_SYMBOL(__xfrm_route_forward);
2592
/* Optimize later using cookies and generation ids. */

static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
{
	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
	 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
	 * get validated by dst_ops->check on every use.  We do this
	 * because when a normal route referenced by an XFRM dst is
	 * obsoleted we do not go looking around for all parent
	 * referencing XFRM dsts so that we can invalidate them.  It
	 * is just too much work.  Instead we make the checks here on
	 * every use.  For example:
	 *
	 *	XFRM dst A --> IPv4 dst X
	 *
	 * X is the "xdst->route" of A (X is also the "dst->path" of A
	 * in this example).  If X is marked obsolete, "A" will not
	 * notice.  That's what we are validating here via the
	 * stale_bundle() check.
	 *
	 * When a dst is removed from the fib tree, DST_OBSOLETE_DEAD will
	 * be marked on it.
	 * This will force stale_bundle() to fail on any xdst bundle with
	 * this dst linked in it.
	 */
	if (dst->obsolete < 0 && !stale_bundle(dst))
		return dst;

	/* stale or not force-checked: NULL tells the caller to
	 * re-resolve.  (@cookie is unused by this implementation.) */
	return NULL;
}
2623
/* A bundle is stale when xfrm_bundle_ok() no longer validates it
 * (e.g. an invalid state or a failed dst_check on a member route). */
static int stale_bundle(struct dst_entry *dst)
{
	return !xfrm_bundle_ok((struct xfrm_dst *)dst);
}
2628
/* @dev is going down: walk the bundle below @dst and re-point every
 * xfrm child dst still bound to @dev at the netns loopback device,
 * transferring the device reference as we go. */
void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
	while ((dst = xfrm_dst_child(dst)) && dst->xfrm && dst->dev == dev) {
		dst->dev = dev_net(dev)->loopback_dev;
		dev_hold(dst->dev);	/* hold loopback before releasing @dev */
		dev_put(dev);
	}
}
EXPORT_SYMBOL(xfrm_dst_ifdown);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002638
/* Intentionally empty; presumably installed as a dst_ops callback
 * (wiring not visible in this chunk). */
static void xfrm_link_failure(struct sk_buff *skb)
{
	/* Impossible. Such dst must be popped before reaches point of failure. */
}
2643
2644static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
2645{
2646 if (dst) {
2647 if (dst->obsolete) {
2648 dst_release(dst);
2649 dst = NULL;
2650 }
2651 }
2652 return dst;
2653}
2654
/* Initialize the cached child/route MTUs of each xfrm_dst in @bundle
 * (@nr entries, iterated innermost-first) and set each dst's RTAX_MTU
 * metric to the minimum of the state-adjusted child MTU and the
 * underlying route's MTU. */
static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr)
{
	while (nr--) {
		struct xfrm_dst *xdst = bundle[nr];
		u32 pmtu, route_mtu_cached;
		struct dst_entry *dst;

		dst = &xdst->u.dst;
		/* MTU as seen through the inner (child) dst */
		pmtu = dst_mtu(xfrm_dst_child(dst));
		xdst->child_mtu_cached = pmtu;

		/* let the attached state adjust the usable MTU */
		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);

		route_mtu_cached = dst_mtu(xdst->route);
		xdst->route_mtu_cached = route_mtu_cached;

		/* effective MTU is the smaller of the two */
		if (pmtu > route_mtu_cached)
			pmtu = route_mtu_cached;

		dst_metric_set(dst, RTAX_MTU, pmtu);
	}
}
2677
Linus Torvalds1da177e2005-04-16 15:20:36 -07002678/* Check that the bundle accepts the flow and its components are
2679 * still valid.
2680 */
2681
Steffen Klassert12fdb4d2011-06-29 23:18:20 +00002682static int xfrm_bundle_ok(struct xfrm_dst *first)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002683{
David Miller54920932017-11-28 15:41:01 -05002684 struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002685 struct dst_entry *dst = &first->u.dst;
David Miller54920932017-11-28 15:41:01 -05002686 struct xfrm_dst *xdst;
2687 int start_from, nr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002688 u32 mtu;
2689
David Miller0f6c4802017-11-28 15:40:46 -05002690 if (!dst_check(xfrm_dst_path(dst), ((struct xfrm_dst *)dst)->path_cookie) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07002691 (dst->dev && !netif_running(dst->dev)))
2692 return 0;
2693
Steffen Klasserta0073fe2013-02-05 12:52:55 +01002694 if (dst->flags & DST_XFRM_QUEUE)
2695 return 1;
2696
David Miller54920932017-11-28 15:41:01 -05002697 start_from = nr = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002698 do {
2699 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
2700
Linus Torvalds1da177e2005-04-16 15:20:36 -07002701 if (dst->xfrm->km.state != XFRM_STATE_VALID)
2702 return 0;
Timo Teräs80c802f2010-04-07 00:30:05 +00002703 if (xdst->xfrm_genid != dst->xfrm->genid)
2704 return 0;
Timo Teräsb1312c82010-06-24 14:35:00 -07002705 if (xdst->num_pols > 0 &&
2706 xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
David S. Miller9d4a7062006-08-24 03:18:09 -07002707 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002708
David Miller54920932017-11-28 15:41:01 -05002709 bundle[nr++] = xdst;
2710
David Millerb92cf4a2017-11-28 15:40:22 -05002711 mtu = dst_mtu(xfrm_dst_child(dst));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002712 if (xdst->child_mtu_cached != mtu) {
David Miller54920932017-11-28 15:41:01 -05002713 start_from = nr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002714 xdst->child_mtu_cached = mtu;
2715 }
2716
Hideaki YOSHIFUJI92d63de2005-05-26 12:58:04 -07002717 if (!dst_check(xdst->route, xdst->route_cookie))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002718 return 0;
2719 mtu = dst_mtu(xdst->route);
2720 if (xdst->route_mtu_cached != mtu) {
David Miller54920932017-11-28 15:41:01 -05002721 start_from = nr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002722 xdst->route_mtu_cached = mtu;
2723 }
2724
David Millerb92cf4a2017-11-28 15:40:22 -05002725 dst = xfrm_dst_child(dst);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002726 } while (dst->xfrm);
2727
David Miller54920932017-11-28 15:41:01 -05002728 if (likely(!start_from))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002729 return 1;
2730
David Miller54920932017-11-28 15:41:01 -05002731 xdst = bundle[start_from - 1];
2732 mtu = xdst->child_mtu_cached;
2733 while (start_from--) {
2734 dst = &xdst->u.dst;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002735
2736 mtu = xfrm_state_mtu(dst->xfrm, mtu);
David Miller54920932017-11-28 15:41:01 -05002737 if (mtu > xdst->route_mtu_cached)
2738 mtu = xdst->route_mtu_cached;
David S. Millerdefb3512010-12-08 21:16:57 -08002739 dst_metric_set(dst, RTAX_MTU, mtu);
David Miller54920932017-11-28 15:41:01 -05002740 if (!start_from)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002741 break;
2742
David Miller54920932017-11-28 15:41:01 -05002743 xdst = bundle[start_from - 1];
2744 xdst->child_mtu_cached = mtu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002745 }
2746
2747 return 1;
2748}
2749
/* dst_ops hook: advertise the MSS of the underlying (non-xfrm) path. */
static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
{
	const struct dst_entry *path = xfrm_dst_path(dst);

	return dst_metric_advmss(path);
}
2754
Steffen Klassertebb762f2011-11-23 02:12:51 +00002755static unsigned int xfrm_mtu(const struct dst_entry *dst)
David S. Millerd33e4552010-12-14 13:01:14 -08002756{
Steffen Klassert618f9bc2011-11-23 02:13:31 +00002757 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
2758
David Miller0f6c4802017-11-28 15:40:46 -05002759 return mtu ? : dst_mtu(xfrm_dst_path(dst));
David S. Millerd33e4552010-12-14 13:01:14 -08002760}
2761
/* Walk the xfrm_dst chain towards the path dst and return the address
 * neighbour resolution should target: for non-transport states this is
 * the state's care-of address (XFRM_TYPE_REMOTE_COADDR) or its tunnel
 * endpoint id.daddr; with XFRM_TYPE_LOCAL_COADDR, or in transport mode,
 * the current daddr is kept unchanged.
 */
static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
					const void *daddr)
{
	while (dst->xfrm) {
		const struct xfrm_state *xfrm = dst->xfrm;

		/* Advance before the checks so the loop always terminates
		 * at the first dst without an attached state.
		 */
		dst = xfrm_dst_child(dst);

		/* Transport mode does not rewrite the IP addresses. */
		if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
			continue;
		if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR)
			daddr = xfrm->coaddr;
		else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
			daddr = &xfrm->id.daddr;
	}
	return daddr;
}
2779
2780static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
2781 struct sk_buff *skb,
2782 const void *daddr)
2783{
David Miller0f6c4802017-11-28 15:40:46 -05002784 const struct dst_entry *path = xfrm_dst_path(dst);
Julian Anastasov1ecc9ad2017-02-25 17:57:43 +02002785
2786 if (!skb)
2787 daddr = xfrm_get_dst_nexthop(dst, daddr);
2788 return path->ops->neigh_lookup(path, skb, daddr);
2789}
2790
2791static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr)
2792{
David Miller0f6c4802017-11-28 15:40:46 -05002793 const struct dst_entry *path = xfrm_dst_path(dst);
Julian Anastasov1ecc9ad2017-02-25 17:57:43 +02002794
2795 daddr = xfrm_get_dst_nexthop(dst, daddr);
Julian Anastasov63fca652017-02-06 23:14:15 +02002796 path->ops->confirm_neigh(path, daddr);
2797}
2798
/* Register an address family's policy afinfo (called by the IPv4/IPv6
 * xfrm modules).  Fills any dst_ops hooks the caller left NULL with the
 * generic xfrm implementations, then publishes the afinfo for RCU
 * readers.  Returns 0, -EAFNOSUPPORT for an out-of-range family, or
 * -EEXIST if the slot is already taken.
 */
int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family)
{
	int err = 0;

	if (WARN_ON(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
		return -EAFNOSUPPORT;

	spin_lock(&xfrm_policy_afinfo_lock);
	if (unlikely(xfrm_policy_afinfo[family] != NULL))
		err = -EEXIST;
	else {
		struct dst_ops *dst_ops = afinfo->dst_ops;
		/* Install defaults only for hooks the af module did not set. */
		if (likely(dst_ops->kmem_cachep == NULL))
			dst_ops->kmem_cachep = xfrm_dst_cache;
		if (likely(dst_ops->check == NULL))
			dst_ops->check = xfrm_dst_check;
		if (likely(dst_ops->default_advmss == NULL))
			dst_ops->default_advmss = xfrm_default_advmss;
		if (likely(dst_ops->mtu == NULL))
			dst_ops->mtu = xfrm_mtu;
		if (likely(dst_ops->negative_advice == NULL))
			dst_ops->negative_advice = xfrm_negative_advice;
		if (likely(dst_ops->link_failure == NULL))
			dst_ops->link_failure = xfrm_link_failure;
		if (likely(dst_ops->neigh_lookup == NULL))
			dst_ops->neigh_lookup = xfrm_neigh_lookup;
		if (likely(!dst_ops->confirm_neigh))
			dst_ops->confirm_neigh = xfrm_confirm_neigh;
		/* Publish only after dst_ops is fully initialized, so RCU
		 * readers never see a half-filled afinfo.
		 */
		rcu_assign_pointer(xfrm_policy_afinfo[family], afinfo);
	}
	spin_unlock(&xfrm_policy_afinfo_lock);

	return err;
}
EXPORT_SYMBOL(xfrm_policy_register_afinfo);
2834
/* Unregister an address family's policy afinfo.  Clears the published
 * slot, waits for RCU readers to drain, then strips the generic hooks
 * that xfrm_policy_register_afinfo() may have installed in the caller's
 * dst_ops.
 */
void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo)
{
	struct dst_ops *dst_ops = afinfo->dst_ops;
	int i;

	for (i = 0; i < ARRAY_SIZE(xfrm_policy_afinfo); i++) {
		if (xfrm_policy_afinfo[i] != afinfo)
			continue;
		RCU_INIT_POINTER(xfrm_policy_afinfo[i], NULL);
		break;
	}

	/* Ensure no RCU reader still dereferences afinfo or its dst_ops
	 * before the hooks below are reset.
	 */
	synchronize_rcu();

	dst_ops->kmem_cachep = NULL;
	dst_ops->check = NULL;
	dst_ops->negative_advice = NULL;
	dst_ops->link_failure = NULL;
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
2855
/* Publish the xfrm interface callbacks.  The spinlock serializes
 * writers; lookups use RCU, hence the rcu_assign_pointer().
 */
void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb)
{
	spin_lock(&xfrm_if_cb_lock);
	rcu_assign_pointer(xfrm_if_cb, ifcb);
	spin_unlock(&xfrm_if_cb_lock);
}
EXPORT_SYMBOL(xfrm_if_register_cb);
2863
/* Clear the xfrm interface callbacks and wait for in-flight RCU
 * readers, so the caller may safely free the callback structure
 * afterwards.
 */
void xfrm_if_unregister_cb(void)
{
	RCU_INIT_POINTER(xfrm_if_cb, NULL);
	synchronize_rcu();
}
EXPORT_SYMBOL(xfrm_if_unregister_cb);
2870
#ifdef CONFIG_XFRM_STATISTICS
/* Allocate the per-cpu xfrm MIB counters for this namespace and
 * register the proc interface; frees the counters again if proc
 * registration fails.  Returns 0 or a negative errno.
 */
static int __net_init xfrm_statistics_init(struct net *net)
{
	int rv;
	net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
	if (!net->mib.xfrm_statistics)
		return -ENOMEM;
	rv = xfrm_proc_init(net);
	if (rv < 0)
		free_percpu(net->mib.xfrm_statistics);
	return rv;
}

/* Tear down the proc interface and free the per-cpu counters. */
static void xfrm_statistics_fini(struct net *net)
{
	xfrm_proc_fini(net);
	free_percpu(net->mib.xfrm_statistics);
}
#else
/* Statistics compiled out: provide no-op stubs with the same
 * signatures so callers need no #ifdefs.
 */
static int __net_init xfrm_statistics_init(struct net *net)
{
	return 0;
}

static void xfrm_statistics_fini(struct net *net)
{
}
#endif
2899
/* Per-namespace policy database setup: create the global xfrm_dst slab
 * (init_net only), allocate the by-index hash and the per-direction
 * by-destination hash tables, and initialize the hash threshold and
 * rebuild machinery.  Returns 0 or -ENOMEM, unwinding any tables
 * already allocated.
 */
static int __net_init xfrm_policy_init(struct net *net)
{
	unsigned int hmask, sz;
	int dir;

	/* The dst slab is shared by all namespaces; create it once. */
	if (net_eq(net, &init_net))
		xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
					   sizeof(struct xfrm_dst),
					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					   NULL);

	/* Initial table size: 8 buckets; grown later by xfrm_hash_resize. */
	hmask = 8 - 1;
	sz = (hmask+1) * sizeof(struct hlist_head);

	net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
	if (!net->xfrm.policy_byidx)
		goto out_byidx;
	net->xfrm.policy_idx_hmask = hmask;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy_hash *htab;

		net->xfrm.policy_count[dir] = 0;
		net->xfrm.policy_count[XFRM_POLICY_MAX + dir] = 0;
		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);

		htab = &net->xfrm.policy_bydst[dir];
		htab->table = xfrm_hash_alloc(sz);
		if (!htab->table)
			goto out_bydst;
		htab->hmask = hmask;
		/* Full-length prefixes by default: hash on complete addresses. */
		htab->dbits4 = 32;
		htab->sbits4 = 32;
		htab->dbits6 = 128;
		htab->sbits6 = 128;
	}
	net->xfrm.policy_hthresh.lbits4 = 32;
	net->xfrm.policy_hthresh.rbits4 = 32;
	net->xfrm.policy_hthresh.lbits6 = 128;
	net->xfrm.policy_hthresh.rbits6 = 128;

	seqlock_init(&net->xfrm.policy_hthresh.lock);

	INIT_LIST_HEAD(&net->xfrm.policy_all);
	INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
	INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild);
	return 0;

out_bydst:
	/* Free only the by-dst tables allocated so far (dir is the first
	 * failed index), then the by-index table.
	 */
	for (dir--; dir >= 0; dir--) {
		struct xfrm_policy_hash *htab;

		htab = &net->xfrm.policy_bydst[dir];
		xfrm_hash_free(htab->table, sz);
	}
	xfrm_hash_free(net->xfrm.policy_byidx, sz);
out_byidx:
	return -ENOMEM;
}
2959
/* Per-namespace policy database teardown: flush remaining policies,
 * then free the hash tables.  The WARN_ONs assert the database is
 * actually empty at namespace exit.
 */
static void xfrm_policy_fini(struct net *net)
{
	unsigned int sz;
	int dir;

	/* Make sure no resize work is still touching the tables. */
	flush_work(&net->xfrm.policy_hash_work);
#ifdef CONFIG_XFRM_SUB_POLICY
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false);
#endif
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false);

	WARN_ON(!list_empty(&net->xfrm.policy_all));

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy_hash *htab;

		WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));

		htab = &net->xfrm.policy_bydst[dir];
		/* Recompute the size from the table's own mask; it may have
		 * been resized since xfrm_policy_init().
		 */
		sz = (htab->hmask + 1) * sizeof(struct hlist_head);
		WARN_ON(!hlist_empty(htab->table));
		xfrm_hash_free(htab->table, sz);
	}

	sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
	WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
	xfrm_hash_free(net->xfrm.policy_byidx, sz);
}
2988
/* pernet init: bring up the xfrm subsystem for one namespace in order
 * (statistics, state, policy, sysctl), unwinding in reverse on the
 * first failure via the goto chain.
 */
static int __net_init xfrm_net_init(struct net *net)
{
	int rv;

	/* Initialize the per-net locks here */
	spin_lock_init(&net->xfrm.xfrm_state_lock);
	spin_lock_init(&net->xfrm.xfrm_policy_lock);
	mutex_init(&net->xfrm.xfrm_cfg_mutex);

	rv = xfrm_statistics_init(net);
	if (rv < 0)
		goto out_statistics;
	rv = xfrm_state_init(net);
	if (rv < 0)
		goto out_state;
	rv = xfrm_policy_init(net);
	if (rv < 0)
		goto out_policy;
	rv = xfrm_sysctl_init(net);
	if (rv < 0)
		goto out_sysctl;

	return 0;

out_sysctl:
	xfrm_policy_fini(net);
out_policy:
	xfrm_state_fini(net);
out_state:
	xfrm_statistics_fini(net);
out_statistics:
	return rv;
}
3022
/* pernet exit: tear down in the exact reverse order of xfrm_net_init(). */
static void __net_exit xfrm_net_exit(struct net *net)
{
	xfrm_sysctl_fini(net);
	xfrm_policy_fini(net);
	xfrm_state_fini(net);
	xfrm_statistics_fini(net);
}
3030
/* Per-network-namespace init/exit hooks for the xfrm subsystem. */
static struct pernet_operations __net_initdata xfrm_net_ops = {
	.init = xfrm_net_init,
	.exit = xfrm_net_exit,
};
3035
/* Boot-time initialization of the xfrm core: per-cpu work items,
 * pernet registration, device notifier, hash generation seqcount and
 * the input layer.  Runs once; allocation failure here is fatal
 * (BUG_ON), as there is no way to continue without xfrm.
 */
void __init xfrm_init(void)
{
	int i;

	/* One work item per possible CPU (NR_CPUS upper bound). */
	xfrm_pcpu_work = kmalloc_array(NR_CPUS, sizeof(*xfrm_pcpu_work),
				       GFP_KERNEL);
	BUG_ON(!xfrm_pcpu_work);

	for (i = 0; i < NR_CPUS; i++)
		INIT_WORK(&xfrm_pcpu_work[i], xfrm_pcpu_work_fn);

	register_pernet_subsys(&xfrm_net_ops);
	xfrm_dev_init();
	seqcount_init(&xfrm_policy_hash_generation);
	xfrm_input_init();

	/* Start with no xfrm interface callbacks registered. */
	RCU_INIT_POINTER(xfrm_if_cb, NULL);
	synchronize_rcu();
}
3055
#ifdef CONFIG_AUDITSYSCALL
/* Append a policy's security context and selector to an audit record.
 * Prefix lengths are logged only when narrower than the full address
 * width (32 for IPv4, 128 for IPv6).
 */
static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
					 struct audit_buffer *audit_buf)
{
	struct xfrm_sec_ctx *ctx = xp->security;
	struct xfrm_selector *sel = &xp->selector;

	if (ctx)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);

	switch (sel->family) {
	case AF_INET:
		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
		if (sel->prefixlen_s != 32)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
		if (sel->prefixlen_d != 32)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	case AF_INET6:
		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
		if (sel->prefixlen_s != 128)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
		if (sel->prefixlen_d != 128)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	}
}
3090
Tetsuo Handa2e710292014-04-22 21:48:30 +09003091void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid)
Joy Lattenab5f5e82007-09-17 11:51:22 -07003092{
3093 struct audit_buffer *audit_buf;
Joy Lattenab5f5e82007-09-17 11:51:22 -07003094
Paul Mooreafeb14b2007-12-21 14:58:11 -08003095 audit_buf = xfrm_audit_start("SPD-add");
Joy Lattenab5f5e82007-09-17 11:51:22 -07003096 if (audit_buf == NULL)
3097 return;
Tetsuo Handa2e710292014-04-22 21:48:30 +09003098 xfrm_audit_helper_usrinfo(task_valid, audit_buf);
Paul Mooreafeb14b2007-12-21 14:58:11 -08003099 audit_log_format(audit_buf, " res=%u", result);
Joy Lattenab5f5e82007-09-17 11:51:22 -07003100 xfrm_audit_common_policyinfo(xp, audit_buf);
3101 audit_log_end(audit_buf);
3102}
3103EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
3104
Paul Moore68277ac2007-12-20 20:49:33 -08003105void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
Tetsuo Handa2e710292014-04-22 21:48:30 +09003106 bool task_valid)
Joy Lattenab5f5e82007-09-17 11:51:22 -07003107{
3108 struct audit_buffer *audit_buf;
Joy Lattenab5f5e82007-09-17 11:51:22 -07003109
Paul Mooreafeb14b2007-12-21 14:58:11 -08003110 audit_buf = xfrm_audit_start("SPD-delete");
Joy Lattenab5f5e82007-09-17 11:51:22 -07003111 if (audit_buf == NULL)
3112 return;
Tetsuo Handa2e710292014-04-22 21:48:30 +09003113 xfrm_audit_helper_usrinfo(task_valid, audit_buf);
Paul Mooreafeb14b2007-12-21 14:58:11 -08003114 audit_log_format(audit_buf, " res=%u", result);
Joy Lattenab5f5e82007-09-17 11:51:22 -07003115 xfrm_audit_common_policyinfo(xp, audit_buf);
3116 audit_log_end(audit_buf);
3117}
3118EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
3119#endif
3120
Shinta Sugimoto80c9aba2007-02-08 13:11:42 -08003121#ifdef CONFIG_XFRM_MIGRATE
David S. Millerbc9b35a2012-05-15 15:04:57 -04003122static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
3123 const struct xfrm_selector *sel_tgt)
Shinta Sugimoto80c9aba2007-02-08 13:11:42 -08003124{
3125 if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
3126 if (sel_tgt->family == sel_cmp->family &&
YOSHIFUJI Hideaki / 吉藤英明70e94e62013-01-29 12:48:50 +00003127 xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
3128 sel_cmp->family) &&
3129 xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
3130 sel_cmp->family) &&
Shinta Sugimoto80c9aba2007-02-08 13:11:42 -08003131 sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
3132 sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
David S. Millerbc9b35a2012-05-15 15:04:57 -04003133 return true;
Shinta Sugimoto80c9aba2007-02-08 13:11:42 -08003134 }
3135 } else {
3136 if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
David S. Millerbc9b35a2012-05-15 15:04:57 -04003137 return true;
Shinta Sugimoto80c9aba2007-02-08 13:11:42 -08003138 }
3139 }
David S. Millerbc9b35a2012-05-15 15:04:57 -04003140 return false;
Shinta Sugimoto80c9aba2007-02-08 13:11:42 -08003141}
3142
/* Find the policy a migration request refers to: first scan the exact
 * hash chain for (daddr, saddr, family, dir), then the inexact list,
 * keeping an inexact hit only if it beats the exact match's priority.
 * Returns the policy with a reference held, or NULL.
 */
static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
						    u8 dir, u8 type, struct net *net)
{
	struct xfrm_policy *pol, *ret = NULL;
	struct hlist_head *chain;
	u32 priority = ~0U;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
	hlist_for_each_entry(pol, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &net->xfrm.policy_inexact[dir];
	hlist_for_each_entry(pol, chain, bydst) {
		/* Stop once entries can no longer beat the exact match
		 * (NOTE(review): relies on the inexact chain being
		 * priority-ordered — confirm against the insert path).
		 */
		if ((pol->priority >= priority) && ret)
			break;

		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			break;
		}
	}

	/* xfrm_pol_hold() tolerates NULL, so no check is needed here. */
	xfrm_pol_hold(ret);

	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	return ret;
}
3178
David S. Millerdd701752011-02-24 00:21:08 -05003179static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
Shinta Sugimoto80c9aba2007-02-08 13:11:42 -08003180{
3181 int match = 0;
3182
3183 if (t->mode == m->mode && t->id.proto == m->proto &&
3184 (m->reqid == 0 || t->reqid == m->reqid)) {
3185 switch (t->mode) {
3186 case XFRM_MODE_TUNNEL:
3187 case XFRM_MODE_BEET:
YOSHIFUJI Hideaki / 吉藤英明70e94e62013-01-29 12:48:50 +00003188 if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
3189 m->old_family) &&
3190 xfrm_addr_equal(&t->saddr, &m->old_saddr,
3191 m->old_family)) {
Shinta Sugimoto80c9aba2007-02-08 13:11:42 -08003192 match = 1;
3193 }
3194 break;
3195 case XFRM_MODE_TRANSPORT:
3196 /* in case of transport mode, template does not store
3197 any IP addresses, hence we just compare mode and
3198 protocol */
3199 match = 1;
3200 break;
3201 default:
3202 break;
3203 }
3204 }
3205 return match;
3206}
3207
/* update endpoint address(es) of template(s) */
/* Rewrite the endpoint addresses of every template in @pol that
 * matches one of the @num_migrate migration entries in @m.  Only
 * tunnel/BEET templates carry addresses to rewrite; transport matches
 * still count towards @n.  Returns 0, -ENOENT if the policy died, or
 * -ENODATA if nothing matched.
 */
static int xfrm_policy_migrate(struct xfrm_policy *pol,
			       struct xfrm_migrate *m, int num_migrate)
{
	struct xfrm_migrate *mp;
	int i, j, n = 0;

	write_lock_bh(&pol->lock);
	if (unlikely(pol->walk.dead)) {
		/* target policy has been deleted */
		write_unlock_bh(&pol->lock);
		return -ENOENT;
	}

	for (i = 0; i < pol->xfrm_nr; i++) {
		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
				continue;
			n++;
			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
				continue;
			/* update endpoints */
			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
			       sizeof(pol->xfrm_vec[i].id.daddr));
			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
			       sizeof(pol->xfrm_vec[i].saddr));
			pol->xfrm_vec[i].encap_family = mp->new_family;
			/* flush bundles */
			atomic_inc(&pol->genid);
		}
	}

	write_unlock_bh(&pol->lock);

	if (!n)
		return -ENODATA;

	return 0;
}
3248
David S. Millerdd701752011-02-24 00:21:08 -05003249static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
Shinta Sugimoto80c9aba2007-02-08 13:11:42 -08003250{
3251 int i, j;
3252
3253 if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
3254 return -EINVAL;
3255
3256 for (i = 0; i < num_migrate; i++) {
Shinta Sugimoto80c9aba2007-02-08 13:11:42 -08003257 if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
3258 xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
3259 return -EINVAL;
3260
3261 /* check if there is any duplicated entry */
3262 for (j = i + 1; j < num_migrate; j++) {
3263 if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
3264 sizeof(m[i].old_daddr)) &&
3265 !memcmp(&m[i].old_saddr, &m[j].old_saddr,
3266 sizeof(m[i].old_saddr)) &&
3267 m[i].proto == m[j].proto &&
3268 m[i].mode == m[j].mode &&
3269 m[i].reqid == m[j].reqid &&
3270 m[i].old_family == m[j].old_family)
3271 return -EINVAL;
3272 }
3273 }
3274
3275 return 0;
3276}
3277
/* Migrate the policy selected by (@sel, @dir, @type) and its states to
 * the new addresses given in @m (up to @num_migrate entries).  Runs in
 * stages: sanity checks, policy lookup, per-entry state clone with the
 * new addresses, policy template rewrite, deletion of the old states,
 * and finally a key-manager announcement.  On failure the cloned
 * states are deleted again and held references dropped.
 */
int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
		 struct xfrm_migrate *m, int num_migrate,
		 struct xfrm_kmaddress *k, struct net *net,
		 struct xfrm_encap_tmpl *encap)
{
	int i, err, nx_cur = 0, nx_new = 0;
	struct xfrm_policy *pol = NULL;
	struct xfrm_state *x, *xc;
	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
	struct xfrm_migrate *mp;

	/* Stage 0 - sanity checks */
	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
		goto out;

	if (dir >= XFRM_POLICY_MAX) {
		err = -EINVAL;
		goto out;
	}

	/* Stage 1 - find policy */
	if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) {
		err = -ENOENT;
		goto out;
	}

	/* Stage 2 - find and update state(s) */
	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
		if ((x = xfrm_migrate_state_find(mp, net))) {
			/* Keep a reference to the old state so it can be
			 * deleted (or released on error) later.
			 */
			x_cur[nx_cur] = x;
			nx_cur++;
			xc = xfrm_state_migrate(x, mp, encap);
			if (xc) {
				x_new[nx_new] = xc;
				nx_new++;
			} else {
				err = -ENODATA;
				goto restore_state;
			}
		}
	}

	/* Stage 3 - update policy */
	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
		goto restore_state;

	/* Stage 4 - delete old state(s) */
	if (nx_cur) {
		xfrm_states_put(x_cur, nx_cur);
		xfrm_states_delete(x_cur, nx_cur);
	}

	/* Stage 5 - announce */
	km_migrate(sel, dir, type, m, num_migrate, k, encap);

	xfrm_pol_put(pol);

	return 0;
out:
	return err;

restore_state:
	/* Undo: drop the policy ref, release the old states and delete
	 * any clones already created.
	 */
	if (pol)
		xfrm_pol_put(pol);
	if (nx_cur)
		xfrm_states_put(x_cur, nx_cur);
	if (nx_new)
		xfrm_states_delete(x_new, nx_new);

	return err;
}
EXPORT_SYMBOL(xfrm_migrate);
#endif