Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
[linux-3.10.git] / net / l2tp / l2tp_core.c
1 /*
2  * L2TP core.
3  *
4  * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
5  *
6  * This file contains some code of the original L2TPv2 pppol2tp
7  * driver, which has the following copyright:
8  *
9  * Authors:     Martijn van Oosterhout <kleptog@svana.org>
10  *              James Chapman (jchapman@katalix.com)
11  * Contributors:
12  *              Michal Ostrowski <mostrows@speakeasy.net>
13  *              Arnaldo Carvalho de Melo <acme@xconectiva.com.br>
14  *              David S. Miller (davem@redhat.com)
15  *
16  * This program is free software; you can redistribute it and/or modify
17  * it under the terms of the GNU General Public License version 2 as
18  * published by the Free Software Foundation.
19  */
20
21 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22
23 #include <linux/module.h>
24 #include <linux/string.h>
25 #include <linux/list.h>
26 #include <linux/rculist.h>
27 #include <linux/uaccess.h>
28
29 #include <linux/kernel.h>
30 #include <linux/spinlock.h>
31 #include <linux/kthread.h>
32 #include <linux/sched.h>
33 #include <linux/slab.h>
34 #include <linux/errno.h>
35 #include <linux/jiffies.h>
36
37 #include <linux/netdevice.h>
38 #include <linux/net.h>
39 #include <linux/inetdevice.h>
40 #include <linux/skbuff.h>
41 #include <linux/init.h>
42 #include <linux/in.h>
43 #include <linux/ip.h>
44 #include <linux/udp.h>
45 #include <linux/l2tp.h>
46 #include <linux/hash.h>
47 #include <linux/sort.h>
48 #include <linux/file.h>
49 #include <linux/nsproxy.h>
50 #include <net/net_namespace.h>
51 #include <net/netns/generic.h>
52 #include <net/dst.h>
53 #include <net/ip.h>
54 #include <net/udp.h>
55 #include <net/inet_common.h>
56 #include <net/xfrm.h>
57 #include <net/protocol.h>
58 #include <net/inet6_connection_sock.h>
59 #include <net/inet_ecn.h>
60 #include <net/ip6_route.h>
61 #include <net/ip6_checksum.h>
62
63 #include <asm/byteorder.h>
64 #include <linux/atomic.h>
65
66 #include "l2tp_core.h"
67
#define L2TP_DRV_VERSION	"V2.0"

/* L2TP header constants (RFC 2661 / RFC 3931 header flag bits,
 * as found in the first 16 bits of an L2TPv2 header).
 */
#define L2TP_HDRFLAG_T	   0x8000	/* Type: set = control message; data frames have T=0 */
#define L2TP_HDRFLAG_L	   0x4000	/* Length field present */
#define L2TP_HDRFLAG_S	   0x0800	/* Ns/Nr sequence fields present */
#define L2TP_HDRFLAG_O	   0x0200	/* Offset Size field present */
#define L2TP_HDRFLAG_P	   0x0100	/* Priority bit (RFC 2661); not inspected in this file */

#define L2TP_HDR_VER_MASK  0x000F	/* low nibble of flags carries the protocol version */
#define L2TP_HDR_VER_2	   0x0002
#define L2TP_HDR_VER_3	   0x0003

/* L2TPv3 default L2-specific sublayer */
#define L2TP_SLFLAG_S	   0x40000000	/* S bit: 24-bit sequence number is valid */
#define L2TP_SL_SEQ_MASK   0x00ffffff	/* mask for the 24-bit sequence number */

/* L2TPv2 data header sizes, with and without the optional Ns/Nr fields */
#define L2TP_HDR_SIZE_SEQ		10
#define L2TP_HDR_SIZE_NOSEQ		6

/* Default trace flags */
#define L2TP_DEFAULT_DEBUG_FLAGS	0
90
/* Private data stored for received packets in the skb.
 */
struct l2tp_skb_cb {
	u32			ns;		/* L2TP sequence number parsed from the header */
	u16			has_seq;	/* non-zero when ns is valid for this packet */
	u16			length;		/* payload length recorded at receive time */
	unsigned long		expires;	/* jiffies deadline; queued packets older than
						 * this are discarded by l2tp_recv_dequeue() */
};

/* Overlay the private area onto skb->cb, placed after the space reserved
 * for struct inet_skb_parm so the IP layer's own cb usage is preserved.
 */
#define L2TP_SKB_CB(skb)	((struct l2tp_skb_cb *) &skb->cb[sizeof(struct inet_skb_parm)])
101
/* Module-wide counters and the receive workqueue; updated elsewhere in
 * this file (not in this chunk).
 */
static atomic_t l2tp_tunnel_count;
static atomic_t l2tp_session_count;
static struct workqueue_struct *l2tp_wq;

/* per-net private data for this module */
static unsigned int l2tp_net_id;	/* key for net_generic() lookup of struct l2tp_net */
struct l2tp_net {
	/* All tunnels in this netns; readers walk it under RCU
	 * (see l2tp_tunnel_find()). */
	struct list_head l2tp_tunnel_list;
	/* Presumably serialises updates of l2tp_tunnel_list; writers are
	 * not visible in this chunk -- confirm against tunnel create/delete. */
	spinlock_t l2tp_tunnel_list_lock;
	/* Per-netns global session hash for L2TPv3, keyed by session id
	 * (v3 session ids are unique across tunnels). */
	struct hlist_head l2tp_session_hlist[L2TP_HASH_SIZE_2];
	/* Presumably serialises updates of l2tp_session_hlist; writers are
	 * not visible in this chunk. */
	spinlock_t l2tp_session_hlist_lock;
};
114
/* Forward declarations for helpers defined later in this file. */
static void l2tp_session_set_header_len(struct l2tp_session *session, int version);
static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
118
119 static inline struct l2tp_net *l2tp_pernet(struct net *net)
120 {
121         BUG_ON(!net);
122
123         return net_generic(net, l2tp_net_id);
124 }
125
/* Tunnel reference counts. Incremented per session that is added to
 * the tunnel.
 */
static inline void l2tp_tunnel_inc_refcount_1(struct l2tp_tunnel *tunnel)
{
	atomic_inc(&tunnel->ref_count);
}

/* Drop one tunnel reference; the final put frees the tunnel. */
static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel)
{
	if (atomic_dec_and_test(&tunnel->ref_count))
		l2tp_tunnel_free(tunnel);
}
/* Debug wrappers that log every refcount change. Note: the body of a
 * multi-line macro must be joined with '\' line continuations -- the
 * original l2tp_tunnel_dec_refcount define was missing the backslash
 * after its name, so its do/while body leaked out as a bare statement
 * (compile error under L2TP_REFCNT_DEBUG) while the macro expanded to
 * nothing. Macro arguments are parenthesized before member access.
 */
#ifdef L2TP_REFCNT_DEBUG
#define l2tp_tunnel_inc_refcount(_t)					\
do {									\
	pr_debug("l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n",	\
		 __func__, __LINE__, (_t)->name,			\
		 atomic_read(&(_t)->ref_count));			\
	l2tp_tunnel_inc_refcount_1(_t);					\
} while (0)
#define l2tp_tunnel_dec_refcount(_t)					\
do {									\
	pr_debug("l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n",	\
		 __func__, __LINE__, (_t)->name,			\
		 atomic_read(&(_t)->ref_count));			\
	l2tp_tunnel_dec_refcount_1(_t);					\
} while (0)
#else
#define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t)
#define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t)
#endif
158
159 /* Session hash global list for L2TPv3.
160  * The session_id SHOULD be random according to RFC3931, but several
161  * L2TP implementations use incrementing session_ids.  So we do a real
162  * hash on the session_id, rather than a simple bitmask.
163  */
164 static inline struct hlist_head *
165 l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id)
166 {
167         return &pn->l2tp_session_hlist[hash_32(session_id, L2TP_HASH_BITS_2)];
168
169 }
170
/* Lookup the tunnel socket, possibly involving the fs code if the socket is
 * owned by userspace.  A struct sock returned from this function must be
 * released using l2tp_tunnel_sock_put once you're done with it.
 *
 * Returns NULL if the tunnel is NULL or the socket cannot be resolved.
 */
struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel)
{
	int err = 0;
	struct socket *sock = NULL;
	struct sock *sk = NULL;

	if (!tunnel)
		goto out;

	if (tunnel->fd >= 0) {
		/* Socket is owned by userspace, who might be in the process
		 * of closing it.  Look the socket up using the fd to ensure
		 * consistency.
		 */
		sock = sockfd_lookup(tunnel->fd, &err);
		if (sock)
			sk = sock->sk;
	} else {
		/* Socket is owned by kernelspace */
		/* NOTE(review): no sock_hold() is taken on this path, while
		 * l2tp_tunnel_sock_put() does a sock_put() for every caller --
		 * verify the reference accounting is balanced for
		 * kernel-owned sockets (l2tp_sock_to_tunnel may hold; it is
		 * defined outside this file).
		 */
		sk = tunnel->sock;
	}

out:
	return sk;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_lookup);
201
/* Drop a reference to a tunnel socket obtained via l2tp_tunnel_sock_lookup() */
void l2tp_tunnel_sock_put(struct sock *sk)
{
	struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
	if (tunnel) {
		if (tunnel->fd >= 0) {
			/* Socket is owned by userspace: drop the file
			 * reference taken by sockfd_lookup() in the lookup. */
			sockfd_put(sk->sk_socket);
		}
		/* Balances the hold taken by l2tp_sock_to_tunnel() above --
		 * NOTE(review): confirm against l2tp_core.h; see the matching
		 * note in l2tp_tunnel_sock_lookup(). */
		sock_put(sk);
	}
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_put);
215
216 /* Lookup a session by id in the global session list
217  */
218 static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id)
219 {
220         struct l2tp_net *pn = l2tp_pernet(net);
221         struct hlist_head *session_list =
222                 l2tp_session_id_hash_2(pn, session_id);
223         struct l2tp_session *session;
224         struct hlist_node *walk;
225
226         rcu_read_lock_bh();
227         hlist_for_each_entry_rcu(session, walk, session_list, global_hlist) {
228                 if (session->session_id == session_id) {
229                         rcu_read_unlock_bh();
230                         return session;
231                 }
232         }
233         rcu_read_unlock_bh();
234
235         return NULL;
236 }
237
238 /* Session hash list.
239  * The session_id SHOULD be random according to RFC2661, but several
240  * L2TP implementations (Cisco and Microsoft) use incrementing
241  * session_ids.  So we do a real hash on the session_id, rather than a
242  * simple bitmask.
243  */
244 static inline struct hlist_head *
245 l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id)
246 {
247         return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)];
248 }
249
250 /* Lookup a session by id
251  */
252 struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id)
253 {
254         struct hlist_head *session_list;
255         struct l2tp_session *session;
256         struct hlist_node *walk;
257
258         /* In L2TPv3, session_ids are unique over all tunnels and we
259          * sometimes need to look them up before we know the
260          * tunnel.
261          */
262         if (tunnel == NULL)
263                 return l2tp_session_find_2(net, session_id);
264
265         session_list = l2tp_session_id_hash(tunnel, session_id);
266         read_lock_bh(&tunnel->hlist_lock);
267         hlist_for_each_entry(session, walk, session_list, hlist) {
268                 if (session->session_id == session_id) {
269                         read_unlock_bh(&tunnel->hlist_lock);
270                         return session;
271                 }
272         }
273         read_unlock_bh(&tunnel->hlist_lock);
274
275         return NULL;
276 }
277 EXPORT_SYMBOL_GPL(l2tp_session_find);
278
279 struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
280 {
281         int hash;
282         struct hlist_node *walk;
283         struct l2tp_session *session;
284         int count = 0;
285
286         read_lock_bh(&tunnel->hlist_lock);
287         for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
288                 hlist_for_each_entry(session, walk, &tunnel->session_hlist[hash], hlist) {
289                         if (++count > nth) {
290                                 read_unlock_bh(&tunnel->hlist_lock);
291                                 return session;
292                         }
293                 }
294         }
295
296         read_unlock_bh(&tunnel->hlist_lock);
297
298         return NULL;
299 }
300 EXPORT_SYMBOL_GPL(l2tp_session_find_nth);
301
302 /* Lookup a session by interface name.
303  * This is very inefficient but is only used by management interfaces.
304  */
305 struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
306 {
307         struct l2tp_net *pn = l2tp_pernet(net);
308         int hash;
309         struct hlist_node *walk;
310         struct l2tp_session *session;
311
312         rcu_read_lock_bh();
313         for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) {
314                 hlist_for_each_entry_rcu(session, walk, &pn->l2tp_session_hlist[hash], global_hlist) {
315                         if (!strcmp(session->ifname, ifname)) {
316                                 rcu_read_unlock_bh();
317                                 return session;
318                         }
319                 }
320         }
321
322         rcu_read_unlock_bh();
323
324         return NULL;
325 }
326 EXPORT_SYMBOL_GPL(l2tp_session_find_by_ifname);
327
328 /* Lookup a tunnel by id
329  */
330 struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id)
331 {
332         struct l2tp_tunnel *tunnel;
333         struct l2tp_net *pn = l2tp_pernet(net);
334
335         rcu_read_lock_bh();
336         list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
337                 if (tunnel->tunnel_id == tunnel_id) {
338                         rcu_read_unlock_bh();
339                         return tunnel;
340                 }
341         }
342         rcu_read_unlock_bh();
343
344         return NULL;
345 }
346 EXPORT_SYMBOL_GPL(l2tp_tunnel_find);
347
348 struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth)
349 {
350         struct l2tp_net *pn = l2tp_pernet(net);
351         struct l2tp_tunnel *tunnel;
352         int count = 0;
353
354         rcu_read_lock_bh();
355         list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
356                 if (++count > nth) {
357                         rcu_read_unlock_bh();
358                         return tunnel;
359                 }
360         }
361
362         rcu_read_unlock_bh();
363
364         return NULL;
365 }
366 EXPORT_SYMBOL_GPL(l2tp_tunnel_find_nth);
367
368 /*****************************************************************************
369  * Receive data handling
370  *****************************************************************************/
371
372 /* Queue a skb in order. We come here only if the skb has an L2TP sequence
373  * number.
374  */
375 static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *skb)
376 {
377         struct sk_buff *skbp;
378         struct sk_buff *tmp;
379         u32 ns = L2TP_SKB_CB(skb)->ns;
380         struct l2tp_stats *sstats;
381
382         spin_lock_bh(&session->reorder_q.lock);
383         sstats = &session->stats;
384         skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
385                 if (L2TP_SKB_CB(skbp)->ns > ns) {
386                         __skb_queue_before(&session->reorder_q, skbp, skb);
387                         l2tp_dbg(session, L2TP_MSG_SEQ,
388                                  "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n",
389                                  session->name, ns, L2TP_SKB_CB(skbp)->ns,
390                                  skb_queue_len(&session->reorder_q));
391                         u64_stats_update_begin(&sstats->syncp);
392                         sstats->rx_oos_packets++;
393                         u64_stats_update_end(&sstats->syncp);
394                         goto out;
395                 }
396         }
397
398         __skb_queue_tail(&session->reorder_q, skb);
399
400 out:
401         spin_unlock_bh(&session->reorder_q.lock);
402 }
403
404 /* Dequeue a single skb.
405  */
406 static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *skb)
407 {
408         struct l2tp_tunnel *tunnel = session->tunnel;
409         int length = L2TP_SKB_CB(skb)->length;
410         struct l2tp_stats *tstats, *sstats;
411
412         /* We're about to requeue the skb, so return resources
413          * to its current owner (a socket receive buffer).
414          */
415         skb_orphan(skb);
416
417         tstats = &tunnel->stats;
418         u64_stats_update_begin(&tstats->syncp);
419         sstats = &session->stats;
420         u64_stats_update_begin(&sstats->syncp);
421         tstats->rx_packets++;
422         tstats->rx_bytes += length;
423         sstats->rx_packets++;
424         sstats->rx_bytes += length;
425         u64_stats_update_end(&tstats->syncp);
426         u64_stats_update_end(&sstats->syncp);
427
428         if (L2TP_SKB_CB(skb)->has_seq) {
429                 /* Bump our Nr */
430                 session->nr++;
431                 if (tunnel->version == L2TP_HDR_VER_2)
432                         session->nr &= 0xffff;
433                 else
434                         session->nr &= 0xffffff;
435
436                 l2tp_dbg(session, L2TP_MSG_SEQ, "%s: updated nr to %hu\n",
437                          session->name, session->nr);
438         }
439
440         /* call private receive handler */
441         if (session->recv_skb != NULL)
442                 (*session->recv_skb)(session, skb, L2TP_SKB_CB(skb)->length);
443         else
444                 kfree_skb(skb);
445
446         if (session->deref)
447                 (*session->deref)(session);
448 }
449
/* Dequeue skbs from the session's reorder_q, subject to packet order.
 * Skbs that have been in the queue for too long are simply discarded.
 */
static void l2tp_recv_dequeue(struct l2tp_session *session)
{
	struct sk_buff *skb;
	struct sk_buff *tmp;
	struct l2tp_stats *sstats;

	/* If the pkt at the head of the queue has the nr that we
	 * expect to send up next, dequeue it and any other
	 * in-sequence packets behind it.
	 */
start:
	spin_lock_bh(&session->reorder_q.lock);
	sstats = &session->stats;
	skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
		/* Expired packet: drop it and flag reorder_skip so nr is
		 * resynchronised to the next queued packet below. */
		if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) {
			u64_stats_update_begin(&sstats->syncp);
			sstats->rx_seq_discards++;
			sstats->rx_errors++;
			u64_stats_update_end(&sstats->syncp);
			l2tp_dbg(session, L2TP_MSG_SEQ,
				 "%s: oos pkt %u len %d discarded (too old), waiting for %u, reorder_q_len=%d\n",
				 session->name, L2TP_SKB_CB(skb)->ns,
				 L2TP_SKB_CB(skb)->length, session->nr,
				 skb_queue_len(&session->reorder_q));
			session->reorder_skip = 1;
			__skb_unlink(skb, &session->reorder_q);
			kfree_skb(skb);
			if (session->deref)
				(*session->deref)(session);
			continue;
		}

		if (L2TP_SKB_CB(skb)->has_seq) {
			if (session->reorder_skip) {
				/* A discard happened above: jump nr forward
				 * to this packet's ns rather than waiting
				 * forever for the discarded sequence. */
				l2tp_dbg(session, L2TP_MSG_SEQ,
					 "%s: advancing nr to next pkt: %u -> %u",
					 session->name, session->nr,
					 L2TP_SKB_CB(skb)->ns);
				session->reorder_skip = 0;
				session->nr = L2TP_SKB_CB(skb)->ns;
			}
			/* Head is still ahead of the expected nr: keep
			 * holding packets until the gap fills or expires. */
			if (L2TP_SKB_CB(skb)->ns != session->nr) {
				l2tp_dbg(session, L2TP_MSG_SEQ,
					 "%s: holding oos pkt %u len %d, waiting for %u, reorder_q_len=%d\n",
					 session->name, L2TP_SKB_CB(skb)->ns,
					 L2TP_SKB_CB(skb)->length, session->nr,
					 skb_queue_len(&session->reorder_q));
				goto out;
			}
		}
		__skb_unlink(skb, &session->reorder_q);

		/* Process the skb. We release the queue lock while we
		 * do so to let other contexts process the queue.
		 */
		spin_unlock_bh(&session->reorder_q.lock);
		l2tp_recv_dequeue_skb(session, skb);
		goto start;
	}

out:
	spin_unlock_bh(&session->reorder_q.lock);
}
516
/* Verify the UDP checksum of a received L2TP-over-UDP packet.
 * Returns 0 when the checksum is valid or verification is unnecessary,
 * non-zero when the packet should be discarded.
 */
static inline int l2tp_verify_udp_checksum(struct sock *sk,
					   struct sk_buff *skb)
{
	struct udphdr *uh = udp_hdr(skb);
	u16 ulen = ntohs(uh->len);
	__wsum psum;

	/* Skip verification if checksums are disabled on the socket or the
	 * lower layers already validated this packet. */
	if (sk->sk_no_check || skb_csum_unnecessary(skb))
		return 0;

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == PF_INET6) {
		/* A zero UDP checksum is invalid over IPv6: reject. */
		if (!uh->check) {
			LIMIT_NETDEBUG(KERN_INFO "L2TP: IPv6: checksum is 0\n");
			return 1;
		}
		/* Fast path: hardware gave us a full sum; fold it with the
		 * IPv6 pseudo-header and accept if it verifies. */
		if ((skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
				     &ipv6_hdr(skb)->daddr, ulen,
				     IPPROTO_UDP, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
		/* Slow path: seed skb->csum with the pseudo-header sum so
		 * __skb_checksum_complete() below can finish the job. */
		skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							 &ipv6_hdr(skb)->daddr,
							 skb->len, IPPROTO_UDP,
							 0));
	} else
#endif
	{
		struct inet_sock *inet;
		/* Over IPv4 a zero checksum means "not computed": accept. */
		if (!uh->check)
			return 0;
		inet = inet_sk(sk);
		psum = csum_tcpudp_nofold(inet->inet_saddr, inet->inet_daddr,
					  ulen, IPPROTO_UDP, 0);

		/* Fast path as above, with the IPv4 pseudo-header. */
		if ((skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !csum_fold(csum_add(psum, skb->csum)))
			return 0;
		/* Seed for the full software verification below. */
		skb->csum = psum;
	}

	return __skb_checksum_complete(skb);
}
562
563 /* Do receive processing of L2TP data frames. We handle both L2TPv2
564  * and L2TPv3 data frames here.
565  *
566  * L2TPv2 Data Message Header
567  *
568  *  0                   1                   2                   3
569  *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
570  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
571  * |T|L|x|x|S|x|O|P|x|x|x|x|  Ver  |          Length (opt)         |
572  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
573  * |           Tunnel ID           |           Session ID          |
574  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
575  * |             Ns (opt)          |             Nr (opt)          |
576  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
577  * |      Offset Size (opt)        |    Offset pad... (opt)
578  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
579  *
580  * Data frames are marked by T=0. All other fields are the same as
581  * those in L2TP control frames.
582  *
583  * L2TPv3 Data Message Header
584  *
585  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
586  * |                      L2TP Session Header                      |
587  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
588  * |                      L2-Specific Sublayer                     |
589  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
590  * |                        Tunnel Payload                      ...
591  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
592  *
593  * L2TPv3 Session Header Over IP
594  *
595  *  0                   1                   2                   3
596  *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
597  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
598  * |                           Session ID                          |
599  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
600  * |               Cookie (optional, maximum 64 bits)...
601  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
602  *                                                                 |
603  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
604  *
605  * L2TPv3 L2-Specific Sublayer Format
606  *
607  *  0                   1                   2                   3
608  *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
609  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
610  * |x|S|x|x|x|x|x|x|              Sequence Number                  |
611  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
612  *
613  * Cookie value, sublayer format and offset (pad) are negotiated with
614  * the peer when the session is set up. Unlike L2TPv2, we do not need
615  * to parse the packet header to determine if optional fields are
616  * present.
617  *
618  * Caller must already have parsed the frame and determined that it is
619  * a data (not control) frame before coming here. Fields up to the
620  * session-id have already been parsed and ptr points to the data
621  * after the session-id.
622  */
623 void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
624                       unsigned char *ptr, unsigned char *optr, u16 hdrflags,
625                       int length, int (*payload_hook)(struct sk_buff *skb))
626 {
627         struct l2tp_tunnel *tunnel = session->tunnel;
628         int offset;
629         u32 ns, nr;
630         struct l2tp_stats *sstats = &session->stats;
631
632         /* The ref count is increased since we now hold a pointer to
633          * the session. Take care to decrement the refcnt when exiting
634          * this function from now on...
635          */
636         l2tp_session_inc_refcount(session);
637         if (session->ref)
638                 (*session->ref)(session);
639
640         /* Parse and check optional cookie */
641         if (session->peer_cookie_len > 0) {
642                 if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
643                         l2tp_info(tunnel, L2TP_MSG_DATA,
644                                   "%s: cookie mismatch (%u/%u). Discarding.\n",
645                                   tunnel->name, tunnel->tunnel_id,
646                                   session->session_id);
647                         u64_stats_update_begin(&sstats->syncp);
648                         sstats->rx_cookie_discards++;
649                         u64_stats_update_end(&sstats->syncp);
650                         goto discard;
651                 }
652                 ptr += session->peer_cookie_len;
653         }
654
655         /* Handle the optional sequence numbers. Sequence numbers are
656          * in different places for L2TPv2 and L2TPv3.
657          *
658          * If we are the LAC, enable/disable sequence numbers under
659          * the control of the LNS.  If no sequence numbers present but
660          * we were expecting them, discard frame.
661          */
662         ns = nr = 0;
663         L2TP_SKB_CB(skb)->has_seq = 0;
664         if (tunnel->version == L2TP_HDR_VER_2) {
665                 if (hdrflags & L2TP_HDRFLAG_S) {
666                         ns = ntohs(*(__be16 *) ptr);
667                         ptr += 2;
668                         nr = ntohs(*(__be16 *) ptr);
669                         ptr += 2;
670
671                         /* Store L2TP info in the skb */
672                         L2TP_SKB_CB(skb)->ns = ns;
673                         L2TP_SKB_CB(skb)->has_seq = 1;
674
675                         l2tp_dbg(session, L2TP_MSG_SEQ,
676                                  "%s: recv data ns=%u, nr=%u, session nr=%u\n",
677                                  session->name, ns, nr, session->nr);
678                 }
679         } else if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
680                 u32 l2h = ntohl(*(__be32 *) ptr);
681
682                 if (l2h & 0x40000000) {
683                         ns = l2h & 0x00ffffff;
684
685                         /* Store L2TP info in the skb */
686                         L2TP_SKB_CB(skb)->ns = ns;
687                         L2TP_SKB_CB(skb)->has_seq = 1;
688
689                         l2tp_dbg(session, L2TP_MSG_SEQ,
690                                  "%s: recv data ns=%u, session nr=%u\n",
691                                  session->name, ns, session->nr);
692                 }
693         }
694
695         /* Advance past L2-specific header, if present */
696         ptr += session->l2specific_len;
697
698         if (L2TP_SKB_CB(skb)->has_seq) {
699                 /* Received a packet with sequence numbers. If we're the LNS,
700                  * check if we sre sending sequence numbers and if not,
701                  * configure it so.
702                  */
703                 if ((!session->lns_mode) && (!session->send_seq)) {
704                         l2tp_info(session, L2TP_MSG_SEQ,
705                                   "%s: requested to enable seq numbers by LNS\n",
706                                   session->name);
707                         session->send_seq = -1;
708                         l2tp_session_set_header_len(session, tunnel->version);
709                 }
710         } else {
711                 /* No sequence numbers.
712                  * If user has configured mandatory sequence numbers, discard.
713                  */
714                 if (session->recv_seq) {
715                         l2tp_warn(session, L2TP_MSG_SEQ,
716                                   "%s: recv data has no seq numbers when required. Discarding.\n",
717                                   session->name);
718                         u64_stats_update_begin(&sstats->syncp);
719                         sstats->rx_seq_discards++;
720                         u64_stats_update_end(&sstats->syncp);
721                         goto discard;
722                 }
723
724                 /* If we're the LAC and we're sending sequence numbers, the
725                  * LNS has requested that we no longer send sequence numbers.
726                  * If we're the LNS and we're sending sequence numbers, the
727                  * LAC is broken. Discard the frame.
728                  */
729                 if ((!session->lns_mode) && (session->send_seq)) {
730                         l2tp_info(session, L2TP_MSG_SEQ,
731                                   "%s: requested to disable seq numbers by LNS\n",
732                                   session->name);
733                         session->send_seq = 0;
734                         l2tp_session_set_header_len(session, tunnel->version);
735                 } else if (session->send_seq) {
736                         l2tp_warn(session, L2TP_MSG_SEQ,
737                                   "%s: recv data has no seq numbers when required. Discarding.\n",
738                                   session->name);
739                         u64_stats_update_begin(&sstats->syncp);
740                         sstats->rx_seq_discards++;
741                         u64_stats_update_end(&sstats->syncp);
742                         goto discard;
743                 }
744         }
745
746         /* Session data offset is handled differently for L2TPv2 and
747          * L2TPv3. For L2TPv2, there is an optional 16-bit value in
748          * the header. For L2TPv3, the offset is negotiated using AVPs
749          * in the session setup control protocol.
750          */
751         if (tunnel->version == L2TP_HDR_VER_2) {
752                 /* If offset bit set, skip it. */
753                 if (hdrflags & L2TP_HDRFLAG_O) {
754                         offset = ntohs(*(__be16 *)ptr);
755                         ptr += 2 + offset;
756                 }
757         } else
758                 ptr += session->offset;
759
760         offset = ptr - optr;
761         if (!pskb_may_pull(skb, offset))
762                 goto discard;
763
764         __skb_pull(skb, offset);
765
766         /* If caller wants to process the payload before we queue the
767          * packet, do so now.
768          */
769         if (payload_hook)
770                 if ((*payload_hook)(skb))
771                         goto discard;
772
773         /* Prepare skb for adding to the session's reorder_q.  Hold
774          * packets for max reorder_timeout or 1 second if not
775          * reordering.
776          */
777         L2TP_SKB_CB(skb)->length = length;
778         L2TP_SKB_CB(skb)->expires = jiffies +
779                 (session->reorder_timeout ? session->reorder_timeout : HZ);
780
781         /* Add packet to the session's receive queue. Reordering is done here, if
782          * enabled. Saved L2TP protocol info is stored in skb->sb[].
783          */
784         if (L2TP_SKB_CB(skb)->has_seq) {
785                 if (session->reorder_timeout != 0) {
786                         /* Packet reordering enabled. Add skb to session's
787                          * reorder queue, in order of ns.
788                          */
789                         l2tp_recv_queue_skb(session, skb);
790                 } else {
791                         /* Packet reordering disabled. Discard out-of-sequence
792                          * packets
793                          */
794                         if (L2TP_SKB_CB(skb)->ns != session->nr) {
795                                 u64_stats_update_begin(&sstats->syncp);
796                                 sstats->rx_seq_discards++;
797                                 u64_stats_update_end(&sstats->syncp);
798                                 l2tp_dbg(session, L2TP_MSG_SEQ,
799                                          "%s: oos pkt %u len %d discarded, waiting for %u, reorder_q_len=%d\n",
800                                          session->name, L2TP_SKB_CB(skb)->ns,
801                                          L2TP_SKB_CB(skb)->length, session->nr,
802                                          skb_queue_len(&session->reorder_q));
803                                 goto discard;
804                         }
805                         skb_queue_tail(&session->reorder_q, skb);
806                 }
807         } else {
808                 /* No sequence numbers. Add the skb to the tail of the
809                  * reorder queue. This ensures that it will be
810                  * delivered after all previous sequenced skbs.
811                  */
812                 skb_queue_tail(&session->reorder_q, skb);
813         }
814
815         /* Try to dequeue as many skbs from reorder_q as we can. */
816         l2tp_recv_dequeue(session);
817
818         l2tp_session_dec_refcount(session);
819
820         return;
821
822 discard:
823         u64_stats_update_begin(&sstats->syncp);
824         sstats->rx_errors++;
825         u64_stats_update_end(&sstats->syncp);
826         kfree_skb(skb);
827
828         if (session->deref)
829                 (*session->deref)(session);
830
831         l2tp_session_dec_refcount(session);
832 }
833 EXPORT_SYMBOL(l2tp_recv_common);
834
/* Internal UDP receive frame. Do the real work of receiving an L2TP data frame
 * here. The skb is not on a list when we get here.
 * Returns 0 if the packet was a data packet and was successfully passed on.
 * Returns 1 if the packet was not a good data packet and could not be
 * forwarded.  All such packets are passed up to userspace to deal with.
 */
static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
			      int (*payload_hook)(struct sk_buff *skb))
{
	struct l2tp_session *session = NULL;
	unsigned char *ptr, *optr;
	u16 hdrflags;
	u32 tunnel_id, session_id;
	u16 version;
	int length;
	struct l2tp_stats *tstats;

	/* Verify the UDP checksum before looking at the payload. */
	if (tunnel->sock && l2tp_verify_udp_checksum(tunnel->sock, skb))
		goto discard_bad_csum;

	/* UDP always verifies the packet length. */
	__skb_pull(skb, sizeof(struct udphdr));

	/* Short packet? Require enough linear data for an L2TP header
	 * carrying sequence numbers (L2TP_HDR_SIZE_SEQ).
	 */
	if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) {
		l2tp_info(tunnel, L2TP_MSG_DATA,
			  "%s: recv short packet (len=%d)\n",
			  tunnel->name, skb->len);
		goto error;
	}

	/* Trace packet contents (first 32 bytes at most), if enabled */
	if (tunnel->debug & L2TP_MSG_DATA) {
		length = min(32u, skb->len);
		if (!pskb_may_pull(skb, length))
			goto error;

		pr_debug("%s: recv\n", tunnel->name);
		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, skb->data, length);
	}

	/* Point to L2TP header; optr keeps the start for later offset math */
	optr = ptr = skb->data;

	/* Get L2TP header flags */
	hdrflags = ntohs(*(__be16 *) ptr);

	/* Check protocol version */
	version = hdrflags & L2TP_HDR_VER_MASK;
	if (version != tunnel->version) {
		l2tp_info(tunnel, L2TP_MSG_DATA,
			  "%s: recv protocol version mismatch: got %d expected %d\n",
			  tunnel->name, version, tunnel->version);
		goto error;
	}

	/* Get length of L2TP packet */
	length = skb->len;

	/* If type is control packet, it is handled by userspace. */
	if (hdrflags & L2TP_HDRFLAG_T) {
		l2tp_dbg(tunnel, L2TP_MSG_DATA,
			 "%s: recv control packet, len=%d\n",
			 tunnel->name, length);
		goto error;
	}

	/* Skip flags */
	ptr += 2;

	if (tunnel->version == L2TP_HDR_VER_2) {
		/* If length is present, skip it */
		if (hdrflags & L2TP_HDRFLAG_L)
			ptr += 2;

		/* Extract tunnel and session ID (16 bits each in v2) */
		tunnel_id = ntohs(*(__be16 *) ptr);
		ptr += 2;
		session_id = ntohs(*(__be16 *) ptr);
		ptr += 2;
	} else {
		/* v3: no tunnel id on the wire; session id is 32 bits */
		ptr += 2;	/* skip reserved bits */
		tunnel_id = tunnel->tunnel_id;
		session_id = ntohl(*(__be32 *) ptr);
		ptr += 4;
	}

	/* Find the session context */
	session = l2tp_session_find(tunnel->l2tp_net, tunnel, session_id);
	if (!session || !session->recv_skb) {
		/* Not found? Pass to userspace to deal with */
		l2tp_info(tunnel, L2TP_MSG_DATA,
			  "%s: no session found (%u/%u). Passing up.\n",
			  tunnel->name, tunnel_id, session_id);
		goto error;
	}

	/* Hand off to the common receive path: ptr points just past the
	 * ids, optr at the start of the L2TP header.
	 */
	l2tp_recv_common(session, skb, ptr, optr, hdrflags, length, payload_hook);

	return 0;

discard_bad_csum:
	/* Account the error in both UDP and tunnel stats, then drop. */
	LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name);
	UDP_INC_STATS_USER(tunnel->l2tp_net, UDP_MIB_INERRORS, 0);
	tstats = &tunnel->stats;
	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_errors++;
	u64_stats_update_end(&tstats->syncp);
	kfree_skb(skb);

	return 0;

error:
	/* Put UDP header back so userspace sees an intact UDP datagram */
	__skb_push(skb, sizeof(struct udphdr));

	return 1;
}
953
954 /* UDP encapsulation receive handler. See net/ipv4/udp.c.
955  * Return codes:
956  * 0 : success.
957  * <0: error
958  * >0: skb should be passed up to userspace as UDP.
959  */
960 int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
961 {
962         struct l2tp_tunnel *tunnel;
963
964         tunnel = l2tp_sock_to_tunnel(sk);
965         if (tunnel == NULL)
966                 goto pass_up;
967
968         l2tp_dbg(tunnel, L2TP_MSG_DATA, "%s: received %d bytes\n",
969                  tunnel->name, skb->len);
970
971         if (l2tp_udp_recv_core(tunnel, skb, tunnel->recv_payload_hook))
972                 goto pass_up_put;
973
974         sock_put(sk);
975         return 0;
976
977 pass_up_put:
978         sock_put(sk);
979 pass_up:
980         return 1;
981 }
982 EXPORT_SYMBOL_GPL(l2tp_udp_encap_recv);
983
984 /************************************************************************
985  * Transmit handling
986  ***********************************************************************/
987
988 /* Build an L2TP header for the session into the buffer provided.
989  */
990 static int l2tp_build_l2tpv2_header(struct l2tp_session *session, void *buf)
991 {
992         struct l2tp_tunnel *tunnel = session->tunnel;
993         __be16 *bufp = buf;
994         __be16 *optr = buf;
995         u16 flags = L2TP_HDR_VER_2;
996         u32 tunnel_id = tunnel->peer_tunnel_id;
997         u32 session_id = session->peer_session_id;
998
999         if (session->send_seq)
1000                 flags |= L2TP_HDRFLAG_S;
1001
1002         /* Setup L2TP header. */
1003         *bufp++ = htons(flags);
1004         *bufp++ = htons(tunnel_id);
1005         *bufp++ = htons(session_id);
1006         if (session->send_seq) {
1007                 *bufp++ = htons(session->ns);
1008                 *bufp++ = 0;
1009                 session->ns++;
1010                 session->ns &= 0xffff;
1011                 l2tp_dbg(session, L2TP_MSG_SEQ, "%s: updated ns to %u\n",
1012                          session->name, session->ns);
1013         }
1014
1015         return bufp - optr;
1016 }
1017
/* Build an L2TPv3 data header for the session into the buffer provided.
 * Returns the number of bytes written.
 */
static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	char *bufp = buf;
	char *optr = bufp;

	/* Setup L2TP header. The header differs slightly for UDP and
	 * IP encapsulations. For UDP, there is 4 bytes of flags.
	 */
	if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
		u16 flags = L2TP_HDR_VER_3;
		*((__be16 *) bufp) = htons(flags);
		bufp += 2;
		/* second half of the 4 byte flags word is sent as zero */
		*((__be16 *) bufp) = 0;
		bufp += 2;
	}

	/* Peer's 32-bit session id, then any negotiated cookie bytes. */
	*((__be32 *) bufp) = htonl(session->peer_session_id);
	bufp += 4;
	if (session->cookie_len) {
		memcpy(bufp, &session->cookie[0], session->cookie_len);
		bufp += session->cookie_len;
	}
	if (session->l2specific_len) {
		if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
			u32 l2h = 0;
			if (session->send_seq) {
				/* 0x40000000 is the S (sequence) bit of the
				 * default L2-specific sublayer; ns wraps at
				 * 24 bits.
				 */
				l2h = 0x40000000 | session->ns;
				session->ns++;
				session->ns &= 0xffffff;
				l2tp_dbg(session, L2TP_MSG_SEQ,
					 "%s: updated ns to %u\n",
					 session->name, session->ns);
			}

			*((__be32 *) bufp) = htonl(l2h);
		}
		/* bufp advances by l2specific_len even for non-default
		 * sublayer types; those bytes are left as-is.
		 */
		bufp += session->l2specific_len;
	}
	if (session->offset)
		bufp += session->offset;

	return bufp - optr;
}
1062
/* Send an skb that already carries its L2TP (and any UDP) headers down
 * to the IP layer, then account the result in tunnel and session stats.
 * Always returns 0; a transmit error is reflected only in the error
 * counters.
 */
static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
			  struct flowi *fl, size_t data_len)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	unsigned int len = skb->len;
	int error;
	struct l2tp_stats *tstats, *sstats;

	/* Debug */
	if (session->send_seq)
		l2tp_dbg(session, L2TP_MSG_DATA, "%s: send %Zd bytes, ns=%u\n",
			 session->name, data_len, session->ns - 1);
	else
		l2tp_dbg(session, L2TP_MSG_DATA, "%s: send %Zd bytes\n",
			 session->name, data_len);

	if (session->debug & L2TP_MSG_DATA) {
		/* Dump the start of the L2TP payload, past any UDP header */
		int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
		unsigned char *datap = skb->data + uhlen;

		pr_debug("%s: xmit\n", session->name);
		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
				     datap, min_t(size_t, 32, len - uhlen));
	}

	/* Queue the packet to IP for output; allow local fragmentation */
	skb->local_df = 1;
#if IS_ENABLED(CONFIG_IPV6)
	if (skb->sk->sk_family == PF_INET6)
		error = inet6_csk_xmit(skb, NULL);
	else
#endif
		error = ip_queue_xmit(skb, fl);

	/* Update stats - both tunnel and session counters in one pass */
	tstats = &tunnel->stats;
	u64_stats_update_begin(&tstats->syncp);
	sstats = &session->stats;
	u64_stats_update_begin(&sstats->syncp);
	if (error >= 0) {
		tstats->tx_packets++;
		tstats->tx_bytes += len;
		sstats->tx_packets++;
		sstats->tx_bytes += len;
	} else {
		tstats->tx_errors++;
		sstats->tx_errors++;
	}
	u64_stats_update_end(&tstats->syncp);
	u64_stats_update_end(&sstats->syncp);

	return 0;
}
1116
1117 /* Automatically called when the skb is freed.
1118  */
1119 static void l2tp_sock_wfree(struct sk_buff *skb)
1120 {
1121         sock_put(skb->sk);
1122 }
1123
/* For data skbs that we transmit, we associate with the tunnel socket
 * but don't do accounting.
 */
static inline void l2tp_skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
	/* Hold the tunnel socket while the skb is in flight; the
	 * destructor (l2tp_sock_wfree) releases it when the skb is freed.
	 */
	sock_hold(sk);
	skb->sk = sk;
	skb->destructor = l2tp_sock_wfree;
}
1133
#if IS_ENABLED(CONFIG_IPV6)
/* Fill in the UDP checksum for an IPv6-encapsulated L2TP packet.
 * If the output device cannot offload IPv6 UDP checksums, compute the
 * checksum in software now; otherwise set the skb up for hardware
 * checksum offload (CHECKSUM_PARTIAL).
 */
static void l2tp_xmit_ipv6_csum(struct sock *sk, struct sk_buff *skb,
				int udp_len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct udphdr *uh = udp_hdr(skb);

	if (!skb_dst(skb) || !skb_dst(skb)->dev ||
	    !(skb_dst(skb)->dev->features & NETIF_F_IPV6_CSUM)) {
		/* Software checksum over the whole UDP datagram */
		__wsum csum = skb_checksum(skb, 0, udp_len, 0);
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		uh->check = csum_ipv6_magic(&np->saddr, &np->daddr, udp_len,
					    IPPROTO_UDP, csum);
		/* A computed checksum of zero is transmitted as all-ones */
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	} else {
		/* Device completes the checksum; seed uh->check with the
		 * pseudo-header sum and tell it where to write the result.
		 */
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_ipv6_magic(&np->saddr, &np->daddr,
					     udp_len, IPPROTO_UDP, 0);
	}
}
#endif
1158
/* If caller requires the skb to have a ppp header, the header must be
 * inserted in the skb data before calling this function.
 *
 * Pushes the L2TP (and, for UDP encapsulation, UDP) headers onto the
 * skb and hands it to the IP layer via the tunnel socket. Returns a
 * NET_XMIT_* code.
 */
int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len)
{
	int data_len = skb->len;
	struct l2tp_tunnel *tunnel = session->tunnel;
	struct sock *sk = tunnel->sock;
	struct flowi *fl;
	struct udphdr *uh;
	struct inet_sock *inet;
	__wsum csum;
	int headroom;
	int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
	int udp_len;
	int ret = NET_XMIT_SUCCESS;

	/* Check that there's enough headroom in the skb to insert IP,
	 * UDP and L2TP headers. If not enough, expand it to
	 * make room. Adjust truesize.
	 */
	headroom = NET_SKB_PAD + sizeof(struct iphdr) +
		uhlen + hdr_len;
	if (skb_cow_head(skb, headroom)) {
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	/* Detach the skb from its original socket's accounting */
	skb_orphan(skb);
	/* Setup L2TP header (v2 or v3 builder, chosen at session setup) */
	session->build_header(session, __skb_push(skb, hdr_len));

	/* Reset skb netfilter state */
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
			      IPSKB_REROUTED);
	nf_reset(skb);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Tunnel socket is busy in process context; drop rather
		 * than queue behind it.
		 */
		kfree_skb(skb);
		ret = NET_XMIT_DROP;
		goto out_unlock;
	}

	/* Get routing info from the tunnel socket */
	skb_dst_drop(skb);
	skb_dst_set(skb, dst_clone(__sk_dst_check(sk, 0)));

	inet = inet_sk(sk);
	fl = &inet->cork.fl;
	switch (tunnel->encap) {
	case L2TP_ENCAPTYPE_UDP:
		/* Setup UDP header using the connected socket's ports */
		__skb_push(skb, sizeof(*uh));
		skb_reset_transport_header(skb);
		uh = udp_hdr(skb);
		uh->source = inet->inet_sport;
		uh->dest = inet->inet_dport;
		udp_len = uhlen + hdr_len + data_len;
		uh->len = htons(udp_len);
		uh->check = 0;

		/* Calculate UDP checksum if configured to do so */
#if IS_ENABLED(CONFIG_IPV6)
		if (sk->sk_family == PF_INET6)
			l2tp_xmit_ipv6_csum(sk, skb, udp_len);
		else
#endif
		if (sk->sk_no_check == UDP_CSUM_NOXMIT)
			skb->ip_summed = CHECKSUM_NONE;
		else if ((skb_dst(skb) && skb_dst(skb)->dev) &&
			 (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM))) {
			/* No hardware offload: compute checksum now */
			skb->ip_summed = CHECKSUM_COMPLETE;
			csum = skb_checksum(skb, 0, udp_len, 0);
			uh->check = csum_tcpudp_magic(inet->inet_saddr,
						      inet->inet_daddr,
						      udp_len, IPPROTO_UDP, csum);
			/* Zero checksum is sent as all-ones */
			if (uh->check == 0)
				uh->check = CSUM_MANGLED_0;
		} else {
			/* Hardware offload: seed with pseudo-header sum */
			skb->ip_summed = CHECKSUM_PARTIAL;
			skb->csum_start = skb_transport_header(skb) - skb->head;
			skb->csum_offset = offsetof(struct udphdr, check);
			uh->check = ~csum_tcpudp_magic(inet->inet_saddr,
						       inet->inet_daddr,
						       udp_len, IPPROTO_UDP, 0);
		}
		break;

	case L2TP_ENCAPTYPE_IP:
		/* Plain IP encapsulation: no transport header to build */
		break;
	}

	/* Tie the skb to the tunnel socket (refcount only, no accounting) */
	l2tp_skb_set_owner_w(skb, sk);

	l2tp_xmit_core(session, skb, fl, data_len);
out_unlock:
	bh_unlock_sock(sk);

	return ret;
}
EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
1262
/*****************************************************************************
 * Tunnel and session create/destroy.
 *****************************************************************************/
1266
/* Tunnel socket destruct hook.
 * The tunnel context is deleted only when all session sockets have been
 * closed.
 */
static void l2tp_tunnel_destruct(struct sock *sk)
{
	struct l2tp_tunnel *tunnel;
	struct l2tp_net *pn;

	tunnel = sk->sk_user_data;
	if (tunnel == NULL)
		goto end;

	l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing...\n", tunnel->name);


	/* Disable udp encapsulation */
	switch (tunnel->encap) {
	case L2TP_ENCAPTYPE_UDP:
		/* No longer an encapsulation socket. See net/ipv4/udp.c */
		(udp_sk(sk))->encap_type = 0;
		(udp_sk(sk))->encap_rcv = NULL;
		break;
	case L2TP_ENCAPTYPE_IP:
		break;
	}

	/* Remove hooks into tunnel socket */
	sk->sk_destruct = tunnel->old_sk_destruct;
	sk->sk_user_data = NULL;
	tunnel->sock = NULL;

	/* Remove the tunnel struct from the tunnel list */
	pn = l2tp_pernet(tunnel->l2tp_net);
	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
	list_del_rcu(&tunnel->list);
	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
	atomic_dec(&l2tp_tunnel_count);

	/* Tear down all sessions, then drop the tunnel's own refcount */
	l2tp_tunnel_closeall(tunnel);
	l2tp_tunnel_dec_refcount(tunnel);

	/* Call the original destructor (restored above) */
	if (sk->sk_destruct)
		(*sk->sk_destruct)(sk);
end:
	return;
}
1315
/* When the tunnel is closed, all the attached sessions need to go too.
 */
static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
{
	int hash;
	struct hlist_node *walk;
	struct hlist_node *tmp;
	struct l2tp_session *session;

	BUG_ON(tunnel == NULL);

	l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing all sessions...\n",
		  tunnel->name);

	write_lock_bh(&tunnel->hlist_lock);
	for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
again:
		hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
			session = hlist_entry(walk, struct l2tp_session, hlist);

			l2tp_info(session, L2TP_MSG_CONTROL,
				  "%s: closing session\n", session->name);

			/* Unhash first so the session can't be found again */
			hlist_del_init(&session->hlist);

			/* Since we should hold the sock lock while
			 * doing any unbinding, we need to release the
			 * lock we're holding before taking that lock.
			 * Hold a reference to the sock so it doesn't
			 * disappear as we're jumping between locks.
			 */
			if (session->ref != NULL)
				(*session->ref)(session);

			write_unlock_bh(&tunnel->hlist_lock);

			/* L2TPv3 sessions are also on a per-net hash keyed
			 * by session id alone; remove from that too.
			 */
			if (tunnel->version != L2TP_HDR_VER_2) {
				struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);

				spin_lock_bh(&pn->l2tp_session_hlist_lock);
				hlist_del_init_rcu(&session->global_hlist);
				spin_unlock_bh(&pn->l2tp_session_hlist_lock);
				synchronize_rcu();
			}

			if (session->session_close != NULL)
				(*session->session_close)(session);

			/* Drop the reference taken above */
			if (session->deref != NULL)
				(*session->deref)(session);

			write_lock_bh(&tunnel->hlist_lock);

			/* Now restart from the beginning of this hash
			 * chain.  We always remove a session from the
			 * list so we are guaranteed to make forward
			 * progress.
			 */
			goto again;
		}
	}
	write_unlock_bh(&tunnel->hlist_lock);
}
1379
/* Really kill the tunnel.
 * Come here only when all sessions have been cleared from the tunnel.
 */
static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
{
	/* Must only be called once the refcount has hit zero and the
	 * socket has already been detached.
	 */
	BUG_ON(atomic_read(&tunnel->ref_count) != 0);
	BUG_ON(tunnel->sock != NULL);
	l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: free...\n", tunnel->name);
	/* Defer the actual free until after an RCU grace period */
	kfree_rcu(tunnel, rcu);
}
1390
/* Workqueue tunnel deletion function.
 * Shuts down the tunnel socket from process context; the tunnel
 * resources themselves are freed by the socket destructor.
 */
static void l2tp_tunnel_del_work(struct work_struct *work)
{
	struct l2tp_tunnel *tunnel = NULL;
	struct socket *sock = NULL;
	struct sock *sk = NULL;

	tunnel = container_of(work, struct l2tp_tunnel, del_work);
	/* Look up the socket under the tunnel's lock; bail if it's gone */
	sk = l2tp_tunnel_sock_lookup(tunnel);
	if (!sk)
		return;

	sock = sk->sk_socket;
	BUG_ON(!sock);

	/* If the tunnel socket was created directly by the kernel, use the
	 * sk_* API to release the socket now.  Otherwise go through the
	 * inet_* layer to shut the socket down, and let userspace close it.
	 * In either case the tunnel resources are freed in the socket
	 * destructor when the tunnel socket goes away.
	 */
	if (sock->file == NULL) {
		kernel_sock_shutdown(sock, SHUT_RDWR);
		sk_release_kernel(sk);
	} else {
		inet_shutdown(sock, 2);
	}

	/* Drop the reference taken by l2tp_tunnel_sock_lookup() */
	l2tp_tunnel_sock_put(sk);
}
1421
/* Create a socket for the tunnel, if one isn't set up by
 * userspace. This is used for static tunnels where there is no
 * managing L2TP daemon.
 *
 * Since we don't want these sockets to keep a namespace alive by
 * themselves, we drop the socket's namespace refcount after creation.
 * These sockets are freed when the namespace exits using the pernet
 * exit hook.
 *
 * On success, returns 0 and stores the new socket in *sockp.
 * On failure, returns a negative errno and *sockp is NULL.
 */
static int l2tp_tunnel_sock_create(struct net *net,
				u32 tunnel_id,
				u32 peer_tunnel_id,
				struct l2tp_tunnel_cfg *cfg,
				struct socket **sockp)
{
	int err = -EINVAL;
	struct socket *sock = NULL;
	struct sockaddr_in udp_addr = {0};
	struct sockaddr_l2tpip ip_addr = {0};
#if IS_ENABLED(CONFIG_IPV6)
	struct sockaddr_in6 udp6_addr = {0};
	struct sockaddr_l2tpip6 ip6_addr = {0};
#endif

	switch (cfg->encap) {
	case L2TP_ENCAPTYPE_UDP:
#if IS_ENABLED(CONFIG_IPV6)
		if (cfg->local_ip6 && cfg->peer_ip6) {
			/* UDP over IPv6: bind local, connect peer */
			err = sock_create_kern(AF_INET6, SOCK_DGRAM, 0, &sock);
			if (err < 0)
				goto out;

			/* Move the socket into the target namespace
			 * without holding a namespace reference.
			 */
			sk_change_net(sock->sk, net);

			udp6_addr.sin6_family = AF_INET6;
			memcpy(&udp6_addr.sin6_addr, cfg->local_ip6,
			       sizeof(udp6_addr.sin6_addr));
			udp6_addr.sin6_port = htons(cfg->local_udp_port);
			err = kernel_bind(sock, (struct sockaddr *) &udp6_addr,
					  sizeof(udp6_addr));
			if (err < 0)
				goto out;

			udp6_addr.sin6_family = AF_INET6;
			memcpy(&udp6_addr.sin6_addr, cfg->peer_ip6,
			       sizeof(udp6_addr.sin6_addr));
			udp6_addr.sin6_port = htons(cfg->peer_udp_port);
			err = kernel_connect(sock,
					     (struct sockaddr *) &udp6_addr,
					     sizeof(udp6_addr), 0);
			if (err < 0)
				goto out;
		} else
#endif
		{
			/* UDP over IPv4: bind local, connect peer */
			err = sock_create_kern(AF_INET, SOCK_DGRAM, 0, &sock);
			if (err < 0)
				goto out;

			sk_change_net(sock->sk, net);

			udp_addr.sin_family = AF_INET;
			udp_addr.sin_addr = cfg->local_ip;
			udp_addr.sin_port = htons(cfg->local_udp_port);
			err = kernel_bind(sock, (struct sockaddr *) &udp_addr,
					  sizeof(udp_addr));
			if (err < 0)
				goto out;

			udp_addr.sin_family = AF_INET;
			udp_addr.sin_addr = cfg->peer_ip;
			udp_addr.sin_port = htons(cfg->peer_udp_port);
			err = kernel_connect(sock,
					     (struct sockaddr *) &udp_addr,
					     sizeof(udp_addr), 0);
			if (err < 0)
				goto out;
		}

		/* Honour the config's checksum preference on transmit */
		if (!cfg->use_udp_checksums)
			sock->sk->sk_no_check = UDP_CSUM_NOXMIT;

		break;

	case L2TP_ENCAPTYPE_IP:
#if IS_ENABLED(CONFIG_IPV6)
		if (cfg->local_ip6 && cfg->peer_ip6) {
			/* L2TPIP over IPv6: addresses carry the tunnel
			 * connection ids instead of ports.
			 */
			err = sock_create_kern(AF_INET6, SOCK_DGRAM,
					  IPPROTO_L2TP, &sock);
			if (err < 0)
				goto out;

			sk_change_net(sock->sk, net);

			ip6_addr.l2tp_family = AF_INET6;
			memcpy(&ip6_addr.l2tp_addr, cfg->local_ip6,
			       sizeof(ip6_addr.l2tp_addr));
			ip6_addr.l2tp_conn_id = tunnel_id;
			err = kernel_bind(sock, (struct sockaddr *) &ip6_addr,
					  sizeof(ip6_addr));
			if (err < 0)
				goto out;

			ip6_addr.l2tp_family = AF_INET6;
			memcpy(&ip6_addr.l2tp_addr, cfg->peer_ip6,
			       sizeof(ip6_addr.l2tp_addr));
			ip6_addr.l2tp_conn_id = peer_tunnel_id;
			err = kernel_connect(sock,
					     (struct sockaddr *) &ip6_addr,
					     sizeof(ip6_addr), 0);
			if (err < 0)
				goto out;
		} else
#endif
		{
			/* L2TPIP over IPv4 */
			err = sock_create_kern(AF_INET, SOCK_DGRAM,
					  IPPROTO_L2TP, &sock);
			if (err < 0)
				goto out;

			sk_change_net(sock->sk, net);

			ip_addr.l2tp_family = AF_INET;
			ip_addr.l2tp_addr = cfg->local_ip;
			ip_addr.l2tp_conn_id = tunnel_id;
			err = kernel_bind(sock, (struct sockaddr *) &ip_addr,
					  sizeof(ip_addr));
			if (err < 0)
				goto out;

			ip_addr.l2tp_family = AF_INET;
			ip_addr.l2tp_addr = cfg->peer_ip;
			ip_addr.l2tp_conn_id = peer_tunnel_id;
			err = kernel_connect(sock, (struct sockaddr *) &ip_addr,
					     sizeof(ip_addr), 0);
			if (err < 0)
				goto out;
		}
		break;

	default:
		goto out;
	}

out:
	*sockp = sock;
	/* On any error, release the partially set up socket */
	if ((err < 0) && sock) {
		kernel_sock_shutdown(sock, SHUT_RDWR);
		sk_release_kernel(sock->sk);
		*sockp = NULL;
	}

	return err;
}
1576
1577 static struct lock_class_key l2tp_socket_class;
1578
1579 int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp)
1580 {
1581         struct l2tp_tunnel *tunnel = NULL;
1582         int err;
1583         struct socket *sock = NULL;
1584         struct sock *sk = NULL;
1585         struct l2tp_net *pn;
1586         enum l2tp_encap_type encap = L2TP_ENCAPTYPE_UDP;
1587
1588         /* Get the tunnel socket from the fd, which was opened by
1589          * the userspace L2TP daemon. If not specified, create a
1590          * kernel socket.
1591          */
1592         if (fd < 0) {
1593                 err = l2tp_tunnel_sock_create(net, tunnel_id, peer_tunnel_id,
1594                                 cfg, &sock);
1595                 if (err < 0)
1596                         goto err;
1597         } else {
1598                 sock = sockfd_lookup(fd, &err);
1599                 if (!sock) {
1600                         pr_err("tunl %u: sockfd_lookup(fd=%d) returned %d\n",
1601                                tunnel_id, fd, err);
1602                         err = -EBADF;
1603                         goto err;
1604                 }
1605
1606                 /* Reject namespace mismatches */
1607                 if (!net_eq(sock_net(sock->sk), net)) {
1608                         pr_err("tunl %u: netns mismatch\n", tunnel_id);
1609                         err = -EINVAL;
1610                         goto err;
1611                 }
1612         }
1613
1614         sk = sock->sk;
1615
1616         if (cfg != NULL)
1617                 encap = cfg->encap;
1618
1619         /* Quick sanity checks */
1620         switch (encap) {
1621         case L2TP_ENCAPTYPE_UDP:
1622                 err = -EPROTONOSUPPORT;
1623                 if (sk->sk_protocol != IPPROTO_UDP) {
1624                         pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
1625                                tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP);
1626                         goto err;
1627                 }
1628                 break;
1629         case L2TP_ENCAPTYPE_IP:
1630                 err = -EPROTONOSUPPORT;
1631                 if (sk->sk_protocol != IPPROTO_L2TP) {
1632                         pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
1633                                tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP);
1634                         goto err;
1635                 }
1636                 break;
1637         }
1638
1639         /* Check if this socket has already been prepped */
1640         tunnel = (struct l2tp_tunnel *)sk->sk_user_data;
1641         if (tunnel != NULL) {
1642                 /* This socket has already been prepped */
1643                 err = -EBUSY;
1644                 goto err;
1645         }
1646
1647         tunnel = kzalloc(sizeof(struct l2tp_tunnel), GFP_KERNEL);
1648         if (tunnel == NULL) {
1649                 err = -ENOMEM;
1650                 goto err;
1651         }
1652
1653         tunnel->version = version;
1654         tunnel->tunnel_id = tunnel_id;
1655         tunnel->peer_tunnel_id = peer_tunnel_id;
1656         tunnel->debug = L2TP_DEFAULT_DEBUG_FLAGS;
1657
1658         tunnel->magic = L2TP_TUNNEL_MAGIC;
1659         sprintf(&tunnel->name[0], "tunl %u", tunnel_id);
1660         rwlock_init(&tunnel->hlist_lock);
1661
1662         /* The net we belong to */
1663         tunnel->l2tp_net = net;
1664         pn = l2tp_pernet(net);
1665
1666         if (cfg != NULL)
1667                 tunnel->debug = cfg->debug;
1668
1669         /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
1670         tunnel->encap = encap;
1671         if (encap == L2TP_ENCAPTYPE_UDP) {
1672                 /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
1673                 udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP;
1674                 udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv;
1675 #if IS_ENABLED(CONFIG_IPV6)
1676                 if (sk->sk_family == PF_INET6)
1677                         udpv6_encap_enable();
1678                 else
1679 #endif
1680                 udp_encap_enable();
1681         }
1682
1683         sk->sk_user_data = tunnel;
1684
1685         /* Hook on the tunnel socket destructor so that we can cleanup
1686          * if the tunnel socket goes away.
1687          */
1688         tunnel->old_sk_destruct = sk->sk_destruct;
1689         sk->sk_destruct = &l2tp_tunnel_destruct;
1690         tunnel->sock = sk;
1691         tunnel->fd = fd;
1692         lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock");
1693
1694         sk->sk_allocation = GFP_ATOMIC;
1695
1696         /* Init delete workqueue struct */
1697         INIT_WORK(&tunnel->del_work, l2tp_tunnel_del_work);
1698
1699         /* Add tunnel to our list */
1700         INIT_LIST_HEAD(&tunnel->list);
1701         atomic_inc(&l2tp_tunnel_count);
1702
1703         /* Bump the reference count. The tunnel context is deleted
1704          * only when this drops to zero. Must be done before list insertion
1705          */
1706         l2tp_tunnel_inc_refcount(tunnel);
1707         spin_lock_bh(&pn->l2tp_tunnel_list_lock);
1708         list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
1709         spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
1710
1711         err = 0;
1712 err:
1713         if (tunnelp)
1714                 *tunnelp = tunnel;
1715
1716         /* If tunnel's socket was created by the kernel, it doesn't
1717          *  have a file.
1718          */
1719         if (sock && sock->file)
1720                 sockfd_put(sock);
1721
1722         return err;
1723 }
1724 EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
1725
1726 /* This function is used by the netlink TUNNEL_DELETE command.
1727  */
1728 int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
1729 {
1730         return (false == queue_work(l2tp_wq, &tunnel->del_work));
1731 }
1732 EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
1733
/* Really kill the session.
 * Must only be called once the session refcount has dropped to zero;
 * unhashes the session, releases its references on the tunnel and the
 * tunnel socket, then frees the session memory.
 */
void l2tp_session_free(struct l2tp_session *session)
{
	struct l2tp_tunnel *tunnel;

	/* Caller contract: no references may remain. */
	BUG_ON(atomic_read(&session->ref_count) != 0);

	tunnel = session->tunnel;
	if (tunnel != NULL) {
		BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);

		/* Delete the session from the hash */
		write_lock_bh(&tunnel->hlist_lock);
		hlist_del_init(&session->hlist);
		write_unlock_bh(&tunnel->hlist_lock);

		/* Unlink from the global hash if not L2TPv2 */
		if (tunnel->version != L2TP_HDR_VER_2) {
			struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);

			spin_lock_bh(&pn->l2tp_session_hlist_lock);
			hlist_del_init_rcu(&session->global_hlist);
			spin_unlock_bh(&pn->l2tp_session_hlist_lock);
			/* Wait for RCU readers of the global hash to finish
			 * before the session memory is freed below.
			 */
			synchronize_rcu();
		}

		/* Management sessions (id 0) are excluded from the count,
		 * matching the atomic_inc in l2tp_session_create().
		 */
		if (session->session_id != 0)
			atomic_dec(&l2tp_session_count);

		/* Drop the socket reference taken by sock_hold() in
		 * l2tp_session_create().
		 */
		sock_put(tunnel->sock);

		/* This will delete the tunnel context if this
		 * is the last session on the tunnel.
		 */
		session->tunnel = NULL;
		l2tp_tunnel_dec_refcount(tunnel);
	}

	kfree(session);

	return;
}
EXPORT_SYMBOL_GPL(l2tp_session_free);
1778
1779 /* This function is used by the netlink SESSION_DELETE command and by
1780    pseudowire modules.
1781  */
1782 int l2tp_session_delete(struct l2tp_session *session)
1783 {
1784         if (session->session_close != NULL)
1785                 (*session->session_close)(session);
1786
1787         l2tp_session_dec_refcount(session);
1788
1789         return 0;
1790 }
1791 EXPORT_SYMBOL_GPL(l2tp_session_delete);
1792
1793
1794 /* We come here whenever a session's send_seq, cookie_len or
1795  * l2specific_len parameters are set.
1796  */
1797 static void l2tp_session_set_header_len(struct l2tp_session *session, int version)
1798 {
1799         if (version == L2TP_HDR_VER_2) {
1800                 session->hdr_len = 6;
1801                 if (session->send_seq)
1802                         session->hdr_len += 4;
1803         } else {
1804                 session->hdr_len = 4 + session->cookie_len + session->l2specific_len + session->offset;
1805                 if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP)
1806                         session->hdr_len += 4;
1807         }
1808
1809 }
1810
/* Allocate and initialise a new session on @tunnel.
 *
 * priv_size       - extra bytes allocated after the session struct for
 *                   pseudowire-private data
 * tunnel          - parent tunnel context
 * session_id      - local session id
 * peer_session_id - peer's session id
 * cfg             - optional session configuration; when NULL, defaults
 *                   (and the tunnel's debug flags) are used
 *
 * Returns the new session, or NULL on allocation failure.
 *
 * NOTE(review): no check is made here for an existing session with the
 * same id on this tunnel (or in the global L2TPv3 hash) — presumably
 * callers are expected to avoid duplicates; later kernels add an
 * explicit duplicate check. TODO confirm callers guarantee uniqueness.
 */
struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
{
	struct l2tp_session *session;

	session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL);
	if (session != NULL) {
		session->magic = L2TP_SESSION_MAGIC;
		session->tunnel = tunnel;

		session->session_id = session_id;
		session->peer_session_id = peer_session_id;
		session->nr = 0;

		sprintf(&session->name[0], "sess %u/%u",
			tunnel->tunnel_id, session->session_id);

		skb_queue_head_init(&session->reorder_q);

		INIT_HLIST_NODE(&session->hlist);
		INIT_HLIST_NODE(&session->global_hlist);

		/* Inherit debug options from tunnel */
		session->debug = tunnel->debug;

		/* Copy caller-supplied parameters (overrides the inherited
		 * debug flags when cfg is given).
		 */
		if (cfg) {
			session->pwtype = cfg->pw_type;
			session->debug = cfg->debug;
			session->mtu = cfg->mtu;
			session->mru = cfg->mru;
			session->send_seq = cfg->send_seq;
			session->recv_seq = cfg->recv_seq;
			session->lns_mode = cfg->lns_mode;
			session->reorder_timeout = cfg->reorder_timeout;
			session->offset = cfg->offset;
			session->l2specific_type = cfg->l2specific_type;
			session->l2specific_len = cfg->l2specific_len;
			session->cookie_len = cfg->cookie_len;
			memcpy(&session->cookie[0], &cfg->cookie[0], cfg->cookie_len);
			session->peer_cookie_len = cfg->peer_cookie_len;
			memcpy(&session->peer_cookie[0], &cfg->peer_cookie[0], cfg->peer_cookie_len);
		}

		/* Select the header builder matching the tunnel's L2TP
		 * protocol version.
		 */
		if (tunnel->version == L2TP_HDR_VER_2)
			session->build_header = l2tp_build_l2tpv2_header;
		else
			session->build_header = l2tp_build_l2tpv3_header;

		l2tp_session_set_header_len(session, tunnel->version);

		/* Bump the reference count. The session context is deleted
		 * only when this drops to zero.
		 */
		l2tp_session_inc_refcount(session);
		l2tp_tunnel_inc_refcount(tunnel);

		/* Ensure tunnel socket isn't deleted */
		sock_hold(tunnel->sock);

		/* Add session to the tunnel's hash list */
		write_lock_bh(&tunnel->hlist_lock);
		hlist_add_head(&session->hlist,
			       l2tp_session_id_hash(tunnel, session_id));
		write_unlock_bh(&tunnel->hlist_lock);

		/* And to the global session list if L2TPv3 */
		if (tunnel->version != L2TP_HDR_VER_2) {
			struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);

			spin_lock_bh(&pn->l2tp_session_hlist_lock);
			hlist_add_head_rcu(&session->global_hlist,
					   l2tp_session_id_hash_2(pn, session_id));
			spin_unlock_bh(&pn->l2tp_session_hlist_lock);
		}

		/* Ignore management session in session count value */
		if (session->session_id != 0)
			atomic_inc(&l2tp_session_count);
	}

	return session;
}
EXPORT_SYMBOL_GPL(l2tp_session_create);
1893
1894 /*****************************************************************************
1895  * Init and cleanup
1896  *****************************************************************************/
1897
1898 static __net_init int l2tp_init_net(struct net *net)
1899 {
1900         struct l2tp_net *pn = net_generic(net, l2tp_net_id);
1901         int hash;
1902
1903         INIT_LIST_HEAD(&pn->l2tp_tunnel_list);
1904         spin_lock_init(&pn->l2tp_tunnel_list_lock);
1905
1906         for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
1907                 INIT_HLIST_HEAD(&pn->l2tp_session_hlist[hash]);
1908
1909         spin_lock_init(&pn->l2tp_session_hlist_lock);
1910
1911         return 0;
1912 }
1913
1914 static __net_exit void l2tp_exit_net(struct net *net)
1915 {
1916         struct l2tp_net *pn = l2tp_pernet(net);
1917         struct l2tp_tunnel *tunnel = NULL;
1918
1919         rcu_read_lock_bh();
1920         list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
1921                 (void)l2tp_tunnel_delete(tunnel);
1922         }
1923         rcu_read_unlock_bh();
1924 }
1925
/* Per-network-namespace hooks and storage (struct l2tp_net) for the
 * tunnel list and L2TPv3 session hash tables.
 */
static struct pernet_operations l2tp_net_ops = {
	.init = l2tp_init_net,
	.exit = l2tp_exit_net,
	.id   = &l2tp_net_id,
	.size = sizeof(struct l2tp_net),
};
1932
1933 static int __init l2tp_init(void)
1934 {
1935         int rc = 0;
1936
1937         rc = register_pernet_device(&l2tp_net_ops);
1938         if (rc)
1939                 goto out;
1940
1941         l2tp_wq = alloc_workqueue("l2tp", WQ_NON_REENTRANT | WQ_UNBOUND, 0);
1942         if (!l2tp_wq) {
1943                 pr_err("alloc_workqueue failed\n");
1944                 rc = -ENOMEM;
1945                 goto out;
1946         }
1947
1948         pr_info("L2TP core driver, %s\n", L2TP_DRV_VERSION);
1949
1950 out:
1951         return rc;
1952 }
1953
1954 static void __exit l2tp_exit(void)
1955 {
1956         unregister_pernet_device(&l2tp_net_ops);
1957         if (l2tp_wq) {
1958                 destroy_workqueue(l2tp_wq);
1959                 l2tp_wq = NULL;
1960         }
1961 }
1962
1963 module_init(l2tp_init);
1964 module_exit(l2tp_exit);
1965
1966 MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
1967 MODULE_DESCRIPTION("L2TP core");
1968 MODULE_LICENSE("GPL");
1969 MODULE_VERSION(L2TP_DRV_VERSION);
1970