// SPDX-License-Identifier: GPL-2.0
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The IP fragmentation functionality.
 *
 * Authors:	Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox <alan@lxorguk.ukuu.org.uk>
 *
 * Fixes:
 *		Alan Cox	:	Split from ip.c , see ip_input.c for history.
 *		David S. Miller :	Begin massive cleanup...
 *		Andi Kleen	:	Add sysctls.
 *		xxxx		:	Overlapfrag bug.
 *		Ultima		:	ip_expire() kernel panic.
 *		Bill Hawes	:	Frag accounting and evictor fixes.
 *		John McDonald	:	0 length frag bug.
 *		Alexey Kuznetsov:	SMP races, threading, cleanup.
 *		Patrick McHardy :	LRU queue of frag heads for evictor.
 */

#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/netdevice.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <net/route.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <net/inet_frag.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/inet.h>
#include <linux/netfilter_ipv4.h>
#include <net/inet_ecn.h>
#include <net/l3mdev.h>

/* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6
 * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c
 * as well. Or notify me, at least. --ANK
 */
static const char ip_frag_cache_name[] = "ip4-frags";

struct ipfrag_skb_cb
{
	struct inet_skb_parm	h;
	int			offset;
};

#define FRAG_CB(skb)	((struct ipfrag_skb_cb *)((skb)->cb))

/* Describe an entry in the "incomplete datagrams" queue. */
struct ipq {
	struct inet_frag_queue q;

	u8		ecn;		/* RFC3168 support */
	u16		max_df_size;	/* largest frag with DF set seen */
	int		iif;
	unsigned int	rid;
	struct inet_peer *peer;
};

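/* Map the two ECN bits of the TOS byte to a one-hot flag:
 * (tos & INET_ECN_MASK) is 0..3 (Not-ECT, ECT(1), ECT(0), CE), so the
 * result is 0x01, 0x02, 0x04 or 0x08.  These values are OR-ed into
 * qp->ecn as fragments arrive; ip_frag_ecn_table[] later folds the set
 * into a final TOS value, or 0xff for an invalid mix (e.g. Not-ECT
 * combined with CE), which makes ip_frag_reasm() drop the datagram.
 */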
static u8 ip4_frag_ecn(u8 tos)
{
	return 1 << (tos & INET_ECN_MASK);
}

static struct inet_frags ip4_frags;

static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
			 struct net_device *dev);


static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
{
	struct ipq *qp = container_of(q, struct ipq, q);
	struct netns_ipv4 *ipv4 = container_of(q->net, struct netns_ipv4,
					       frags);
	struct net *net = container_of(ipv4, struct net, ipv4);

	const struct frag_v4_compare_key *key = a;

	q->key.v4 = *key;
	qp->ecn = 0;
	qp->peer = q->net->max_dist ?
		inet_getpeer_v4(net->ipv4.peers, key->saddr, key->vif, 1) :
		NULL;
}

static void ip4_frag_free(struct inet_frag_queue *q)
{
	struct ipq *qp;

	qp = container_of(q, struct ipq, q);
	if (qp->peer)
		inet_putpeer(qp->peer);
}


/* Destruction primitives. */

static void ipq_put(struct ipq *ipq)
{
	inet_frag_put(&ipq->q);
}

/* Kill ipq entry. It is not destroyed immediately,
 * because caller (and someone more) holds reference count.
 */
static void ipq_kill(struct ipq *ipq)
{
	inet_frag_kill(&ipq->q);
}

static bool frag_expire_skip_icmp(u32 user)
{
	return user == IP_DEFRAG_AF_PACKET ||
	       ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_IN,
					 __IP_DEFRAG_CONNTRACK_IN_END) ||
	       ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_BRIDGE_IN,
					 __IP_DEFRAG_CONNTRACK_BRIDGE_IN);
}

/*
 * Oops, a fragment queue timed out. Kill it and send an ICMP reply.
 */
static void ip_expire(struct timer_list *t)
{
	struct inet_frag_queue *frag = from_timer(frag, t, timer);
	struct sk_buff *clone, *head;
	const struct iphdr *iph;
	struct net *net;
	struct ipq *qp;
	int err;

	qp = container_of(frag, struct ipq, q);
	net = container_of(qp->q.net, struct net, ipv4.frags);

	rcu_read_lock();
	spin_lock(&qp->q.lock);

	if (qp->q.flags & INET_FRAG_COMPLETE)
		goto out;

	ipq_kill(qp);
	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);

	head = qp->q.fragments;

	__IP_INC_STATS(net, IPSTATS_MIB_REASMTIMEOUT);

	if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !head)
		goto out;

	head->dev = dev_get_by_index_rcu(net, qp->iif);
	if (!head->dev)
		goto out;

	/* skb has no dst, perform route lookup again */
	iph = ip_hdr(head);
	err = ip_route_input_noref(head, iph->daddr, iph->saddr,
				   iph->tos, head->dev);
	if (err)
		goto out;

	/* Only an end host needs to send an ICMP
	 * "Fragment Reassembly Timeout" message, per RFC792.
	 */
	if (frag_expire_skip_icmp(qp->q.key.v4.user) &&
	    (skb_rtable(head)->rt_type != RTN_LOCAL))
		goto out;

	clone = skb_clone(head, GFP_ATOMIC);

	/* Send an ICMP "Fragment Reassembly Timeout" message. */
	if (clone) {
		spin_unlock(&qp->q.lock);
		icmp_send(clone, ICMP_TIME_EXCEEDED,
			  ICMP_EXC_FRAGTIME, 0);
		consume_skb(clone);
		goto out_rcu_unlock;
	}
out:
	spin_unlock(&qp->q.lock);
out_rcu_unlock:
	rcu_read_unlock();
	ipq_put(qp);
}

/* Find the correct entry in the "incomplete datagrams" queue for
 * this IP datagram, and create new one, if nothing is found.
 */
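/* Fragments belong to the same datagram iff (saddr, daddr, id, protocol)
 * all match (RFC 791).  The key also carries the kernel-internal defrag
 * "user" and the L3 master device index, so that e.g. conntrack and local
 * delivery, or two different VRFs, never share a queue.
 */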
static struct ipq *ip_find(struct net *net, struct iphdr *iph,
			   u32 user, int vif)
{
	struct frag_v4_compare_key key = {
		.saddr = iph->saddr,
		.daddr = iph->daddr,
		.user = user,
		.vif = vif,
		.id = iph->id,
		.protocol = iph->protocol,
	};
	struct inet_frag_queue *q;

	q = inet_frag_find(&net->ipv4.frags, &key);
	if (IS_ERR_OR_NULL(q)) {
		inet_frag_maybe_warn_overflow(q, pr_fmt());
		return NULL;
	}
	return container_of(q, struct ipq, q);
}

/* Is the fragment too far ahead to be part of ipq? */
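/* Every fragment that reaches ip_frag_queue() bumps the source peer's
 * "rid" counter, and each queue remembers the value it last saw in
 * qp->rid.  If more than max_dist (default 64, tunable via
 * ipfrag_max_dist) fragments from the same peer have gone by since this
 * queue last received one, the queue is judged stale and gets flushed by
 * ip_frag_reinit().
 */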
static int ip_frag_too_far(struct ipq *qp)
{
	struct inet_peer *peer = qp->peer;
	unsigned int max = qp->q.net->max_dist;
	unsigned int start, end;

	int rc;

	if (!peer || !max)
		return 0;

	start = qp->rid;
	end = atomic_inc_return(&peer->rid);
	qp->rid = end;

	rc = qp->q.fragments && (end - start) > max;

	if (rc) {
		struct net *net;

		net = container_of(qp->q.net, struct net, ipv4.frags);
		__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
	}

	return rc;
}

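/* Flush a stale queue in place: free the fragments collected so far,
 * return their memory to the per-netns accounting and reset the queue
 * state, while keeping the timer and the queue entry itself alive.
 */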
static int ip_frag_reinit(struct ipq *qp)
{
	struct sk_buff *fp;
	unsigned int sum_truesize = 0;

	if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
		refcount_inc(&qp->q.refcnt);
		return -ETIMEDOUT;
	}

	fp = qp->q.fragments;
	do {
		struct sk_buff *xp = fp->next;

		sum_truesize += fp->truesize;
		kfree_skb(fp);
		fp = xp;
	} while (fp);
	sub_frag_mem_limit(qp->q.net, sum_truesize);

	qp->q.flags = 0;
	qp->q.len = 0;
	qp->q.meat = 0;
	qp->q.fragments = NULL;
	qp->q.fragments_tail = NULL;
	qp->iif = 0;
	qp->ecn = 0;

	return 0;
}

/* Add new segment to existing queue. */
static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	unsigned int fragsize;
	int flags, offset;
	int ihl, end;
	int err = -ENOENT;
	u8 ecn;

	if (qp->q.flags & INET_FRAG_COMPLETE)
		goto err;

	if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
	    unlikely(ip_frag_too_far(qp)) &&
	    unlikely(err = ip_frag_reinit(qp))) {
		ipq_kill(qp);
		goto err;
	}

	ecn = ip4_frag_ecn(ip_hdr(skb)->tos);
	offset = ntohs(ip_hdr(skb)->frag_off);
	flags = offset & ~IP_OFFSET;
	offset &= IP_OFFSET;
	offset <<= 3;		/* offset is in 8-byte chunks */
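	/* Worked example: frag_off = 0x2004 after ntohs() gives
	 * flags = 0x2000 (IP_MF set, more fragments follow) and an
	 * offset field of 4, i.e. this fragment's payload starts at
	 * byte 4 * 8 = 32 of the original datagram.
	 */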
	ihl = ip_hdrlen(skb);

	/* Determine the position of this fragment. */
	end = offset + skb->len - skb_network_offset(skb) - ihl;
	err = -EINVAL;

	/* Is this the final fragment? */
	if ((flags & IP_MF) == 0) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < qp->q.len ||
		    ((qp->q.flags & INET_FRAG_LAST_IN) && end != qp->q.len))
			goto err;
		qp->q.flags |= INET_FRAG_LAST_IN;
		qp->q.len = end;
	} else {
		if (end & 7) {
			end &= ~7;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
		if (end > qp->q.len) {
			/* Some bits beyond end -> corruption. */
			if (qp->q.flags & INET_FRAG_LAST_IN)
				goto err;
			qp->q.len = end;
		}
	}
	if (end == offset)
		goto err;

	err = -ENOMEM;
	if (!pskb_pull(skb, skb_network_offset(skb) + ihl))
		goto err;

	err = pskb_trim_rcsum(skb, end - offset);
	if (err)
		goto err;

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far. We must know where to put
	 * this fragment, right?
	 */
	prev = qp->q.fragments_tail;
	if (!prev || FRAG_CB(prev)->offset < offset) {
		next = NULL;
		goto found;
	}
	prev = NULL;
	for (next = qp->q.fragments; next != NULL; next = next->next) {
		if (FRAG_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

found:
	/* We found where to put this one. Check for overlap with
	 * preceding fragment, and, if needed, align things so that
	 * any overlaps are eliminated.
	 */
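	/* E.g. if the preceding fragment covers bytes 0-1479 and this one
	 * claims to start at offset 1472, the 8 overlapping bytes are
	 * pulled off the head of this skb and its offset advances to 1480;
	 * trailing overlaps are eaten from the *following* fragments in
	 * the loop further down.
	 */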
	if (prev) {
		int i = (FRAG_CB(prev)->offset + prev->len) - offset;

		if (i > 0) {
			offset += i;
			err = -EINVAL;
			if (end <= offset)
				goto err;
			err = -ENOMEM;
			if (!pskb_pull(skb, i))
				goto err;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
	}

	err = -ENOMEM;

	while (next && FRAG_CB(next)->offset < end) {
		int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */

		if (i < next->len) {
			/* Eat head of the next overlapped fragment
			 * and leave the loop. The next ones cannot overlap.
			 */
			if (!pskb_pull(next, i))
				goto err;
			FRAG_CB(next)->offset += i;
			qp->q.meat -= i;
			if (next->ip_summed != CHECKSUM_UNNECESSARY)
				next->ip_summed = CHECKSUM_NONE;
			break;
		} else {
			struct sk_buff *free_it = next;

			/* Old fragment is completely overridden by the
			 * new one; drop it.
			 */
			next = next->next;

			if (prev)
				prev->next = next;
			else
				qp->q.fragments = next;

			qp->q.meat -= free_it->len;
			sub_frag_mem_limit(qp->q.net, free_it->truesize);
			kfree_skb(free_it);
		}
	}

	FRAG_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (!next)
		qp->q.fragments_tail = skb;
	if (prev)
		prev->next = skb;
	else
		qp->q.fragments = skb;

	dev = skb->dev;
	if (dev) {
		qp->iif = dev->ifindex;
		skb->dev = NULL;
	}
	qp->q.stamp = skb->tstamp;
	qp->q.meat += skb->len;
	qp->ecn |= ecn;
	add_frag_mem_limit(qp->q.net, skb->truesize);
	if (offset == 0)
		qp->q.flags |= INET_FRAG_FIRST_IN;

	fragsize = skb->len + ihl;

	if (fragsize > qp->q.max_size)
		qp->q.max_size = fragsize;

	if (ip_hdr(skb)->frag_off & htons(IP_DF) &&
	    fragsize > qp->max_df_size)
		qp->max_df_size = fragsize;

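	/* Reassemble once both endpoints have been seen and the byte count
	 * we hold (meat) equals the expected total length: overlaps are
	 * trimmed above so each byte is counted once, and a hole anywhere
	 * keeps meat < len.
	 */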
	if (qp->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    qp->q.meat == qp->q.len) {
		unsigned long orefdst = skb->_skb_refdst;

		skb->_skb_refdst = 0UL;
		err = ip_frag_reasm(qp, prev, dev);
		skb->_skb_refdst = orefdst;
		return err;
	}

	skb_dst_drop(skb);
	return -EINPROGRESS;

err:
	kfree_skb(skb);
	return err;
}


/* Build a new IP datagram from all its fragments. */

static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
			 struct net_device *dev)
{
	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
	struct iphdr *iph;
	struct sk_buff *fp, *head = qp->q.fragments;
	int len;
	int ihlen;
	int err;
	u8 ecn;

	ipq_kill(qp);

	ecn = ip_frag_ecn_table[qp->ecn];
	if (unlikely(ecn == 0xff)) {
		err = -EINVAL;
		goto out_fail;
	}
	/* Make the one we just received the head. */
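	/* skb_morph() below transplants the old head fragment's data into
	 * the skb the caller handed to ip_defrag(), while a clone takes
	 * that skb's place in the fragment list; on success the caller's
	 * skb therefore *is* the reassembled datagram.
	 */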
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);
		if (!fp)
			goto out_nomem;

		fp->next = head->next;
		if (!fp->next)
			qp->q.fragments_tail = fp;
		prev->next = fp;

		skb_morph(head, qp->q.fragments);
		head->next = qp->q.fragments->next;

		consume_skb(qp->q.fragments);
		qp->q.fragments = head;
	}

	WARN_ON(!head);
	WARN_ON(FRAG_CB(head)->offset != 0);

	/* Allocate a new buffer for the datagram. */
	ihlen = ip_hdrlen(head);
	len = ihlen + qp->q.len;

	err = -E2BIG;
	if (len > 65535)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_unclone(head, GFP_ATOMIC))
		goto out_nomem;

	/* If the first fragment is fragmented itself, we split
	 * it into two chunks: the first with data and paged part
	 * and the second, holding only fragments.
	 */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		clone = alloc_skb(0, GFP_ATOMIC);
		if (!clone)
			goto out_nomem;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		add_frag_mem_limit(qp->q.net, clone->truesize);
	}

	skb_shinfo(head)->frag_list = head->next;
	skb_push(head, head->data - skb_network_header(head));

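	/* Fold every remaining fragment into the head as a frag_list
	 * member, accumulating length and, when all pieces are
	 * CHECKSUM_COMPLETE, adding up their partial checksums with
	 * csum_add().
	 */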
	for (fp = head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
	}
	sub_frag_mem_limit(qp->q.net, head->truesize);

	head->next = NULL;
	head->dev = dev;
	head->tstamp = qp->q.stamp;
	IPCB(head)->frag_max_size = max(qp->max_df_size, qp->q.max_size);

	iph = ip_hdr(head);
	iph->tot_len = htons(len);
	iph->tos |= ecn;

	/* When we set IP_DF on a refragmented skb we must also force a
	 * call to ip_fragment to avoid forwarding a DF-skb of size s while
	 * original sender only sent fragments of size f (where f < s).
	 *
	 * We only set DF/IPSKB_FRAG_PMTU if such DF fragment was the largest
	 * frag seen to avoid sending tiny DF-fragments in case skb was built
	 * from one very small df-fragment and one large non-df frag.
	 */
	if (qp->max_df_size == qp->q.max_size) {
		IPCB(head)->flags |= IPSKB_FRAG_PMTU;
		iph->frag_off = htons(IP_DF);
	} else {
		iph->frag_off = 0;
	}

	ip_send_check(iph);

	__IP_INC_STATS(net, IPSTATS_MIB_REASMOKS);
	qp->q.fragments = NULL;
	qp->q.fragments_tail = NULL;
	return 0;

out_nomem:
	net_dbg_ratelimited("queue_glue: no memory for gluing queue %p\n", qp);
	err = -ENOMEM;
	goto out_fail;
out_oversize:
	net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->q.key.v4.saddr);
out_fail:
	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
	return err;
}

/* Process an incoming IP datagram fragment. */
int ip_defrag(struct net *net, struct sk_buff *skb, u32 user)
{
	struct net_device *dev = skb->dev ? : skb_dst(skb)->dev;
	int vif = l3mdev_master_ifindex_rcu(dev);
	struct ipq *qp;

	__IP_INC_STATS(net, IPSTATS_MIB_REASMREQDS);
	skb_orphan(skb);

	/* Lookup (or create) queue header */
	qp = ip_find(net, ip_hdr(skb), user, vif);
	if (qp) {
		int ret;

		spin_lock(&qp->q.lock);

		ret = ip_frag_queue(qp, skb);

		spin_unlock(&qp->q.lock);
		ipq_put(qp);
		return ret;
	}

	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -ENOMEM;
}
EXPORT_SYMBOL(ip_defrag);

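/* Defrag helper for callers that may hold a shared skb (e.g. packet
 * sockets with fanout defrag, or macvlan): returns the original skb
 * untouched when it is not an IPv4 fragment or looks malformed, NULL
 * when the fragment was queued, and the reassembled datagram once it
 * is complete.
 */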
struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
{
	struct iphdr iph;
	int netoff;
	u32 len;

	if (skb->protocol != htons(ETH_P_IP))
		return skb;

	netoff = skb_network_offset(skb);

	if (skb_copy_bits(skb, netoff, &iph, sizeof(iph)) < 0)
		return skb;

	if (iph.ihl < 5 || iph.version != 4)
		return skb;

	len = ntohs(iph.tot_len);
	if (skb->len < netoff + len || len < (iph.ihl * 4))
		return skb;

	if (ip_is_fragment(&iph)) {
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (skb) {
			if (!pskb_may_pull(skb, netoff + iph.ihl * 4))
				return skb;
			if (pskb_trim_rcsum(skb, netoff + len))
				return skb;
			memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
			if (ip_defrag(net, skb, user))
				return NULL;
			skb_clear_hash(skb);
		}
	}
	return skb;
}
EXPORT_SYMBOL(ip_check_defrag);

#ifdef CONFIG_SYSCTL
static int zero;

static struct ctl_table ip4_frags_ns_ctl_table[] = {
	{
		.procname	= "ipfrag_high_thresh",
		.data		= &init_net.ipv4.frags.high_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &init_net.ipv4.frags.low_thresh
	},
	{
		.procname	= "ipfrag_low_thresh",
		.data		= &init_net.ipv4.frags.low_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
		.extra2		= &init_net.ipv4.frags.high_thresh
	},
	{
		.procname	= "ipfrag_time",
		.data		= &init_net.ipv4.frags.timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "ipfrag_max_dist",
		.data		= &init_net.ipv4.frags.max_dist,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero
	},
	{ }
};

/* secret interval has been deprecated */
static int ip4_frags_secret_interval_unused;
static struct ctl_table ip4_frags_ctl_table[] = {
	{
		.procname	= "ipfrag_secret_interval",
		.data		= &ip4_frags_secret_interval_unused,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

static int __net_init ip4_frags_ns_ctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = ip4_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
		if (!table)
			goto err_alloc;

		table[0].data = &net->ipv4.frags.high_thresh;
		table[0].extra1 = &net->ipv4.frags.low_thresh;
		table[0].extra2 = &init_net.ipv4.frags.high_thresh;
		table[1].data = &net->ipv4.frags.low_thresh;
		table[1].extra2 = &net->ipv4.frags.high_thresh;
		table[2].data = &net->ipv4.frags.timeout;
		table[3].data = &net->ipv4.frags.max_dist;
	}

	hdr = register_net_sysctl(net, "net/ipv4", table);
	if (!hdr)
		goto err_reg;

	net->ipv4.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net)
{
	struct ctl_table *table;

	table = net->ipv4.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.frags_hdr);
	kfree(table);
}

static void __init ip4_frags_ctl_register(void)
{
	register_net_sysctl(&init_net, "net/ipv4", ip4_frags_ctl_table);
}
#else
static int ip4_frags_ns_ctl_register(struct net *net)
{
	return 0;
}

static void ip4_frags_ns_ctl_unregister(struct net *net)
{
}

static void __init ip4_frags_ctl_register(void)
{
}
#endif

static int __net_init ipv4_frags_init_net(struct net *net)
{
	int res;

	/* Fragment cache limits.
	 *
	 * The fragment memory accounting code, (tries to) account for
	 * the real memory usage, by measuring both the size of frag
	 * queue struct (inet_frag_queue (ipv4:ipq/ipv6:frag_queue))
	 * and the SKB's truesize.
	 *
	 * A 64K fragment consumes 129736 bytes (44*2944)+200
	 * (1500 truesize == 2944, sizeof(struct ipq) == 200)
	 *
	 * We will commit 4MB at one time. Should we cross that limit
	 * we will prune down to 3MB, making room for approx 8 big 64K
	 * fragments 8x128k.
	 */
	net->ipv4.frags.high_thresh = 4 * 1024 * 1024;
	net->ipv4.frags.low_thresh  = 3 * 1024 * 1024;
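	/* With the numbers above, 4 MB / 129736 B means the high threshold
	 * accommodates roughly 32 worst-case 64K datagrams per namespace
	 * before the cache stops creating new queues.
	 */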
	/*
	 * Important NOTE! Fragment queue must be destroyed before MSL expires.
	 * RFC 791 is wrong in proposing to prolong the timer on each fragment
	 * arrival by that fragment's TTL.
	 */
	net->ipv4.frags.timeout = IP_FRAG_TIME;

	net->ipv4.frags.max_dist = 64;
	net->ipv4.frags.f = &ip4_frags;

	res = inet_frags_init_net(&net->ipv4.frags);
	if (res < 0)
		return res;
	res = ip4_frags_ns_ctl_register(net);
	if (res < 0)
		inet_frags_exit_net(&net->ipv4.frags);
	return res;
}

static void __net_exit ipv4_frags_exit_net(struct net *net)
{
	ip4_frags_ns_ctl_unregister(net);
	inet_frags_exit_net(&net->ipv4.frags);
}

static struct pernet_operations ip4_frags_ops = {
	.init = ipv4_frags_init_net,
	.exit = ipv4_frags_exit_net,
};

Eric Dumazet648700f2018-03-31 12:58:49 -0700845
846static u32 ip4_key_hashfn(const void *data, u32 len, u32 seed)
847{
848 return jhash2(data,
849 sizeof(struct frag_v4_compare_key) / sizeof(u32), seed);
850}
851
852static u32 ip4_obj_hashfn(const void *data, u32 len, u32 seed)
853{
854 const struct inet_frag_queue *fq = data;
855
856 return jhash2((const u32 *)&fq->key.v4,
857 sizeof(struct frag_v4_compare_key) / sizeof(u32), seed);
858}
859
860static int ip4_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
861{
862 const struct frag_v4_compare_key *key = arg->key;
863 const struct inet_frag_queue *fq = ptr;
864
865 return !!memcmp(&fq->key, key, sizeof(*key));
866}
867
868static const struct rhashtable_params ip4_rhash_params = {
869 .head_offset = offsetof(struct inet_frag_queue, node),
870 .key_offset = offsetof(struct inet_frag_queue, key),
871 .key_len = sizeof(struct frag_v4_compare_key),
872 .hashfn = ip4_key_hashfn,
873 .obj_hashfn = ip4_obj_hashfn,
874 .obj_cmpfn = ip4_obj_cmpfn,
875 .automatic_shrinking = true,
876};
877
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -0700878void __init ipfrag_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700879{
Pavel Emelyanovc6fda282007-10-17 19:46:47 -0700880 ip4_frags.constructor = ip4_frag_init;
Pavel Emelyanov1e4b8282007-10-15 02:39:14 -0700881 ip4_frags.destructor = ip4_frag_free;
Pavel Emelyanov1e4b8282007-10-15 02:39:14 -0700882 ip4_frags.qsize = sizeof(struct ipq);
Pavel Emelyanove521db92007-10-17 19:45:23 -0700883 ip4_frags.frag_expire = ip_expire;
Nikolay Aleksandrovd4ad4d22014-08-01 12:29:48 +0200884 ip4_frags.frags_cache_name = ip_frag_cache_name;
Eric Dumazet648700f2018-03-31 12:58:49 -0700885 ip4_frags.rhash_params = ip4_rhash_params;
Nikolay Aleksandrovd4ad4d22014-08-01 12:29:48 +0200886 if (inet_frags_init(&ip4_frags))
887 panic("IP: failed to allocate ip4_frags cache\n");
Eric Dumazet483a6e42018-03-31 12:58:47 -0700888 ip4_frags_ctl_register();
889 register_pernet_subsys(&ip4_frags_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700890}