ipv4: ip_fragment.c: per-netns fragment queues, RFC 3168 ECN checks, and reassembly cleanups
[linux-2.6.git] net/ipv4/ip_fragment.c
index 453ae04..fdaabf2 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -5,10 +5,8 @@
  *
  *             The IP fragmentation functionality.
  *
- * Version:    $Id: ip_fragment.c,v 1.59 2002/01/12 07:54:56 davem Exp $
- *
  * Authors:    Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
- *             Alan Cox <Alan.Cox@linux.org>
+ *             Alan Cox <alan@lxorguk.ukuu.org.uk>
  *
  * Fixes:
 *             Alan Cox        :       Split from ip.c, see ip_input.c for history.
@@ -34,6 +32,9 @@
 #include <linux/netdevice.h>
 #include <linux/jhash.h>
 #include <linux/random.h>
+#include <linux/slab.h>
+#include <net/route.h>
+#include <net/dst.h>
 #include <net/sock.h>
 #include <net/ip.h>
 #include <net/icmp.h>
 #include <linux/udp.h>
 #include <linux/inet.h>
 #include <linux/netfilter_ipv4.h>
+#include <net/inet_ecn.h>
 
 /* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6
  * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c
  * as well. Or notify me, at least. --ANK
  */
 
-int sysctl_ipfrag_max_dist __read_mostly = 64;
+static int sysctl_ipfrag_max_dist __read_mostly = 64;
 
 struct ipfrag_skb_cb
 {
@@ -58,7 +60,7 @@ struct ipfrag_skb_cb
        int                     offset;
 };
 
-#define FRAG_CB(skb)   ((struct ipfrag_skb_cb*)((skb)->cb))
+#define FRAG_CB(skb)   ((struct ipfrag_skb_cb *)((skb)->cb))
 
 /* Describe an entry in the "incomplete datagrams" queue. */
 struct ipq {
@@ -69,40 +71,56 @@ struct ipq {
        __be32          daddr;
        __be16          id;
        u8              protocol;
+       u8              ecn; /* RFC 3168 support */
        int             iif;
        unsigned int    rid;
        struct inet_peer *peer;
 };
 
-struct inet_frags_ctl ip4_frags_ctl __read_mostly = {
-       /*
-        * Fragment cache limits. We will commit 256K at one time. Should we
-        * cross that limit we will prune down to 192K. This should cope with
-        * even the most extreme cases without allowing an attacker to
-        * measurably harm machine performance.
-        */
-       .high_thresh     = 256 * 1024,
-       .low_thresh      = 192 * 1024,
+/* RFC 3168 support:
+ * We want to check the ECN values of all fragments, to detect invalid
+ * combinations. In ipq->ecn, we store the OR of the ip4_frag_ecn() values
+ * of all fragments received so far.
+ */
+#define        IPFRAG_ECN_NOT_ECT      0x01 /* one frag had ECN_NOT_ECT */
+#define        IPFRAG_ECN_ECT_1        0x02 /* one frag had ECN_ECT_1 */
+#define        IPFRAG_ECN_ECT_0        0x04 /* one frag had ECN_ECT_0 */
+#define        IPFRAG_ECN_CE           0x08 /* one frag had ECN_CE */
 
-       /*
-        * Important NOTE! Fragment queue must be destroyed before MSL expires.
-        * RFC791 is wrong proposing to prolongate timer each fragment arrival
-        * by TTL.
-        */
-       .timeout         = IP_FRAG_TIME,
-       .secret_interval = 10 * 60 * HZ,
+static inline u8 ip4_frag_ecn(u8 tos)
+{
+       return 1 << (tos & INET_ECN_MASK);
+}
+
+/* Given the OR of all fragments' ECN flags, apply the RFC 3168 5.3 rules.
+ * Value: 0xff if the frame should be dropped;
+ *        otherwise 0 or INET_ECN_CE, to be ORed into the final iph->tos field.
+ */
+static const u8 ip4_frag_ecn_table[16] = {
+       /* at least one fragment had CE, and others ECT_0 or ECT_1 */
+       [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]                      = INET_ECN_CE,
+       [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1]                      = INET_ECN_CE,
+       [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1]   = INET_ECN_CE,
+
+       /* invalid combinations: drop frame */
+       [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
+       [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
+       [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
+       [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
+       [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
+       [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
+       [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
 };
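
For illustration, a standalone sketch of how the OR-accumulated flags and the
table above classify a reassembled datagram. The ECN codepoint values mirror
the kernel's INET_ECN_* constants; this is a model of the logic, not the
kernel code itself:

    #include <stdio.h>
    #include <stdint.h>

    /* ECN codepoints live in the low two bits of iph->tos (RFC 3168) */
    enum { ECN_NOT_ECT = 0, ECN_ECT_1 = 1, ECN_ECT_0 = 2, ECN_CE = 3,
           ECN_MASK = 3 };

    /* one-hot flag per codepoint, as ip4_frag_ecn() computes above */
    static uint8_t frag_ecn(uint8_t tos) { return 1 << (tos & ECN_MASK); }

    int main(void)
    {
        uint8_t table[16] = { 0 };
        int i;

        /* CE seen alongside ECT(0)/ECT(1): propagate CE to the result */
        table[(1 << ECN_CE) | (1 << ECN_ECT_0)] = ECN_CE;
        table[(1 << ECN_CE) | (1 << ECN_ECT_1)] = ECN_CE;
        table[(1 << ECN_CE) | (1 << ECN_ECT_0) | (1 << ECN_ECT_1)] = ECN_CE;
        /* not-ECT mixed with any ECN-capable fragment: invalid, drop */
        for (i = 0; i < 16; i++)
            if ((i & (1 << ECN_NOT_ECT)) && (i & ~(1 << ECN_NOT_ECT)))
                table[i] = 0xff;

        uint8_t frags[] = { ECN_ECT_0, ECN_CE, ECN_ECT_0 }; /* frag tos values */
        uint8_t acc = 0;
        for (i = 0; i < (int)sizeof(frags); i++)
            acc |= frag_ecn(frags[i]);

        if (table[acc] == 0xff)
            puts("invalid ECN combination: drop datagram");
        else
            printf("final iph->tos |= %u\n", table[acc]); /* prints 3 (CE) */
        return 0;
    }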
 
 static struct inet_frags ip4_frags;
 
-int ip_frag_nqueues(void)
+int ip_frag_nqueues(struct net *net)
 {
-       return ip4_frags.nqueues;
+       return net->ipv4.frags.nqueues;
 }
 
-int ip_frag_mem(void)
+int ip_frag_mem(struct net *net)
 {
-       return atomic_read(&ip4_frags.mem);
+       return atomic_read(&net->ipv4.frags.mem);
 }
 
 static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
@@ -134,19 +152,17 @@ static int ip4_frag_match(struct inet_frag_queue *q, void *a)
        struct ip4_create_arg *arg = a;
 
        qp = container_of(q, struct ipq, q);
-       return (qp->id == arg->iph->id &&
+       return  qp->id == arg->iph->id &&
                        qp->saddr == arg->iph->saddr &&
                        qp->daddr == arg->iph->daddr &&
                        qp->protocol == arg->iph->protocol &&
-                       qp->user == arg->user);
+                       qp->user == arg->user;
 }
 
 /* Memory Tracking Functions. */
-static __inline__ void frag_kfree_skb(struct sk_buff *skb, int *work)
+static void frag_kfree_skb(struct netns_frags *nf, struct sk_buff *skb)
 {
-       if (work)
-               *work -= skb->truesize;
-       atomic_sub(skb->truesize, &ip4_frags.mem);
+       atomic_sub(skb->truesize, &nf->mem);
        kfree_skb(skb);
 }
 
@@ -157,11 +173,12 @@ static void ip4_frag_init(struct inet_frag_queue *q, void *a)
 
        qp->protocol = arg->iph->protocol;
        qp->id = arg->iph->id;
+       qp->ecn = ip4_frag_ecn(arg->iph->tos);
        qp->saddr = arg->iph->saddr;
        qp->daddr = arg->iph->daddr;
        qp->user = arg->user;
        qp->peer = sysctl_ipfrag_max_dist ?
-               inet_getpeer(arg->iph->saddr, 1) : NULL;
+               inet_getpeer_v4(arg->iph->saddr, 1) : NULL;
 }
 
 static __inline__ void ip4_frag_free(struct inet_frag_queue *q)
@@ -192,13 +209,13 @@ static void ipq_kill(struct ipq *ipq)
 /* Memory limiting on fragments.  Evictor trashes the oldest
  * fragment queue until we are back under the threshold.
  */
-static void ip_evictor(void)
+static void ip_evictor(struct net *net)
 {
        int evicted;
 
-       evicted = inet_frag_evictor(&ip4_frags);
+       evicted = inet_frag_evictor(&net->ipv4.frags, &ip4_frags);
        if (evicted)
-               IP_ADD_STATS_BH(IPSTATS_MIB_REASMFAILS, evicted);
+               IP_ADD_STATS_BH(net, IPSTATS_MIB_REASMFAILS, evicted);
 }
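
A self-contained sketch of the watermark policy behind inet_frag_evictor():
once usage passes high_thresh, the oldest (LRU) queues are dropped until
usage is back under low_thresh. The names and list representation here are
hypothetical; the thresholds match those set in ipv4_frags_init_net() below:

    #include <stdio.h>

    struct frag_queue { int truesize; struct frag_queue *lru_next; };

    static int evict(struct frag_queue **lru, int *mem, int high, int low)
    {
        int evicted = 0;

        if (*mem <= high)                   /* only triggered past high */
            return 0;
        while (*lru && *mem > low) {
            struct frag_queue *q = *lru;    /* oldest queue first */
            *lru = q->lru_next;
            *mem -= q->truesize;            /* its skbs would be freed here */
            evicted++;                      /* accounted as REASMFAILS above */
        }
        return evicted;
    }

    int main(void)
    {
        struct frag_queue c = { 100 * 1024, NULL };
        struct frag_queue b = {  90 * 1024, &c };
        struct frag_queue a = {  80 * 1024, &b };
        struct frag_queue *lru = &a;
        int mem = 270 * 1024;

        int n = evict(&lru, &mem, 256 * 1024, 192 * 1024);
        printf("evicted %d queues, mem now %dK\n", n, mem / 1024);
        return 0;
    }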
 
 /*
@@ -207,26 +224,53 @@ static void ip_evictor(void)
 static void ip_expire(unsigned long arg)
 {
        struct ipq *qp;
+       struct net *net;
 
        qp = container_of((struct inet_frag_queue *) arg, struct ipq, q);
+       net = container_of(qp->q.net, struct net, ipv4.frags);
 
        spin_lock(&qp->q.lock);
 
-       if (qp->q.last_in & COMPLETE)
+       if (qp->q.last_in & INET_FRAG_COMPLETE)
                goto out;
 
        ipq_kill(qp);
 
-       IP_INC_STATS_BH(IPSTATS_MIB_REASMTIMEOUT);
-       IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
+       IP_INC_STATS_BH(net, IPSTATS_MIB_REASMTIMEOUT);
+       IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
 
-       if ((qp->q.last_in&FIRST_IN) && qp->q.fragments != NULL) {
+       if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) {
                struct sk_buff *head = qp->q.fragments;
+               const struct iphdr *iph;
+               int err;
+
+               rcu_read_lock();
+               head->dev = dev_get_by_index_rcu(net, qp->iif);
+               if (!head->dev)
+                       goto out_rcu_unlock;
+
+               /* skb dst is stale, drop it, and perform route lookup again */
+               skb_dst_drop(head);
+               iph = ip_hdr(head);
+               err = ip_route_input_noref(head, iph->daddr, iph->saddr,
+                                          iph->tos, head->dev);
+               if (err)
+                       goto out_rcu_unlock;
+
+               /*
+                * Only an end host needs to send an ICMP
+                * "Fragment Reassembly Timeout" message, per RFC792.
+                */
+               if (qp->user == IP_DEFRAG_AF_PACKET ||
+                   (qp->user == IP_DEFRAG_CONNTRACK_IN &&
+                    skb_rtable(head)->rt_type != RTN_LOCAL))
+                       goto out_rcu_unlock;
+
                /* Send an ICMP "Fragment Reassembly Timeout" message. */
-               if ((head->dev = dev_get_by_index(&init_net, qp->iif)) != NULL) {
-                       icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
-                       dev_put(head->dev);
-               }
+               icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
+out_rcu_unlock:
+               rcu_read_unlock();
        }
 out:
        spin_unlock(&qp->q.lock);
@@ -236,7 +280,7 @@ out:
 /* Find the correct entry in the "incomplete datagrams" queue for
  * this IP datagram, and create new one, if nothing is found.
  */
-static inline struct ipq *ip_find(struct iphdr *iph, u32 user)
+static inline struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)
 {
        struct inet_frag_queue *q;
        struct ip4_create_arg arg;
@@ -244,9 +288,11 @@ static inline struct ipq *ip_find(struct iphdr *iph, u32 user)
 
        arg.iph = iph;
        arg.user = user;
+
+       read_lock(&ip4_frags.lock);
        hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
 
-       q = inet_frag_find(&ip4_frags, &arg, hash);
+       q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
        if (q == NULL)
                goto out_nomem;
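
ipqhashfn() folds the (id, protocol, saddr, daddr) key with a random seed;
the read_lock taken above guards against the seed being rehashed (every
secret_interval) mid-lookup. A toy userspace analogue, with a made-up mixing
function standing in for the kernel's jhash:

    #include <stdio.h>
    #include <stdint.h>

    #define HASHSZ 64

    static uint32_t mix(uint32_t a, uint32_t b, uint32_t c, uint32_t seed)
    {
        uint32_t h = seed;

        h ^= a; h *= 0x9e3779b1u; h = (h << 13) | (h >> 19);
        h ^= b; h *= 0x9e3779b1u; h = (h << 13) | (h >> 19);
        h ^= c; h *= 0x9e3779b1u;
        return h;
    }

    static unsigned int ipq_hash(uint16_t id, uint32_t saddr, uint32_t daddr,
                                 uint8_t prot, uint32_t seed)
    {
        return mix(((uint32_t)id << 16) | prot, saddr, daddr, seed)
               & (HASHSZ - 1);
    }

    int main(void)
    {
        /* 192.168.0.1 -> 192.168.0.2, UDP, IP id 0x1234 */
        printf("bucket %u of %u\n",
               ipq_hash(0x1234, 0xc0a80001u, 0xc0a80002u, 17, 0xdeadbeefu),
               HASHSZ);
        return 0;
    }
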
 
@@ -276,7 +322,10 @@ static inline int ip_frag_too_far(struct ipq *qp)
        rc = qp->q.fragments && (end - start) > max;
 
        if (rc) {
-               IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
+               struct net *net;
+
+               net = container_of(qp->q.net, struct net, ipv4.frags);
+               IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
        }
 
        return rc;
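
ip_frag_too_far() compares per-peer datagram counters: if more than
sysctl_ipfrag_max_dist datagrams from the same source arrived between two
fragments of one queue, the queue is assumed dead. The unsigned subtraction
keeps the distance correct across counter wraparound, for example:

    #include <stdio.h>

    /* start = peer "rid" recorded when the previous fragment arrived,
     * end = rid now; unsigned subtraction gives the true distance even
     * when the 32-bit counter wrapped in between. */
    int main(void)
    {
        unsigned int max   = 64;            /* sysctl_ipfrag_max_dist default */
        unsigned int start = 0xfffffff0u;   /* before wraparound */
        unsigned int end   = 0x00000030u;   /* after wraparound */
        unsigned int dist  = end - start;   /* == 0x40 == 64 */

        printf("distance %u: %s\n", dist,
               dist > max ? "too far, drop the queue" : "still plausible");
        return 0;
    }
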
@@ -286,7 +335,7 @@ static int ip_frag_reinit(struct ipq *qp)
 {
        struct sk_buff *fp;
 
-       if (!mod_timer(&qp->q.timer, jiffies + ip4_frags_ctl.timeout)) {
+       if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
                atomic_inc(&qp->q.refcnt);
                return -ETIMEDOUT;
        }
@@ -294,7 +343,7 @@ static int ip_frag_reinit(struct ipq *qp)
        fp = qp->q.fragments;
        do {
                struct sk_buff *xp = fp->next;
-               frag_kfree_skb(fp, NULL);
+               frag_kfree_skb(qp->q.net, fp);
                fp = xp;
        } while (fp);
 
@@ -302,7 +351,9 @@ static int ip_frag_reinit(struct ipq *qp)
        qp->q.len = 0;
        qp->q.meat = 0;
        qp->q.fragments = NULL;
+       qp->q.fragments_tail = NULL;
        qp->iif = 0;
+       qp->ecn = 0;
 
        return 0;
 }
@@ -315,8 +366,9 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
        int flags, offset;
        int ihl, end;
        int err = -ENOENT;
+       u8 ecn;
 
-       if (qp->q.last_in & COMPLETE)
+       if (qp->q.last_in & INET_FRAG_COMPLETE)
                goto err;
 
        if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
@@ -326,6 +378,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
                goto err;
        }
 
+       ecn = ip4_frag_ecn(ip_hdr(skb)->tos);
        offset = ntohs(ip_hdr(skb)->frag_off);
        flags = offset & ~IP_OFFSET;
        offset &= IP_OFFSET;
@@ -342,9 +395,9 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
                 * or have different end, the segment is corrupted.
                 */
                if (end < qp->q.len ||
-                   ((qp->q.last_in & LAST_IN) && end != qp->q.len))
+                   ((qp->q.last_in & INET_FRAG_LAST_IN) && end != qp->q.len))
                        goto err;
-               qp->q.last_in |= LAST_IN;
+               qp->q.last_in |= INET_FRAG_LAST_IN;
                qp->q.len = end;
        } else {
                if (end&7) {
@@ -354,7 +407,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
                }
                if (end > qp->q.len) {
                        /* Some bits beyond end -> corruption. */
-                       if (qp->q.last_in & LAST_IN)
+                       if (qp->q.last_in & INET_FRAG_LAST_IN)
                                goto err;
                        qp->q.len = end;
                }
@@ -374,6 +427,11 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
         * in the chain of fragments so far.  We must know where to put
         * this fragment, right?
         */
+       prev = qp->q.fragments_tail;
+       if (!prev || FRAG_CB(prev)->offset < offset) {
+               next = NULL;
+               goto found;
+       }
        prev = NULL;
        for (next = qp->q.fragments; next != NULL; next = next->next) {
                if (FRAG_CB(next)->offset >= offset)
@@ -381,6 +439,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
                prev = next;
        }
 
+found:
        /* We found where to put this one.  Check for overlap with
         * preceding fragment, and, if needed, align things so that
         * any overlaps are eliminated.
@@ -431,7 +490,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
                                qp->q.fragments = next;
 
                        qp->q.meat -= free_it->len;
-                       frag_kfree_skb(free_it, NULL);
+                       frag_kfree_skb(qp->q.net, free_it);
                }
        }
 
@@ -439,6 +498,8 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 
        /* Insert this fragment in the chain of fragments. */
        skb->next = next;
+       if (!next)
+               qp->q.fragments_tail = skb;
        if (prev)
                prev->next = skb;
        else
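
The new fragments_tail pointer makes the common in-order arrival an O(1)
append instead of a list walk. A sketch of the same insertion discipline on
a plain sorted list (hypothetical types; the overlap trimming done above is
omitted):

    #include <stdio.h>
    #include <stdlib.h>

    struct frag { int offset; struct frag *next; };
    struct queue { struct frag *head, *tail; };

    /* Keep ascending offset order. Fragments usually arrive in order, so a
     * tail check first makes the common case O(1) instead of a full walk. */
    static void insert_frag(struct queue *q, struct frag *f)
    {
        struct frag *prev = q->tail, *next;

        if (!prev || prev->offset < f->offset) {   /* fast path: append */
            next = NULL;
        } else {                                   /* slow path: walk */
            prev = NULL;
            for (next = q->head; next; next = next->next) {
                if (next->offset >= f->offset)
                    break;
                prev = next;
            }
        }
        f->next = next;
        if (!next)
            q->tail = f;                           /* new last element */
        if (prev)
            prev->next = f;
        else
            q->head = f;
    }

    int main(void)
    {
        struct queue q = { NULL, NULL };
        int offs[] = { 0, 1480, 2960, 1480 };      /* one out-of-order dup */
        int i;

        for (i = 0; i < 4; i++) {
            struct frag *f = malloc(sizeof(*f));
            f->offset = offs[i];
            insert_frag(&q, f);
        }
        for (struct frag *f = q.head; f; f = f->next)
            printf("%d ", f->offset);              /* 0 1480 1480 2960 */
        printf("\n");
        while (q.head) {
            struct frag *f = q.head;
            q.head = f->next;
            free(f);
        }
        return 0;
    }
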
@@ -451,15 +512,17 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
        }
        qp->q.stamp = skb->tstamp;
        qp->q.meat += skb->len;
-       atomic_add(skb->truesize, &ip4_frags.mem);
+       qp->ecn |= ecn;
+       atomic_add(skb->truesize, &qp->q.net->mem);
        if (offset == 0)
-               qp->q.last_in |= FIRST_IN;
+               qp->q.last_in |= INET_FRAG_FIRST_IN;
 
-       if (qp->q.last_in == (FIRST_IN | LAST_IN) && qp->q.meat == qp->q.len)
+       if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
+           qp->q.meat == qp->q.len)
                return ip_frag_reasm(qp, prev, dev);
 
        write_lock(&ip4_frags.lock);
-       list_move_tail(&qp->q.lru_list, &ip4_frags.lru_list);
+       list_move_tail(&qp->q.lru_list, &qp->q.net->lru_list);
        write_unlock(&ip4_frags.lock);
        return -EINPROGRESS;
 
@@ -474,23 +537,31 @@ err:
 static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
                         struct net_device *dev)
 {
+       struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
        struct iphdr *iph;
        struct sk_buff *fp, *head = qp->q.fragments;
        int len;
        int ihlen;
        int err;
+       u8 ecn;
 
        ipq_kill(qp);
 
+       ecn = ip4_frag_ecn_table[qp->ecn];
+       if (unlikely(ecn == 0xff)) {
+               err = -EINVAL;
+               goto out_fail;
+       }
        /* Make the one we just received the head. */
        if (prev) {
                head = prev->next;
                fp = skb_clone(head, GFP_ATOMIC);
-
                if (!fp)
                        goto out_nomem;
 
                fp->next = head->next;
+               if (!fp->next)
+                       qp->q.fragments_tail = fp;
                prev->next = fp;
 
                skb_morph(head, qp->q.fragments);
@@ -500,8 +571,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
                qp->q.fragments = head;
        }
 
-       BUG_TRAP(head != NULL);
-       BUG_TRAP(FRAG_CB(head)->offset == 0);
+       WARN_ON(head == NULL);
+       WARN_ON(FRAG_CB(head)->offset != 0);
 
        /* Allocate a new buffer for the datagram. */
        ihlen = ip_hdrlen(head);
@@ -512,14 +583,13 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
                goto out_oversize;
 
        /* Head of list must not be cloned. */
-       err = -ENOMEM;
        if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
                goto out_nomem;
 
        /* If the first fragment is fragmented itself, we split
         * it to two chunks: the first with data and paged part
         * and the second, holding only fragments. */
-       if (skb_shinfo(head)->frag_list) {
+       if (skb_has_frag_list(head)) {
                struct sk_buff *clone;
                int i, plen = 0;
 
@@ -528,20 +598,19 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
                clone->next = head->next;
                head->next = clone;
                skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
-               skb_shinfo(head)->frag_list = NULL;
-               for (i=0; i<skb_shinfo(head)->nr_frags; i++)
-                       plen += skb_shinfo(head)->frags[i].size;
+               skb_frag_list_init(head);
+               for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
+                       plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
                clone->len = clone->data_len = head->data_len - plen;
                head->data_len -= clone->len;
                head->len -= clone->len;
                clone->csum = 0;
                clone->ip_summed = head->ip_summed;
-               atomic_add(clone->truesize, &ip4_frags.mem);
+               atomic_add(clone->truesize, &qp->q.net->mem);
        }
 
        skb_shinfo(head)->frag_list = head->next;
        skb_push(head, head->data - skb_network_header(head));
-       atomic_sub(head->truesize, &ip4_frags.mem);
 
        for (fp=head->next; fp; fp = fp->next) {
                head->data_len += fp->len;
@@ -551,8 +620,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
                else if (head->ip_summed == CHECKSUM_COMPLETE)
                        head->csum = csum_add(head->csum, fp->csum);
                head->truesize += fp->truesize;
-               atomic_sub(fp->truesize, &ip4_frags.mem);
        }
+       atomic_sub(head->truesize, &qp->q.net->mem);
 
        head->next = NULL;
        head->dev = dev;
@@ -561,21 +630,23 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
        iph = ip_hdr(head);
        iph->frag_off = 0;
        iph->tot_len = htons(len);
-       IP_INC_STATS_BH(IPSTATS_MIB_REASMOKS);
+       iph->tos |= ecn;
+       IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
        qp->q.fragments = NULL;
+       qp->q.fragments_tail = NULL;
        return 0;
 
 out_nomem:
        LIMIT_NETDEBUG(KERN_ERR "IP: queue_glue: no memory for gluing "
                              "queue %p\n", qp);
+       err = -ENOMEM;
        goto out_fail;
 out_oversize:
        if (net_ratelimit())
-               printk(KERN_INFO
-                       "Oversized IP packet from %d.%d.%d.%d.\n",
-                       NIPQUAD(qp->saddr));
+               printk(KERN_INFO "Oversized IP packet from %pI4.\n",
+                       &qp->saddr);
 out_fail:
-       IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
+       IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
        return err;
 }
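
Once the fragments are glued, the head's IP header is patched to describe
the whole datagram: offset/MF cleared, total length rewritten, and the ECN
result ORed into tos. A toy version on a bare header struct (field subset
only; byte order as in the real header):

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>   /* htons/ntohs */

    struct toy_iphdr {       /* only the fields reassembly touches */
        uint8_t  tos;
        uint16_t tot_len;    /* network byte order */
        uint16_t frag_off;   /* network byte order */
    };

    static void reasm_fixup(struct toy_iphdr *iph, int len, uint8_t ecn)
    {
        iph->frag_off = 0;           /* result is not a fragment */
        iph->tot_len = htons(len);   /* full datagram length */
        iph->tos |= ecn;             /* 0 or CE, from ip4_frag_ecn_table */
    }

    int main(void)
    {
        struct toy_iphdr iph = { .tos = 0x02 /* ECT(0) */,
                                 .frag_off = htons(0x2000) /* MF set */ };

        reasm_fixup(&iph, 3000, 3 /* INET_ECN_CE */);
        printf("tos=0x%02x tot_len=%u frag_off=%u\n",
               iph.tos, ntohs(iph.tot_len), ntohs(iph.frag_off));
        return 0;
    }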
 
@@ -583,15 +654,17 @@ out_fail:
 int ip_defrag(struct sk_buff *skb, u32 user)
 {
        struct ipq *qp;
+       struct net *net;
 
-       IP_INC_STATS_BH(IPSTATS_MIB_REASMREQDS);
+       net = skb->dev ? dev_net(skb->dev) : dev_net(skb_dst(skb)->dev);
+       IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);
 
        /* Start by cleaning up the memory. */
-       if (atomic_read(&ip4_frags.mem) > ip4_frags_ctl.high_thresh)
-               ip_evictor();
+       if (atomic_read(&net->ipv4.frags.mem) > net->ipv4.frags.high_thresh)
+               ip_evictor(net);
 
        /* Lookup (or create) queue header */
-       if ((qp = ip_find(ip_hdr(skb), user)) != NULL) {
+       if ((qp = ip_find(net, ip_hdr(skb), user)) != NULL) {
                int ret;
 
                spin_lock(&qp->q.lock);
@@ -603,14 +676,190 @@ int ip_defrag(struct sk_buff *skb, u32 user)
                return ret;
        }
 
-       IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
+       IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
        kfree_skb(skb);
        return -ENOMEM;
 }
+EXPORT_SYMBOL(ip_defrag);
+
+struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
+{
+       const struct iphdr *iph;
+       u32 len;
+
+       if (skb->protocol != htons(ETH_P_IP))
+               return skb;
+
+       if (!pskb_may_pull(skb, sizeof(struct iphdr)))
+               return skb;
+
+       iph = ip_hdr(skb);
+       if (iph->ihl < 5 || iph->version != 4)
+               return skb;
+       if (!pskb_may_pull(skb, iph->ihl*4))
+               return skb;
+       iph = ip_hdr(skb);
+       len = ntohs(iph->tot_len);
+       if (skb->len < len || len < (iph->ihl * 4))
+               return skb;
+
+       if (ip_is_fragment(ip_hdr(skb))) {
+               skb = skb_share_check(skb, GFP_ATOMIC);
+               if (skb) {
+                       if (pskb_trim_rcsum(skb, len))
+                               return skb;
+                       memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+                       if (ip_defrag(skb, user))
+                               return NULL;
+                       skb->rxhash = 0;
+               }
+       }
+       return skb;
+}
+EXPORT_SYMBOL(ip_check_defrag);
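
ip_check_defrag() hands a packet to ip_defrag() only if ip_is_fragment()
says so, i.e. the MF bit is set or the fragment offset is nonzero. A
userspace rendering of that test (flag values as in the IPv4 header):

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    #define IP_MF     0x2000   /* "more fragments" flag */
    #define IP_OFFSET 0x1fff   /* fragment offset, in 8-byte units */

    /* Mirrors ip_is_fragment(): MF set (all but the last fragment) or a
     * nonzero offset (all but the first) marks a fragment; a whole
     * datagram has both clear. */
    static int is_fragment(uint16_t frag_off_net)
    {
        return (frag_off_net & htons(IP_MF | IP_OFFSET)) != 0;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               is_fragment(htons(0)),       /* 0: unfragmented datagram */
               is_fragment(htons(IP_MF)),   /* 1: first fragment */
               is_fragment(htons(185)));    /* 1: offset 185 * 8 = 1480 */
        return 0;
    }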
+
+#ifdef CONFIG_SYSCTL
+static int zero;
+
+static struct ctl_table ip4_frags_ns_ctl_table[] = {
+       {
+               .procname       = "ipfrag_high_thresh",
+               .data           = &init_net.ipv4.frags.high_thresh,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec
+       },
+       {
+               .procname       = "ipfrag_low_thresh",
+               .data           = &init_net.ipv4.frags.low_thresh,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec
+       },
+       {
+               .procname       = "ipfrag_time",
+               .data           = &init_net.ipv4.frags.timeout,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_jiffies,
+       },
+       { }
+};
+
+static struct ctl_table ip4_frags_ctl_table[] = {
+       {
+               .procname       = "ipfrag_secret_interval",
+               .data           = &ip4_frags.secret_interval,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_jiffies,
+       },
+       {
+               .procname       = "ipfrag_max_dist",
+               .data           = &sysctl_ipfrag_max_dist,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &zero
+       },
+       { }
+};
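
Both tables surface under /proc/sys/net/ipv4/, the per-netns entries backed
by net->ipv4.frags and the global ones by ip4_frags state. A small userspace
probe of the registered knobs (paths follow the .procname fields above):

    #include <stdio.h>

    int main(void)
    {
        static const char *knobs[] = {
            "/proc/sys/net/ipv4/ipfrag_high_thresh",
            "/proc/sys/net/ipv4/ipfrag_low_thresh",
            "/proc/sys/net/ipv4/ipfrag_time",
            "/proc/sys/net/ipv4/ipfrag_secret_interval",
            "/proc/sys/net/ipv4/ipfrag_max_dist",
        };
        unsigned int i;

        for (i = 0; i < sizeof(knobs) / sizeof(knobs[0]); i++) {
            FILE *f = fopen(knobs[i], "r");
            long v;

            if (f && fscanf(f, "%ld", &v) == 1)
                printf("%s = %ld\n", knobs[i], v);
            if (f)
                fclose(f);
        }
        return 0;
    }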
+
+static int __net_init ip4_frags_ns_ctl_register(struct net *net)
+{
+       struct ctl_table *table;
+       struct ctl_table_header *hdr;
+
+       table = ip4_frags_ns_ctl_table;
+       if (!net_eq(net, &init_net)) {
+               table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
+               if (table == NULL)
+                       goto err_alloc;
+
+               table[0].data = &net->ipv4.frags.high_thresh;
+               table[1].data = &net->ipv4.frags.low_thresh;
+               table[2].data = &net->ipv4.frags.timeout;
+       }
+
+       hdr = register_net_sysctl_table(net, net_ipv4_ctl_path, table);
+       if (hdr == NULL)
+               goto err_reg;
+
+       net->ipv4.frags_hdr = hdr;
+       return 0;
+
+err_reg:
+       if (!net_eq(net, &init_net))
+               kfree(table);
+err_alloc:
+       return -ENOMEM;
+}
+
+static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net)
+{
+       struct ctl_table *table;
+
+       table = net->ipv4.frags_hdr->ctl_table_arg;
+       unregister_net_sysctl_table(net->ipv4.frags_hdr);
+       kfree(table);
+}
+
+static void ip4_frags_ctl_register(void)
+{
+       register_net_sysctl_rotable(net_ipv4_ctl_path, ip4_frags_ctl_table);
+}
+#else
+static inline int ip4_frags_ns_ctl_register(struct net *net)
+{
+       return 0;
+}
+
+static inline void ip4_frags_ns_ctl_unregister(struct net *net)
+{
+}
+
+static inline void ip4_frags_ctl_register(void)
+{
+}
+#endif
+
+static int __net_init ipv4_frags_init_net(struct net *net)
+{
+       /*
+        * Fragment cache limits. We will commit 256K at one time. Should we
+        * cross that limit, we will prune down to 192K. This should cope with
+        * even the most extreme cases without allowing an attacker to
+        * measurably harm machine performance.
+        */
+       net->ipv4.frags.high_thresh = 256 * 1024;
+       net->ipv4.frags.low_thresh = 192 * 1024;
+       /*
+        * Important NOTE! The fragment queue must be destroyed before the MSL
+        * expires. RFC 791 is wrong in proposing to prolong the timer by the
+        * TTL on each fragment arrival.
+        */
+       net->ipv4.frags.timeout = IP_FRAG_TIME;
+
+       inet_frags_init_net(&net->ipv4.frags);
+
+       return ip4_frags_ns_ctl_register(net);
+}
+
+static void __net_exit ipv4_frags_exit_net(struct net *net)
+{
+       ip4_frags_ns_ctl_unregister(net);
+       inet_frags_exit_net(&net->ipv4.frags, &ip4_frags);
+}
+
+static struct pernet_operations ip4_frags_ops = {
+       .init = ipv4_frags_init_net,
+       .exit = ipv4_frags_exit_net,
+};
 
 void __init ipfrag_init(void)
 {
-       ip4_frags.ctl = &ip4_frags_ctl;
+       ip4_frags_ctl_register();
+       register_pernet_subsys(&ip4_frags_ops);
        ip4_frags.hashfn = ip4_hashfn;
        ip4_frags.constructor = ip4_frag_init;
        ip4_frags.destructor = ip4_frag_free;
@@ -618,7 +867,6 @@ void __init ipfrag_init(void)
        ip4_frags.qsize = sizeof(struct ipq);
        ip4_frags.match = ip4_frag_match;
        ip4_frags.frag_expire = ip_expire;
+       ip4_frags.secret_interval = 10 * 60 * HZ;
        inet_frags_init(&ip4_frags);
 }
-
-EXPORT_SYMBOL(ip_defrag);
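
ipfrag_init() fills an inet_frags ops structure so the protocol-independent
engine can call back into IPv4-specific hashing, construction, and expiry.
The same dispatch shape in miniature (all names hypothetical):

    #include <stdio.h>

    /* Protocol-independent engine + per-protocol callbacks: the shape that
     * inet_frags gives IPv4 (here) and IPv6 reassembly. */
    struct frags_ops {
        unsigned int (*hashfn)(unsigned int key); /* pick a hash bucket */
        void (*constructor)(void *q);             /* fill protocol fields */
        void (*frag_expire)(void *q);             /* timer fired: kill queue */
        unsigned int qsize;                       /* per-queue alloc size */
    };

    static unsigned int toy_hash(unsigned int key) { return key & 63; }
    static void toy_ctor(void *q)   { printf("constructed queue %p\n", q); }
    static void toy_expire(void *q) { printf("expired queue %p\n", q); }

    /* engine side: knows nothing about IPv4, just calls through the ops */
    static void engine_new_queue(const struct frags_ops *ops, void *q,
                                 unsigned int key)
    {
        printf("bucket %u, qsize %u\n", ops->hashfn(key), ops->qsize);
        ops->constructor(q);
    }

    int main(void)
    {
        struct frags_ops ip4 = {
            .hashfn      = toy_hash,
            .constructor = toy_ctor,
            .frag_expire = toy_expire,
            .qsize       = 128,
        };
        int q;

        engine_new_queue(&ip4, &q, 12345);
        ip4.frag_expire(&q);
        return 0;
    }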