/* NAT for netfilter; shared with compatibility layer. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <net/checksum.h>
#include <net/icmp.h>
#include <net/ip.h>
#include <net/tcp.h>  /* For tcp_prot in getorigdst */
#include <linux/icmp.h>
#include <linux/udp.h>
#include <linux/jhash.h>

#include <linux/netfilter_ipv4.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_protocol.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>

static DEFINE_SPINLOCK(nf_nat_lock);

static struct nf_conntrack_l3proto *l3proto __read_mostly;

/* Calculated at init based on memory size */
static unsigned int nf_nat_htable_size __read_mostly;
static int nf_nat_vmalloced;

static struct hlist_head *bysource __read_mostly;

#define MAX_IP_NAT_PROTO 256
static const struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO]
                                                __read_mostly;

static inline const struct nf_nat_protocol *
__nf_nat_proto_find(u_int8_t protonum)
{
        return rcu_dereference(nf_nat_protos[protonum]);
}

const struct nf_nat_protocol *
nf_nat_proto_find_get(u_int8_t protonum)
{
        const struct nf_nat_protocol *p;

        rcu_read_lock();
        p = __nf_nat_proto_find(protonum);
        if (!try_module_get(p->me))
                p = &nf_nat_unknown_protocol;
        rcu_read_unlock();

        return p;
}
EXPORT_SYMBOL_GPL(nf_nat_proto_find_get);

void
nf_nat_proto_put(const struct nf_nat_protocol *p)
{
        module_put(p->me);
}
EXPORT_SYMBOL_GPL(nf_nat_proto_put);
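
/*
 * Usage sketch (hypothetical caller, not from this file): code that
 * needs the protocol outside an RCU read-side section pins the backing
 * module with the get/put pair above:
 *
 *      const struct nf_nat_protocol *p = nf_nat_proto_find_get(IPPROTO_TCP);
 *
 *      ... use p->in_range(), p->unique_tuple() ...
 *      nf_nat_proto_put(p);
 *
 * If the real protocol module cannot be pinned, the caller gets
 * nf_nat_unknown_protocol instead, which is built in and always safe.
 */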

/* We keep an extra hash for each conntrack, for fast searching. */
static inline unsigned int
hash_by_src(const struct nf_conntrack_tuple *tuple)
{
        unsigned int hash;

        /* Original src, to ensure we map it consistently if possible. */
        hash = jhash_3words((__force u32)tuple->src.u3.ip,
                            (__force u32)tuple->src.u.all,
                            tuple->dst.protonum, 0);
        return ((u64)hash * nf_nat_htable_size) >> 32;
}
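
/*
 * Note on the bucket calculation above: instead of computing
 * "hash % nf_nat_htable_size", the 32-bit jhash value is scaled into
 * [0, nf_nat_htable_size) with a multiply and shift.  For example, if
 * nf_nat_htable_size were 16384 (2^14), this keeps the top 14 bits of
 * the hash, avoiding a division on the fast path.
 */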

/* Is this tuple already taken? (not by us) */
int
nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
                  const struct nf_conn *ignored_conntrack)
{
        /* Conntrack doesn't keep track of outgoing tuples; only
           incoming ones.  NAT means they don't have a fixed mapping,
           so we invert the tuple and look for the incoming reply.

           We could keep a separate hash if this proves too slow. */
        struct nf_conntrack_tuple reply;

        nf_ct_invert_tuplepr(&reply, tuple);
        return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
}
EXPORT_SYMBOL(nf_nat_used_tuple);

/* If we source map this tuple so the reply looks like reply_tuple, will
 * that meet the constraints of the range? */
static int
in_range(const struct nf_conntrack_tuple *tuple,
         const struct nf_nat_range *range)
{
        const struct nf_nat_protocol *proto;
        int ret = 0;

        /* If we are supposed to map IPs, then we must be in the
           range specified, otherwise let this drag us onto a new src IP. */
        if (range->flags & IP_NAT_RANGE_MAP_IPS) {
                if (ntohl(tuple->src.u3.ip) < ntohl(range->min_ip) ||
                    ntohl(tuple->src.u3.ip) > ntohl(range->max_ip))
                        return 0;
        }

        rcu_read_lock();
        proto = __nf_nat_proto_find(tuple->dst.protonum);
        if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) ||
            proto->in_range(tuple, IP_NAT_MANIP_SRC,
                            &range->min, &range->max))
                ret = 1;
        rcu_read_unlock();

        return ret;
}

static inline int
same_src(const struct nf_conn *ct,
         const struct nf_conntrack_tuple *tuple)
{
        const struct nf_conntrack_tuple *t;

        t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
        return (t->dst.protonum == tuple->dst.protonum &&
                t->src.u3.ip == tuple->src.u3.ip &&
                t->src.u.all == tuple->src.u.all);
}

/* Only called for SRC manip */
static int
find_appropriate_src(const struct nf_conntrack_tuple *tuple,
                     struct nf_conntrack_tuple *result,
                     const struct nf_nat_range *range)
{
        unsigned int h = hash_by_src(tuple);
        const struct nf_conn_nat *nat;
        const struct nf_conn *ct;
        const struct hlist_node *n;

        rcu_read_lock();
        hlist_for_each_entry_rcu(nat, n, &bysource[h], bysource) {
                ct = nat->ct;
                if (same_src(ct, tuple)) {
                        /* Copy source part from reply tuple. */
                        nf_ct_invert_tuplepr(result,
                                       &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
                        result->dst = tuple->dst;

                        if (in_range(result, range)) {
                                rcu_read_unlock();
                                return 1;
                        }
                }
        }
        rcu_read_unlock();
        return 0;
}

/* For [FUTURE] fragmentation handling, we want the least-used
   src-ip/dst-ip/proto triple.  Fairness doesn't come into it.  Thus
   if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports
   1-65535, we don't do pro-rata allocation based on ports; we choose
   the ip with the lowest src-ip/dst-ip/proto usage.
*/
static void
find_best_ips_proto(struct nf_conntrack_tuple *tuple,
                    const struct nf_nat_range *range,
                    const struct nf_conn *ct,
                    enum nf_nat_manip_type maniptype)
{
        __be32 *var_ipp;
        /* Host order */
        u_int32_t minip, maxip, j;

        /* No IP mapping?  Do nothing. */
        if (!(range->flags & IP_NAT_RANGE_MAP_IPS))
                return;

        if (maniptype == IP_NAT_MANIP_SRC)
                var_ipp = &tuple->src.u3.ip;
        else
                var_ipp = &tuple->dst.u3.ip;

        /* Fast path: only one choice. */
        if (range->min_ip == range->max_ip) {
                *var_ipp = range->min_ip;
                return;
        }

        /* Hashing source and destination IPs gives a fairly even
         * spread in practice (if there are a small number of IPs
         * involved, there usually aren't that many connections
         * anyway).  The consistency means that servers see the same
         * client coming from the same IP (some Internet Banking sites
         * like this), even across reboots. */
        minip = ntohl(range->min_ip);
        maxip = ntohl(range->max_ip);
        j = jhash_2words((__force u32)tuple->src.u3.ip,
                         (__force u32)tuple->dst.u3.ip, 0);
        j = ((u64)j * (maxip - minip + 1)) >> 32;
        *var_ipp = htonl(minip + j);
}
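
/*
 * Worked example (sketch): for a range of 10.0.0.1-10.0.0.4 there are
 * maxip - minip + 1 == 4 candidate addresses.  The 32-bit hash j is
 * scaled into 0..3 by the multiply-and-shift, and minip + j selects
 * one of the four.  Because j depends only on the connection's source
 * and destination IPs, the same client/server pair always maps to the
 * same NAT address.
 */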

/* Manipulate the tuple into the range given.  For NF_INET_POST_ROUTING,
 * we change the source to map into the range.  For NF_INET_PRE_ROUTING
 * and NF_INET_LOCAL_OUT, we change the destination to map into the
 * range.  It might not be possible to get a unique tuple, but we try.
 * At worst (or if we race), we will end up with a final duplicate in
 * __ip_conntrack_confirm and drop the packet. */
static void
get_unique_tuple(struct nf_conntrack_tuple *tuple,
                 const struct nf_conntrack_tuple *orig_tuple,
                 const struct nf_nat_range *range,
                 struct nf_conn *ct,
                 enum nf_nat_manip_type maniptype)
{
        const struct nf_nat_protocol *proto;

        /* 1) If this srcip/proto/src-proto-part is currently mapped,
           and that same mapping gives a unique tuple within the given
           range, use that.

           This is only required for source (i.e. NAT/masq) mappings.
           So far, we don't do local source mappings, so multiple
           manips are not an issue.  */
        if (maniptype == IP_NAT_MANIP_SRC) {
                if (find_appropriate_src(orig_tuple, tuple, range)) {
                        pr_debug("get_unique_tuple: Found current src map\n");
                        if (!(range->flags & IP_NAT_RANGE_PROTO_RANDOM))
                                if (!nf_nat_used_tuple(tuple, ct))
                                        return;
                }
        }

        /* 2) Select the least-used IP/proto combination in the given
           range. */
        *tuple = *orig_tuple;
        find_best_ips_proto(tuple, range, ct, maniptype);

        /* 3) The per-protocol part of the manip is made to map into
           the range to make a unique tuple. */

        rcu_read_lock();
        proto = __nf_nat_proto_find(orig_tuple->dst.protonum);

        /* Change protocol info to have some randomization */
        if (range->flags & IP_NAT_RANGE_PROTO_RANDOM) {
                proto->unique_tuple(tuple, range, maniptype, ct);
                goto out;
        }

        /* Only bother mapping if it's not already in range and unique */
        if ((!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) ||
             proto->in_range(tuple, maniptype, &range->min, &range->max)) &&
            !nf_nat_used_tuple(tuple, ct))
                goto out;

        /* Last chance: get protocol to try to obtain unique tuple. */
        proto->unique_tuple(tuple, range, maniptype, ct);
out:
        rcu_read_unlock();
}

unsigned int
nf_nat_setup_info(struct nf_conn *ct,
                  const struct nf_nat_range *range,
                  enum nf_nat_manip_type maniptype)
{
        struct nf_conntrack_tuple curr_tuple, new_tuple;
        struct nf_conn_nat *nat;
        int have_to_hash = !(ct->status & IPS_NAT_DONE_MASK);

        /* A NAT helper or ctnetlink may also set up the binding. */
        nat = nfct_nat(ct);
        if (!nat) {
                nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
                if (nat == NULL) {
                        pr_debug("failed to add NAT extension\n");
                        return NF_ACCEPT;
                }
        }

        NF_CT_ASSERT(maniptype == IP_NAT_MANIP_SRC ||
                     maniptype == IP_NAT_MANIP_DST);
        BUG_ON(nf_nat_initialized(ct, maniptype));

        /* What we've got will look like inverse of reply. Normally
           this is what is in the conntrack, except for prior
           manipulations (future optimization: if num_manips == 0,
           orig_tp =
           conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple) */
        nf_ct_invert_tuplepr(&curr_tuple,
                             &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

        get_unique_tuple(&new_tuple, &curr_tuple, range, ct, maniptype);

        if (!nf_ct_tuple_equal(&new_tuple, &curr_tuple)) {
                struct nf_conntrack_tuple reply;

                /* Alter conntrack table so it will recognize replies. */
                nf_ct_invert_tuplepr(&reply, &new_tuple);
                nf_conntrack_alter_reply(ct, &reply);

                /* Non-atomic: we own this at the moment. */
                if (maniptype == IP_NAT_MANIP_SRC)
                        ct->status |= IPS_SRC_NAT;
                else
                        ct->status |= IPS_DST_NAT;
        }

        /* Place in source hash if this is the first time. */
        if (have_to_hash) {
                unsigned int srchash;

                srchash = hash_by_src(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
                spin_lock_bh(&nf_nat_lock);
                /* nf_conntrack_alter_reply might re-allocate the extension area */
                nat = nfct_nat(ct);
                nat->ct = ct;
                hlist_add_head_rcu(&nat->bysource, &bysource[srchash]);
                spin_unlock_bh(&nf_nat_lock);
        }

        /* It's done. */
        if (maniptype == IP_NAT_MANIP_DST)
                set_bit(IPS_DST_NAT_DONE_BIT, &ct->status);
        else
                set_bit(IPS_SRC_NAT_DONE_BIT, &ct->status);

        return NF_ACCEPT;
}
EXPORT_SYMBOL(nf_nat_setup_info);
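
/*
 * Usage sketch (hypothetical caller): an SNAT-style target would set
 * up a source mapping for a new connection roughly like this.  The
 * address below is illustrative only.
 *
 *      struct nf_nat_range range = {
 *              .flags  = IP_NAT_RANGE_MAP_IPS,
 *              .min_ip = htonl(0x01020304),    (1.2.3.4)
 *              .max_ip = htonl(0x01020304),
 *      };
 *
 *      return nf_nat_setup_info(ct, &range, IP_NAT_MANIP_SRC);
 */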

/* Returns true if succeeded. */
static bool
manip_pkt(u_int16_t proto,
          struct sk_buff *skb,
          unsigned int iphdroff,
          const struct nf_conntrack_tuple *target,
          enum nf_nat_manip_type maniptype)
{
        struct iphdr *iph;
        const struct nf_nat_protocol *p;

        if (!skb_make_writable(skb, iphdroff + sizeof(*iph)))
                return false;

        iph = (void *)skb->data + iphdroff;

        /* Manipulate protocol part. */

        /* rcu_read_lock()ed by nf_hook_slow */
        p = __nf_nat_proto_find(proto);
        if (!p->manip_pkt(skb, iphdroff, target, maniptype))
                return false;

        iph = (void *)skb->data + iphdroff;

        if (maniptype == IP_NAT_MANIP_SRC) {
                csum_replace4(&iph->check, iph->saddr, target->src.u3.ip);
                iph->saddr = target->src.u3.ip;
        } else {
                csum_replace4(&iph->check, iph->daddr, target->dst.u3.ip);
                iph->daddr = target->dst.u3.ip;
        }
        return true;
}
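
/*
 * Note: csum_replace4() updates the IP header checksum incrementally
 * (RFC 1624 style), folding out the old address and folding in the
 * new one, so the whole header does not have to be re-summed.  iph is
 * reloaded after p->manip_pkt() because making the packet writable
 * may have copied (and thus moved) the header data.
 */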

/* Do packet manipulations according to nf_nat_setup_info. */
unsigned int nf_nat_packet(struct nf_conn *ct,
                           enum ip_conntrack_info ctinfo,
                           unsigned int hooknum,
                           struct sk_buff *skb)
{
        enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
        unsigned long statusbit;
        enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum);

        if (mtype == IP_NAT_MANIP_SRC)
                statusbit = IPS_SRC_NAT;
        else
                statusbit = IPS_DST_NAT;

        /* Invert if this is reply dir. */
        if (dir == IP_CT_DIR_REPLY)
                statusbit ^= IPS_NAT_MASK;

        /* Non-atomic: these bits don't change. */
        if (ct->status & statusbit) {
                struct nf_conntrack_tuple target;

                /* We are aiming to look like inverse of other direction. */
                nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);

                if (!manip_pkt(target.dst.protonum, skb, 0, &target, mtype))
                        return NF_DROP;
        }
        return NF_ACCEPT;
}
EXPORT_SYMBOL_GPL(nf_nat_packet);
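
/*
 * Example of the statusbit inversion above: for an SNAT'd connection
 * only IPS_SRC_NAT is set.  An original-direction packet at
 * POSTROUTING (SRC manip) matches it directly; a reply packet at
 * PREROUTING starts out as IPS_DST_NAT from HOOK2MANIP, which the XOR
 * with IPS_NAT_MASK flips back to IPS_SRC_NAT, so the reply's
 * destination is rewritten to undo the source mapping.
 */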

/* Dir is direction ICMP is coming from (opposite to packet it contains) */
int nf_nat_icmp_reply_translation(struct nf_conn *ct,
                                  enum ip_conntrack_info ctinfo,
                                  unsigned int hooknum,
                                  struct sk_buff *skb)
{
        struct {
                struct icmphdr icmp;
                struct iphdr ip;
        } *inside;
        const struct nf_conntrack_l4proto *l4proto;
        struct nf_conntrack_tuple inner, target;
        int hdrlen = ip_hdrlen(skb);
        enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
        unsigned long statusbit;
        enum nf_nat_manip_type manip = HOOK2MANIP(hooknum);

        if (!skb_make_writable(skb, hdrlen + sizeof(*inside)))
                return 0;

        inside = (void *)skb->data + ip_hdrlen(skb);

        /* We're actually going to mangle it beyond trivial checksum
           adjustment, so make sure the current checksum is correct. */
        if (nf_ip_checksum(skb, hooknum, hdrlen, 0))
                return 0;

        /* Must be RELATED */
        NF_CT_ASSERT(skb->nfctinfo == IP_CT_RELATED ||
                     skb->nfctinfo == IP_CT_RELATED+IP_CT_IS_REPLY);

        /* Redirects on non-null nats must be dropped, else they'll
           start talking to each other without our translation, and be
           confused... --RR */
        if (inside->icmp.type == ICMP_REDIRECT) {
                /* If NAT isn't finished, assume it and drop. */
                if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK)
                        return 0;

                if (ct->status & IPS_NAT_MASK)
                        return 0;
        }

        pr_debug("icmp_reply_translation: translating error %p manip %u "
                 "dir %s\n", skb, manip,
                 dir == IP_CT_DIR_ORIGINAL ? "ORIG" : "REPLY");

        /* rcu_read_lock()ed by nf_hook_slow */
        l4proto = __nf_ct_l4proto_find(PF_INET, inside->ip.protocol);

        if (!nf_ct_get_tuple(skb,
                             ip_hdrlen(skb) + sizeof(struct icmphdr),
                             (ip_hdrlen(skb) +
                              sizeof(struct icmphdr) + inside->ip.ihl * 4),
                             (u_int16_t)AF_INET,
                             inside->ip.protocol,
                             &inner, l3proto, l4proto))
                return 0;

        /* Change inner back to look like incoming packet.  We do the
           opposite manip on this hook to normal, because it might not
           pass all hooks (locally-generated ICMP).  Consider incoming
           packet: PREROUTING (DST manip), routing produces ICMP, goes
           through POSTROUTING (which must correct the DST manip). */
        if (!manip_pkt(inside->ip.protocol, skb,
                       ip_hdrlen(skb) + sizeof(inside->icmp),
                       &ct->tuplehash[!dir].tuple,
                       !manip))
                return 0;

        if (skb->ip_summed != CHECKSUM_PARTIAL) {
                /* Reload "inside" here, since manip_pkt may have moved
                   the packet data. */
                inside = (void *)skb->data + ip_hdrlen(skb);
                inside->icmp.checksum = 0;
                inside->icmp.checksum =
                        csum_fold(skb_checksum(skb, hdrlen,
                                               skb->len - hdrlen, 0));
        }

        /* Change outer to look like the reply to an incoming packet
         * (proto 0 means don't invert per-proto part). */
        if (manip == IP_NAT_MANIP_SRC)
                statusbit = IPS_SRC_NAT;
        else
                statusbit = IPS_DST_NAT;

        /* Invert if this is reply dir. */
        if (dir == IP_CT_DIR_REPLY)
                statusbit ^= IPS_NAT_MASK;

        if (ct->status & statusbit) {
                nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
                if (!manip_pkt(0, skb, 0, &target, manip))
                        return 0;
        }

        return 1;
}
EXPORT_SYMBOL_GPL(nf_nat_icmp_reply_translation);
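
/*
 * Worked example (sketch): host 10.0.0.2 is SNAT'd to 1.2.3.4 and a
 * router on the path returns an ICMP error about one of its packets.
 * The embedded ("inner") header still carries the mapped source
 * 1.2.3.4; the opposite manip rewrites it back to 10.0.0.2 so it
 * matches what the host actually sent, and the outer header is then
 * rewritten like any reply so the error is delivered to 10.0.0.2.
 */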

/* Protocol registration. */
int nf_nat_protocol_register(const struct nf_nat_protocol *proto)
{
        int ret = 0;

        spin_lock_bh(&nf_nat_lock);
        if (nf_nat_protos[proto->protonum] != &nf_nat_unknown_protocol) {
                ret = -EBUSY;
                goto out;
        }
        rcu_assign_pointer(nf_nat_protos[proto->protonum], proto);
 out:
        spin_unlock_bh(&nf_nat_lock);
        return ret;
}
EXPORT_SYMBOL(nf_nat_protocol_register);
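
/*
 * Registration sketch (hypothetical protocol module): a NAT protocol
 * module registers its ops from its init function and backs off if the
 * slot is already taken:
 *
 *      static int __init nf_nat_proto_foo_init(void)
 *      {
 *              return nf_nat_protocol_register(&nf_nat_protocol_foo);
 *      }
 */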

/* No one stores the protocol anywhere; simply delete it. */
void nf_nat_protocol_unregister(const struct nf_nat_protocol *proto)
{
        spin_lock_bh(&nf_nat_lock);
        rcu_assign_pointer(nf_nat_protos[proto->protonum],
                           &nf_nat_unknown_protocol);
        spin_unlock_bh(&nf_nat_lock);
        synchronize_rcu();
}
EXPORT_SYMBOL(nf_nat_protocol_unregister);
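
/*
 * The synchronize_rcu() above is what makes unregistration safe: it
 * waits for all RCU read-side critical sections that might still hold
 * the old pointer from __nf_nat_proto_find() to finish before the
 * caller (e.g. a module being unloaded) discards the ops.
 */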

/* No one is using the conntrack by the time this is called. */
static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
{
        struct nf_conn_nat *nat = nf_ct_ext_find(ct, NF_CT_EXT_NAT);

        if (nat == NULL || nat->ct == NULL)
                return;

        NF_CT_ASSERT(nat->ct->status & IPS_NAT_DONE_MASK);

        spin_lock_bh(&nf_nat_lock);
        hlist_del_rcu(&nat->bysource);
        nat->ct = NULL;
        spin_unlock_bh(&nf_nat_lock);
}

static void nf_nat_move_storage(void *new, void *old)
{
        struct nf_conn_nat *new_nat = new;
        struct nf_conn_nat *old_nat = old;
        struct nf_conn *ct = old_nat->ct;

        if (!ct || !(ct->status & IPS_NAT_DONE_MASK))
                return;

        spin_lock_bh(&nf_nat_lock);
        hlist_replace_rcu(&old_nat->bysource, &new_nat->bysource);
        new_nat->ct = ct;
        spin_unlock_bh(&nf_nat_lock);
}

static struct nf_ct_ext_type nat_extend __read_mostly = {
        .len            = sizeof(struct nf_conn_nat),
        .align          = __alignof__(struct nf_conn_nat),
        .destroy        = nf_nat_cleanup_conntrack,
        .move           = nf_nat_move_storage,
        .id             = NF_CT_EXT_NAT,
        .flags          = NF_CT_EXT_F_PREALLOC,
};
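
/*
 * Note (assumption about the extension infrastructure): the ->move
 * callback runs when adding another extension forces the conntrack's
 * extension area to be reallocated, relocating existing extensions.
 * Since nf_conn_nat is linked into the bysource hash by address, the
 * hash entry must be re-pointed at the new copy, which is what
 * nf_nat_move_storage() does.  NF_CT_EXT_F_PREALLOC asks for room to
 * be reserved up front so this move is rarely needed.
 */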

static int __init nf_nat_init(void)
{
        size_t i;
        int ret;

        need_ipv4_conntrack();

        ret = nf_ct_extend_register(&nat_extend);
        if (ret < 0) {
                printk(KERN_ERR "nf_nat_core: Unable to register extension\n");
                return ret;
        }

        /* Leave them the same for the moment. */
        nf_nat_htable_size = nf_conntrack_htable_size;

        bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size,
                                         &nf_nat_vmalloced);
        if (!bysource) {
                ret = -ENOMEM;
                goto cleanup_extend;
        }

        /* Sew in builtin protocols. */
        spin_lock_bh(&nf_nat_lock);
        for (i = 0; i < MAX_IP_NAT_PROTO; i++)
                rcu_assign_pointer(nf_nat_protos[i], &nf_nat_unknown_protocol);
        rcu_assign_pointer(nf_nat_protos[IPPROTO_TCP], &nf_nat_protocol_tcp);
        rcu_assign_pointer(nf_nat_protos[IPPROTO_UDP], &nf_nat_protocol_udp);
        rcu_assign_pointer(nf_nat_protos[IPPROTO_ICMP], &nf_nat_protocol_icmp);
        spin_unlock_bh(&nf_nat_lock);

        /* Initialize fake conntrack so that NAT will skip it */
        nf_conntrack_untracked.status |= IPS_NAT_DONE_MASK;

        l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET);

        BUG_ON(nf_nat_seq_adjust_hook != NULL);
        rcu_assign_pointer(nf_nat_seq_adjust_hook, nf_nat_seq_adjust);
        return 0;

 cleanup_extend:
        nf_ct_extend_unregister(&nat_extend);
        return ret;
}

/* Clear NAT section of all conntracks, in case we're loaded again. */
static int clean_nat(struct nf_conn *i, void *data)
{
        struct nf_conn_nat *nat = nfct_nat(i);

        if (!nat)
                return 0;
        memset(nat, 0, sizeof(*nat));
        i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK | IPS_SEQ_ADJUST);
        return 0;
}

static void __exit nf_nat_cleanup(void)
{
        nf_ct_iterate_cleanup(&clean_nat, NULL);
        synchronize_rcu();
        nf_ct_free_hashtable(bysource, nf_nat_vmalloced, nf_nat_htable_size);
        nf_ct_l3proto_put(l3proto);
        nf_ct_extend_unregister(&nat_extend);
        rcu_assign_pointer(nf_nat_seq_adjust_hook, NULL);
        synchronize_net();
}

MODULE_LICENSE("GPL");

module_init(nf_nat_init);
module_exit(nf_nat_cleanup);