netfilter: xtables: consolidate comefrom debug cast access
[linux-2.6.git] / net / ipv4 / netfilter / ip_tables.c
1 /*
2  * Packet matching code.
3  *
4  * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5  * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  */
11 #include <linux/cache.h>
12 #include <linux/capability.h>
13 #include <linux/skbuff.h>
14 #include <linux/kmod.h>
15 #include <linux/vmalloc.h>
16 #include <linux/netdevice.h>
17 #include <linux/module.h>
18 #include <linux/icmp.h>
19 #include <net/ip.h>
20 #include <net/compat.h>
21 #include <asm/uaccess.h>
22 #include <linux/mutex.h>
23 #include <linux/proc_fs.h>
24 #include <linux/err.h>
25 #include <linux/cpumask.h>
26
27 #include <linux/netfilter/x_tables.h>
28 #include <linux/netfilter_ipv4/ip_tables.h>
29 #include <net/netfilter/nf_log.h>
30
31 MODULE_LICENSE("GPL");
32 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
33 MODULE_DESCRIPTION("IPv4 packet filter");
34
35 /*#define DEBUG_IP_FIREWALL*/
36 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
37 /*#define DEBUG_IP_FIREWALL_USER*/
38
39 #ifdef DEBUG_IP_FIREWALL
40 #define dprintf(format, args...)  printk(format , ## args)
41 #else
42 #define dprintf(format, args...)
43 #endif
44
45 #ifdef DEBUG_IP_FIREWALL_USER
46 #define duprintf(format, args...) printk(format , ## args)
47 #else
48 #define duprintf(format, args...)
49 #endif
50
51 #ifdef CONFIG_NETFILTER_DEBUG
52 #define IP_NF_ASSERT(x)                                         \
53 do {                                                            \
54         if (!(x))                                               \
55                 printk("IP_NF_ASSERT: %s:%s:%u\n",              \
56                        __func__, __FILE__, __LINE__);   \
57 } while(0)
58 #else
59 #define IP_NF_ASSERT(x)
60 #endif
61
62 #if 0
63 /* All the better to debug you with... */
64 #define static
65 #define inline
66 #endif
67
68 /*
69    We keep a set of rules for each CPU, so we can avoid write-locking
70    them in the softirq when updating the counters and therefore
71    only need to read-lock in the softirq; doing a write_lock_bh() in user
72    context stops packets coming through and allows user context to read
73    the counters or update the rules.
74
75    Hence the start of any table is given by get_table() below.  */
76
77 /* Returns whether matches rule or not. */
78 /* Performance critical - called for every packet */
79 static inline bool
80 ip_packet_match(const struct iphdr *ip,
81                 const char *indev,
82                 const char *outdev,
83                 const struct ipt_ip *ipinfo,
84                 int isfrag)
85 {
86         unsigned long ret;
87
88 #define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg)))
89
90         if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
91                   IPT_INV_SRCIP)
92             || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
93                      IPT_INV_DSTIP)) {
94                 dprintf("Source or dest mismatch.\n");
95
96                 dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n",
97                         &ip->saddr, &ipinfo->smsk.s_addr, &ipinfo->src.s_addr,
98                         ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
99                 dprintf("DST: %pI4 Mask: %pI4 Target: %pI4.%s\n",
100                         &ip->daddr, &ipinfo->dmsk.s_addr, &ipinfo->dst.s_addr,
101                         ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
102                 return false;
103         }
104
105         ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask);
106
107         if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
108                 dprintf("VIA in mismatch (%s vs %s).%s\n",
109                         indev, ipinfo->iniface,
110                         ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
111                 return false;
112         }
113
114         ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask);
115
116         if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
117                 dprintf("VIA out mismatch (%s vs %s).%s\n",
118                         outdev, ipinfo->outiface,
119                         ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
120                 return false;
121         }
122
123         /* Check specific protocol */
124         if (ipinfo->proto
125             && FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
126                 dprintf("Packet protocol %hi does not match %hi.%s\n",
127                         ip->protocol, ipinfo->proto,
128                         ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
129                 return false;
130         }
131
132         /* If we have a fragment rule but the packet is not a fragment
133          * then we return zero */
134         if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
135                 dprintf("Fragment rule but not fragment.%s\n",
136                         ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
137                 return false;
138         }
139
140         return true;
141 }
142
143 static bool
144 ip_checkentry(const struct ipt_ip *ip)
145 {
146         if (ip->flags & ~IPT_F_MASK) {
147                 duprintf("Unknown flag bits set: %08X\n",
148                          ip->flags & ~IPT_F_MASK);
149                 return false;
150         }
151         if (ip->invflags & ~IPT_INV_MASK) {
152                 duprintf("Unknown invflag bits set: %08X\n",
153                          ip->invflags & ~IPT_INV_MASK);
154                 return false;
155         }
156         return true;
157 }
158
159 static unsigned int
160 ipt_error(struct sk_buff *skb, const struct xt_target_param *par)
161 {
162         if (net_ratelimit())
163                 printk("ip_tables: error: `%s'\n",
164                        (const char *)par->targinfo);
165
166         return NF_DROP;
167 }
168
169 /* Performance critical - called for every packet */
170 static inline bool
171 do_match(struct ipt_entry_match *m, const struct sk_buff *skb,
172          struct xt_match_param *par)
173 {
174         par->match     = m->u.kernel.match;
175         par->matchinfo = m->data;
176
177         /* Stop iteration if it doesn't match */
178         if (!m->u.kernel.match->match(skb, par))
179                 return true;
180         else
181                 return false;
182 }
183
/* Performance critical */
/* Resolve a byte offset within a table blob to its ipt_entry. */
static inline struct ipt_entry *
get_entry(void *base, unsigned int offset)
{
	return (struct ipt_entry *)((char *)base + offset);
}
190
191 /* All zeroes == unconditional rule. */
192 /* Mildly perf critical (only if packet tracing is on) */
193 static inline int
194 unconditional(const struct ipt_ip *ip)
195 {
196         unsigned int i;
197
198         for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++)
199                 if (((__u32 *)ip)[i])
200                         return 0;
201
202         return 1;
203 #undef FWINV
204 }
205
206 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
207     defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
/* Human-readable names for the built-in hook points, indexed by hook
 * number; used to build the TRACE log prefix. */
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING]		= "PREROUTING",
	[NF_INET_LOCAL_IN]		= "INPUT",
	[NF_INET_FORWARD]		= "FORWARD",
	[NF_INET_LOCAL_OUT]		= "OUTPUT",
	[NF_INET_POST_ROUTING]		= "POSTROUTING",
};

/* What kind of rule a TRACE log line refers to. */
enum nf_ip_trace_comments {
	NF_IP_TRACE_COMMENT_RULE,
	NF_IP_TRACE_COMMENT_RETURN,
	NF_IP_TRACE_COMMENT_POLICY,
};

/* Log-line comment strings, indexed by enum nf_ip_trace_comments. */
static const char *const comments[] = {
	[NF_IP_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP_TRACE_COMMENT_POLICY]	= "policy",
};

/* Fixed logging parameters for every "TRACE:" line emitted by
 * trace_packet() below. */
static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_MASK,
		},
	},
};
237
/* Mildly perf critical (only if packet tracing is on) */
/*
 * IPT_ENTRY_ITERATE callback: walk entries from the hook start towards
 * the matched entry 'e', maintaining the current chain name (*chainname)
 * and rule number within that chain (*rulenum).  Returns 1 to stop the
 * iteration once 'e' has been reached, 0 to keep walking.
 */
static inline int
get_chainname_rulenum(struct ipt_entry *s, struct ipt_entry *e,
		      const char *hookname, const char **chainname,
		      const char **comment, unsigned int *rulenum)
{
	struct ipt_standard_target *t = (void *)ipt_get_target(s);

	if (strcmp(t->target.u.kernel.target->name, IPT_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (s->target_offset == sizeof(struct ipt_entry)
		   && strcmp(t->target.u.kernel.target->name,
			     IPT_STANDARD_TARGET) == 0
		   && t->verdict < 0
		   && unconditional(&s->ip)) {
			/* Tail of chains: STANDARD target (return/policy) */
			/* A chain named like the hook itself is the built-in
			 * chain, so its tail is the "policy"; otherwise it is
			 * a user chain's implicit "return". */
			*comment = *chainname == hookname
				? comments[NF_IP_TRACE_COMMENT_POLICY]
				: comments[NF_IP_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}
269
/* Emit one "TRACE: table:chain:comment:rulenum" log line for a packet
 * that matched entry 'e' with skb->nf_trace set.  Walks this CPU's copy
 * of the ruleset from the hook entry point to locate the chain name and
 * rule number of 'e'. */
static void trace_packet(struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 struct xt_table_info *private,
			 struct ipt_entry *e)
{
	void *table_base;
	const struct ipt_entry *root;
	const char *hookname, *chainname, *comment;
	unsigned int rulenum = 0;

	/* Per-CPU ruleset copy; caller already holds the xt_info lock. */
	table_base = private->entries[smp_processor_id()];
	root = get_entry(table_base, private->hook_entry[hook]);

	/* Defaults until get_chainname_rulenum() refines them. */
	hookname = chainname = hooknames[hook];
	comment = comments[NF_IP_TRACE_COMMENT_RULE];

	IPT_ENTRY_ITERATE(root,
			  private->size - private->hook_entry[hook],
			  get_chainname_rulenum,
			  e, hookname, &chainname, &comment, &rulenum);

	nf_log_packet(AF_INET, hook, skb, in, out, &trace_loginfo,
		      "TRACE: %s:%s:%s:%u ",
		      tablename, chainname, comment, rulenum);
}
298 #endif
299
300 static inline __pure
301 struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry)
302 {
303         return (void *)entry + entry->next_offset;
304 }
305
306 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
307 unsigned int
308 ipt_do_table(struct sk_buff *skb,
309              unsigned int hook,
310              const struct net_device *in,
311              const struct net_device *out,
312              struct xt_table *table)
313 {
314 #define tb_comefrom ((struct ipt_entry *)table_base)->comefrom
315
316         static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
317         const struct iphdr *ip;
318         u_int16_t datalen;
319         bool hotdrop = false;
320         /* Initializing verdict to NF_DROP keeps gcc happy. */
321         unsigned int verdict = NF_DROP;
322         const char *indev, *outdev;
323         void *table_base;
324         struct ipt_entry *e, *back;
325         struct xt_table_info *private;
326         struct xt_match_param mtpar;
327         struct xt_target_param tgpar;
328
329         /* Initialization */
330         ip = ip_hdr(skb);
331         datalen = skb->len - ip->ihl * 4;
332         indev = in ? in->name : nulldevname;
333         outdev = out ? out->name : nulldevname;
334         /* We handle fragments by dealing with the first fragment as
335          * if it was a normal packet.  All other fragments are treated
336          * normally, except that they will NEVER match rules that ask
337          * things we don't know, ie. tcp syn flag or ports).  If the
338          * rule is also a fragment-specific rule, non-fragments won't
339          * match it. */
340         mtpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
341         mtpar.thoff   = ip_hdrlen(skb);
342         mtpar.hotdrop = &hotdrop;
343         mtpar.in      = tgpar.in  = in;
344         mtpar.out     = tgpar.out = out;
345         mtpar.family  = tgpar.family = NFPROTO_IPV4;
346         tgpar.hooknum = hook;
347
348         IP_NF_ASSERT(table->valid_hooks & (1 << hook));
349         xt_info_rdlock_bh();
350         private = table->private;
351         table_base = private->entries[smp_processor_id()];
352
353         e = get_entry(table_base, private->hook_entry[hook]);
354
355         /* For return from builtin chain */
356         back = get_entry(table_base, private->underflow[hook]);
357
358         do {
359                 struct ipt_entry_target *t;
360
361                 IP_NF_ASSERT(e);
362                 IP_NF_ASSERT(back);
363                 if (!ip_packet_match(ip, indev, outdev,
364                     &e->ip, mtpar.fragoff) ||
365                     IPT_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0) {
366                         e = ipt_next_entry(e);
367                         continue;
368                 }
369
370                 ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);
371
372                 t = ipt_get_target(e);
373                 IP_NF_ASSERT(t->u.kernel.target);
374
375 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
376     defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
377                 /* The packet is traced: log it */
378                 if (unlikely(skb->nf_trace))
379                         trace_packet(skb, hook, in, out,
380                                      table->name, private, e);
381 #endif
382                 /* Standard target? */
383                 if (!t->u.kernel.target->target) {
384                         int v;
385
386                         v = ((struct ipt_standard_target *)t)->verdict;
387                         if (v < 0) {
388                                 /* Pop from stack? */
389                                 if (v != IPT_RETURN) {
390                                         verdict = (unsigned)(-v) - 1;
391                                         break;
392                                 }
393                                 e = back;
394                                 back = get_entry(table_base, back->comefrom);
395                                 continue;
396                         }
397                         if (table_base + v != ipt_next_entry(e)
398                             && !(e->ip.flags & IPT_F_GOTO)) {
399                                 /* Save old back ptr in next entry */
400                                 struct ipt_entry *next = ipt_next_entry(e);
401                                 next->comefrom = (void *)back - table_base;
402                                 /* set back pointer to next entry */
403                                 back = next;
404                         }
405
406                         e = get_entry(table_base, v);
407                         continue;
408                 }
409
410                 /* Targets which reenter must return
411                    abs. verdicts */
412                 tgpar.target   = t->u.kernel.target;
413                 tgpar.targinfo = t->data;
414
415
416 #ifdef CONFIG_NETFILTER_DEBUG
417                 tb_comefrom = 0xeeeeeeec;
418 #endif
419                 verdict = t->u.kernel.target->target(skb, &tgpar);
420 #ifdef CONFIG_NETFILTER_DEBUG
421                 if (comefrom != 0xeeeeeeec && verdict == IPT_CONTINUE) {
422                         printk("Target %s reentered!\n",
423                                t->u.kernel.target->name);
424                         verdict = NF_DROP;
425                 }
426                 tb_comefrom = 0x57acc001;
427 #endif
428                 /* Target might have changed stuff. */
429                 ip = ip_hdr(skb);
430                 datalen = skb->len - ip->ihl * 4;
431
432                 if (verdict == IPT_CONTINUE)
433                         e = ipt_next_entry(e);
434                 else
435                         /* Verdict */
436                         break;
437         } while (!hotdrop);
438         xt_info_rdunlock_bh();
439
440 #ifdef DEBUG_ALLOW_ALL
441         return NF_ACCEPT;
442 #else
443         if (hotdrop)
444                 return NF_DROP;
445         else return verdict;
446 #endif
447
448 #undef tb_comefrom
449 }
450
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
static int
mark_source_chains(struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ipt_entry *e = (struct ipt_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			struct ipt_standard_target *t
				= (void *)ipt_get_target(e);
			int visited = e->comefrom & (1 << hook);

			/* Bit NF_INET_NUMHOOKS marks "on the current
			 * traversal path"; meeting it again is a loop. */
			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				printk("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ipt_entry)
			    && (strcmp(t->target.u.user.name,
				       IPT_STANDARD_TARGET) == 0)
			    && t->verdict < 0
			    && unconditional(&e->ip)) || visited) {
				unsigned int oldpos, size;

				if ((strcmp(t->target.u.user.name,
					    IPT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
								t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jump. */
				do {
					/* Clear the "on path" bit as we
					 * leave this entry. */
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					/* pcnt doubles as the saved back
					 * pointer; restore and clear it. */
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ipt_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ipt_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   IPT_STANDARD_TARGET) == 0
				    && newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct ipt_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
								newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ipt_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
		next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
562
563 static int
564 cleanup_match(struct ipt_entry_match *m, unsigned int *i)
565 {
566         struct xt_mtdtor_param par;
567
568         if (i && (*i)-- == 0)
569                 return 1;
570
571         par.match     = m->u.kernel.match;
572         par.matchinfo = m->data;
573         par.family    = NFPROTO_IPV4;
574         if (par.match->destroy != NULL)
575                 par.match->destroy(&par);
576         module_put(par.match->me);
577         return 0;
578 }
579
580 static int
581 check_entry(struct ipt_entry *e, const char *name)
582 {
583         struct ipt_entry_target *t;
584
585         if (!ip_checkentry(&e->ip)) {
586                 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
587                 return -EINVAL;
588         }
589
590         if (e->target_offset + sizeof(struct ipt_entry_target) >
591             e->next_offset)
592                 return -EINVAL;
593
594         t = ipt_get_target(e);
595         if (e->target_offset + t->u.target_size > e->next_offset)
596                 return -EINVAL;
597
598         return 0;
599 }
600
601 static int
602 check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par,
603             unsigned int *i)
604 {
605         const struct ipt_ip *ip = par->entryinfo;
606         int ret;
607
608         par->match     = m->u.kernel.match;
609         par->matchinfo = m->data;
610
611         ret = xt_check_match(par, m->u.match_size - sizeof(*m),
612               ip->proto, ip->invflags & IPT_INV_PROTO);
613         if (ret < 0) {
614                 duprintf("ip_tables: check failed for `%s'.\n",
615                          par.match->name);
616                 return ret;
617         }
618         ++*i;
619         return 0;
620 }
621
622 static int
623 find_check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par,
624                  unsigned int *i)
625 {
626         struct xt_match *match;
627         int ret;
628
629         match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
630                                                       m->u.user.revision),
631                                         "ipt_%s", m->u.user.name);
632         if (IS_ERR(match) || !match) {
633                 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
634                 return match ? PTR_ERR(match) : -ENOENT;
635         }
636         m->u.kernel.match = match;
637
638         ret = check_match(m, par, i);
639         if (ret)
640                 goto err;
641
642         return 0;
643 err:
644         module_put(m->u.kernel.match->me);
645         return ret;
646 }
647
648 static int check_target(struct ipt_entry *e, const char *name)
649 {
650         struct ipt_entry_target *t = ipt_get_target(e);
651         struct xt_tgchk_param par = {
652                 .table     = name,
653                 .entryinfo = e,
654                 .target    = t->u.kernel.target,
655                 .targinfo  = t->data,
656                 .hook_mask = e->comefrom,
657                 .family    = NFPROTO_IPV4,
658         };
659         int ret;
660
661         ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
662               e->ip.proto, e->ip.invflags & IPT_INV_PROTO);
663         if (ret < 0) {
664                 duprintf("ip_tables: check failed for `%s'.\n",
665                          t->u.kernel.target->name);
666                 return ret;
667         }
668         return 0;
669 }
670
/* Fully validate one entry: structural checks, then look up and check
 * every match, then look up and check the target.  On any failure the
 * already-acquired match/target module references are released via the
 * goto cleanup chain.  Increments *i for each fully-validated entry so
 * the caller can unwind exactly that many on a later failure. */
static int
find_check_entry(struct ipt_entry *e, const char *name, unsigned int size,
		 unsigned int *i)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;

	ret = check_entry(e, name);
	if (ret)
		return ret;

	/* j counts matches successfully checked, for partial unwind. */
	j = 0;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV4;
	ret = IPT_MATCH_ITERATE(e, find_check_match, &mtpar, &j);
	if (ret != 0)
		goto cleanup_matches;

	/* Resolve (and if needed modprobe) the target extension. */
	t = ipt_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET,
							t->u.user.name,
							t->u.user.revision),
					 "ipt_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, name);
	if (ret)
		goto err;

	(*i)++;
	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	IPT_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}
718
/* First-pass walk over the user-supplied blob: verify each entry's
 * alignment and minimum size, record which entries are hook entry
 * points / underflows, and reset the kernel-owned counters/comefrom
 * fields.  Increments *i per entry so the caller can verify the total
 * count. */
static int
check_entry_size_and_hooks(struct ipt_entry *e,
			   struct xt_table_info *newinfo,
			   unsigned char *base,
			   unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int *i)
{
	unsigned int h;

	/* Entry must be properly aligned and leave room for at least
	 * one ipt_entry before the end of the blob. */
	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0
	    || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	if (e->next_offset
	    < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* FIXME: underflows must be unconditional, standard verdicts
	   < 0 (not IPT_RETURN). --RR */

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;

	(*i)++;
	return 0;
}
761
762 static int
763 cleanup_entry(struct ipt_entry *e, unsigned int *i)
764 {
765         struct xt_tgdtor_param par;
766         struct ipt_entry_target *t;
767
768         if (i && (*i)-- == 0)
769                 return 1;
770
771         /* Cleanup all matches */
772         IPT_MATCH_ITERATE(e, cleanup_match, NULL);
773         t = ipt_get_target(e);
774
775         par.target   = t->u.kernel.target;
776         par.targinfo = t->data;
777         par.family   = NFPROTO_IPV4;
778         if (par.target->destroy != NULL)
779                 par.target->destroy(&par);
780         module_put(par.target->me);
781         return 0;
782 }
783
/* Checks and translates the user-supplied table segment (held in
   newinfo) */
/* Three passes: (1) size/offset/hook bookkeeping per entry, (2) loop
 * detection via mark_source_chains(), (3) per-entry extension checks.
 * On success, replicates the validated blob into every other CPU's
 * copy.  Returns 0 or a negative errno (-ELOOP on ruleset loops). */
static int
translate_table(const char *name,
		unsigned int valid_hooks,
		struct xt_table_info *newinfo,
		void *entry0,
		unsigned int size,
		unsigned int number,
		const unsigned int *hook_entries,
		const unsigned int *underflows)
{
	unsigned int i;
	int ret;

	newinfo->size = size;
	newinfo->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
				check_entry_size_and_hooks,
				newinfo,
				entry0,
				entry0 + size,
				hook_entries, underflows, &i);
	if (ret != 0)
		return ret;

	/* Entry count must match what userspace claimed. */
	if (i != number) {
		duprintf("translate_table: %u not %u entries\n",
			 i, number);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
				find_check_entry, name, size, &i);

	if (ret != 0) {
		/* Unwind exactly the i entries that passed the check. */
		IPT_ENTRY_ITERATE(entry0, newinfo->size,
				cleanup_entry, &i);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}
865
866 /* Gets counters. */
867 static inline int
868 add_entry_to_counter(const struct ipt_entry *e,
869                      struct xt_counters total[],
870                      unsigned int *i)
871 {
872         ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
873
874         (*i)++;
875         return 0;
876 }
877
878 static inline int
879 set_entry_to_counter(const struct ipt_entry *e,
880                      struct ipt_counters total[],
881                      unsigned int *i)
882 {
883         SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
884
885         (*i)++;
886         return 0;
887 }
888
/* Snapshot the per-rule packet/byte counters of all CPUs into
 * counters[] (indexed by rule position): the local CPU's values are
 * SET first, every other CPU's are then ADDed under that CPU's
 * xt_info write lock. */
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	unsigned int cpu;
	unsigned int i;
	unsigned int curcpu;

	/* Instead of clearing (by a previous call to memset())
	 * the counters and using adds, we set the counters
	 * with data used by 'current' CPU.
	 *
	 * Bottom half has to be disabled to prevent deadlock
	 * if new softirq were to run and call ipt_do_table
	 */
	local_bh_disable();
	curcpu = smp_processor_id();

	i = 0;
	IPT_ENTRY_ITERATE(t->entries[curcpu],
			  t->size,
			  set_entry_to_counter,
			  counters,
			  &i);

	for_each_possible_cpu(cpu) {
		if (cpu == curcpu)
			continue;
		i = 0;
		/* hold the remote CPU's write lock so its counters are
		 * stable while we read them */
		xt_info_wrlock(cpu);
		IPT_ENTRY_ITERATE(t->entries[cpu],
				  t->size,
				  add_entry_to_counter,
				  counters,
				  &i);
		xt_info_wrunlock(cpu);
	}
	local_bh_enable();
}
928
/* Allocate a counter snapshot for userspace consumption; the caller
 * must vfree() it.  Returns ERR_PTR(-ENOMEM) on allocation failure. */
static struct xt_counters * alloc_counters(struct xt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;

	/* We need atomic snapshot of counters: rest doesn't change
	   (other than comefrom, which userspace doesn't care
	   about). */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vmalloc_node(countersize, numa_node_id());

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);

	get_counters(private, counters);

	return counters;
}
948
/* Copy the current ruleset plus a fresh counter snapshot back to
 * userspace.  total_size must match the table's private->size (the
 * caller checks this).  Returns 0 or -EFAULT. */
static int
copy_entries_to_user(unsigned int total_size,
		     struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	struct ipt_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	int ret = 0;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		const struct ipt_entry_match *m;
		const struct ipt_entry_target *t;

		e = (struct ipt_entry *)(loc_cpu_entry + off);
		/* overwrite the stale counters copied above with the
		 * snapshot taken by alloc_counters() */
		if (copy_to_user(userptr + off
				 + offsetof(struct ipt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		/* rewrite each match's kernel pointer union with the
		 * user-visible extension name */
		for (i = sizeof(struct ipt_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct ipt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		/* same fixup for the target's name */
		t = ipt_get_target(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct ipt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
1022
1023 #ifdef CONFIG_COMPAT
1024 static void compat_standard_from_user(void *dst, void *src)
1025 {
1026         int v = *(compat_int_t *)src;
1027
1028         if (v > 0)
1029                 v += xt_compat_calc_jump(AF_INET, v);
1030         memcpy(dst, &v, sizeof(v));
1031 }
1032
1033 static int compat_standard_to_user(void __user *dst, void *src)
1034 {
1035         compat_int_t cv = *(int *)src;
1036
1037         if (cv > 0)
1038                 cv -= xt_compat_calc_jump(AF_INET, cv);
1039         return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1040 }
1041
/* Accumulate into *size how much larger this match's native
 * representation is than its compat one; always continues iteration. */
static inline int
compat_calc_match(struct ipt_entry_match *m, int *size)
{
	*size += xt_compat_match_offset(m->u.kernel.match);
	return 0;
}
1048
/* Compute how much smaller this entry becomes in compat layout (off),
 * record the delta for its offset via xt_compat_add_offset(), and pull
 * back every hook entry/underflow mark that lies beyond this entry. */
static int compat_calc_entry(struct ipt_entry *e,
			     const struct xt_table_info *info,
			     void *base, struct xt_table_info *newinfo)
{
	struct ipt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	/* entry header shrink, plus each match's and the target's delta */
	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - base;
	IPT_MATCH_ITERATE(e, compat_calc_match, &off);
	t = ipt_get_target(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct ipt_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ipt_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}
1077
/* Fill newinfo with a compat-adjusted copy of info's metadata (size and
 * hook offsets shrunk to the 32-bit layout).  newinfo->entries[] is not
 * touched; the current CPU's blob is only walked to compute offsets. */
static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	void *loc_cpu_entry;

	if (!newinfo || !info)
		return -EINVAL;

	/* we dont care about newinfo->entries[] */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
				 compat_calc_entry, info, loc_cpu_entry,
				 newinfo);
}
1094 #endif
1095
1096 static int get_info(struct net *net, void __user *user, int *len, int compat)
1097 {
1098         char name[IPT_TABLE_MAXNAMELEN];
1099         struct xt_table *t;
1100         int ret;
1101
1102         if (*len != sizeof(struct ipt_getinfo)) {
1103                 duprintf("length %u != %zu\n", *len,
1104                          sizeof(struct ipt_getinfo));
1105                 return -EINVAL;
1106         }
1107
1108         if (copy_from_user(name, user, sizeof(name)) != 0)
1109                 return -EFAULT;
1110
1111         name[IPT_TABLE_MAXNAMELEN-1] = '\0';
1112 #ifdef CONFIG_COMPAT
1113         if (compat)
1114                 xt_compat_lock(AF_INET);
1115 #endif
1116         t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
1117                                     "iptable_%s", name);
1118         if (t && !IS_ERR(t)) {
1119                 struct ipt_getinfo info;
1120                 const struct xt_table_info *private = t->private;
1121
1122 #ifdef CONFIG_COMPAT
1123                 if (compat) {
1124                         struct xt_table_info tmp;
1125                         ret = compat_table_info(private, &tmp);
1126                         xt_compat_flush_offsets(AF_INET);
1127                         private = &tmp;
1128                 }
1129 #endif
1130                 info.valid_hooks = t->valid_hooks;
1131                 memcpy(info.hook_entry, private->hook_entry,
1132                        sizeof(info.hook_entry));
1133                 memcpy(info.underflow, private->underflow,
1134                        sizeof(info.underflow));
1135                 info.num_entries = private->number;
1136                 info.size = private->size;
1137                 strcpy(info.name, name);
1138
1139                 if (copy_to_user(user, &info, *len) != 0)
1140                         ret = -EFAULT;
1141                 else
1142                         ret = 0;
1143
1144                 xt_table_unlock(t);
1145                 module_put(t->me);
1146         } else
1147                 ret = t ? PTR_ERR(t) : -ENOENT;
1148 #ifdef CONFIG_COMPAT
1149         if (compat)
1150                 xt_compat_unlock(AF_INET);
1151 #endif
1152         return ret;
1153 }
1154
/* IPT_SO_GET_ENTRIES handler: validate the request header, look the
 * table up and hand the rule blob back via copy_entries_to_user().
 * Returns -EAGAIN when the ruleset size changed since GET_INFO, so
 * userspace can retry with the right buffer size. */
static int
get_entries(struct net *net, struct ipt_get_entries __user *uptr, int *len)
{
	int ret;
	struct ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	/* total length must be header plus exactly get.size of entries */
	if (*len != sizeof(struct ipt_get_entries) + get.size) {
		duprintf("get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	t = xt_find_table_lock(net, AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		const struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n", private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}
1193
/* Swap a fully translated replacement ruleset into the named table and
 * return the old ruleset's counters to userspace.  On success the old
 * xt_table_info is cleaned up and freed here; on failure the caller
 * still owns newinfo. */
static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	void *loc_cpu_old_entry;

	ret = 0;
	counters = vmalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
				    "iptable_%s", name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		oldinfo->number, oldinfo->initial_entries, newinfo->number);
	/* The two ifs together perform one module_put() per condition
	 * that holds -- i.e. up to two puts when the old table carried
	 * user rules and the new one does not. */
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
			  NULL);
	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0)
		ret = -EFAULT;
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
1264
1265 static int
1266 do_replace(struct net *net, void __user *user, unsigned int len)
1267 {
1268         int ret;
1269         struct ipt_replace tmp;
1270         struct xt_table_info *newinfo;
1271         void *loc_cpu_entry;
1272
1273         if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1274                 return -EFAULT;
1275
1276         /* overflow check */
1277         if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1278                 return -ENOMEM;
1279
1280         newinfo = xt_alloc_table_info(tmp.size);
1281         if (!newinfo)
1282                 return -ENOMEM;
1283
1284         /* choose the copy that is on our node/cpu */
1285         loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1286         if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1287                            tmp.size) != 0) {
1288                 ret = -EFAULT;
1289                 goto free_newinfo;
1290         }
1291
1292         ret = translate_table(tmp.name, tmp.valid_hooks,
1293                               newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1294                               tmp.hook_entry, tmp.underflow);
1295         if (ret != 0)
1296                 goto free_newinfo;
1297
1298         duprintf("ip_tables: Translated table\n");
1299
1300         ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1301                            tmp.num_counters, tmp.counters);
1302         if (ret)
1303                 goto free_newinfo_untrans;
1304         return 0;
1305
1306  free_newinfo_untrans:
1307         IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1308  free_newinfo:
1309         xt_free_table_info(newinfo);
1310         return ret;
1311 }
1312
1313 /* We're lazy, and add to the first CPU; overflow works its fey magic
1314  * and everything is OK. */
1315 static int
1316 add_counter_to_entry(struct ipt_entry *e,
1317                      const struct xt_counters addme[],
1318                      unsigned int *i)
1319 {
1320         ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1321
1322         (*i)++;
1323         return 0;
1324 }
1325
1326 static int
1327 do_add_counters(struct net *net, void __user *user, unsigned int len, int compat)
1328 {
1329         unsigned int i, curcpu;
1330         struct xt_counters_info tmp;
1331         struct xt_counters *paddc;
1332         unsigned int num_counters;
1333         const char *name;
1334         int size;
1335         void *ptmp;
1336         struct xt_table *t;
1337         const struct xt_table_info *private;
1338         int ret = 0;
1339         void *loc_cpu_entry;
1340 #ifdef CONFIG_COMPAT
1341         struct compat_xt_counters_info compat_tmp;
1342
1343         if (compat) {
1344                 ptmp = &compat_tmp;
1345                 size = sizeof(struct compat_xt_counters_info);
1346         } else
1347 #endif
1348         {
1349                 ptmp = &tmp;
1350                 size = sizeof(struct xt_counters_info);
1351         }
1352
1353         if (copy_from_user(ptmp, user, size) != 0)
1354                 return -EFAULT;
1355
1356 #ifdef CONFIG_COMPAT
1357         if (compat) {
1358                 num_counters = compat_tmp.num_counters;
1359                 name = compat_tmp.name;
1360         } else
1361 #endif
1362         {
1363                 num_counters = tmp.num_counters;
1364                 name = tmp.name;
1365         }
1366
1367         if (len != size + num_counters * sizeof(struct xt_counters))
1368                 return -EINVAL;
1369
1370         paddc = vmalloc_node(len - size, numa_node_id());
1371         if (!paddc)
1372                 return -ENOMEM;
1373
1374         if (copy_from_user(paddc, user + size, len - size) != 0) {
1375                 ret = -EFAULT;
1376                 goto free;
1377         }
1378
1379         t = xt_find_table_lock(net, AF_INET, name);
1380         if (!t || IS_ERR(t)) {
1381                 ret = t ? PTR_ERR(t) : -ENOENT;
1382                 goto free;
1383         }
1384
1385         local_bh_disable();
1386         private = t->private;
1387         if (private->number != num_counters) {
1388                 ret = -EINVAL;
1389                 goto unlock_up_free;
1390         }
1391
1392         i = 0;
1393         /* Choose the copy that is on our node */
1394         curcpu = smp_processor_id();
1395         loc_cpu_entry = private->entries[curcpu];
1396         xt_info_wrlock(curcpu);
1397         IPT_ENTRY_ITERATE(loc_cpu_entry,
1398                           private->size,
1399                           add_counter_to_entry,
1400                           paddc,
1401                           &i);
1402         xt_info_wrunlock(curcpu);
1403  unlock_up_free:
1404         local_bh_enable();
1405         xt_table_unlock(t);
1406         module_put(t->me);
1407  free:
1408         vfree(paddc);
1409
1410         return ret;
1411 }
1412
1413 #ifdef CONFIG_COMPAT
/* 32-bit userspace layout of the SO_SET_REPLACE request; mirrors
 * struct ipt_replace field for field but with compat-sized members. */
struct compat_ipt_replace {
	char			name[IPT_TABLE_MAXNAMELEN];
	u32			valid_hooks;		/* bitmask of hooks present */
	u32			num_entries;		/* entries in the blob below */
	u32			size;			/* byte size of entries[] */
	u32			hook_entry[NF_INET_NUMHOOKS];
	u32			underflow[NF_INET_NUMHOOKS];
	u32			num_counters;
	compat_uptr_t		counters;	/* struct ipt_counters * */
	struct compat_ipt_entry entries[0];	/* variable-length rule blob */
};
1425
/* Serialize one native entry into the userspace compat blob at *dstptr.
 * Counters come from counters[*i]; target_offset/next_offset are
 * rewritten to account for the native->compat size shrink accumulated
 * in *size.  Returns 0 or a negative errno. */
static int
compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int *i)
{
	struct ipt_entry_target *t;
	struct compat_ipt_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	int ret;

	ret = -EFAULT;
	origsize = *size;
	ce = (struct compat_ipt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(struct ipt_entry)))
		goto out;

	if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
		goto out;

	*dstptr += sizeof(struct compat_ipt_entry);
	*size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	/* convert each match payload; *size shrinks as we go */
	ret = IPT_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
	target_offset = e->target_offset - (origsize - *size);
	if (ret)
		goto out;
	t = ipt_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		goto out;
	ret = -EFAULT;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset))
		goto out;
	if (put_user(next_offset, &ce->next_offset))
		goto out;

	(*i)++;
	return 0;
out:
	return ret;
}
1469
/* Look up (and pin with a module reference) the match used by a compat
 * entry, and add this match's native-vs-compat size difference to
 * *size.  *i counts successfully resolved matches so the caller can
 * release exactly those on error. */
static int
compat_find_calc_match(struct ipt_entry_match *m,
		       const char *name,
		       const struct ipt_ip *ip,
		       unsigned int hookmask,
		       int *size, unsigned int *i)
{
	struct xt_match *match;

	match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
						      m->u.user.revision),
					"ipt_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("compat_check_calc_match: `%s' not found\n",
			 m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	m->u.kernel.match = match;
	*size += xt_compat_match_offset(match);

	(*i)++;
	return 0;
}
1493
1494 static int
1495 compat_release_match(struct ipt_entry_match *m, unsigned int *i)
1496 {
1497         if (i && (*i)-- == 0)
1498                 return 1;
1499
1500         module_put(m->u.kernel.match->me);
1501         return 0;
1502 }
1503
/* Release the match and target module references taken for one compat
 * entry by check_compat_entry_size_and_hooks().  A non-NULL count
 * limits the walk to the first *i entries (error unwinding); returning
 * 1 stops the entry iterator. */
static int
compat_release_entry(struct compat_ipt_entry *e, unsigned int *i)
{
	struct ipt_entry_target *t;

	if (i && (*i)-- == 0)
		return 1;

	/* Cleanup all matches */
	COMPAT_IPT_MATCH_ITERATE(e, compat_release_match, NULL);
	t = compat_ipt_get_target(e);
	module_put(t->u.kernel.target->me);
	return 0;
}
1518
/* First-pass validation of one compat entry: bounds/alignment checks,
 * match and target module lookup (references are kept on success), and
 * bookkeeping of the compat->native size delta for this entry's offset.
 * Also records which entries sit exactly on a hook entry/underflow
 * boundary. */
static int
check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  unsigned char *base,
				  unsigned char *limit,
				  unsigned int *hook_entries,
				  unsigned int *underflows,
				  unsigned int *i,
				  const char *name)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off, h;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0
	    || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	/* every entry must at least hold its header plus a target */
	if (e->next_offset < sizeof(struct compat_ipt_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* For purposes of check_entry casting the compat entry is fine */
	ret = check_entry((struct ipt_entry *)e, name);
	if (ret)
		return ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - (void *)base;
	j = 0;
	/* NOTE(review): e->comefrom is passed as the hook mask here, but
	 * it still holds whatever userspace sent (mark_source_chains has
	 * not run on this blob) -- confirm this is intended. */
	ret = COMPAT_IPT_MATCH_ITERATE(e, compat_find_calc_match, name,
				       &e->ip, e->comefrom, &off, &j);
	if (ret != 0)
		goto release_matches;

	t = compat_ipt_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET,
							t->u.user.name,
							t->u.user.revision),
					 "ipt_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	memset(&e->counters, 0, sizeof(e->counters));
	e->comefrom = 0;

	(*i)++;
	return 0;

out:
	module_put(t->u.kernel.target->me);
release_matches:
	/* NOTE(review): this uses IPT_MATCH_ITERATE although e is a
	 * compat entry; COMPAT_IPT_MATCH_ITERATE looks intended --
	 * verify the iteration offsets are equivalent here. */
	IPT_MATCH_ITERATE(e, compat_release_match, &j);
	return ret;
}
1603
/* Expand one compat entry into native layout at *dstptr, converting
 * match/target payloads via the xt_compat_*_from_user helpers and
 * adjusting target_offset/next_offset plus any hook entry/underflow
 * marks that lie beyond this entry.  *size grows by the accumulated
 * compat->native difference. */
static int
compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
			    unsigned int *size, const char *name,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	struct ipt_entry *de;
	unsigned int origsize;
	int ret, h;

	ret = 0;
	origsize = *size;
	de = (struct ipt_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ipt_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ipt_entry);
	*size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	ret = COMPAT_IPT_MATCH_ITERATE(e, xt_compat_match_from_user,
				       dstptr, size);
	if (ret)
		return ret;
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ipt_get_target(e);
	target = t->u.kernel.target;
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	/* shift hook marks that point past this entry */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}
1642
/* Final per-entry checks run on the translated (native) copy of a
 * compat ruleset: re-run the match checks (hook mask taken from
 * e->comefrom) and validate the target.  On failure the matches that
 * already passed (first j) are cleaned up.  *i counts entries that
 * passed, for the caller's unwinding. */
static int
compat_check_entry(struct ipt_entry *e, const char *name,
				     unsigned int *i)
{
	struct xt_mtchk_param mtpar;
	unsigned int j;
	int ret;

	j = 0;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV4;
	ret = IPT_MATCH_ITERATE(e, check_match, &mtpar, &j);
	if (ret)
		goto cleanup_matches;

	ret = check_target(e, name);
	if (ret)
		goto cleanup_matches;

	(*i)++;
	return 0;

 cleanup_matches:
	IPT_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}
1671
/*
 * Translate a ruleset handed in by 32-bit userland into the native
 * layout.  Two passes over the compat entries while holding the
 * AF_INET compat lock: the first validates sizes/hook offsets (and
 * computes the expanded size into @size), the second copies each entry
 * into freshly allocated native storage.  On success *pinfo/*pentry0
 * are replaced with the translated table and the old info is freed.
 */
static int
translate_compat_table(const char *name,
		       unsigned int valid_hooks,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       unsigned int total_size,
		       unsigned int number,
		       unsigned int *hook_entries,
		       unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	unsigned int size;
	int ret;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;	/* number of compat entries accepted so far */
	xt_compat_lock(AF_INET);
	/* Walk through entries, checking offsets. */
	ret = COMPAT_IPT_ENTRY_ITERATE(entry0, total_size,
				       check_compat_entry_size_and_hooks,
				       info, &size, entry0,
				       entry0 + total_size,
				       hook_entries, underflows, &j, name);
	if (ret != 0)
		goto out_unlock;

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	/* "size" now holds the expanded (native) size computed above. */
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	/* Second pass: expand each compat entry into entry1; hook
	 * entry/underflow offsets are adjusted as entries grow. */
	ret = COMPAT_IPT_ENTRY_ITERATE(entry0, total_size,
				       compat_copy_entry_from_user,
				       &pos, &size, name, newinfo, entry1);
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;
	ret = IPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
				name, &i);
	if (ret) {
		/* i native entries passed compat_check_entry (and were
		 * cleaned by it on failure); release the remaining j-i
		 * compat refs, then tear down the i checked entries. */
		j -= i;
		COMPAT_IPT_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
						  compat_release_entry, &j);
		IPT_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
		xt_free_table_info(newinfo);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	/* Drop the module refs taken while parsing the compat entries. */
	COMPAT_IPT_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
	return ret;
out_unlock:
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);
	goto out;
}
1792
/*
 * IPT_SO_SET_REPLACE from a 32-bit process: copy in the compat replace
 * header and ruleset, translate to the native layout, then swap the
 * table in through the common __do_replace() path.
 */
static int
compat_do_replace(struct net *net, void __user *user, unsigned int len)
{
	int ret;
	struct compat_ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.size >= INT_MAX / num_possible_cpus())
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	/* On success this replaces newinfo/loc_cpu_entry with the
	 * translated table and frees the originals. */
	ret = translate_compat_table(tmp.name, tmp.valid_hooks,
				     &newinfo, &loc_cpu_entry, tmp.size,
				     tmp.num_entries, tmp.hook_entry,
				     tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("compat_do_replace: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
1843
1844 static int
1845 compat_do_ipt_set_ctl(struct sock *sk,  int cmd, void __user *user,
1846                       unsigned int len)
1847 {
1848         int ret;
1849
1850         if (!capable(CAP_NET_ADMIN))
1851                 return -EPERM;
1852
1853         switch (cmd) {
1854         case IPT_SO_SET_REPLACE:
1855                 ret = compat_do_replace(sock_net(sk), user, len);
1856                 break;
1857
1858         case IPT_SO_SET_ADD_COUNTERS:
1859                 ret = do_add_counters(sock_net(sk), user, len, 1);
1860                 break;
1861
1862         default:
1863                 duprintf("do_ipt_set_ctl:  unknown request %i\n", cmd);
1864                 ret = -EINVAL;
1865         }
1866
1867         return ret;
1868 }
1869
/* 32-bit userland layout of the GET_ENTRIES request/reply: a fixed
 * header followed by a variable-length array of compat entries. */
struct compat_ipt_get_entries {
	char name[IPT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ipt_entry entrytable[0];
};
1875
/*
 * Dump a table's rules to a 32-bit process: snapshot the counters,
 * then convert every entry to the compat layout while copying it out
 * to @userptr.
 */
static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	const void *loc_cpu_entry;
	unsigned int i = 0;	/* entry index, advanced by the iterator */

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
				compat_copy_entry_to_user,
				&pos, &size, counters, &i);

	vfree(counters);
	return ret;
}
1906
/*
 * IPT_SO_GET_ENTRIES for 32-bit callers: validates the supplied buffer
 * length against the compat-translated table size, then dumps the
 * rules.  Runs under the AF_INET compat lock and the table lock.
 */
static int
compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
		   int *len)
{
	int ret;
	struct compat_ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
		duprintf("compat_get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	xt_compat_lock(AF_INET);
	t = xt_find_table_lock(net, AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		const struct xt_table_info *private = t->private;
		struct xt_table_info info;
		duprintf("t->private->number = %u\n", private->number);
		/* Computes the 32-bit view's size (and presumably
		 * registers compat offsets — flushed below). */
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret) {
			/* Userland's size is stale — ask it to retry. */
			duprintf("compat_get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		xt_compat_flush_offsets(AF_INET);
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(AF_INET);
	return ret;
}
1953
/* Forward declaration: the compat get handler below forwards commands
 * that need no translation to the native handler. */
static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);
1955
1956 static int
1957 compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1958 {
1959         int ret;
1960
1961         if (!capable(CAP_NET_ADMIN))
1962                 return -EPERM;
1963
1964         switch (cmd) {
1965         case IPT_SO_GET_INFO:
1966                 ret = get_info(sock_net(sk), user, len, 1);
1967                 break;
1968         case IPT_SO_GET_ENTRIES:
1969                 ret = compat_get_entries(sock_net(sk), user, len);
1970                 break;
1971         default:
1972                 ret = do_ipt_get_ctl(sk, cmd, user, len);
1973         }
1974         return ret;
1975 }
1976 #endif
1977
1978 static int
1979 do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1980 {
1981         int ret;
1982
1983         if (!capable(CAP_NET_ADMIN))
1984                 return -EPERM;
1985
1986         switch (cmd) {
1987         case IPT_SO_SET_REPLACE:
1988                 ret = do_replace(sock_net(sk), user, len);
1989                 break;
1990
1991         case IPT_SO_SET_ADD_COUNTERS:
1992                 ret = do_add_counters(sock_net(sk), user, len, 0);
1993                 break;
1994
1995         default:
1996                 duprintf("do_ipt_set_ctl:  unknown request %i\n", cmd);
1997                 ret = -EINVAL;
1998         }
1999
2000         return ret;
2001 }
2002
/*
 * Native getsockopt() entry point: table info, rule dump, and
 * match/target revision queries.
 */
static int
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;

	case IPT_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case IPT_SO_GET_REVISION_MATCH:
	case IPT_SO_GET_REVISION_TARGET: {
		struct ipt_get_revision rev;
		int target;	/* 1 = target revision, 0 = match revision */

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}

		if (cmd == IPT_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		/* Probe for the revision; on failure the macro may try
		 * to auto-load module "ipt_<name>" and probe again. */
		try_then_request_module(xt_find_revision(AF_INET, rev.name,
							 rev.revision,
							 target, &ret),
					"ipt_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
2053
2054 struct xt_table *ipt_register_table(struct net *net, struct xt_table *table,
2055                                     const struct ipt_replace *repl)
2056 {
2057         int ret;
2058         struct xt_table_info *newinfo;
2059         struct xt_table_info bootstrap
2060                 = { 0, 0, 0, { 0 }, { 0 }, { } };
2061         void *loc_cpu_entry;
2062         struct xt_table *new_table;
2063
2064         newinfo = xt_alloc_table_info(repl->size);
2065         if (!newinfo) {
2066                 ret = -ENOMEM;
2067                 goto out;
2068         }
2069
2070         /* choose the copy on our node/cpu, but dont care about preemption */
2071         loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2072         memcpy(loc_cpu_entry, repl->entries, repl->size);
2073
2074         ret = translate_table(table->name, table->valid_hooks,
2075                               newinfo, loc_cpu_entry, repl->size,
2076                               repl->num_entries,
2077                               repl->hook_entry,
2078                               repl->underflow);
2079         if (ret != 0)
2080                 goto out_free;
2081
2082         new_table = xt_register_table(net, table, &bootstrap, newinfo);
2083         if (IS_ERR(new_table)) {
2084                 ret = PTR_ERR(new_table);
2085                 goto out_free;
2086         }
2087
2088         return new_table;
2089
2090 out_free:
2091         xt_free_table_info(newinfo);
2092 out:
2093         return ERR_PTR(ret);
2094 }
2095
2096 void ipt_unregister_table(struct xt_table *table)
2097 {
2098         struct xt_table_info *private;
2099         void *loc_cpu_entry;
2100         struct module *table_owner = table->me;
2101
2102         private = xt_unregister_table(table);
2103
2104         /* Decrease module usage counts and free resources */
2105         loc_cpu_entry = private->entries[raw_smp_processor_id()];
2106         IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
2107         if (private->number > private->initial_entries)
2108                 module_put(table_owner);
2109         xt_free_table_info(private);
2110 }
2111
/* Returns 1 if the type and code is matched by the range, 0 otherwise.
 * A test_type of 0xFF acts as a wildcard matching any type/code; the
 * final result is XORed with @invert. */
static inline bool
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     bool invert)
{
	bool hit;

	if (test_type == 0xFF)
		hit = true;
	else
		hit = type == test_type &&
		      code >= min_code && code <= max_code;

	return hit ^ invert;
}
2122
/*
 * Match callback for the built-in "icmp" match: compares the packet's
 * ICMP type/code against the type and code range configured in the
 * rule, honouring the invert flag.
 */
static bool
icmp_match(const struct sk_buff *skb, const struct xt_match_param *par)
{
	const struct icmphdr *ic;
	struct icmphdr _icmph;
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	/* Pull the ICMP header (copied into _icmph if non-linear). */
	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't.  Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		*par->hotdrop = true;
		return false;
	}

	return icmp_type_code_match(icmpinfo->type,
				    icmpinfo->code[0],
				    icmpinfo->code[1],
				    ic->type, ic->code,
				    !!(icmpinfo->invflags&IPT_ICMP_INV));
}
2150
2151 static bool icmp_checkentry(const struct xt_mtchk_param *par)
2152 {
2153         const struct ipt_icmp *icmpinfo = par->matchinfo;
2154
2155         /* Must specify no unknown invflags */
2156         return !(icmpinfo->invflags & ~IPT_ICMP_INV);
2157 }
2158
/* The built-in targets: standard (NULL) and error. */
static struct xt_target ipt_standard_target __read_mostly = {
	.name		= IPT_STANDARD_TARGET,
	.targetsize	= sizeof(int),	/* payload is a single int (the verdict) */
	.family		= NFPROTO_IPV4,
#ifdef CONFIG_COMPAT
	/* The int payload needs 32<->64-bit translation. */
	.compatsize	= sizeof(compat_int_t),
	.compat_from_user = compat_standard_from_user,
	.compat_to_user	= compat_standard_to_user,
#endif
};
2170
/* The ERROR target; its handler ipt_error() is defined earlier in this
 * file (not visible here).  targetsize carries an error name string. */
static struct xt_target ipt_error_target __read_mostly = {
	.name		= IPT_ERROR_TARGET,
	.target		= ipt_error,
	.targetsize	= IPT_FUNCTION_MAXNAMELEN,
	.family		= NFPROTO_IPV4,
};
2177
/* Socket-option glue: maps the IPT_SO_* set/get command ranges onto
 * the handlers above, with compat variants for 32-bit userland. */
static struct nf_sockopt_ops ipt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= IPT_BASE_CTL,
	.set_optmax	= IPT_SO_SET_MAX+1,
	.set		= do_ipt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ipt_set_ctl,
#endif
	.get_optmin	= IPT_BASE_CTL,
	.get_optmax	= IPT_SO_GET_MAX+1,
	.get		= do_ipt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ipt_get_ctl,
#endif
	.owner		= THIS_MODULE,
};
2194
/* Built-in match for the ICMP protocol (see icmp_match/icmp_checkentry
 * above). */
static struct xt_match icmp_matchstruct __read_mostly = {
	.name		= "icmp",
	.match		= icmp_match,
	.matchsize	= sizeof(struct ipt_icmp),
	.checkentry	= icmp_checkentry,
	.proto		= IPPROTO_ICMP,
	.family		= NFPROTO_IPV4,
};
2203
/* Per-network-namespace setup: initialize the IPv4 x_tables state. */
static int __net_init ip_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_IPV4);
}

/* Per-network-namespace teardown: undo ip_tables_net_init(). */
static void __net_exit ip_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_IPV4);
}

static struct pernet_operations ip_tables_net_ops = {
	.init = ip_tables_net_init,
	.exit = ip_tables_net_exit,
};
2218
2219 static int __init ip_tables_init(void)
2220 {
2221         int ret;
2222
2223         ret = register_pernet_subsys(&ip_tables_net_ops);
2224         if (ret < 0)
2225                 goto err1;
2226
2227         /* Noone else will be downing sem now, so we won't sleep */
2228         ret = xt_register_target(&ipt_standard_target);
2229         if (ret < 0)
2230                 goto err2;
2231         ret = xt_register_target(&ipt_error_target);
2232         if (ret < 0)
2233                 goto err3;
2234         ret = xt_register_match(&icmp_matchstruct);
2235         if (ret < 0)
2236                 goto err4;
2237
2238         /* Register setsockopt */
2239         ret = nf_register_sockopt(&ipt_sockopts);
2240         if (ret < 0)
2241                 goto err5;
2242
2243         printk(KERN_INFO "ip_tables: (C) 2000-2006 Netfilter Core Team\n");
2244         return 0;
2245
2246 err5:
2247         xt_unregister_match(&icmp_matchstruct);
2248 err4:
2249         xt_unregister_target(&ipt_error_target);
2250 err3:
2251         xt_unregister_target(&ipt_standard_target);
2252 err2:
2253         unregister_pernet_subsys(&ip_tables_net_ops);
2254 err1:
2255         return ret;
2256 }
2257
/* Module exit: tear everything down in reverse registration order
 * (sockopts first so no new userland requests arrive mid-teardown). */
static void __exit ip_tables_fini(void)
{
	nf_unregister_sockopt(&ipt_sockopts);

	xt_unregister_match(&icmp_matchstruct);
	xt_unregister_target(&ipt_error_target);
	xt_unregister_target(&ipt_standard_target);

	unregister_pernet_subsys(&ip_tables_net_ops);
}
2268
/* Exported for use by other modules — presumably the per-table
 * modules (iptable_filter etc.); callers are not visible here. */
EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_do_table);
module_init(ip_tables_init);
module_exit(ip_tables_fini);