2 * Packet matching code.
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 * 19 Jan 2002 Harald Welte <laforge@gnumonks.org>
12 * - increase module usage count as soon as we have rules inside
14 * 06 Jun 2002 Andras Kis-Szabo <kisza@sch.bme.hu>
15 * - new extension header parser code
18 #include <linux/capability.h>
19 #include <linux/config.h>
21 #include <linux/skbuff.h>
22 #include <linux/kmod.h>
23 #include <linux/vmalloc.h>
24 #include <linux/netdevice.h>
25 #include <linux/module.h>
26 #include <linux/tcp.h>
27 #include <linux/udp.h>
28 #include <linux/icmpv6.h>
30 #include <asm/uaccess.h>
31 #include <asm/semaphore.h>
32 #include <linux/proc_fs.h>
33 #include <linux/cpumask.h>
35 #include <linux/netfilter_ipv6/ip6_tables.h>
37 MODULE_LICENSE("GPL");
38 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
39 MODULE_DESCRIPTION("IPv6 packet filter");
41 #define IPV6_HDR_LEN (sizeof(struct ipv6hdr))
42 #define IPV6_OPTHDR_LEN (sizeof(struct ipv6_opt_hdr))
44 /*#define DEBUG_IP_FIREWALL*/
45 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
46 /*#define DEBUG_IP_FIREWALL_USER*/
#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) printk(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) printk(format , ## args)
#else
#define duprintf(format, args...)
#endif
#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)						\
do {								\
	if (!(x))						\
		printk("IP_NF_ASSERT: %s:%s:%u\n",		\
		       __FUNCTION__, __FILE__, __LINE__);	\
} while(0)
#else
#define IP_NF_ASSERT(x)
#endif
70 #define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
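/* Illustrative only: with SMP_CACHE_BYTES == 64, SMP_ALIGN(100) yields 128,
 * i.e. the value is rounded up to the next multiple of the cache line size,
 * presumably so per-CPU data does not end up sharing cache lines. */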
72 static DECLARE_MUTEX(ip6t_mutex);
75 #define ASSERT_READ_LOCK(x) IP_NF_ASSERT(down_trylock(&ip6t_mutex) != 0)
76 #define ASSERT_WRITE_LOCK(x) IP_NF_ASSERT(down_trylock(&ip6t_mutex) != 0)
77 #include <linux/netfilter_ipv4/listhelp.h>
80 /* All the better to debug you with... */
/*
   We keep a set of rules for each CPU, so we can avoid write-locking
87 them in the softirq when updating the counters and therefore
88 only need to read-lock in the softirq; doing a write_lock_bh() in user
89 context stops packets coming through and allows user context to read
90 the counters or update the rules.
92 Hence the start of any table is given by get_table() below. */
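/*
 * A rough sketch of that locking pattern (illustrative only, not part of
 * the build): the per-packet path takes the read lock in softirq context,
 * while user context takes the write lock to swap rules or to snapshot
 * counters.
 *
 *	read_lock_bh(&table->lock);	   packet path (ip6t_do_table)
 *	... walk this CPU's copy of the rules, bump its counters ...
 *	read_unlock_bh(&table->lock);
 *
 *	write_lock_bh(&table->lock);	   user context (replace / counters)
 *	... swap table->private or sum the per-CPU counter copies ...
 *	write_unlock_bh(&table->lock);
 */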
/* The table itself */
struct ip6t_table_info
{
	/* Size per table */
	unsigned int size;
	/* Number of entries: FIXME. --RR */
	unsigned int number;
	/* Initial number of entries. Needed for module usage count */
	unsigned int initial_entries;

	/* Entry points and underflows */
	unsigned int hook_entry[NF_IP6_NUMHOOKS];
	unsigned int underflow[NF_IP6_NUMHOOKS];

	/* ip6t_entry tables: one per CPU */
	void *entries[NR_CPUS];
};
112 static LIST_HEAD(ip6t_target);
113 static LIST_HEAD(ip6t_match);
114 static LIST_HEAD(ip6t_tables);
115 #define SET_COUNTER(c,b,p) do { (c).bcnt = (b); (c).pcnt = (p); } while(0)
116 #define ADD_COUNTER(c,b,p) do { (c).bcnt += (b); (c).pcnt += (p); } while(0)
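/* Illustrative: ADD_COUNTER(e->counters, skb->len, 1) would account one
 * packet of skb->len bytes against a rule, while SET_COUNTER() overwrites
 * both fields (used when taking a counter snapshot). */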
#if 0
#define down(x) do { printk("DOWN:%u:" #x "\n", __LINE__); down(x); } while(0)
#define down_interruptible(x) ({ int __r; printk("DOWNi:%u:" #x "\n", __LINE__); __r = down_interruptible(x); if (__r != 0) printk("ABORT-DOWNi:%u\n", __LINE__); __r; })
#define up(x) do { printk("UP:%u:" #x "\n", __LINE__); up(x); } while(0)
#endif
int
ip6_masked_addrcmp(const struct in6_addr *addr1, const struct in6_addr *mask,
		   const struct in6_addr *addr2)
{
	int i;

	for (i = 0; i < 16; i++) {
		if ((addr1->s6_addr[i] & mask->s6_addr[i]) !=
		    (addr2->s6_addr[i] & mask->s6_addr[i]))
			return 1;
	}
	return 0;
}
/* Check for an extension */
int
ip6t_ext_hdr(u8 nexthdr)
{
	return ( (nexthdr == IPPROTO_HOPOPTS)   ||
		 (nexthdr == IPPROTO_ROUTING)   ||
		 (nexthdr == IPPROTO_FRAGMENT)  ||
		 (nexthdr == IPPROTO_ESP)       ||
		 (nexthdr == IPPROTO_AH)        ||
		 (nexthdr == IPPROTO_NONE)      ||
		 (nexthdr == IPPROTO_DSTOPTS) );
}
/* Returns whether the packet matches the rule or not. */
152 ip6_packet_match(const struct sk_buff *skb,
155 const struct ip6t_ip6 *ip6info,
156 unsigned int *protoff,
161 const struct ipv6hdr *ipv6 = skb->nh.ipv6h;
163 #define FWINV(bool,invflg) ((bool) ^ !!(ip6info->invflags & invflg))
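/* FWINV() XORs a boolean with the corresponding invert flag.  For example
 * (hypothetical rule): with IP6T_INV_SRCIP set, a source address that does
 * NOT match the src/smsk pair still satisfies this check, which is how
 * "! -s addr" style rules are expressed. */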
165 if (FWINV(ip6_masked_addrcmp(&ipv6->saddr, &ip6info->smsk,
166 &ip6info->src), IP6T_INV_SRCIP)
167 || FWINV(ip6_masked_addrcmp(&ipv6->daddr, &ip6info->dmsk,
168 &ip6info->dst), IP6T_INV_DSTIP)) {
169 dprintf("Source or dest mismatch.\n");
171 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
172 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
173 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
174 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
175 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
176 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
180 /* Look for ifname matches; this should unroll nicely. */
181 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
182 ret |= (((const unsigned long *)indev)[i]
183 ^ ((const unsigned long *)ip6info->iniface)[i])
184 & ((const unsigned long *)ip6info->iniface_mask)[i];
187 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
188 dprintf("VIA in mismatch (%s vs %s).%s\n",
189 indev, ip6info->iniface,
190 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
194 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
195 ret |= (((const unsigned long *)outdev)[i]
196 ^ ((const unsigned long *)ip6info->outiface)[i])
197 & ((const unsigned long *)ip6info->outiface_mask)[i];
200 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
201 dprintf("VIA out mismatch (%s vs %s).%s\n",
202 outdev, ip6info->outiface,
203 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
207 /* ... might want to do something with class and flowlabel here ... */
209 /* look for the desired protocol header */
210 if((ip6info->flags & IP6T_F_PROTO)) {
212 unsigned short _frag_off;
214 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
218 *fragoff = _frag_off;
220 dprintf("Packet protocol %hi ?= %s%hi.\n",
222 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
225 if (ip6info->proto == protohdr) {
226 if(ip6info->invflags & IP6T_INV_PROTO) {
	/* We need a match for '-p all', too! */
233 if ((ip6info->proto != 0) &&
234 !(ip6info->invflags & IP6T_INV_PROTO))
240 /* should be ip6 safe */
242 ip6_checkentry(const struct ip6t_ip6 *ipv6)
244 if (ipv6->flags & ~IP6T_F_MASK) {
245 duprintf("Unknown flag bits set: %08X\n",
246 ipv6->flags & ~IP6T_F_MASK);
249 if (ipv6->invflags & ~IP6T_INV_MASK) {
250 duprintf("Unknown invflag bits set: %08X\n",
251 ipv6->invflags & ~IP6T_INV_MASK);
258 ip6t_error(struct sk_buff **pskb,
259 const struct net_device *in,
260 const struct net_device *out,
261 unsigned int hooknum,
262 const void *targinfo,
266 printk("ip6_tables: error: `%s'\n", (char *)targinfo);
272 int do_match(struct ip6t_entry_match *m,
273 const struct sk_buff *skb,
274 const struct net_device *in,
275 const struct net_device *out,
277 unsigned int protoff,
280 /* Stop iteration if it doesn't match */
281 if (!m->u.kernel.match->match(skb, in, out, m->data,
282 offset, protoff, hotdrop))
288 static inline struct ip6t_entry *
289 get_entry(void *base, unsigned int offset)
291 return (struct ip6t_entry *)(base + offset);
294 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
296 ip6t_do_table(struct sk_buff **pskb,
298 const struct net_device *in,
299 const struct net_device *out,
300 struct ip6t_table *table,
303 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
305 unsigned int protoff = 0;
307 /* Initializing verdict to NF_DROP keeps gcc happy. */
308 unsigned int verdict = NF_DROP;
309 const char *indev, *outdev;
311 struct ip6t_entry *e, *back;
314 indev = in ? in->name : nulldevname;
315 outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know (ie. the TCP SYN flag or ports).  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it either. */
323 read_lock_bh(&table->lock);
324 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
325 table_base = (void *)table->private->entries[smp_processor_id()];
326 e = get_entry(table_base, table->private->hook_entry[hook]);
328 #ifdef CONFIG_NETFILTER_DEBUG
	/* Check that no one else is using our table */
330 if (((struct ip6t_entry *)table_base)->comefrom != 0xdead57ac
331 && ((struct ip6t_entry *)table_base)->comefrom != 0xeeeeeeec) {
332 printk("ASSERT: CPU #%u, %s comefrom(%p) = %X\n",
335 &((struct ip6t_entry *)table_base)->comefrom,
336 ((struct ip6t_entry *)table_base)->comefrom);
338 ((struct ip6t_entry *)table_base)->comefrom = 0x57acc001;
341 /* For return from builtin chain */
342 back = get_entry(table_base, table->private->underflow[hook]);
347 if (ip6_packet_match(*pskb, indev, outdev, &e->ipv6,
348 &protoff, &offset)) {
349 struct ip6t_entry_target *t;
351 if (IP6T_MATCH_ITERATE(e, do_match,
353 offset, protoff, &hotdrop) != 0)
356 ADD_COUNTER(e->counters,
357 ntohs((*pskb)->nh.ipv6h->payload_len)
361 t = ip6t_get_target(e);
362 IP_NF_ASSERT(t->u.kernel.target);
363 /* Standard target? */
364 if (!t->u.kernel.target->target) {
367 v = ((struct ip6t_standard_target *)t)->verdict;
369 /* Pop from stack? */
370 if (v != IP6T_RETURN) {
371 verdict = (unsigned)(-v) - 1;
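				/* Standard verdicts are stored as
				 * -(NF_verdict) - 1, so negating and
				 * subtracting one here recovers e.g.
				 * NF_ACCEPT or NF_DROP; positive values
				 * would have been jump offsets instead. */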
375 back = get_entry(table_base,
379 if (table_base + v != (void *)e + e->next_offset
380 && !(e->ipv6.flags & IP6T_F_GOTO)) {
381 /* Save old back ptr in next entry */
382 struct ip6t_entry *next
383 = (void *)e + e->next_offset;
385 = (void *)back - table_base;
386 /* set back pointer to next entry */
390 e = get_entry(table_base, v);
392 /* Targets which reenter must return
394 #ifdef CONFIG_NETFILTER_DEBUG
395 ((struct ip6t_entry *)table_base)->comefrom
398 verdict = t->u.kernel.target->target(pskb,
404 #ifdef CONFIG_NETFILTER_DEBUG
405 if (((struct ip6t_entry *)table_base)->comefrom
407 && verdict == IP6T_CONTINUE) {
408 printk("Target %s reentered!\n",
409 t->u.kernel.target->name);
412 ((struct ip6t_entry *)table_base)->comefrom
415 if (verdict == IP6T_CONTINUE)
416 e = (void *)e + e->next_offset;
424 e = (void *)e + e->next_offset;
428 #ifdef CONFIG_NETFILTER_DEBUG
429 ((struct ip6t_entry *)table_base)->comefrom = 0xdead57ac;
431 read_unlock_bh(&table->lock);
433 #ifdef DEBUG_ALLOW_ALL
/*
 * These are weird, but module loading must not be done with the mutex
 * held (since they will register), and we have to have a single
 * function to use try_then_request_module().
 */
448 /* Find table by name, grabs mutex & ref. Returns ERR_PTR() on error. */
449 static inline struct ip6t_table *find_table_lock(const char *name)
451 struct ip6t_table *t;
453 if (down_interruptible(&ip6t_mutex) != 0)
454 return ERR_PTR(-EINTR);
456 list_for_each_entry(t, &ip6t_tables, list)
457 if (strcmp(t->name, name) == 0 && try_module_get(t->me))
463 /* Find match, grabs ref. Returns ERR_PTR() on error. */
464 static inline struct ip6t_match *find_match(const char *name, u8 revision)
466 struct ip6t_match *m;
469 if (down_interruptible(&ip6t_mutex) != 0)
470 return ERR_PTR(-EINTR);
472 list_for_each_entry(m, &ip6t_match, list) {
473 if (strcmp(m->name, name) == 0) {
474 if (m->revision == revision) {
475 if (try_module_get(m->me)) {
480 err = -EPROTOTYPE; /* Found something. */
487 /* Find target, grabs ref. Returns ERR_PTR() on error. */
488 static inline struct ip6t_target *find_target(const char *name, u8 revision)
490 struct ip6t_target *t;
493 if (down_interruptible(&ip6t_mutex) != 0)
494 return ERR_PTR(-EINTR);
496 list_for_each_entry(t, &ip6t_target, list) {
497 if (strcmp(t->name, name) == 0) {
498 if (t->revision == revision) {
499 if (try_module_get(t->me)) {
504 err = -EPROTOTYPE; /* Found something. */
511 struct ip6t_target *ip6t_find_target(const char *name, u8 revision)
513 struct ip6t_target *target;
515 target = try_then_request_module(find_target(name, revision),
517 if (IS_ERR(target) || !target)
522 static int match_revfn(const char *name, u8 revision, int *bestp)
524 struct ip6t_match *m;
527 list_for_each_entry(m, &ip6t_match, list) {
528 if (strcmp(m->name, name) == 0) {
529 if (m->revision > *bestp)
530 *bestp = m->revision;
531 if (m->revision == revision)
538 static int target_revfn(const char *name, u8 revision, int *bestp)
540 struct ip6t_target *t;
543 list_for_each_entry(t, &ip6t_target, list) {
544 if (strcmp(t->name, name) == 0) {
545 if (t->revision > *bestp)
546 *bestp = t->revision;
547 if (t->revision == revision)
/* Returns true or false (false if no such extension exists at all). */
555 static inline int find_revision(const char *name, u8 revision,
556 int (*revfn)(const char *, u8, int *),
559 int have_rev, best = -1;
561 if (down_interruptible(&ip6t_mutex) != 0) {
565 have_rev = revfn(name, revision, &best);
568 /* Nothing at all? Return 0 to try loading module. */
576 *err = -EPROTONOSUPPORT;
581 /* All zeroes == unconditional rule. */
583 unconditional(const struct ip6t_ip6 *ipv6)
587 for (i = 0; i < sizeof(*ipv6); i++)
588 if (((char *)ipv6)[i])
591 return (i == sizeof(*ipv6));
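/* An ip6t_ip6 that is all zeroes (no addresses, masks, interfaces, proto
 * or flags) matches every packet; mark_source_chains() below relies on
 * this to recognise the unconditional policy/RETURN rule that ends each
 * built-in chain. */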
594 /* Figures out from what hook each rule can be called: returns 0 if
595 there are loops. Puts hook bitmask in comefrom. */
597 mark_source_chains(struct ip6t_table_info *newinfo,
598 unsigned int valid_hooks, void *entry0)
602 /* No recursion; use packet counter to save back ptrs (reset
603 to 0 as we leave), and comefrom to save source hook bitmask */
604 for (hook = 0; hook < NF_IP6_NUMHOOKS; hook++) {
605 unsigned int pos = newinfo->hook_entry[hook];
607 = (struct ip6t_entry *)(entry0 + pos);
609 if (!(valid_hooks & (1 << hook)))
612 /* Set initial back pointer. */
613 e->counters.pcnt = pos;
616 struct ip6t_standard_target *t
617 = (void *)ip6t_get_target(e);
619 if (e->comefrom & (1 << NF_IP6_NUMHOOKS)) {
620 printk("iptables: loop hook %u pos %u %08X.\n",
621 hook, pos, e->comefrom);
625 |= ((1 << hook) | (1 << NF_IP6_NUMHOOKS));
627 /* Unconditional return/END. */
628 if (e->target_offset == sizeof(struct ip6t_entry)
629 && (strcmp(t->target.u.user.name,
630 IP6T_STANDARD_TARGET) == 0)
632 && unconditional(&e->ipv6)) {
633 unsigned int oldpos, size;
635 /* Return: backtrack through the last
638 e->comefrom ^= (1<<NF_IP6_NUMHOOKS);
639 #ifdef DEBUG_IP_FIREWALL_USER
641 & (1 << NF_IP6_NUMHOOKS)) {
642 duprintf("Back unset "
649 pos = e->counters.pcnt;
650 e->counters.pcnt = 0;
652 /* We're at the start. */
656 e = (struct ip6t_entry *)
658 } while (oldpos == pos + e->next_offset);
661 size = e->next_offset;
662 e = (struct ip6t_entry *)
663 (entry0 + pos + size);
664 e->counters.pcnt = pos;
667 int newpos = t->verdict;
669 if (strcmp(t->target.u.user.name,
670 IP6T_STANDARD_TARGET) == 0
/* This is a jump; chase it. */
673 duprintf("Jump rule %u -> %u\n",
676 /* ... this is a fallthru */
677 newpos = pos + e->next_offset;
679 e = (struct ip6t_entry *)
681 e->counters.pcnt = pos;
686 duprintf("Finished chain %u\n", hook);
692 cleanup_match(struct ip6t_entry_match *m, unsigned int *i)
694 if (i && (*i)-- == 0)
697 if (m->u.kernel.match->destroy)
698 m->u.kernel.match->destroy(m->data,
699 m->u.match_size - sizeof(*m));
700 module_put(m->u.kernel.match->me);
705 standard_check(const struct ip6t_entry_target *t,
706 unsigned int max_offset)
708 struct ip6t_standard_target *targ = (void *)t;
710 /* Check standard info. */
712 != IP6T_ALIGN(sizeof(struct ip6t_standard_target))) {
713 duprintf("standard_check: target size %u != %u\n",
715 IP6T_ALIGN(sizeof(struct ip6t_standard_target)));
719 if (targ->verdict >= 0
720 && targ->verdict > max_offset - sizeof(struct ip6t_entry)) {
721 duprintf("ip6t_standard_check: bad verdict (%i)\n",
726 if (targ->verdict < -NF_MAX_VERDICT - 1) {
727 duprintf("ip6t_standard_check: bad negative verdict (%i)\n",
735 check_match(struct ip6t_entry_match *m,
737 const struct ip6t_ip6 *ipv6,
738 unsigned int hookmask,
741 struct ip6t_match *match;
743 match = try_then_request_module(find_match(m->u.user.name,
745 "ip6t_%s", m->u.user.name);
746 if (IS_ERR(match) || !match) {
747 duprintf("check_match: `%s' not found\n", m->u.user.name);
748 return match ? PTR_ERR(match) : -ENOENT;
750 m->u.kernel.match = match;
752 if (m->u.kernel.match->checkentry
753 && !m->u.kernel.match->checkentry(name, ipv6, m->data,
754 m->u.match_size - sizeof(*m),
756 module_put(m->u.kernel.match->me);
757 duprintf("ip_tables: check failed for `%s'.\n",
758 m->u.kernel.match->name);
766 static struct ip6t_target ip6t_standard_target;
769 check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
772 struct ip6t_entry_target *t;
773 struct ip6t_target *target;
777 if (!ip6_checkentry(&e->ipv6)) {
778 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
783 ret = IP6T_MATCH_ITERATE(e, check_match, name, &e->ipv6, e->comefrom, &j);
785 goto cleanup_matches;
787 t = ip6t_get_target(e);
788 target = try_then_request_module(find_target(t->u.user.name,
790 "ip6t_%s", t->u.user.name);
791 if (IS_ERR(target) || !target) {
792 duprintf("check_entry: `%s' not found\n", t->u.user.name);
793 ret = target ? PTR_ERR(target) : -ENOENT;
794 goto cleanup_matches;
796 t->u.kernel.target = target;
798 if (t->u.kernel.target == &ip6t_standard_target) {
799 if (!standard_check(t, size)) {
801 goto cleanup_matches;
803 } else if (t->u.kernel.target->checkentry
804 && !t->u.kernel.target->checkentry(name, e, t->data,
808 module_put(t->u.kernel.target->me);
809 duprintf("ip_tables: check failed for `%s'.\n",
810 t->u.kernel.target->name);
812 goto cleanup_matches;
819 IP6T_MATCH_ITERATE(e, cleanup_match, &j);
824 check_entry_size_and_hooks(struct ip6t_entry *e,
825 struct ip6t_table_info *newinfo,
827 unsigned char *limit,
828 const unsigned int *hook_entries,
829 const unsigned int *underflows,
834 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0
835 || (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
836 duprintf("Bad offset %p\n", e);
841 < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
842 duprintf("checking: element %p size %u\n",
847 /* Check hooks & underflows */
848 for (h = 0; h < NF_IP6_NUMHOOKS; h++) {
849 if ((unsigned char *)e - base == hook_entries[h])
850 newinfo->hook_entry[h] = hook_entries[h];
851 if ((unsigned char *)e - base == underflows[h])
852 newinfo->underflow[h] = underflows[h];
855 /* FIXME: underflows must be unconditional, standard verdicts
856 < 0 (not IP6T_RETURN). --RR */
858 /* Clear counters and comefrom */
859 e->counters = ((struct ip6t_counters) { 0, 0 });
867 cleanup_entry(struct ip6t_entry *e, unsigned int *i)
869 struct ip6t_entry_target *t;
871 if (i && (*i)-- == 0)
874 /* Cleanup all matches */
875 IP6T_MATCH_ITERATE(e, cleanup_match, NULL);
876 t = ip6t_get_target(e);
877 if (t->u.kernel.target->destroy)
878 t->u.kernel.target->destroy(t->data,
879 t->u.target_size - sizeof(*t));
880 module_put(t->u.kernel.target->me);
/* Checks and translates the user-supplied table segment (held in userspace). */
887 translate_table(const char *name,
888 unsigned int valid_hooks,
889 struct ip6t_table_info *newinfo,
893 const unsigned int *hook_entries,
894 const unsigned int *underflows)
899 newinfo->size = size;
900 newinfo->number = number;
902 /* Init all hooks to impossible value. */
903 for (i = 0; i < NF_IP6_NUMHOOKS; i++) {
904 newinfo->hook_entry[i] = 0xFFFFFFFF;
905 newinfo->underflow[i] = 0xFFFFFFFF;
908 duprintf("translate_table: size %u\n", newinfo->size);
910 /* Walk through entries, checking offsets. */
911 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
912 check_entry_size_and_hooks,
916 hook_entries, underflows, &i);
921 duprintf("translate_table: %u not %u entries\n",
926 /* Check hooks all assigned */
927 for (i = 0; i < NF_IP6_NUMHOOKS; i++) {
928 /* Only hooks which are valid */
929 if (!(valid_hooks & (1 << i)))
931 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
932 duprintf("Invalid hook entry %u %u\n",
936 if (newinfo->underflow[i] == 0xFFFFFFFF) {
937 duprintf("Invalid underflow %u %u\n",
943 if (!mark_source_chains(newinfo, valid_hooks, entry0))
946 /* Finally, each sanity check must pass */
948 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
949 check_entry, name, size, &i);
952 IP6T_ENTRY_ITERATE(entry0, newinfo->size,
957 /* And one copy for every other CPU */
959 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
960 memcpy(newinfo->entries[i], entry0, newinfo->size);
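		/* Every CPU starts out with an identical copy of the
		 * validated ruleset; only the counters embedded in each
		 * copy are expected to diverge afterwards. */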
966 static struct ip6t_table_info *
967 replace_table(struct ip6t_table *table,
968 unsigned int num_counters,
969 struct ip6t_table_info *newinfo,
972 struct ip6t_table_info *oldinfo;
974 #ifdef CONFIG_NETFILTER_DEBUG
979 struct ip6t_entry *table_base = newinfo->entries[cpu];
981 table_base->comefrom = 0xdead57ac;
986 /* Do the substitution. */
987 write_lock_bh(&table->lock);
988 /* Check inside lock: is the old number correct? */
989 if (num_counters != table->private->number) {
990 duprintf("num_counters != table->private->number (%u/%u)\n",
991 num_counters, table->private->number);
992 write_unlock_bh(&table->lock);
996 oldinfo = table->private;
997 table->private = newinfo;
998 newinfo->initial_entries = oldinfo->initial_entries;
999 write_unlock_bh(&table->lock);
1004 /* Gets counters. */
1006 add_entry_to_counter(const struct ip6t_entry *e,
1007 struct ip6t_counters total[],
1010 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
1017 set_entry_to_counter(const struct ip6t_entry *e,
1018 struct ip6t_counters total[],
1021 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
1028 get_counters(const struct ip6t_table_info *t,
1029 struct ip6t_counters counters[])
1033 unsigned int curcpu;
1035 /* Instead of clearing (by a previous call to memset())
1036 * the counters and using adds, we set the counters
1037 * with data used by 'current' CPU
	 * We don't care about preemption here.
	 */
1040 curcpu = raw_smp_processor_id();
1043 IP6T_ENTRY_ITERATE(t->entries[curcpu],
1045 set_entry_to_counter,
1053 IP6T_ENTRY_ITERATE(t->entries[cpu],
1055 add_entry_to_counter,
1062 copy_entries_to_user(unsigned int total_size,
1063 struct ip6t_table *table,
1064 void __user *userptr)
1066 unsigned int off, num, countersize;
1067 struct ip6t_entry *e;
1068 struct ip6t_counters *counters;
1070 void *loc_cpu_entry;
	/* We need an atomic snapshot of the counters: the rest doesn't change
	   (other than comefrom, which userspace doesn't care about). */
1075 countersize = sizeof(struct ip6t_counters) * table->private->number;
1076 counters = vmalloc(countersize);
1078 if (counters == NULL)
1081 /* First, sum counters... */
1082 write_lock_bh(&table->lock);
1083 get_counters(table->private, counters);
1084 write_unlock_bh(&table->lock);
	/* choose the copy that is on our node/cpu */
1087 loc_cpu_entry = table->private->entries[raw_smp_processor_id()];
1088 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
1093 /* FIXME: use iterator macros --RR */
1094 /* ... then go back and fix counters and names */
1095 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
1097 struct ip6t_entry_match *m;
1098 struct ip6t_entry_target *t;
1100 e = (struct ip6t_entry *)(loc_cpu_entry + off);
1101 if (copy_to_user(userptr + off
1102 + offsetof(struct ip6t_entry, counters),
1104 sizeof(counters[num])) != 0) {
1109 for (i = sizeof(struct ip6t_entry);
1110 i < e->target_offset;
1111 i += m->u.match_size) {
1114 if (copy_to_user(userptr + off + i
1115 + offsetof(struct ip6t_entry_match,
1117 m->u.kernel.match->name,
1118 strlen(m->u.kernel.match->name)+1)
1125 t = ip6t_get_target(e);
1126 if (copy_to_user(userptr + off + e->target_offset
1127 + offsetof(struct ip6t_entry_target,
1129 t->u.kernel.target->name,
1130 strlen(t->u.kernel.target->name)+1) != 0) {
1142 get_entries(const struct ip6t_get_entries *entries,
1143 struct ip6t_get_entries __user *uptr)
1146 struct ip6t_table *t;
1148 t = find_table_lock(entries->name);
1149 if (t && !IS_ERR(t)) {
1150 duprintf("t->private->number = %u\n",
1151 t->private->number);
1152 if (entries->size == t->private->size)
1153 ret = copy_entries_to_user(t->private->size,
1154 t, uptr->entrytable);
1156 duprintf("get_entries: I've got %u not %u!\n",
1164 ret = t ? PTR_ERR(t) : -ENOENT;
1169 static void free_table_info(struct ip6t_table_info *info)
1173 if (info->size <= PAGE_SIZE)
1174 kfree(info->entries[cpu]);
1176 vfree(info->entries[cpu]);
1181 static struct ip6t_table_info *alloc_table_info(unsigned int size)
1183 struct ip6t_table_info *newinfo;
1186 newinfo = kzalloc(sizeof(struct ip6t_table_info), GFP_KERNEL);
1190 newinfo->size = size;
1193 if (size <= PAGE_SIZE)
1194 newinfo->entries[cpu] = kmalloc_node(size,
1198 newinfo->entries[cpu] = vmalloc_node(size,
1200 if (newinfo->entries[cpu] == NULL) {
1201 free_table_info(newinfo);
1210 do_replace(void __user *user, unsigned int len)
1213 struct ip6t_replace tmp;
1214 struct ip6t_table *t;
1215 struct ip6t_table_info *newinfo, *oldinfo;
1216 struct ip6t_counters *counters;
1217 void *loc_cpu_entry, *loc_cpu_old_entry;
1219 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1222 /* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
1223 if ((SMP_ALIGN(tmp.size) >> PAGE_SHIFT) + 2 > num_physpages)
1226 newinfo = alloc_table_info(tmp.size);
1230 /* choose the copy that is on our node/cpu */
1231 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1232 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1238 counters = vmalloc(tmp.num_counters * sizeof(struct ip6t_counters));
1244 ret = translate_table(tmp.name, tmp.valid_hooks,
1245 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1246 tmp.hook_entry, tmp.underflow);
1248 goto free_newinfo_counters;
1250 duprintf("ip_tables: Translated table\n");
1252 t = try_then_request_module(find_table_lock(tmp.name),
1253 "ip6table_%s", tmp.name);
1254 if (!t || IS_ERR(t)) {
1255 ret = t ? PTR_ERR(t) : -ENOENT;
1256 goto free_newinfo_counters_untrans;
1260 if (tmp.valid_hooks != t->valid_hooks) {
1261 duprintf("Valid hook crap: %08X vs %08X\n",
1262 tmp.valid_hooks, t->valid_hooks);
1267 oldinfo = replace_table(t, tmp.num_counters, newinfo, &ret);
1271 /* Update module usage count based on number of rules */
1272 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1273 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1274 if ((oldinfo->number > oldinfo->initial_entries) ||
1275 (newinfo->number <= oldinfo->initial_entries))
1277 if ((oldinfo->number > oldinfo->initial_entries) &&
1278 (newinfo->number <= oldinfo->initial_entries))
1281 /* Get the old counters. */
1282 get_counters(oldinfo, counters);
1283 /* Decrease module usage counts and free resource */
1284 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1285 IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,NULL);
1286 free_table_info(oldinfo);
1287 if (copy_to_user(tmp.counters, counters,
1288 sizeof(struct ip6t_counters) * tmp.num_counters) != 0)
1297 free_newinfo_counters_untrans:
1298 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
1299 free_newinfo_counters:
1302 free_table_info(newinfo);
1306 /* We're lazy, and add to the first CPU; overflow works its fey magic
1307 * and everything is OK. */
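/* (In other words: the counters supplied by userspace are folded into a
 * single CPU's copy of the rules; get_counters() later sums every CPU's
 * copy, so the totals still come out right.) */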
1309 add_counter_to_entry(struct ip6t_entry *e,
1310 const struct ip6t_counters addme[],
1314 duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
1316 (long unsigned int)e->counters.pcnt,
1317 (long unsigned int)e->counters.bcnt,
1318 (long unsigned int)addme[*i].pcnt,
1319 (long unsigned int)addme[*i].bcnt);
1322 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1329 do_add_counters(void __user *user, unsigned int len)
1332 struct ip6t_counters_info tmp, *paddc;
1333 struct ip6t_table *t;
1335 void *loc_cpu_entry;
1337 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1340 if (len != sizeof(tmp) + tmp.num_counters*sizeof(struct ip6t_counters))
1343 paddc = vmalloc(len);
1347 if (copy_from_user(paddc, user, len) != 0) {
1352 t = find_table_lock(tmp.name);
1353 if (!t || IS_ERR(t)) {
1354 ret = t ? PTR_ERR(t) : -ENOENT;
1358 write_lock_bh(&t->lock);
1359 if (t->private->number != paddc->num_counters) {
1361 goto unlock_up_free;
1365 /* Choose the copy that is on our node */
1366 loc_cpu_entry = t->private->entries[smp_processor_id()];
1367 IP6T_ENTRY_ITERATE(loc_cpu_entry,
1369 add_counter_to_entry,
1373 write_unlock_bh(&t->lock);
1383 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1387 if (!capable(CAP_NET_ADMIN))
1391 case IP6T_SO_SET_REPLACE:
1392 ret = do_replace(user, len);
1395 case IP6T_SO_SET_ADD_COUNTERS:
1396 ret = do_add_counters(user, len);
1400 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
1408 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1412 if (!capable(CAP_NET_ADMIN))
1416 case IP6T_SO_GET_INFO: {
1417 char name[IP6T_TABLE_MAXNAMELEN];
1418 struct ip6t_table *t;
1420 if (*len != sizeof(struct ip6t_getinfo)) {
1421 duprintf("length %u != %u\n", *len,
1422 sizeof(struct ip6t_getinfo));
1427 if (copy_from_user(name, user, sizeof(name)) != 0) {
1431 name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
1433 t = try_then_request_module(find_table_lock(name),
1434 "ip6table_%s", name);
1435 if (t && !IS_ERR(t)) {
1436 struct ip6t_getinfo info;
1438 info.valid_hooks = t->valid_hooks;
1439 memcpy(info.hook_entry, t->private->hook_entry,
1440 sizeof(info.hook_entry));
1441 memcpy(info.underflow, t->private->underflow,
1442 sizeof(info.underflow));
1443 info.num_entries = t->private->number;
1444 info.size = t->private->size;
1445 memcpy(info.name, name, sizeof(info.name));
1447 if (copy_to_user(user, &info, *len) != 0)
1454 ret = t ? PTR_ERR(t) : -ENOENT;
1458 case IP6T_SO_GET_ENTRIES: {
1459 struct ip6t_get_entries get;
1461 if (*len < sizeof(get)) {
1462 duprintf("get_entries: %u < %u\n", *len, sizeof(get));
1464 } else if (copy_from_user(&get, user, sizeof(get)) != 0) {
1466 } else if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1467 duprintf("get_entries: %u != %u\n", *len,
1468 sizeof(struct ip6t_get_entries) + get.size);
1471 ret = get_entries(&get, user);
1475 case IP6T_SO_GET_REVISION_MATCH:
1476 case IP6T_SO_GET_REVISION_TARGET: {
1477 struct ip6t_get_revision rev;
1478 int (*revfn)(const char *, u8, int *);
1480 if (*len != sizeof(rev)) {
1484 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
1489 if (cmd == IP6T_SO_GET_REVISION_TARGET)
1490 revfn = target_revfn;
1492 revfn = match_revfn;
1494 try_then_request_module(find_revision(rev.name, rev.revision,
1496 "ip6t_%s", rev.name);
1501 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
1508 /* Registration hooks for targets. */
1510 ip6t_register_target(struct ip6t_target *target)
1514 ret = down_interruptible(&ip6t_mutex);
1517 list_add(&target->list, &ip6t_target);
1523 ip6t_unregister_target(struct ip6t_target *target)
1526 LIST_DELETE(&ip6t_target, target);
1531 ip6t_register_match(struct ip6t_match *match)
1535 ret = down_interruptible(&ip6t_mutex);
1539 list_add(&match->list, &ip6t_match);
1546 ip6t_unregister_match(struct ip6t_match *match)
1549 LIST_DELETE(&ip6t_match, match);
1553 int ip6t_register_table(struct ip6t_table *table,
1554 const struct ip6t_replace *repl)
1557 struct ip6t_table_info *newinfo;
1558 static struct ip6t_table_info bootstrap
1559 = { 0, 0, 0, { 0 }, { 0 }, { } };
1560 void *loc_cpu_entry;
1562 newinfo = alloc_table_info(repl->size);
1566 /* choose the copy on our node/cpu */
1567 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1568 memcpy(loc_cpu_entry, repl->entries, repl->size);
1570 ret = translate_table(table->name, table->valid_hooks,
1571 newinfo, loc_cpu_entry, repl->size,
1576 free_table_info(newinfo);
1580 ret = down_interruptible(&ip6t_mutex);
1582 free_table_info(newinfo);
1586 /* Don't autoload: we'd eat our tail... */
1587 if (list_named_find(&ip6t_tables, table->name)) {
1592 /* Simplifies replace_table code. */
1593 table->private = &bootstrap;
1594 if (!replace_table(table, 0, newinfo, &ret))
1597 duprintf("table->private->number = %u\n",
1598 table->private->number);
1600 /* save number of initial entries */
1601 table->private->initial_entries = table->private->number;
1603 rwlock_init(&table->lock);
1604 list_prepend(&ip6t_tables, table);
1611 free_table_info(newinfo);
1615 void ip6t_unregister_table(struct ip6t_table *table)
1617 void *loc_cpu_entry;
1620 LIST_DELETE(&ip6t_tables, table);
1623 /* Decrease module usage counts and free resources */
1624 loc_cpu_entry = table->private->entries[raw_smp_processor_id()];
1625 IP6T_ENTRY_ITERATE(loc_cpu_entry, table->private->size,
1626 cleanup_entry, NULL);
1627 free_table_info(table->private);
1630 /* Returns 1 if the port is matched by the range, 0 otherwise */
1632 port_match(u_int16_t min, u_int16_t max, u_int16_t port, int invert)
1636 ret = (port >= min && port <= max) ^ invert;
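	/* Illustrative: port_match(0, 1023, 80, 0) is 1 (in range), while
	 * port_match(0, 1023, 80, 1) is 0 because the invert flag XORs the
	 * in-range result. */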
1641 tcp_find_option(u_int8_t option,
1642 const struct sk_buff *skb,
1643 unsigned int tcpoff,
1644 unsigned int optlen,
1648 /* tcp.doff is only 4 bits, ie. max 15 * 4 bytes */
1649 u_int8_t _opt[60 - sizeof(struct tcphdr)], *op;
1652 duprintf("tcp_match: finding option\n");
1655 /* If we don't have the whole header, drop packet. */
1656 op = skb_header_pointer(skb, tcpoff + sizeof(struct tcphdr), optlen,
1663 for (i = 0; i < optlen; ) {
1664 if (op[i] == option) return !invert;
1666 else i += op[i+1]?:1;
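		/* For multi-byte options, op[i] is the option kind and
		 * op[i+1] its length byte; the GNU "?: 1" fallback keeps
		 * the loop moving even if a bogus zero length is seen. */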
1673 tcp_match(const struct sk_buff *skb,
1674 const struct net_device *in,
1675 const struct net_device *out,
1676 const void *matchinfo,
1678 unsigned int protoff,
1681 struct tcphdr _tcph, *th;
1682 const struct ip6t_tcp *tcpinfo = matchinfo;
1687 Don't allow a fragment of TCP 8 bytes in. Nobody normal
causes this. It's a cracker trying to break in by doing a
1689 flag overwrite to pass the direction checks.
1692 duprintf("Dropping evil TCP offset=1 frag.\n");
1695 /* Must not be a fragment. */
1699 #define FWINVTCP(bool,invflg) ((bool) ^ !!(tcpinfo->invflags & invflg))
1701 th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph);
1703 /* We've been asked to examine this packet, and we
1704 can't. Hence, no choice but to drop. */
1705 duprintf("Dropping evil TCP offset=0 tinygram.\n");
1710 if (!port_match(tcpinfo->spts[0], tcpinfo->spts[1],
1712 !!(tcpinfo->invflags & IP6T_TCP_INV_SRCPT)))
1714 if (!port_match(tcpinfo->dpts[0], tcpinfo->dpts[1],
1716 !!(tcpinfo->invflags & IP6T_TCP_INV_DSTPT)))
1718 if (!FWINVTCP((((unsigned char *)th)[13] & tcpinfo->flg_mask)
1719 == tcpinfo->flg_cmp,
1720 IP6T_TCP_INV_FLAGS))
1722 if (tcpinfo->option) {
1723 if (th->doff * 4 < sizeof(_tcph)) {
1727 if (!tcp_find_option(tcpinfo->option, skb, protoff,
1728 th->doff*4 - sizeof(*th),
1729 tcpinfo->invflags & IP6T_TCP_INV_OPTION,
1736 /* Called when user tries to insert an entry of this type. */
1738 tcp_checkentry(const char *tablename,
1739 const struct ip6t_ip6 *ipv6,
1741 unsigned int matchsize,
1742 unsigned int hook_mask)
1744 const struct ip6t_tcp *tcpinfo = matchinfo;
1746 /* Must specify proto == TCP, and no unknown invflags */
1747 return ipv6->proto == IPPROTO_TCP
1748 && !(ipv6->invflags & IP6T_INV_PROTO)
1749 && matchsize == IP6T_ALIGN(sizeof(struct ip6t_tcp))
1750 && !(tcpinfo->invflags & ~IP6T_TCP_INV_MASK);
1754 udp_match(const struct sk_buff *skb,
1755 const struct net_device *in,
1756 const struct net_device *out,
1757 const void *matchinfo,
1759 unsigned int protoff,
1762 struct udphdr _udph, *uh;
1763 const struct ip6t_udp *udpinfo = matchinfo;
1765 /* Must not be a fragment. */
1769 uh = skb_header_pointer(skb, protoff, sizeof(_udph), &_udph);
1771 /* We've been asked to examine this packet, and we
1772 can't. Hence, no choice but to drop. */
1773 duprintf("Dropping evil UDP tinygram.\n");
1778 return port_match(udpinfo->spts[0], udpinfo->spts[1],
1780 !!(udpinfo->invflags & IP6T_UDP_INV_SRCPT))
1781 && port_match(udpinfo->dpts[0], udpinfo->dpts[1],
1783 !!(udpinfo->invflags & IP6T_UDP_INV_DSTPT));
1786 /* Called when user tries to insert an entry of this type. */
1788 udp_checkentry(const char *tablename,
1789 const struct ip6t_ip6 *ipv6,
1791 unsigned int matchinfosize,
1792 unsigned int hook_mask)
1794 const struct ip6t_udp *udpinfo = matchinfo;
1796 /* Must specify proto == UDP, and no unknown invflags */
1797 if (ipv6->proto != IPPROTO_UDP || (ipv6->invflags & IP6T_INV_PROTO)) {
1798 duprintf("ip6t_udp: Protocol %u != %u\n", ipv6->proto,
1802 if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_udp))) {
1803 duprintf("ip6t_udp: matchsize %u != %u\n",
1804 matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_udp)));
1807 if (udpinfo->invflags & ~IP6T_UDP_INV_MASK) {
1808 duprintf("ip6t_udp: unknown flags %X\n",
/* Returns 1 if the type and code are matched by the range, 0 otherwise */
1818 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
1819 u_int8_t type, u_int8_t code,
1822 return (type == test_type && code >= min_code && code <= max_code)
1827 icmp6_match(const struct sk_buff *skb,
1828 const struct net_device *in,
1829 const struct net_device *out,
1830 const void *matchinfo,
1832 unsigned int protoff,
1835 struct icmp6hdr _icmp, *ic;
1836 const struct ip6t_icmp *icmpinfo = matchinfo;
1838 /* Must not be a fragment. */
1842 ic = skb_header_pointer(skb, protoff, sizeof(_icmp), &_icmp);
1844 /* We've been asked to examine this packet, and we
1845 can't. Hence, no choice but to drop. */
1846 duprintf("Dropping evil ICMP tinygram.\n");
1851 return icmp6_type_code_match(icmpinfo->type,
1854 ic->icmp6_type, ic->icmp6_code,
1855 !!(icmpinfo->invflags&IP6T_ICMP_INV));
1858 /* Called when user tries to insert an entry of this type. */
1860 icmp6_checkentry(const char *tablename,
1861 const struct ip6t_ip6 *ipv6,
1863 unsigned int matchsize,
1864 unsigned int hook_mask)
1866 const struct ip6t_icmp *icmpinfo = matchinfo;
1868 /* Must specify proto == ICMP, and no unknown invflags */
1869 return ipv6->proto == IPPROTO_ICMPV6
1870 && !(ipv6->invflags & IP6T_INV_PROTO)
1871 && matchsize == IP6T_ALIGN(sizeof(struct ip6t_icmp))
1872 && !(icmpinfo->invflags & ~IP6T_ICMP_INV);
1875 /* The built-in targets: standard (NULL) and error. */
1876 static struct ip6t_target ip6t_standard_target = {
1877 .name = IP6T_STANDARD_TARGET,
1880 static struct ip6t_target ip6t_error_target = {
1881 .name = IP6T_ERROR_TARGET,
1882 .target = ip6t_error,
1885 static struct nf_sockopt_ops ip6t_sockopts = {
1887 .set_optmin = IP6T_BASE_CTL,
1888 .set_optmax = IP6T_SO_SET_MAX+1,
1889 .set = do_ip6t_set_ctl,
1890 .get_optmin = IP6T_BASE_CTL,
1891 .get_optmax = IP6T_SO_GET_MAX+1,
1892 .get = do_ip6t_get_ctl,
1895 static struct ip6t_match tcp_matchstruct = {
1897 .match = &tcp_match,
1898 .checkentry = &tcp_checkentry,
1901 static struct ip6t_match udp_matchstruct = {
1903 .match = &udp_match,
1904 .checkentry = &udp_checkentry,
1907 static struct ip6t_match icmp6_matchstruct = {
1909 .match = &icmp6_match,
1910 .checkentry = &icmp6_checkentry,
1913 #ifdef CONFIG_PROC_FS
1914 static inline int print_name(const char *i,
1915 off_t start_offset, char *buffer, int length,
1916 off_t *pos, unsigned int *count)
1918 if ((*count)++ >= start_offset) {
1919 unsigned int namelen;
1921 namelen = sprintf(buffer + *pos, "%s\n",
1922 i + sizeof(struct list_head));
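		/* The name field sits immediately after the embedded
		 * list_head in ip6t_table, ip6t_target and ip6t_match,
		 * which is what makes this pointer arithmetic work. */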
1923 if (*pos + namelen > length) {
1924 /* Stop iterating */
1932 static inline int print_target(const struct ip6t_target *t,
1933 off_t start_offset, char *buffer, int length,
1934 off_t *pos, unsigned int *count)
1936 if (t == &ip6t_standard_target || t == &ip6t_error_target)
1938 return print_name((char *)t, start_offset, buffer, length, pos, count);
1941 static int ip6t_get_tables(char *buffer, char **start, off_t offset, int length)
1944 unsigned int count = 0;
1946 if (down_interruptible(&ip6t_mutex) != 0)
1949 LIST_FIND(&ip6t_tables, print_name, char *,
1950 offset, buffer, length, &pos, &count);
1954 /* `start' hack - see fs/proc/generic.c line ~105 */
1955 *start=(char *)((unsigned long)count-offset);
1959 static int ip6t_get_targets(char *buffer, char **start, off_t offset, int length)
1962 unsigned int count = 0;
1964 if (down_interruptible(&ip6t_mutex) != 0)
1967 LIST_FIND(&ip6t_target, print_target, struct ip6t_target *,
1968 offset, buffer, length, &pos, &count);
1972 *start = (char *)((unsigned long)count - offset);
1976 static int ip6t_get_matches(char *buffer, char **start, off_t offset, int length)
1979 unsigned int count = 0;
1981 if (down_interruptible(&ip6t_mutex) != 0)
1984 LIST_FIND(&ip6t_match, print_name, char *,
1985 offset, buffer, length, &pos, &count);
1989 *start = (char *)((unsigned long)count - offset);
1993 static const struct { char *name; get_info_t *get_info; } ip6t_proc_entry[] =
1994 { { "ip6_tables_names", ip6t_get_tables },
1995 { "ip6_tables_targets", ip6t_get_targets },
1996 { "ip6_tables_matches", ip6t_get_matches },
1998 #endif /*CONFIG_PROC_FS*/
2000 static int __init init(void)
	/* No one else will be taking the semaphore now, so we won't sleep */
2006 list_append(&ip6t_target, &ip6t_standard_target);
2007 list_append(&ip6t_target, &ip6t_error_target);
2008 list_append(&ip6t_match, &tcp_matchstruct);
2009 list_append(&ip6t_match, &udp_matchstruct);
2010 list_append(&ip6t_match, &icmp6_matchstruct);
2013 /* Register setsockopt */
2014 ret = nf_register_sockopt(&ip6t_sockopts);
2016 duprintf("Unable to register sockopts.\n");
2020 #ifdef CONFIG_PROC_FS
2022 struct proc_dir_entry *proc;
2025 for (i = 0; ip6t_proc_entry[i].name; i++) {
2026 proc = proc_net_create(ip6t_proc_entry[i].name, 0,
2027 ip6t_proc_entry[i].get_info);
2030 proc_net_remove(ip6t_proc_entry[i].name);
2031 nf_unregister_sockopt(&ip6t_sockopts);
2034 proc->owner = THIS_MODULE;
2039 printk("ip6_tables: (C) 2000-2002 Netfilter core team\n");
2043 static void __exit fini(void)
2045 nf_unregister_sockopt(&ip6t_sockopts);
2046 #ifdef CONFIG_PROC_FS
2049 for (i = 0; ip6t_proc_entry[i].name; i++)
2050 proc_net_remove(ip6t_proc_entry[i].name);
/*
 * Find the offset of the specified header, or the protocol number of the
 * last header if target < 0.  "Last header" means a transport protocol
 * header, ESP, or "No next header".
 *
 * If the target header is found, its offset is set in *offset and the
 * protocol number is returned.  Otherwise, -1 is returned.
 *
 * Note that a non-first fragment is a special case: "the protocol number
 * of the last header" is the "next header" field in the Fragment header.
 * In this case, *offset is meaningless and the fragment offset is stored
 * in *fragoff if fragoff is not NULL.
 */
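/* A hypothetical call, for illustration: to locate a packet's TCP header,
 *
 *	unsigned int thoff;
 *	int proto = ipv6_find_hdr(skb, &thoff, IPPROTO_TCP, NULL);
 *
 * proto is IPPROTO_TCP and thoff the header offset on success, negative
 * otherwise. */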
2069 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2070 int target, unsigned short *fragoff)
2072 unsigned int start = (u8*)(skb->nh.ipv6h + 1) - skb->data;
2073 u8 nexthdr = skb->nh.ipv6h->nexthdr;
2074 unsigned int len = skb->len - start;
2079 while (nexthdr != target) {
2080 struct ipv6_opt_hdr _hdr, *hp;
2081 unsigned int hdrlen;
2083 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
2089 hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
2092 if (nexthdr == NEXTHDR_FRAGMENT) {
2093 unsigned short _frag_off, *fp;
2094 fp = skb_header_pointer(skb,
2095 start+offsetof(struct frag_hdr,
2102 _frag_off = ntohs(*fp) & ~0x7;
2105 ((!ipv6_ext_hdr(hp->nexthdr)) ||
2106 nexthdr == NEXTHDR_NONE)) {
2108 *fragoff = _frag_off;
2114 } else if (nexthdr == NEXTHDR_AUTH)
2115 hdrlen = (hp->hdrlen + 2) << 2;
2117 hdrlen = ipv6_optlen(hp);
2119 nexthdr = hp->nexthdr;
2128 EXPORT_SYMBOL(ip6t_register_table);
2129 EXPORT_SYMBOL(ip6t_unregister_table);
2130 EXPORT_SYMBOL(ip6t_do_table);
2131 EXPORT_SYMBOL(ip6t_register_match);
2132 EXPORT_SYMBOL(ip6t_unregister_match);
2133 EXPORT_SYMBOL(ip6t_register_target);
2134 EXPORT_SYMBOL(ip6t_unregister_target);
2135 EXPORT_SYMBOL(ip6t_ext_hdr);
2136 EXPORT_SYMBOL(ipv6_find_hdr);
2137 EXPORT_SYMBOL(ip6_masked_addrcmp);