[NET_SCHED]: Propagate nla_parse return value
[linux-2.6.git] / net / sched / act_nat.c
1 /*
2  * Stateless NAT actions
3  *
4  * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms of the GNU General Public License as published by the Free
8  * Software Foundation; either version 2 of the License, or (at your option)
9  * any later version.
10  */
11
12 #include <linux/errno.h>
13 #include <linux/init.h>
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/netfilter.h>
17 #include <linux/rtnetlink.h>
18 #include <linux/skbuff.h>
19 #include <linux/slab.h>
20 #include <linux/spinlock.h>
21 #include <linux/string.h>
22 #include <linux/tc_act/tc_nat.h>
23 #include <net/act_api.h>
24 #include <net/icmp.h>
25 #include <net/ip.h>
26 #include <net/netlink.h>
27 #include <net/tc_act/tc_nat.h>
28 #include <net/tcp.h>
29 #include <net/udp.h>
30
31
#define NAT_TAB_MASK    15
/* Hash table holding all nat action instances, indexed by tcf index & hmask. */
static struct tcf_common *tcf_nat_ht[NAT_TAB_MASK + 1];
/* Generator for automatically assigned action indices (user passed index 0). */
static u32 nat_idx_gen;
/* Protects tcf_nat_ht against concurrent insert/remove/lookup. */
static DEFINE_RWLOCK(nat_lock);

/* Bundle of table, mask and lock consumed by the generic tcf_hash_* helpers. */
static struct tcf_hashinfo nat_hash_info = {
        .htab   =       tcf_nat_ht,
        .hmask  =       NAT_TAB_MASK,
        .lock   =       &nat_lock,
};
42
43 static int tcf_nat_init(struct nlattr *nla, struct nlattr *est,
44                         struct tc_action *a, int ovr, int bind)
45 {
46         struct nlattr *tb[TCA_NAT_MAX + 1];
47         struct tc_nat *parm;
48         int ret = 0, err;
49         struct tcf_nat *p;
50         struct tcf_common *pc;
51
52         if (nla == NULL)
53                 return -EINVAL;
54
55         err = nla_parse_nested(tb, TCA_NAT_MAX, nla, NULL);
56         if (err < 0)
57                 return err;
58
59         if (tb[TCA_NAT_PARMS] == NULL ||
60             nla_len(tb[TCA_NAT_PARMS]) < sizeof(*parm))
61                 return -EINVAL;
62         parm = nla_data(tb[TCA_NAT_PARMS]);
63
64         pc = tcf_hash_check(parm->index, a, bind, &nat_hash_info);
65         if (!pc) {
66                 pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind,
67                                      &nat_idx_gen, &nat_hash_info);
68                 if (unlikely(!pc))
69                         return -ENOMEM;
70                 p = to_tcf_nat(pc);
71                 ret = ACT_P_CREATED;
72         } else {
73                 p = to_tcf_nat(pc);
74                 if (!ovr) {
75                         tcf_hash_release(pc, bind, &nat_hash_info);
76                         return -EEXIST;
77                 }
78         }
79
80         spin_lock_bh(&p->tcf_lock);
81         p->old_addr = parm->old_addr;
82         p->new_addr = parm->new_addr;
83         p->mask = parm->mask;
84         p->flags = parm->flags;
85
86         p->tcf_action = parm->action;
87         spin_unlock_bh(&p->tcf_lock);
88
89         if (ret == ACT_P_CREATED)
90                 tcf_hash_insert(pc, &nat_hash_info);
91
92         return ret;
93 }
94
95 static int tcf_nat_cleanup(struct tc_action *a, int bind)
96 {
97         struct tcf_nat *p = a->priv;
98
99         return tcf_hash_release(&p->common, bind, &nat_hash_info);
100 }
101
/*
 * tcf_nat - packet-path handler: stateless source/destination NAT
 * @skb: packet being classified
 * @a:   bound nat action instance
 * @res: classification result (unused here)
 *
 * Rewrites the IPv4 source (egress) or destination (ingress) address when
 * it matches old_addr under mask, then incrementally fixes the IP header
 * checksum and, for TCP/UDP/ICMP, the transport checksum.  Returns the
 * configured action verdict, or TC_ACT_SHOT on malformed/unpullable
 * packets.
 */
static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
                   struct tcf_result *res)
{
        struct tcf_nat *p = a->priv;
        struct iphdr *iph;
        __be32 old_addr;
        __be32 new_addr;
        __be32 mask;
        __be32 addr;
        int egress;
        int action;
        int ihl;

        /* Snapshot the configuration and bump stats under the per-action
         * lock so a concurrent tcf_nat_init() update is seen atomically. */
        spin_lock(&p->tcf_lock);

        p->tcf_tm.lastuse = jiffies;
        old_addr = p->old_addr;
        new_addr = p->new_addr;
        mask = p->mask;
        egress = p->flags & TCA_NAT_FLAG_EGRESS;
        action = p->tcf_action;

        p->tcf_bstats.bytes += skb->len;
        p->tcf_bstats.packets++;

        spin_unlock(&p->tcf_lock);

        if (unlikely(action == TC_ACT_SHOT))
                goto drop;

        /* Need at least a minimal IPv4 header in the linear area. */
        if (!pskb_may_pull(skb, sizeof(*iph)))
                goto drop;

        iph = ip_hdr(skb);

        /* Egress NAT rewrites the source address, ingress the destination. */
        if (egress)
                addr = iph->saddr;
        else
                addr = iph->daddr;

        if (!((old_addr ^ addr) & mask)) {
                /* Address matches old_addr within mask: make the header
                 * writable (un-share a cloned skb) before modifying it. */
                if (skb_cloned(skb) &&
                    !skb_clone_writable(skb, sizeof(*iph)) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
                        goto drop;

                /* Replace only the masked bits; keep host bits from addr. */
                new_addr &= mask;
                new_addr |= addr & ~mask;

                /* Rewrite IP header */
                iph = ip_hdr(skb);
                if (egress)
                        iph->saddr = new_addr;
                else
                        iph->daddr = new_addr;

                /* Incremental IP header checksum update (RFC 1624 style). */
                csum_replace4(&iph->check, addr, new_addr);
        }
        /* NOTE(review): if the mask test above fails, new_addr keeps its
         * raw configured value yet the transport checksum below is still
         * rewritten with addr -> new_addr — verify this is intended. */

        ihl = iph->ihl * 4;

        /* It would be nice to share code with stateful NAT. */
        /* Non-first fragments carry no transport header: the frag_off test
         * maps them to protocol 0, i.e. the default (no-op) case. */
        switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
        case IPPROTO_TCP:
        {
                struct tcphdr *tcph;

                /* Pull and un-share up to the TCP checksum field. */
                if (!pskb_may_pull(skb, ihl + sizeof(*tcph)) ||
                    (skb_cloned(skb) &&
                     !skb_clone_writable(skb, ihl + sizeof(*tcph)) &&
                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
                        goto drop;

                tcph = (void *)(skb_network_header(skb) + ihl);
                /* Last arg 1: the address is part of the pseudo-header. */
                inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, 1);
                break;
        }
        case IPPROTO_UDP:
        {
                struct udphdr *udph;

                if (!pskb_may_pull(skb, ihl + sizeof(*udph)) ||
                    (skb_cloned(skb) &&
                     !skb_clone_writable(skb, ihl + sizeof(*udph)) &&
                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
                        goto drop;

                udph = (void *)(skb_network_header(skb) + ihl);
                /* UDP checksum 0 means "no checksum" — leave it alone
                 * unless the checksum will be finished later in hardware
                 * (CHECKSUM_PARTIAL). */
                if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
                        inet_proto_csum_replace4(&udph->check, skb, addr,
                                                 new_addr, 1);
                        /* A computed 0 must be folded to 0xFFFF so it is
                         * not mistaken for "no checksum". */
                        if (!udph->check)
                                udph->check = CSUM_MANGLED_0;
                }
                break;
        }
        case IPPROTO_ICMP:
        {
                struct icmphdr *icmph;

                /* ICMP errors embed the offending IP header; need both. */
                if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph)))
                        goto drop;

                icmph = (void *)(skb_network_header(skb) + ihl);

                /* Only error messages carry an embedded header to fix up. */
                if ((icmph->type != ICMP_DEST_UNREACH) &&
                    (icmph->type != ICMP_TIME_EXCEEDED) &&
                    (icmph->type != ICMP_PARAMETERPROB))
                        break;

                iph = (void *)(icmph + 1);
                /* The embedded header is the original (inner) packet, so
                 * the direction is reversed relative to the outer header. */
                if (egress)
                        addr = iph->daddr;
                else
                        addr = iph->saddr;

                if ((old_addr ^ addr) & mask)
                        break;

                if (skb_cloned(skb) &&
                    !skb_clone_writable(skb,
                                        ihl + sizeof(*icmph) + sizeof(*iph)) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
                        goto drop;

                /* pskb_expand_head() may have moved the data; recompute. */
                icmph = (void *)(skb_network_header(skb) + ihl);
                iph = (void *)(icmph + 1);

                new_addr &= mask;
                new_addr |= addr & ~mask;

                /* XXX Fix up the inner checksums. */
                if (egress)
                        iph->daddr = new_addr;
                else
                        iph->saddr = new_addr;

                inet_proto_csum_replace4(&icmph->checksum, skb, addr, new_addr,
                                         1);
                break;
        }
        default:
                break;
        }

        return action;

drop:
        spin_lock(&p->tcf_lock);
        p->tcf_qstats.drops++;
        spin_unlock(&p->tcf_lock);
        return TC_ACT_SHOT;
}
255
/*
 * tcf_nat_dump - serialize a nat action's configuration to netlink
 * @skb:  message buffer being filled
 * @a:    action instance to dump
 * @bind: caller's bind count to subtract from the reported bindcnt
 * @ref:  caller's reference count to subtract from the reported refcnt
 *
 * Emits TCA_NAT_PARMS (struct tc_nat) and TCA_NAT_TM (struct tcf_t).
 * Returns the new skb length on success, -ENOBUFS on allocation failure
 * or -1 if the attributes did not fit (message trimmed back).
 */
static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a,
                        int bind, int ref)
{
        /* Remember the tail so a failed put can trim back to it. */
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_nat *p = a->priv;
        struct tc_nat *opt;
        struct tcf_t t;
        int s;

        s = sizeof(*opt);

        /* netlink spinlocks held above us - must use ATOMIC */
        opt = kzalloc(s, GFP_ATOMIC);
        if (unlikely(!opt))
                return -ENOBUFS;

        opt->old_addr = p->old_addr;
        opt->new_addr = p->new_addr;
        opt->mask = p->mask;
        opt->flags = p->flags;

        opt->index = p->tcf_index;
        opt->action = p->tcf_action;
        /* Report counts net of the reference/binding held by this caller. */
        opt->refcnt = p->tcf_refcnt - ref;
        opt->bindcnt = p->tcf_bindcnt - bind;

        /* NLA_PUT jumps to nla_put_failure if the attribute does not fit. */
        NLA_PUT(skb, TCA_NAT_PARMS, s, opt);
        /* Timestamps are exported as clock_t deltas relative to now. */
        t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
        t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
        t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
        NLA_PUT(skb, TCA_NAT_TM, sizeof(t), &t);

        kfree(opt);

        return skb->len;

nla_put_failure:
        /* Undo any partially written attributes. */
        nlmsg_trim(skb, b);
        kfree(opt);
        return -1;
}
297
/* Registration descriptor tying the "nat" action kind to its handlers;
 * lookup and walk reuse the generic hash helpers via nat_hash_info. */
static struct tc_action_ops act_nat_ops = {
        .kind           =       "nat",
        .hinfo          =       &nat_hash_info,
        .type           =       TCA_ACT_NAT,
        .capab          =       TCA_CAP_NONE,
        .owner          =       THIS_MODULE,
        .act            =       tcf_nat,
        .dump           =       tcf_nat_dump,
        .cleanup        =       tcf_nat_cleanup,
        .lookup         =       tcf_hash_search,
        .init           =       tcf_nat_init,
        .walk           =       tcf_generic_walker
};
311
312 MODULE_DESCRIPTION("Stateless NAT actions");
313 MODULE_LICENSE("GPL");
314
315 static int __init nat_init_module(void)
316 {
317         return tcf_register_action(&act_nat_ops);
318 }
319
320 static void __exit nat_cleanup_module(void)
321 {
322         tcf_unregister_action(&act_nat_ops);
323 }
324
325 module_init(nat_init_module);
326 module_exit(nat_cleanup_module);