[NET]: More instruction checks for net/core/filter.c
[linux-2.6.git] / net / core / filter.c
1 /*
2  * Linux Socket Filter - Kernel level socket filtering
3  *
4  * Author:
5  *     Jay Schulist <jschlst@samba.org>
6  *
7  * Based on the design of:
8  *     - The Berkeley Packet Filter
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU General Public License
12  * as published by the Free Software Foundation; either version
13  * 2 of the License, or (at your option) any later version.
14  *
15  * Andi Kleen - Fix a few bad bugs and races.
16  * Kris Katterjohn - Added many additional checks in sk_chk_filter()
17  */
18
19 #include <linux/module.h>
20 #include <linux/types.h>
21 #include <linux/sched.h>
22 #include <linux/mm.h>
23 #include <linux/fcntl.h>
24 #include <linux/socket.h>
25 #include <linux/in.h>
26 #include <linux/inet.h>
27 #include <linux/netdevice.h>
28 #include <linux/if_packet.h>
29 #include <net/ip.h>
30 #include <net/protocol.h>
31 #include <linux/skbuff.h>
32 #include <net/sock.h>
33 #include <linux/errno.h>
34 #include <linux/timer.h>
35 #include <asm/system.h>
36 #include <asm/uaccess.h>
37 #include <linux/filter.h>
38
39 /* No hurry in this branch */
40 static void *__load_pointer(struct sk_buff *skb, int k)
41 {
42         u8 *ptr = NULL;
43
44         if (k >= SKF_NET_OFF)
45                 ptr = skb->nh.raw + k - SKF_NET_OFF;
46         else if (k >= SKF_LL_OFF)
47                 ptr = skb->mac.raw + k - SKF_LL_OFF;
48
49         if (ptr >= skb->head && ptr < skb->tail)
50                 return ptr;
51         return NULL;
52 }
53
54 static inline void *load_pointer(struct sk_buff *skb, int k,
55                                  unsigned int size, void *buffer)
56 {
57         if (k >= 0)
58                 return skb_header_pointer(skb, k, size, buffer);
59         else {
60                 if (k >= SKF_AD_OFF)
61                         return NULL;
62                 return __load_pointer(skb, k);
63         }
64 }
65
/**
 *	sk_run_filter	-	run a filter on a socket
 *	@skb: buffer to run the filter on
 *	@filter: filter to apply
 *	@flen: length of filter
 *
 * Decode and apply filter instructions to the skb->data.
 * Return length to keep, 0 for none. skb is the data we are
 * filtering, filter is the array of filter instructions, and
 * len is the number of filter blocks in the array.
 *
 * The program is assumed to have passed sk_chk_filter(): jump
 * targets, scratch-memory indices and opcodes are trusted here.
 */
int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen)
{
	struct sock_filter *fentry;	/* We walk down these */
	void *ptr;
	u32 A = 0;			/* Accumulator */
	u32 X = 0;			/* Index Register */
	u32 mem[BPF_MEMWORDS];		/* Scratch Memory Store */
	u32 tmp;			/* bounce buffer for load_pointer() */
	int k;				/* current load offset */
	int pc;

	/*
	 * Process array of filter instructions.
	 */
	for (pc = 0; pc < flen; pc++) {
		fentry = &filter[pc];

		switch (fentry->code) {
		/* ALU ops: operate on A with either X or the immediate k. */
		case BPF_ALU|BPF_ADD|BPF_X:
			A += X;
			continue;
		case BPF_ALU|BPF_ADD|BPF_K:
			A += fentry->k;
			continue;
		case BPF_ALU|BPF_SUB|BPF_X:
			A -= X;
			continue;
		case BPF_ALU|BPF_SUB|BPF_K:
			A -= fentry->k;
			continue;
		case BPF_ALU|BPF_MUL|BPF_X:
			A *= X;
			continue;
		case BPF_ALU|BPF_MUL|BPF_K:
			A *= fentry->k;
			continue;
		case BPF_ALU|BPF_DIV|BPF_X:
			/* Runtime divide-by-zero terminates the filter (drop). */
			if (X == 0)
				return 0;
			A /= X;
			continue;
		case BPF_ALU|BPF_DIV|BPF_K:
			/* k == 0 was rejected by sk_chk_filter(). */
			A /= fentry->k;
			continue;
		case BPF_ALU|BPF_AND|BPF_X:
			A &= X;
			continue;
		case BPF_ALU|BPF_AND|BPF_K:
			A &= fentry->k;
			continue;
		case BPF_ALU|BPF_OR|BPF_X:
			A |= X;
			continue;
		case BPF_ALU|BPF_OR|BPF_K:
			A |= fentry->k;
			continue;
		case BPF_ALU|BPF_LSH|BPF_X:
			A <<= X;
			continue;
		case BPF_ALU|BPF_LSH|BPF_K:
			A <<= fentry->k;
			continue;
		case BPF_ALU|BPF_RSH|BPF_X:
			A >>= X;
			continue;
		case BPF_ALU|BPF_RSH|BPF_K:
			A >>= fentry->k;
			continue;
		case BPF_ALU|BPF_NEG:
			A = -A;
			continue;
		/*
		 * Jumps: all offsets are forward; the loop's pc++ supplies
		 * the implicit +1, so pc += k lands on target-1 here.
		 */
		case BPF_JMP|BPF_JA:
			pc += fentry->k;
			continue;
		case BPF_JMP|BPF_JGT|BPF_K:
			pc += (A > fentry->k) ? fentry->jt : fentry->jf;
			continue;
		case BPF_JMP|BPF_JGE|BPF_K:
			pc += (A >= fentry->k) ? fentry->jt : fentry->jf;
			continue;
		case BPF_JMP|BPF_JEQ|BPF_K:
			pc += (A == fentry->k) ? fentry->jt : fentry->jf;
			continue;
		case BPF_JMP|BPF_JSET|BPF_K:
			pc += (A & fentry->k) ? fentry->jt : fentry->jf;
			continue;
		case BPF_JMP|BPF_JGT|BPF_X:
			pc += (A > X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_JMP|BPF_JGE|BPF_X:
			pc += (A >= X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_JMP|BPF_JEQ|BPF_X:
			pc += (A == X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_JMP|BPF_JSET|BPF_X:
			pc += (A & X) ? fentry->jt : fentry->jf;
			continue;
		/*
		 * Packet loads. A failed load (offset outside the packet)
		 * breaks out of this switch into the ancillary-data switch
		 * below, with the failing offset still in k.
		 */
		case BPF_LD|BPF_W|BPF_ABS:
			k = fentry->k;
 load_w:
			ptr = load_pointer(skb, k, 4, &tmp);
			if (ptr != NULL) {
				A = ntohl(*(u32 *)ptr);
				continue;
			}
			break;
		case BPF_LD|BPF_H|BPF_ABS:
			k = fentry->k;
 load_h:
			ptr = load_pointer(skb, k, 2, &tmp);
			if (ptr != NULL) {
				A = ntohs(*(u16 *)ptr);
				continue;
			}
			break;
		case BPF_LD|BPF_B|BPF_ABS:
			k = fentry->k;
load_b:
			ptr = load_pointer(skb, k, 1, &tmp);
			if (ptr != NULL) {
				A = *(u8 *)ptr;
				continue;
			}
			break;
		case BPF_LD|BPF_W|BPF_LEN:
			A = skb->len;
			continue;
		case BPF_LDX|BPF_W|BPF_LEN:
			X = skb->len;
			continue;
		/* Indirect loads: offset is X + k, then share the ABS paths. */
		case BPF_LD|BPF_W|BPF_IND:
			k = X + fentry->k;
			goto load_w;
		case BPF_LD|BPF_H|BPF_IND:
			k = X + fentry->k;
			goto load_h;
		case BPF_LD|BPF_B|BPF_IND:
			k = X + fentry->k;
			goto load_b;
		case BPF_LDX|BPF_B|BPF_MSH:
			/*
			 * X = 4 * low nibble of byte at k — traditionally
			 * used to extract an IPv4 header length.
			 */
			ptr = load_pointer(skb, fentry->k, 1, &tmp);
			if (ptr != NULL) {
				X = (*(u8 *)ptr & 0xf) << 2;
				continue;
			}
			return 0;
		case BPF_LD|BPF_IMM:
			A = fentry->k;
			continue;
		case BPF_LDX|BPF_IMM:
			X = fentry->k;
			continue;
		/* Scratch memory: k < BPF_MEMWORDS per sk_chk_filter(). */
		case BPF_LD|BPF_MEM:
			A = mem[fentry->k];
			continue;
		case BPF_LDX|BPF_MEM:
			X = mem[fentry->k];
			continue;
		case BPF_MISC|BPF_TAX:
			X = A;
			continue;
		case BPF_MISC|BPF_TXA:
			A = X;
			continue;
		case BPF_RET|BPF_K:
			return ((unsigned int)fentry->k);
		case BPF_RET|BPF_A:
			return ((unsigned int)A);
		case BPF_ST:
			mem[fentry->k] = A;
			continue;
		case BPF_STX:
			mem[fentry->k] = X;
			continue;
		default:
			/* Unreachable for programs vetted by sk_chk_filter(). */
			WARN_ON(1);
			return 0;
		}

		/*
		 * Handle ancillary data, which are impossible
		 * (or very difficult) to get parsing packet contents.
		 * Reached only when a packet load above failed; negative
		 * offsets in the SKF_AD_ range select metadata fields,
		 * anything else drops the packet.
		 */
		switch (k-SKF_AD_OFF) {
		case SKF_AD_PROTOCOL:
			/*
			 * NOTE(review): skb->protocol is network byte order;
			 * htons() performs the same swap as ntohs() here,
			 * but ntohs() would express the intent.
			 */
			A = htons(skb->protocol);
			continue;
		case SKF_AD_PKTTYPE:
			A = skb->pkt_type;
			continue;
		case SKF_AD_IFINDEX:
			A = skb->dev->ifindex;
			continue;
		default:
			return 0;
		}
	}

	return 0;
}
279
/**
 *	sk_chk_filter - verify socket filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * Returns 0 if the rule set is legal or a negative errno code if not.
 */
int sk_chk_filter(struct sock_filter *filter, int flen)
{
	struct sock_filter *ftest;
	int pc;

	/* Empty or oversized programs are rejected outright. */
	if (flen == 0 || flen > BPF_MAXINSNS)
		return -EINVAL;

	/* check the filter code now */
	for (pc = 0; pc < flen; pc++) {
		/* all jumps are forward as they are not signed */
		ftest = &filter[pc];

		/* Only allow valid instructions */
		switch (ftest->code) {
		/* These opcodes need no operand validation. */
		case BPF_ALU|BPF_ADD|BPF_K:
		case BPF_ALU|BPF_ADD|BPF_X:
		case BPF_ALU|BPF_SUB|BPF_K:
		case BPF_ALU|BPF_SUB|BPF_X:
		case BPF_ALU|BPF_MUL|BPF_K:
		case BPF_ALU|BPF_MUL|BPF_X:
		case BPF_ALU|BPF_DIV|BPF_X:
		case BPF_ALU|BPF_AND|BPF_K:
		case BPF_ALU|BPF_AND|BPF_X:
		case BPF_ALU|BPF_OR|BPF_K:
		case BPF_ALU|BPF_OR|BPF_X:
		case BPF_ALU|BPF_LSH|BPF_K:
		case BPF_ALU|BPF_LSH|BPF_X:
		case BPF_ALU|BPF_RSH|BPF_K:
		case BPF_ALU|BPF_RSH|BPF_X:
		case BPF_ALU|BPF_NEG:
		case BPF_LD|BPF_W|BPF_ABS:
		case BPF_LD|BPF_H|BPF_ABS:
		case BPF_LD|BPF_B|BPF_ABS:
		case BPF_LD|BPF_W|BPF_LEN:
		case BPF_LD|BPF_W|BPF_IND:
		case BPF_LD|BPF_H|BPF_IND:
		case BPF_LD|BPF_B|BPF_IND:
		case BPF_LD|BPF_IMM:
		case BPF_LDX|BPF_W|BPF_LEN:
		case BPF_LDX|BPF_B|BPF_MSH:
		case BPF_LDX|BPF_IMM:
		case BPF_MISC|BPF_TAX:
		case BPF_MISC|BPF_TXA:
		case BPF_RET|BPF_K:
		case BPF_RET|BPF_A:
			break;

		/* Some instructions need special checks */

		case BPF_ALU|BPF_DIV|BPF_K:
			/* check for division by zero */
			if (ftest->k == 0)
				return -EINVAL;
			break;

		case BPF_LD|BPF_MEM:
		case BPF_LDX|BPF_MEM:
		case BPF_ST:
		case BPF_STX:
			/* check for invalid memory addresses */
			if (ftest->k >= BPF_MEMWORDS)
				return -EINVAL;
			break;

		case BPF_JMP|BPF_JA:
			/*
			 * Note, the large ftest->k might cause loops.
			 * Compare this with conditional jumps below,
			 * where offsets are limited. --ANK (981016)
			 *
			 * Unsigned compare also rejects a jump from the
			 * last instruction (flen-pc-1 == 0).
			 */
			if (ftest->k >= (unsigned)(flen-pc-1))
				return -EINVAL;
			break;

		case BPF_JMP|BPF_JEQ|BPF_K:
		case BPF_JMP|BPF_JEQ|BPF_X:
		case BPF_JMP|BPF_JGE|BPF_K:
		case BPF_JMP|BPF_JGE|BPF_X:
		case BPF_JMP|BPF_JGT|BPF_K:
		case BPF_JMP|BPF_JGT|BPF_X:
		case BPF_JMP|BPF_JSET|BPF_K:
		case BPF_JMP|BPF_JSET|BPF_X:
			/* for conditionals both must be safe */
			if (pc + ftest->jt + 1 >= flen ||
			    pc + ftest->jf + 1 >= flen)
				return -EINVAL;
			break;

		default:
			/* Anything not whitelisted above is illegal. */
			return -EINVAL;
		}
	}

	/*
	 * The program must end with a return. We don't care where they
	 * jumped within the script (its always forwards) but in the end
	 * they _will_ hit this.
	 */
	return (BPF_CLASS(filter[flen - 1].code) == BPF_RET) ? 0 : -EINVAL;
}
393
394 /**
395  *      sk_attach_filter - attach a socket filter
396  *      @fprog: the filter program
397  *      @sk: the socket to use
398  *
399  * Attach the user's filter code. We first run some sanity checks on
400  * it to make sure it does not explode on us later. If an error
401  * occurs or there is insufficient memory for the filter a negative
402  * errno code is returned. On success the return is zero.
403  */
404 int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
405 {
406         struct sk_filter *fp; 
407         unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
408         int err;
409
410         /* Make sure new filter is there and in the right amounts. */
411         if (fprog->filter == NULL)
412                 return -EINVAL;
413
414         fp = sock_kmalloc(sk, fsize+sizeof(*fp), GFP_KERNEL);
415         if (!fp)
416                 return -ENOMEM;
417         if (copy_from_user(fp->insns, fprog->filter, fsize)) {
418                 sock_kfree_s(sk, fp, fsize+sizeof(*fp)); 
419                 return -EFAULT;
420         }
421
422         atomic_set(&fp->refcnt, 1);
423         fp->len = fprog->len;
424
425         err = sk_chk_filter(fp->insns, fp->len);
426         if (!err) {
427                 struct sk_filter *old_fp;
428
429                 spin_lock_bh(&sk->sk_lock.slock);
430                 old_fp = sk->sk_filter;
431                 sk->sk_filter = fp;
432                 spin_unlock_bh(&sk->sk_lock.slock);
433                 fp = old_fp;
434         }
435
436         if (fp)
437                 sk_filter_release(sk, fp);
438         return err;
439 }
440
/* Make the checker and the interpreter available to other modules. */
EXPORT_SYMBOL(sk_chk_filter);
EXPORT_SYMBOL(sk_run_filter);