[NET]: More kzalloc conversions.
net/core/flow.c
/* flow.c: Generic flow cache.
 *
 * Copyright (C) 2003 Alexey N. Kuznetsov (kuznet@ms2.inr.ac.ru)
 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/completion.h>
#include <linux/percpu.h>
#include <linux/bitops.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <net/flow.h>
#include <asm/atomic.h>
#include <asm/semaphore.h>
#include <linux/security.h>

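/*
 * A cached flow: one node on a per-CPU hash chain.  @key is the full
 * flow descriptor, @object the resolved result (e.g. an xfrm object)
 * and @object_ref its external refcount.  @genid records the value of
 * flow_cache_genid at resolution time; a mismatch marks the entry stale.
 */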
struct flow_cache_entry {
	struct flow_cache_entry	*next;
	u16			family;
	u8			dir;
	struct flowi		key;
	u32			genid;
	u32			sk_sid;
	void			*object;
	atomic_t		*object_ref;
};

atomic_t flow_cache_genid = ATOMIC_INIT(0);

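/*
 * Each CPU owns a private hash table of flow_cache_entry chains, so
 * lookups need no cross-CPU locking; disabling bottom halves on the
 * local CPU is sufficient.
 */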
static u32 flow_hash_shift;
#define flow_hash_size	(1 << flow_hash_shift)
static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };

#define flow_table(cpu) (per_cpu(flow_tables, cpu))

static kmem_cache_t *flow_cachep __read_mostly;

static int flow_lwm, flow_hwm;

struct flow_percpu_info {
	int hash_rnd_recalc;
	u32 hash_rnd;
	int count;
} ____cacheline_aligned;
static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };

#define flow_hash_rnd_recalc(cpu) \
	(per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
#define flow_hash_rnd(cpu) \
	(per_cpu(flow_hash_info, cpu).hash_rnd)
#define flow_count(cpu) \
	(per_cpu(flow_hash_info, cpu).count)

static struct timer_list flow_hash_rnd_timer;

#define FLOW_HASH_RND_PERIOD	(10 * 60 * HZ)

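/*
 * State for a global cache flush: @cpuleft counts the CPUs whose flush
 * tasklet has yet to run; the last one signals @completion.
 */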
struct flow_flush_info {
	atomic_t cpuleft;
	struct completion completion;
};
static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };

#define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))

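/*
 * Timer callback: ask every CPU to pick a fresh hash seed on its next
 * lookup, then rearm ourselves for another FLOW_HASH_RND_PERIOD.
 */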
static void flow_cache_new_hashrnd(unsigned long arg)
{
	int i;

	for_each_cpu(i)
		flow_hash_rnd_recalc(i) = 1;

	flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
	add_timer(&flow_hash_rnd_timer);
}

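/*
 * Trim each hash chain of @cpu's table down to @shrink_to entries,
 * dropping the object reference held by every evicted entry.  With
 * shrink_to == 0 this empties the table.
 */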
static void __flow_cache_shrink(int cpu, int shrink_to)
{
	struct flow_cache_entry *fle, **flp;
	int i;

	for (i = 0; i < flow_hash_size; i++) {
		int k = 0;

		flp = &flow_table(cpu)[i];
		while ((fle = *flp) != NULL && k < shrink_to) {
			k++;
			flp = &fle->next;
		}
		while ((fle = *flp) != NULL) {
			*flp = fle->next;
			if (fle->object)
				atomic_dec(fle->object_ref);
			kmem_cache_free(flow_cachep, fle);
			flow_count(cpu)--;
		}
	}
}

static void flow_cache_shrink(int cpu)
{
	int shrink_to = flow_lwm / flow_hash_size;

	__flow_cache_shrink(cpu, shrink_to);
}

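/*
 * Install a new random hash seed for @cpu and empty its table, since
 * entries hashed with the old seed would land in the wrong buckets.
 */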
static void flow_new_hash_rnd(int cpu)
{
	get_random_bytes(&flow_hash_rnd(cpu), sizeof(u32));
	flow_hash_rnd_recalc(cpu) = 0;

	__flow_cache_shrink(cpu, 0);
}

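/* Hash the raw words of the flowi with @cpu's seed into a bucket index. */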
static u32 flow_hash_code(struct flowi *key, int cpu)
{
	u32 *k = (u32 *) key;

	return (jhash2(k, (sizeof(*key) / sizeof(u32)), flow_hash_rnd(cpu)) &
		(flow_hash_size - 1));
}

#if (BITS_PER_LONG == 64)
typedef u64 flow_compare_t;
#else
typedef u32 flow_compare_t;
#endif

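/*
 * Deliberately never defined: if sizeof(struct flowi) is not a multiple
 * of sizeof(flow_compare_t), the call below survives to link time and
 * fails the build.  A compile-time size assertion, in effect.
 */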
extern void flowi_is_missized(void);

/* I hear what you're saying, use memcmp.  But memcmp cannot make
 * important assumptions that we can here, such as alignment and
 * constant size.
 */
static int flow_key_compare(struct flowi *key1, struct flowi *key2)
{
	flow_compare_t *k1, *k1_lim, *k2;
	const int n_elem = sizeof(struct flowi) / sizeof(flow_compare_t);

	if (sizeof(struct flowi) % sizeof(flow_compare_t))
		flowi_is_missized();

	k1 = (flow_compare_t *) key1;
	k1_lim = k1 + n_elem;

	k2 = (flow_compare_t *) key2;

	do {
		if (*k1++ != *k2++)
			return 1;
	} while (k1 < k1_lim);

	return 0;
}

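/*
 * Main entry point.  With bottom halves disabled, look @key up in the
 * local CPU's table.  A hit with a current genid returns the cached
 * object (taking a reference).  On a miss or a stale entry, @resolver
 * is called to produce the object, which is then cached.  Per the call
 * below, a resolver has the shape:
 *
 *	void resolver(struct flowi *key, u32 sk_sid, u16 family, u8 dir,
 *		      void **objp, atomic_t **obj_refp);
 */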
void *flow_cache_lookup(struct flowi *key, u32 sk_sid, u16 family, u8 dir,
			flow_resolve_t resolver)
{
	struct flow_cache_entry *fle, **head;
	unsigned int hash;
	int cpu;

	local_bh_disable();
	cpu = smp_processor_id();

	fle = NULL;
	/* Packet really early in init?  Making flow_cache_init a
	 * pre-smp initcall would solve this.  --RR */
	if (!flow_table(cpu))
		goto nocache;

	if (flow_hash_rnd_recalc(cpu))
		flow_new_hash_rnd(cpu);
	hash = flow_hash_code(key, cpu);

	head = &flow_table(cpu)[hash];
	for (fle = *head; fle; fle = fle->next) {
		if (fle->family == family &&
		    fle->dir == dir &&
		    fle->sk_sid == sk_sid &&
		    flow_key_compare(key, &fle->key) == 0) {
			if (fle->genid == atomic_read(&flow_cache_genid)) {
				void *ret = fle->object;

				if (ret)
					atomic_inc(fle->object_ref);
				local_bh_enable();

				return ret;
			}
			break;
		}
	}

	if (!fle) {
		if (flow_count(cpu) > flow_hwm)
			flow_cache_shrink(cpu);

		fle = kmem_cache_alloc(flow_cachep, SLAB_ATOMIC);
		if (fle) {
			fle->next = *head;
			*head = fle;
			fle->family = family;
			fle->dir = dir;
			fle->sk_sid = sk_sid;
			memcpy(&fle->key, key, sizeof(*key));
			fle->object = NULL;
			flow_count(cpu)++;
		}
	}

nocache:
	{
		void *obj;
		atomic_t *obj_ref;

		resolver(key, sk_sid, family, dir, &obj, &obj_ref);

		if (fle) {
			fle->genid = atomic_read(&flow_cache_genid);

			if (fle->object)
				atomic_dec(fle->object_ref);

			fle->object = obj;
			fle->object_ref = obj_ref;
			if (obj)
				atomic_inc(fle->object_ref);
		}
		local_bh_enable();

		return obj;
	}
}

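/*
 * Per-CPU half of a flush: walk the local table and drop the object
 * reference of every stale entry (genid mismatch), then count this CPU
 * off against the flush's completion.
 */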
static void flow_cache_flush_tasklet(unsigned long data)
{
	struct flow_flush_info *info = (void *)data;
	int i;
	int cpu;

	cpu = smp_processor_id();
	for (i = 0; i < flow_hash_size; i++) {
		struct flow_cache_entry *fle;

		fle = flow_table(cpu)[i];
		for (; fle; fle = fle->next) {
			unsigned genid = atomic_read(&flow_cache_genid);

			if (!fle->object || fle->genid == genid)
				continue;

			fle->object = NULL;
			atomic_dec(fle->object_ref);
		}
	}

	if (atomic_dec_and_test(&info->cpuleft))
		complete(&info->completion);
}

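/*
 * Runs on each remote CPU via smp_call_function(), i.e. in IPI context;
 * the real work is deferred to that CPU's flush tasklet.
 */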
static void flow_cache_flush_per_cpu(void *) __attribute__((__unused__));
static void flow_cache_flush_per_cpu(void *data)
{
	struct flow_flush_info *info = data;
	int cpu;
	struct tasklet_struct *tasklet;

	cpu = smp_processor_id();

	tasklet = flow_flush_tasklet(cpu);
	tasklet->data = (unsigned long)info;
	tasklet_schedule(tasklet);
}

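/*
 * Flush the cache on all online CPUs and wait until every per-CPU
 * tasklet has finished.  Serialized against concurrent flushes by
 * flow_flush_sem and against CPU hotplug by lock_cpu_hotplug().
 */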
void flow_cache_flush(void)
{
	struct flow_flush_info info;
	static DEFINE_MUTEX(flow_flush_sem);

	/* Don't want cpus going down or up during this. */
	lock_cpu_hotplug();
	mutex_lock(&flow_flush_sem);
	atomic_set(&info.cpuleft, num_online_cpus());
	init_completion(&info.completion);

	local_bh_disable();
	smp_call_function(flow_cache_flush_per_cpu, &info, 1, 0);
	flow_cache_flush_tasklet((unsigned long)&info);
	local_bh_enable();

	wait_for_completion(&info.completion);
	mutex_unlock(&flow_flush_sem);
	unlock_cpu_hotplug();
}

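/*
 * Allocate @cpu's hash table (smallest page order that fits
 * flow_hash_size pointers, zeroed) and initialize its flush tasklet.
 */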
static void __devinit flow_cache_cpu_prepare(int cpu)
{
	struct tasklet_struct *tasklet;
	unsigned long order;

	for (order = 0;
	     (PAGE_SIZE << order) <
		     (sizeof(struct flow_cache_entry *)*flow_hash_size);
	     order++)
		/* NOTHING */;

	flow_table(cpu) = (struct flow_cache_entry **)
		__get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
	if (!flow_table(cpu))
		panic("NET: failed to allocate flow cache order %lu\n", order);

	flow_hash_rnd_recalc(cpu) = 1;
	flow_count(cpu) = 0;

	tasklet = flow_flush_tasklet(cpu);
	tasklet_init(tasklet, flow_cache_flush_tasklet, 0);
}

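/* On CPU_DEAD, empty the dead CPU's table so its object refs are released. */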
#ifdef CONFIG_HOTPLUG_CPU
static int flow_cache_cpu(struct notifier_block *nfb,
			  unsigned long action,
			  void *hcpu)
{
	if (action == CPU_DEAD)
		__flow_cache_shrink((unsigned long)hcpu, 0);
	return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */

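/*
 * Boot-time setup: create the entry slab, size the tables at 2^10
 * buckets with low/high water marks of 2 and 4 entries per bucket on
 * average, start the periodic reseed timer, prepare each CPU's table,
 * and register the hotplug notifier.
 */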
static int __init flow_cache_init(void)
{
	int i;

	flow_cachep = kmem_cache_create("flow_cache",
					sizeof(struct flow_cache_entry),
					0, SLAB_HWCACHE_ALIGN,
					NULL, NULL);

	if (!flow_cachep)
		panic("NET: failed to allocate flow cache slab\n");

	flow_hash_shift = 10;
	flow_lwm = 2 * flow_hash_size;
	flow_hwm = 4 * flow_hash_size;

	init_timer(&flow_hash_rnd_timer);
	flow_hash_rnd_timer.function = flow_cache_new_hashrnd;
	flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
	add_timer(&flow_hash_rnd_timer);

	for_each_cpu(i)
		flow_cache_cpu_prepare(i);

	hotcpu_notifier(flow_cache_cpu, 0);
	return 0;
}

module_init(flow_cache_init);

EXPORT_SYMBOL(flow_cache_genid);
EXPORT_SYMBOL(flow_cache_lookup);