/*
 *  linux/kernel/fork.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 *  'fork.c' contains the help-routines for the 'fork' system call
 * (see also entry.S and others).
 * Fork is rather simple, once you get the hang of it, but the memory
 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
 */

#include <linux/slab.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/mnt_namespace.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cgroup.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/memcontrol.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/freezer.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>
#include <linux/random.h>
#include <linux/tty.h>
#include <linux/proc_fs.h>
#include <linux/blkdev.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/*
 * Counters protected by write_lock_irq(&tasklist_lock)
 */
unsigned long total_forks;      /* Handle normal Linux uptimes. */
int nr_threads;                 /* The idle threads do not count. */

int max_threads;                /* tunable limit on nr_threads */

DEFINE_PER_CPU(unsigned long, process_counts) = 0;

__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */

int nr_processes(void)
{
        int cpu;
        int total = 0;

        for_each_online_cpu(cpu)
                total += per_cpu(process_counts, cpu);

        return total;
}

#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
# define alloc_task_struct()    kmem_cache_alloc(task_struct_cachep, GFP_KERNEL)
# define free_task_struct(tsk)  kmem_cache_free(task_struct_cachep, (tsk))
static struct kmem_cache *task_struct_cachep;
#endif

/* SLAB cache for signal_struct structures (tsk->signal) */
static struct kmem_cache *signal_cachep;

/* SLAB cache for sighand_struct structures (tsk->sighand) */
struct kmem_cache *sighand_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
struct kmem_cache *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
struct kmem_cache *fs_cachep;

/* SLAB cache for vm_area_struct structures */
struct kmem_cache *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;

void free_task(struct task_struct *tsk)
{
        prop_local_destroy_single(&tsk->dirties);
        free_thread_info(tsk->stack);
        rt_mutex_debug_task_free(tsk);
        free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);

void __put_task_struct(struct task_struct *tsk)
{
        WARN_ON(!tsk->exit_state);
        WARN_ON(atomic_read(&tsk->usage));
        WARN_ON(tsk == current);

        security_task_free(tsk);
        free_uid(tsk->user);
        put_group_info(tsk->group_info);
        delayacct_tsk_free(tsk);

        if (!profile_handoff_task(tsk))
                free_task(tsk);
}

/*
 * macro override instead of weak attribute alias, to work around
 * gcc 4.1.0 and 4.1.1 bugs with weak attribute and empty functions.
 */
#ifndef arch_task_cache_init
#define arch_task_cache_init()
#endif

void __init fork_init(unsigned long mempages)
{
#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN      L1_CACHE_BYTES
#endif
        /* create a slab on which task_structs can be allocated */
        task_struct_cachep =
                kmem_cache_create("task_struct", sizeof(struct task_struct),
                        ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL);
#endif

        /* do the arch specific task caches init */
        arch_task_cache_init();

        /*
         * The default maximum number of threads is set to a safe
         * value: the thread structures (stacks) can take up at most
         * one eighth of memory.
         */
        max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE);

        /*
         * we need to allow at least 20 threads to boot a system
         */
        if (max_threads < 20)
                max_threads = 20;

        init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
        init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
        init_task.signal->rlim[RLIMIT_SIGPENDING] =
                init_task.signal->rlim[RLIMIT_NPROC];
}
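
/*
 * Worked example (illustrative, with assumed values, not from the
 * original file): on an arch with PAGE_SIZE = 4KiB and THREAD_SIZE =
 * 8KiB, the divisor 8 * THREAD_SIZE / PAGE_SIZE is 16, so a machine
 * with 1GiB of memory (mempages = 262144) gets max_threads =
 * 262144 / 16 = 16384, and RLIMIT_NPROC defaults to 16384 / 2 = 8192
 * processes per user.
 */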

int __attribute__((weak)) arch_dup_task_struct(struct task_struct *dst,
                                               struct task_struct *src)
{
        *dst = *src;
        return 0;
}

static struct task_struct *dup_task_struct(struct task_struct *orig)
{
        struct task_struct *tsk;
        struct thread_info *ti;
        int err;

        prepare_to_copy(orig);

        tsk = alloc_task_struct();
        if (!tsk)
                return NULL;

        ti = alloc_thread_info(tsk);
        if (!ti) {
                free_task_struct(tsk);
                return NULL;
        }

        err = arch_dup_task_struct(tsk, orig);
        if (err)
                goto out;

        tsk->stack = ti;

        err = prop_local_init_single(&tsk->dirties);
        if (err)
                goto out;

        setup_thread_stack(tsk, orig);

#ifdef CONFIG_CC_STACKPROTECTOR
        tsk->stack_canary = get_random_int();
#endif

        /* One for us, one for whoever does the "release_task()" (usually parent) */
        atomic_set(&tsk->usage, 2);
        atomic_set(&tsk->fs_excl, 0);
#ifdef CONFIG_BLK_DEV_IO_TRACE
        tsk->btrace_seq = 0;
#endif
        tsk->splice_pipe = NULL;
        return tsk;

out:
        free_thread_info(ti);
        free_task_struct(tsk);
        return NULL;
}

#ifdef CONFIG_MMU
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
        struct vm_area_struct *mpnt, *tmp, **pprev;
        struct rb_node **rb_link, *rb_parent;
        int retval;
        unsigned long charge;
        struct mempolicy *pol;

        down_write(&oldmm->mmap_sem);
        flush_cache_dup_mm(oldmm);
        /*
         * Not linked in yet - no deadlock potential:
         */
        down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);

        mm->locked_vm = 0;
        mm->mmap = NULL;
        mm->mmap_cache = NULL;
        mm->free_area_cache = oldmm->mmap_base;
        mm->cached_hole_size = ~0UL;
        mm->map_count = 0;
        cpus_clear(mm->cpu_vm_mask);
        mm->mm_rb = RB_ROOT;
        rb_link = &mm->mm_rb.rb_node;
        rb_parent = NULL;
        pprev = &mm->mmap;

        for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
                struct file *file;

                if (mpnt->vm_flags & VM_DONTCOPY) {
                        long pages = vma_pages(mpnt);
                        mm->total_vm -= pages;
                        vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
                                                                -pages);
                        continue;
                }
                charge = 0;
                if (mpnt->vm_flags & VM_ACCOUNT) {
                        unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
                        if (security_vm_enough_memory(len))
                                goto fail_nomem;
                        charge = len;
                }
                tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
                if (!tmp)
                        goto fail_nomem;
                *tmp = *mpnt;
                pol = mpol_copy(vma_policy(mpnt));
                retval = PTR_ERR(pol);
                if (IS_ERR(pol))
                        goto fail_nomem_policy;
                vma_set_policy(tmp, pol);
                tmp->vm_flags &= ~VM_LOCKED;
                tmp->vm_mm = mm;
                tmp->vm_next = NULL;
                anon_vma_link(tmp);
                file = tmp->vm_file;
                if (file) {
                        struct inode *inode = file->f_path.dentry->d_inode;
                        get_file(file);
                        if (tmp->vm_flags & VM_DENYWRITE)
                                atomic_dec(&inode->i_writecount);

                        /* insert tmp into the share list, just after mpnt */
                        spin_lock(&file->f_mapping->i_mmap_lock);
                        tmp->vm_truncate_count = mpnt->vm_truncate_count;
                        flush_dcache_mmap_lock(file->f_mapping);
                        vma_prio_tree_add(tmp, mpnt);
                        flush_dcache_mmap_unlock(file->f_mapping);
                        spin_unlock(&file->f_mapping->i_mmap_lock);
                }

                /*
                 * Link in the new vma and copy the page table entries.
                 */
                *pprev = tmp;
                pprev = &tmp->vm_next;

                __vma_link_rb(mm, tmp, rb_link, rb_parent);
                rb_link = &tmp->vm_rb.rb_right;
                rb_parent = &tmp->vm_rb;

                mm->map_count++;
                retval = copy_page_range(mm, oldmm, mpnt);

                if (tmp->vm_ops && tmp->vm_ops->open)
                        tmp->vm_ops->open(tmp);

                if (retval)
                        goto out;
        }
        /* a new mm has just been created */
        arch_dup_mmap(oldmm, mm);
        retval = 0;
out:
        up_write(&mm->mmap_sem);
        flush_tlb_mm(oldmm);
        up_write(&oldmm->mmap_sem);
        return retval;
fail_nomem_policy:
        kmem_cache_free(vm_area_cachep, tmp);
fail_nomem:
        retval = -ENOMEM;
        vm_unacct_memory(charge);
        goto out;
}
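
/*
 * Note on the VM_ACCOUNT handling in dup_mmap() above (a summary, not
 * new behaviour): security_vm_enough_memory() charges `len' pages up
 * front and `charge' remembers the amount.  If the vma allocation or
 * the mempolicy copy then fails, the fail_nomem path undoes just that
 * most recent charge via vm_unacct_memory(); vmas already linked into
 * the new mm keep their charge, which exit_mmap() releases when the
 * half-built mm is finally put.
 */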

static inline int mm_alloc_pgd(struct mm_struct * mm)
{
        mm->pgd = pgd_alloc(mm);
        if (unlikely(!mm->pgd))
                return -ENOMEM;
        return 0;
}

static inline void mm_free_pgd(struct mm_struct * mm)
{
        pgd_free(mm, mm->pgd);
}
#else
#define dup_mmap(mm, oldmm)     (0)
#define mm_alloc_pgd(mm)        (0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */

__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);

#define allocate_mm()   (kmem_cache_alloc(mm_cachep, GFP_KERNEL))
#define free_mm(mm)     (kmem_cache_free(mm_cachep, (mm)))

#include <linux/init_task.h>

static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
{
        atomic_set(&mm->mm_users, 1);
        atomic_set(&mm->mm_count, 1);
        init_rwsem(&mm->mmap_sem);
        INIT_LIST_HEAD(&mm->mmlist);
        mm->flags = (current->mm) ? current->mm->flags
                                  : MMF_DUMP_FILTER_DEFAULT;
        mm->core_waiters = 0;
        mm->nr_ptes = 0;
        set_mm_counter(mm, file_rss, 0);
        set_mm_counter(mm, anon_rss, 0);
        spin_lock_init(&mm->page_table_lock);
        rwlock_init(&mm->ioctx_list_lock);
        mm->ioctx_list = NULL;
        mm->free_area_cache = TASK_UNMAPPED_BASE;
        mm->cached_hole_size = ~0UL;
        mm_init_cgroup(mm, p);

        if (likely(!mm_alloc_pgd(mm))) {
                mm->def_flags = 0;
                return mm;
        }

        mm_free_cgroup(mm);
        free_mm(mm);
        return NULL;
}

/*
 * Allocate and initialize an mm_struct.
 */
struct mm_struct * mm_alloc(void)
{
        struct mm_struct * mm;

        mm = allocate_mm();
        if (mm) {
                memset(mm, 0, sizeof(*mm));
                mm = mm_init(mm, current);
        }
        return mm;
}

/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
void __mmdrop(struct mm_struct *mm)
{
        BUG_ON(mm == &init_mm);
        mm_free_pgd(mm);
        destroy_context(mm);
        free_mm(mm);
}
EXPORT_SYMBOL_GPL(__mmdrop);

/*
 * Decrement the use count and release all resources for an mm.
 */
void mmput(struct mm_struct *mm)
{
        might_sleep();

        if (atomic_dec_and_test(&mm->mm_users)) {
                exit_aio(mm);
                exit_mmap(mm);
                if (!list_empty(&mm->mmlist)) {
                        spin_lock(&mmlist_lock);
                        list_del(&mm->mmlist);
                        spin_unlock(&mmlist_lock);
                }
                put_swap_token(mm);
                mm_free_cgroup(mm);
                mmdrop(mm);
        }
}
EXPORT_SYMBOL_GPL(mmput);

/**
 * get_task_mm - acquire a reference to the task's mm
 *
 * Returns %NULL if the task has no mm, or if PF_BORROWED_MM is set
 * (meaning this kernel workthread has transiently adopted a user mm
 * with use_mm, to do its AIO).  Otherwise returns the mm after bumping
 * up its use count.  The caller must release the mm via mmput() after
 * use.  Typically used by /proc and ptrace.
 */
struct mm_struct *get_task_mm(struct task_struct *task)
{
        struct mm_struct *mm;

        task_lock(task);
        mm = task->mm;
        if (mm) {
                if (task->flags & PF_BORROWED_MM)
                        mm = NULL;
                else
                        atomic_inc(&mm->mm_users);
        }
        task_unlock(task);
        return mm;
}
EXPORT_SYMBOL_GPL(get_task_mm);
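
/*
 * Usage sketch (illustrative only, not part of the original file):
 * every successful get_task_mm() must be paired with mmput().  The
 * helper below is hypothetical, in the style of a /proc reader.
 */
#if 0   /* example only */
static unsigned long example_total_vm(struct task_struct *task)
{
        struct mm_struct *mm = get_task_mm(task);
        unsigned long pages = 0;

        if (mm) {               /* NULL for kernel threads / PF_BORROWED_MM */
                pages = mm->total_vm;
                mmput(mm);      /* drop the reference taken above */
        }
        return pages;
}
#endif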

/* Please note the differences between mmput and mm_release.
 * mmput is called whenever we stop holding onto a mm_struct,
 * whether on error or success.
 *
 * mm_release is called after a mm_struct has been removed
 * from the current process.
 *
 * This difference is important for error handling, when we
 * only half set up a mm_struct for a new process and need to restore
 * the old one.  Because we mmput the new mm_struct before
 * restoring the old one...
 * Eric Biederman 10 January 1998
 */
void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
        struct completion *vfork_done = tsk->vfork_done;

        /* Get rid of any cached register state */
        deactivate_mm(tsk, mm);

        /* notify parent sleeping on vfork() */
        if (vfork_done) {
                tsk->vfork_done = NULL;
                complete(vfork_done);
        }

        /*
         * If we're exiting normally, clear a user-space tid field if
         * requested.  We leave this alone when dying by signal, to leave
         * the value intact in a core dump, and to save the unnecessary
         * trouble otherwise.  Userland only wants this done for a sys_exit.
         */
        if (tsk->clear_child_tid
            && !(tsk->flags & PF_SIGNALED)
            && atomic_read(&mm->mm_users) > 1) {
                u32 __user * tidptr = tsk->clear_child_tid;
                tsk->clear_child_tid = NULL;

                /*
                 * We don't check the error code - if userspace has
                 * not set up a proper pointer then tough luck.
                 */
                put_user(0, tidptr);
                sys_futex(tidptr, FUTEX_WAKE, 1, NULL, NULL, 0);
        }
}
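
/*
 * Userspace view of the clear_child_tid handling above (illustrative;
 * the exact clone() invocation is up to the thread library): NPTL-style
 * libraries pass CLONE_CHILD_CLEARTID with a pointer to the thread's
 * tid field, roughly
 *
 *      clone(fn, stack, CLONE_VM | CLONE_THREAD | ... |
 *            CLONE_CHILD_CLEARTID, arg, &ptid, tls, &thread->tid);
 *
 * so that on a normal exit the kernel stores 0 to &thread->tid and
 * does a FUTEX_WAKE on it - which is what pthread_join() sleeps on.
 */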

/*
 * Allocate a new mm structure and copy contents from the
 * mm structure of the passed in task structure.
 */
struct mm_struct *dup_mm(struct task_struct *tsk)
{
        struct mm_struct *mm, *oldmm = current->mm;
        int err;

        if (!oldmm)
                return NULL;

        mm = allocate_mm();
        if (!mm)
                goto fail_nomem;

        memcpy(mm, oldmm, sizeof(*mm));

        /* Initializing for Swap token stuff */
        mm->token_priority = 0;
        mm->last_interval = 0;

        if (!mm_init(mm, tsk))
                goto fail_nomem;

        if (init_new_context(tsk, mm))
                goto fail_nocontext;

        err = dup_mmap(mm, oldmm);
        if (err)
                goto free_pt;

        mm->hiwater_rss = get_mm_rss(mm);
        mm->hiwater_vm = mm->total_vm;

        return mm;

free_pt:
        mmput(mm);

fail_nomem:
        return NULL;

fail_nocontext:
        /*
         * If init_new_context() failed, we cannot use mmput() to free the mm
         * because it calls destroy_context()
         */
        mm_free_pgd(mm);
        free_mm(mm);
        return NULL;
}

static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
{
        struct mm_struct * mm, *oldmm;
        int retval;

        tsk->min_flt = tsk->maj_flt = 0;
        tsk->nvcsw = tsk->nivcsw = 0;

        tsk->mm = NULL;
        tsk->active_mm = NULL;

        /*
         * Are we cloning a kernel thread?
         *
         * We need to steal an active VM for that.
         */
        oldmm = current->mm;
        if (!oldmm)
                return 0;

        if (clone_flags & CLONE_VM) {
                atomic_inc(&oldmm->mm_users);
                mm = oldmm;
                goto good_mm;
        }

        retval = -ENOMEM;
        mm = dup_mm(tsk);
        if (!mm)
                goto fail_nomem;

good_mm:
        /* Initializing for Swap token stuff */
        mm->token_priority = 0;
        mm->last_interval = 0;

        tsk->mm = mm;
        tsk->active_mm = mm;
        return 0;

fail_nomem:
        return retval;
}

static struct fs_struct *__copy_fs_struct(struct fs_struct *old)
{
        struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
        /* We don't need to lock fs - think why ;-) */
        if (fs) {
                atomic_set(&fs->count, 1);
                rwlock_init(&fs->lock);
                fs->umask = old->umask;
                read_lock(&old->lock);
                fs->root = old->root;
                path_get(&old->root);
                fs->pwd = old->pwd;
                path_get(&old->pwd);
                if (old->altroot.dentry) {
                        fs->altroot = old->altroot;
                        path_get(&old->altroot);
                } else {
                        fs->altroot.mnt = NULL;
                        fs->altroot.dentry = NULL;
                }
                read_unlock(&old->lock);
        }
        return fs;
}

struct fs_struct *copy_fs_struct(struct fs_struct *old)
{
        return __copy_fs_struct(old);
}

EXPORT_SYMBOL_GPL(copy_fs_struct);

static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
{
        if (clone_flags & CLONE_FS) {
                atomic_inc(&current->fs->count);
                return 0;
        }
        tsk->fs = __copy_fs_struct(current->fs);
        if (!tsk->fs)
                return -ENOMEM;
        return 0;
}

static int count_open_files(struct fdtable *fdt)
{
        int size = fdt->max_fds;
        int i;

        /* Find the last open fd */
        for (i = size/(8*sizeof(long)); i > 0; ) {
                if (fdt->open_fds->fds_bits[--i])
                        break;
        }
        i = (i+1) * 8 * sizeof(long);
        return i;
}
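
/*
 * Worked example for count_open_files() (illustrative, assuming 64-bit
 * longs): with max_fds = 256 the loop starts at word 256/64 = 4 and
 * scans downward; if word 1 is the last word with any bit set, the
 * function returns (1 + 1) * 64 = 128, i.e. the open-fd count rounded
 * up to a whole bitmap word.
 */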

static struct files_struct *alloc_files(void)
{
        struct files_struct *newf;
        struct fdtable *fdt;

        newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
        if (!newf)
                goto out;

        atomic_set(&newf->count, 1);

        spin_lock_init(&newf->file_lock);
        newf->next_fd = 0;
        fdt = &newf->fdtab;
        fdt->max_fds = NR_OPEN_DEFAULT;
        fdt->close_on_exec = (fd_set *)&newf->close_on_exec_init;
        fdt->open_fds = (fd_set *)&newf->open_fds_init;
        fdt->fd = &newf->fd_array[0];
        INIT_RCU_HEAD(&fdt->rcu);
        fdt->next = NULL;
        rcu_assign_pointer(newf->fdt, fdt);
out:
        return newf;
}

/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
static struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
{
        struct files_struct *newf;
        struct file **old_fds, **new_fds;
        int open_files, size, i;
        struct fdtable *old_fdt, *new_fdt;

        *errorp = -ENOMEM;
        newf = alloc_files();
        if (!newf)
                goto out;

        spin_lock(&oldf->file_lock);
        old_fdt = files_fdtable(oldf);
        new_fdt = files_fdtable(newf);
        open_files = count_open_files(old_fdt);

        /*
         * Check whether we need to allocate a larger fd array and fd set.
         * Note: we're not a clone task, so the open count won't change.
         */
        if (open_files > new_fdt->max_fds) {
                new_fdt->max_fds = 0;
                spin_unlock(&oldf->file_lock);
                spin_lock(&newf->file_lock);
                *errorp = expand_files(newf, open_files-1);
                spin_unlock(&newf->file_lock);
                if (*errorp < 0)
                        goto out_release;
                new_fdt = files_fdtable(newf);
                /*
                 * Reacquire the oldf lock and a pointer to its fd table;
                 * it may have grown a new, bigger fd table in the meantime,
                 * and we need the latest pointer.
                 */
                spin_lock(&oldf->file_lock);
                old_fdt = files_fdtable(oldf);
        }

        old_fds = old_fdt->fd;
        new_fds = new_fdt->fd;

        memcpy(new_fdt->open_fds->fds_bits,
                old_fdt->open_fds->fds_bits, open_files/8);
        memcpy(new_fdt->close_on_exec->fds_bits,
                old_fdt->close_on_exec->fds_bits, open_files/8);

        for (i = open_files; i != 0; i--) {
                struct file *f = *old_fds++;
                if (f) {
                        get_file(f);
                } else {
                        /*
                         * The fd may be claimed in the fd bitmap but not yet
                         * instantiated in the files array if a sibling thread
                         * is partway through open().  So make sure that this
                         * fd is available to the new process.
                         */
                        FD_CLR(open_files - i, new_fdt->open_fds);
                }
                rcu_assign_pointer(*new_fds++, f);
        }
        spin_unlock(&oldf->file_lock);

        /* compute the remainder to be cleared */
        size = (new_fdt->max_fds - open_files) * sizeof(struct file *);

        /* This is long word aligned thus could use an optimized version */
        memset(new_fds, 0, size);

        if (new_fdt->max_fds > open_files) {
                int left = (new_fdt->max_fds-open_files)/8;
                int start = open_files / (8 * sizeof(unsigned long));

                memset(&new_fdt->open_fds->fds_bits[start], 0, left);
                memset(&new_fdt->close_on_exec->fds_bits[start], 0, left);
        }

        return newf;

out_release:
        kmem_cache_free(files_cachep, newf);
out:
        return NULL;
}

static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
{
        struct files_struct *oldf, *newf;
        int error = 0;

        /*
         * A background process may not have any files ...
         */
        oldf = current->files;
        if (!oldf)
                goto out;

        if (clone_flags & CLONE_FILES) {
                atomic_inc(&oldf->count);
                goto out;
        }

        newf = dup_fd(oldf, &error);
        if (!newf)
                goto out;

        tsk->files = newf;
        error = 0;
out:
        return error;
}

static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
{
#ifdef CONFIG_BLOCK
        struct io_context *ioc = current->io_context;

        if (!ioc)
                return 0;
        /*
         * Share io context with parent, if CLONE_IO is set
         */
        if (clone_flags & CLONE_IO) {
                tsk->io_context = ioc_task_link(ioc);
                if (unlikely(!tsk->io_context))
                        return -ENOMEM;
        } else if (ioprio_valid(ioc->ioprio)) {
                tsk->io_context = alloc_io_context(GFP_KERNEL, -1);
                if (unlikely(!tsk->io_context))
                        return -ENOMEM;

                tsk->io_context->ioprio = ioc->ioprio;
        }
#endif
        return 0;
}

static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
{
        struct sighand_struct *sig;

        if (clone_flags & (CLONE_SIGHAND | CLONE_THREAD)) {
                atomic_inc(&current->sighand->count);
                return 0;
        }
        sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
        rcu_assign_pointer(tsk->sighand, sig);
        if (!sig)
                return -ENOMEM;
        atomic_set(&sig->count, 1);
        memcpy(sig->action, current->sighand->action, sizeof(sig->action));
        return 0;
}

void __cleanup_sighand(struct sighand_struct *sighand)
{
        if (atomic_dec_and_test(&sighand->count))
                kmem_cache_free(sighand_cachep, sighand);
}

static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
{
        struct signal_struct *sig;
        int ret;

        if (clone_flags & CLONE_THREAD) {
                atomic_inc(&current->signal->count);
                atomic_inc(&current->signal->live);
                return 0;
        }
        sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
        tsk->signal = sig;
        if (!sig)
                return -ENOMEM;

        ret = copy_thread_group_keys(tsk);
        if (ret < 0) {
                kmem_cache_free(signal_cachep, sig);
                return ret;
        }

        atomic_set(&sig->count, 1);
        atomic_set(&sig->live, 1);
        init_waitqueue_head(&sig->wait_chldexit);
        sig->flags = 0;
        sig->group_exit_code = 0;
        sig->group_exit_task = NULL;
        sig->group_stop_count = 0;
        sig->curr_target = NULL;
        init_sigpending(&sig->shared_pending);
        INIT_LIST_HEAD(&sig->posix_timers);

        hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        sig->it_real_incr.tv64 = 0;
        sig->real_timer.function = it_real_fn;

        sig->it_virt_expires = cputime_zero;
        sig->it_virt_incr = cputime_zero;
        sig->it_prof_expires = cputime_zero;
        sig->it_prof_incr = cputime_zero;

        sig->leader = 0;        /* session leadership doesn't inherit */
        sig->tty_old_pgrp = NULL;

        sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
        sig->gtime = cputime_zero;
        sig->cgtime = cputime_zero;
        sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
        sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
        sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
        sig->sum_sched_runtime = 0;
        INIT_LIST_HEAD(&sig->cpu_timers[0]);
        INIT_LIST_HEAD(&sig->cpu_timers[1]);
        INIT_LIST_HEAD(&sig->cpu_timers[2]);
        taskstats_tgid_init(sig);

        task_lock(current->group_leader);
        memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
        task_unlock(current->group_leader);

        if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
                /*
                 * New sole thread in the process gets an expiry time
                 * of the whole CPU time limit.
                 */
                tsk->it_prof_expires =
                        secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
        }
        acct_init_pacct(&sig->pacct);

        tty_audit_fork(sig);

        return 0;
}

void __cleanup_signal(struct signal_struct *sig)
{
        exit_thread_group_keys(sig);
        kmem_cache_free(signal_cachep, sig);
}

static void cleanup_signal(struct task_struct *tsk)
{
        struct signal_struct *sig = tsk->signal;

        atomic_dec(&sig->live);

        if (atomic_dec_and_test(&sig->count))
                __cleanup_signal(sig);
}

static void copy_flags(unsigned long clone_flags, struct task_struct *p)
{
        unsigned long new_flags = p->flags;

        new_flags &= ~PF_SUPERPRIV;
        new_flags |= PF_FORKNOEXEC;
        if (!(clone_flags & CLONE_PTRACE))
                p->ptrace = 0;
        p->flags = new_flags;
        clear_freeze_flag(p);
}

asmlinkage long sys_set_tid_address(int __user *tidptr)
{
        current->clear_child_tid = tidptr;

        return task_pid_vnr(current);
}

static void rt_mutex_init_task(struct task_struct *p)
{
        spin_lock_init(&p->pi_lock);
#ifdef CONFIG_RT_MUTEXES
        plist_head_init(&p->pi_waiters, &p->pi_lock);
        p->pi_blocked_on = NULL;
#endif
}

/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static struct task_struct *copy_process(unsigned long clone_flags,
                                        unsigned long stack_start,
                                        struct pt_regs *regs,
                                        unsigned long stack_size,
                                        int __user *child_tidptr,
                                        struct pid *pid)
{
        int retval;
        struct task_struct *p;
        int cgroup_callbacks_done = 0;

        if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
                return ERR_PTR(-EINVAL);

        /*
         * Thread groups must share signals as well, and detached threads
         * can only be started up within the thread group.
         */
        if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
                return ERR_PTR(-EINVAL);

        /*
         * Shared signal handlers imply shared VM. By way of the above,
         * thread groups also imply shared VM. Blocking this case allows
         * for various simplifications in other code.
         */
        if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
                return ERR_PTR(-EINVAL);

        retval = security_task_create(clone_flags);
        if (retval)
                goto fork_out;

        retval = -ENOMEM;
        p = dup_task_struct(current);
        if (!p)
                goto fork_out;

        rt_mutex_init_task(p);

#ifdef CONFIG_TRACE_IRQFLAGS
        DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
        DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
        retval = -EAGAIN;
        if (atomic_read(&p->user->processes) >=
                        p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
                if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
                    p->user != current->nsproxy->user_ns->root_user)
                        goto bad_fork_free;
        }

        atomic_inc(&p->user->__count);
        atomic_inc(&p->user->processes);
        get_group_info(p->group_info);

        /*
         * If multiple threads are within copy_process(), then this check
         * triggers too late. This doesn't hurt, the check is only there
         * to stop root fork bombs.
         */
        if (nr_threads >= max_threads)
                goto bad_fork_cleanup_count;

        if (!try_module_get(task_thread_info(p)->exec_domain->module))
                goto bad_fork_cleanup_count;

        if (p->binfmt && !try_module_get(p->binfmt->module))
                goto bad_fork_cleanup_put_domain;

        p->did_exec = 0;
        delayacct_tsk_init(p);  /* Must remain after dup_task_struct() */
        copy_flags(clone_flags, p);
        INIT_LIST_HEAD(&p->children);
        INIT_LIST_HEAD(&p->sibling);
#ifdef CONFIG_PREEMPT_RCU
        p->rcu_read_lock_nesting = 0;
        p->rcu_flipctr_idx = 0;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
        p->vfork_done = NULL;
        spin_lock_init(&p->alloc_lock);

        clear_tsk_thread_flag(p, TIF_SIGPENDING);
        init_sigpending(&p->pending);

        p->utime = cputime_zero;
        p->stime = cputime_zero;
        p->gtime = cputime_zero;
        p->utimescaled = cputime_zero;
        p->stimescaled = cputime_zero;
        p->prev_utime = cputime_zero;
        p->prev_stime = cputime_zero;

#ifdef CONFIG_DETECT_SOFTLOCKUP
        p->last_switch_count = 0;
        p->last_switch_timestamp = 0;
#endif

#ifdef CONFIG_TASK_XACCT
        p->rchar = 0;           /* I/O counter: bytes read */
        p->wchar = 0;           /* I/O counter: bytes written */
        p->syscr = 0;           /* I/O counter: read syscalls */
        p->syscw = 0;           /* I/O counter: write syscalls */
#endif
        task_io_accounting_init(p);
        acct_clear_integrals(p);

        p->it_virt_expires = cputime_zero;
        p->it_prof_expires = cputime_zero;
        p->it_sched_expires = 0;
        INIT_LIST_HEAD(&p->cpu_timers[0]);
        INIT_LIST_HEAD(&p->cpu_timers[1]);
        INIT_LIST_HEAD(&p->cpu_timers[2]);

        p->lock_depth = -1;             /* -1 = no lock */
        do_posix_clock_monotonic_gettime(&p->start_time);
        p->real_start_time = p->start_time;
        monotonic_to_bootbased(&p->real_start_time);
#ifdef CONFIG_SECURITY
        p->security = NULL;
#endif
        p->cap_bset = current->cap_bset;
        p->io_context = NULL;
        p->audit_context = NULL;
        cgroup_fork(p);
#ifdef CONFIG_NUMA
        p->mempolicy = mpol_copy(p->mempolicy);
        if (IS_ERR(p->mempolicy)) {
                retval = PTR_ERR(p->mempolicy);
                p->mempolicy = NULL;
                goto bad_fork_cleanup_cgroup;
        }
        mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
        p->irq_events = 0;
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
        p->hardirqs_enabled = 1;
#else
        p->hardirqs_enabled = 0;
#endif
        p->hardirq_enable_ip = 0;
        p->hardirq_enable_event = 0;
        p->hardirq_disable_ip = _THIS_IP_;
        p->hardirq_disable_event = 0;
        p->softirqs_enabled = 1;
        p->softirq_enable_ip = _THIS_IP_;
        p->softirq_enable_event = 0;
        p->softirq_disable_ip = 0;
        p->softirq_disable_event = 0;
        p->hardirq_context = 0;
        p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
        p->lockdep_depth = 0; /* no locks held yet */
        p->curr_chain_key = 0;
        p->lockdep_recursion = 0;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
        p->blocked_on = NULL; /* not blocked yet */
#endif

        /* Perform scheduler related setup. Assign this task to a CPU. */
        sched_fork(p, clone_flags);

        if ((retval = security_task_alloc(p)))
                goto bad_fork_cleanup_policy;
        if ((retval = audit_alloc(p)))
                goto bad_fork_cleanup_security;
        /* copy all the process information */
        if ((retval = copy_semundo(clone_flags, p)))
                goto bad_fork_cleanup_audit;
        if ((retval = copy_files(clone_flags, p)))
                goto bad_fork_cleanup_semundo;
        if ((retval = copy_fs(clone_flags, p)))
                goto bad_fork_cleanup_files;
        if ((retval = copy_sighand(clone_flags, p)))
                goto bad_fork_cleanup_fs;
        if ((retval = copy_signal(clone_flags, p)))
                goto bad_fork_cleanup_sighand;
        if ((retval = copy_mm(clone_flags, p)))
                goto bad_fork_cleanup_signal;
        if ((retval = copy_keys(clone_flags, p)))
                goto bad_fork_cleanup_mm;
        if ((retval = copy_namespaces(clone_flags, p)))
                goto bad_fork_cleanup_keys;
        if ((retval = copy_io(clone_flags, p)))
                goto bad_fork_cleanup_namespaces;
        retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
        if (retval)
                goto bad_fork_cleanup_io;

        if (pid != &init_struct_pid) {
                retval = -ENOMEM;
                pid = alloc_pid(task_active_pid_ns(p));
                if (!pid)
                        goto bad_fork_cleanup_io;

                if (clone_flags & CLONE_NEWPID) {
                        retval = pid_ns_prepare_proc(task_active_pid_ns(p));
                        if (retval < 0)
                                goto bad_fork_free_pid;
                }
        }

        p->pid = pid_nr(pid);
        p->tgid = p->pid;
        if (clone_flags & CLONE_THREAD)
                p->tgid = current->tgid;

        p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
        /*
         * Clear TID on mm_release()?
         */
        p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;
#ifdef CONFIG_FUTEX
        p->robust_list = NULL;
#ifdef CONFIG_COMPAT
        p->compat_robust_list = NULL;
#endif
        INIT_LIST_HEAD(&p->pi_state_list);
        p->pi_state_cache = NULL;
#endif
        /*
         * sigaltstack should be cleared when sharing the same VM
         */
        if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
                p->sas_ss_sp = p->sas_ss_size = 0;

        /*
         * Syscall tracing should be turned off in the child regardless
         * of CLONE_PTRACE.
         */
        clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
        clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
        clear_all_latency_tracing(p);

        /* Our parent execution domain becomes current domain.
           These must match for thread signalling to apply */
        p->parent_exec_id = p->self_exec_id;

        /* ok, now we should be set up.. */
        p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
        p->pdeath_signal = 0;
        p->exit_state = 0;

        /*
         * Ok, make it visible to the rest of the system.
         * We don't wake it up yet.
         */
        p->group_leader = p;
        INIT_LIST_HEAD(&p->thread_group);
        INIT_LIST_HEAD(&p->ptrace_children);
        INIT_LIST_HEAD(&p->ptrace_list);

        /* Now that the task is set up, run cgroup callbacks if
         * necessary. We need to run them before the task is visible
         * on the tasklist. */
        cgroup_fork_callbacks(p);
        cgroup_callbacks_done = 1;

        /* Need tasklist lock for parent etc handling! */
        write_lock_irq(&tasklist_lock);

        /*
         * The task hasn't been attached yet, so its cpus_allowed mask will
         * not be changed, nor will its assigned CPU.
         *
         * The cpus_allowed mask of the parent may have changed after it was
         * copied first time - so re-copy it here, then check the child's CPU
         * to ensure it is on a valid CPU (and if not, just force it back to
         * parent's CPU). This avoids a lot of nasty races.
         */
        p->cpus_allowed = current->cpus_allowed;
        p->rt.nr_cpus_allowed = current->rt.nr_cpus_allowed;
        if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
                        !cpu_online(task_cpu(p))))
                set_task_cpu(p, smp_processor_id());

        /* CLONE_PARENT re-uses the old parent */
        if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
                p->real_parent = current->real_parent;
        else
                p->real_parent = current;
        p->parent = p->real_parent;

        spin_lock(&current->sighand->siglock);

        /*
         * Process group and session signals need to be delivered to just the
         * parent before the fork or both the parent and the child after the
         * fork. Restart if a signal comes in before we add the new process to
         * its process group.
         * A fatal signal pending means that current will exit, so the new
         * thread can't slip out of an OOM kill (or normal SIGKILL).
         */
        recalc_sigpending();
        if (signal_pending(current)) {
                spin_unlock(&current->sighand->siglock);
                write_unlock_irq(&tasklist_lock);
                retval = -ERESTARTNOINTR;
                goto bad_fork_free_pid;
        }

        if (clone_flags & CLONE_THREAD) {
                p->group_leader = current->group_leader;
                list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);

                if (!cputime_eq(current->signal->it_virt_expires,
                                cputime_zero) ||
                    !cputime_eq(current->signal->it_prof_expires,
                                cputime_zero) ||
                    current->signal->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY ||
                    !list_empty(&current->signal->cpu_timers[0]) ||
                    !list_empty(&current->signal->cpu_timers[1]) ||
                    !list_empty(&current->signal->cpu_timers[2])) {
                        /*
                         * Have child wake up on its first tick to check
                         * for process CPU timers.
                         */
                        p->it_prof_expires = jiffies_to_cputime(1);
                }
        }

        if (likely(p->pid)) {
                add_parent(p);
                if (unlikely(p->ptrace & PT_PTRACED))
                        __ptrace_link(p, current->parent);

                if (thread_group_leader(p)) {
                        if (clone_flags & CLONE_NEWPID)
                                p->nsproxy->pid_ns->child_reaper = p;

                        p->signal->leader_pid = pid;
                        p->signal->tty = current->signal->tty;
                        set_task_pgrp(p, task_pgrp_nr(current));
                        set_task_session(p, task_session_nr(current));
                        attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
                        attach_pid(p, PIDTYPE_SID, task_session(current));
                        list_add_tail_rcu(&p->tasks, &init_task.tasks);
                        __get_cpu_var(process_counts)++;
                }
                attach_pid(p, PIDTYPE_PID, pid);
                nr_threads++;
        }

        total_forks++;
        spin_unlock(&current->sighand->siglock);
        write_unlock_irq(&tasklist_lock);
        proc_fork_connector(p);
        cgroup_post_fork(p);
        return p;

bad_fork_free_pid:
        if (pid != &init_struct_pid)
                free_pid(pid);
bad_fork_cleanup_io:
        put_io_context(p->io_context);
bad_fork_cleanup_namespaces:
        exit_task_namespaces(p);
bad_fork_cleanup_keys:
        exit_keys(p);
bad_fork_cleanup_mm:
        if (p->mm)
                mmput(p->mm);
bad_fork_cleanup_signal:
        cleanup_signal(p);
bad_fork_cleanup_sighand:
        __cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
        exit_fs(p); /* blocking */
bad_fork_cleanup_files:
        exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
        exit_sem(p);
bad_fork_cleanup_audit:
        audit_free(p);
bad_fork_cleanup_security:
        security_task_free(p);
bad_fork_cleanup_policy:
#ifdef CONFIG_NUMA
        mpol_put(p->mempolicy);
bad_fork_cleanup_cgroup:
#endif
        cgroup_exit(p, cgroup_callbacks_done);
        delayacct_tsk_free(p);
        if (p->binfmt)
                module_put(p->binfmt->module);
bad_fork_cleanup_put_domain:
        module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
        put_group_info(p->group_info);
        atomic_dec(&p->user->processes);
        free_uid(p->user);
bad_fork_free:
        free_task(p);
fork_out:
        return ERR_PTR(retval);
}
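
/*
 * Note on the error unwinding above (a summary, not new behaviour):
 * the bad_fork_* labels are laid out in the reverse order of the
 * copy_* calls and fall through into one another, so a failure at any
 * step jumps to the label that releases exactly the state set up
 * before that step and nothing more.
 */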

noinline struct pt_regs * __cpuinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
{
        memset(regs, 0, sizeof(struct pt_regs));
        return regs;
}

struct task_struct * __cpuinit fork_idle(int cpu)
{
        struct task_struct *task;
        struct pt_regs regs;

        task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL,
                                &init_struct_pid);
        if (!IS_ERR(task))
                init_idle(task, cpu);

        return task;
}

static int fork_traceflag(unsigned clone_flags)
{
        if (clone_flags & CLONE_UNTRACED)
                return 0;
        else if (clone_flags & CLONE_VFORK) {
                if (current->ptrace & PT_TRACE_VFORK)
                        return PTRACE_EVENT_VFORK;
        } else if ((clone_flags & CSIGNAL) != SIGCHLD) {
                if (current->ptrace & PT_TRACE_CLONE)
                        return PTRACE_EVENT_CLONE;
        } else if (current->ptrace & PT_TRACE_FORK)
                return PTRACE_EVENT_FORK;

        return 0;
}

/*
 *  Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
long do_fork(unsigned long clone_flags,
              unsigned long stack_start,
              struct pt_regs *regs,
              unsigned long stack_size,
              int __user *parent_tidptr,
              int __user *child_tidptr)
{
        struct task_struct *p;
        int trace = 0;
        long nr;

        /*
         * We hope to recycle these flags after 2.6.26
         */
        if (unlikely(clone_flags & CLONE_STOPPED)) {
                static int __read_mostly count = 100;

                if (count > 0 && printk_ratelimit()) {
                        char comm[TASK_COMM_LEN];

                        count--;
                        printk(KERN_INFO "fork(): process `%s' used deprecated "
                                        "clone flags 0x%lx\n",
                                get_task_comm(comm, current),
                                clone_flags & CLONE_STOPPED);
                }
        }

        if (unlikely(current->ptrace)) {
                trace = fork_traceflag(clone_flags);
                if (trace)
                        clone_flags |= CLONE_PTRACE;
        }

        p = copy_process(clone_flags, stack_start, regs, stack_size,
                        child_tidptr, NULL);
        /*
         * Do this prior to waking up the new thread - the thread pointer
         * might get invalid after that point, if the thread exits quickly.
         */
        if (!IS_ERR(p)) {
                struct completion vfork;

                nr = task_pid_vnr(p);

                if (clone_flags & CLONE_PARENT_SETTID)
                        put_user(nr, parent_tidptr);

                if (clone_flags & CLONE_VFORK) {
                        p->vfork_done = &vfork;
                        init_completion(&vfork);
                }

                if ((p->ptrace & PT_PTRACED) || (clone_flags & CLONE_STOPPED)) {
                        /*
                         * We'll start up with an immediate SIGSTOP.
                         */
                        sigaddset(&p->pending.signal, SIGSTOP);
                        set_tsk_thread_flag(p, TIF_SIGPENDING);
                }

                if (!(clone_flags & CLONE_STOPPED))
                        wake_up_new_task(p, clone_flags);
                else
                        __set_task_state(p, TASK_STOPPED);

                if (unlikely(trace)) {
                        current->ptrace_message = nr;
                        ptrace_notify((trace << 8) | SIGTRAP);
                }

                if (clone_flags & CLONE_VFORK) {
                        freezer_do_not_count();
                        wait_for_completion(&vfork);
                        freezer_count();
                        if (unlikely(current->ptrace & PT_TRACE_VFORK_DONE)) {
                                current->ptrace_message = nr;
                                ptrace_notify((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP);
                        }
                }
        } else {
                nr = PTR_ERR(p);
        }
        return nr;
}
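
/*
 * How the fork-family syscalls reach do_fork() (illustrative; the real
 * wrappers live in per-architecture code and differ in detail):
 *
 *      sys_fork:       do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
 *      sys_vfork:      do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD,
 *                              regs->sp, regs, 0, NULL, NULL);
 *      sys_clone:      do_fork(clone_flags, newsp, regs, 0,
 *                              parent_tidptr, child_tidptr);
 */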
1522
1523 #ifndef ARCH_MIN_MMSTRUCT_ALIGN
1524 #define ARCH_MIN_MMSTRUCT_ALIGN 0
1525 #endif
1526
1527 static void sighand_ctor(struct kmem_cache *cachep, void *data)
1528 {
1529         struct sighand_struct *sighand = data;
1530
1531         spin_lock_init(&sighand->siglock);
1532         init_waitqueue_head(&sighand->signalfd_wqh);
1533 }
1534
1535 void __init proc_caches_init(void)
1536 {
1537         sighand_cachep = kmem_cache_create("sighand_cache",
1538                         sizeof(struct sighand_struct), 0,
1539                         SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU,
1540                         sighand_ctor);
1541         signal_cachep = kmem_cache_create("signal_cache",
1542                         sizeof(struct signal_struct), 0,
1543                         SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
1544         files_cachep = kmem_cache_create("files_cache",
1545                         sizeof(struct files_struct), 0,
1546                         SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
1547         fs_cachep = kmem_cache_create("fs_cache",
1548                         sizeof(struct fs_struct), 0,
1549                         SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
1550         vm_area_cachep = kmem_cache_create("vm_area_struct",
1551                         sizeof(struct vm_area_struct), 0,
1552                         SLAB_PANIC, NULL);
1553         mm_cachep = kmem_cache_create("mm_struct",
1554                         sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
1555                         SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
1556 }
1557
1558 /*
1559  * Check constraints on flags passed to the unshare system call and
1560  * force unsharing of additional process context as appropriate.
1561  */
1562 static void check_unshare_flags(unsigned long *flags_ptr)
1563 {
1564         /*
1565          * If unsharing a thread from a thread group, must also
1566          * unshare vm.
1567          */
1568         if (*flags_ptr & CLONE_THREAD)
1569                 *flags_ptr |= CLONE_VM;
1570
1571         /*
1572          * If unsharing vm, must also unshare signal handlers.
1573          */
1574         if (*flags_ptr & CLONE_VM)
1575                 *flags_ptr |= CLONE_SIGHAND;
1576
1577         /*
1578          * If unsharing signal handlers and the task was created
1579          * using CLONE_THREAD, then must unshare the thread
1580          */
1581         if ((*flags_ptr & CLONE_SIGHAND) &&
1582             (atomic_read(&current->signal->count) > 1))
1583                 *flags_ptr |= CLONE_THREAD;
1584
1585         /*
1586          * If unsharing namespace, must also unshare filesystem information.
1587          */
1588         if (*flags_ptr & CLONE_NEWNS)
1589                 *flags_ptr |= CLONE_FS;
1590 }
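
/*
 * For example, unshare(CLONE_THREAD) on a multithreaded task is
 * progressively widened by the rules above:
 *
 *	CLONE_THREAD	-> CLONE_THREAD|CLONE_VM
 *			-> CLONE_THREAD|CLONE_VM|CLONE_SIGHAND
 *
 * after which unshare_thread() below rejects the request with -EINVAL,
 * since unsharing a thread group is not supported yet.
 */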
1591
1592 /*
1593  * Unsharing of tasks created with CLONE_THREAD is not supported yet
1594  */
1595 static int unshare_thread(unsigned long unshare_flags)
1596 {
1597         if (unshare_flags & CLONE_THREAD)
1598                 return -EINVAL;
1599
1600         return 0;
1601 }
1602
1603 /*
1604  * Unshare the filesystem structure if it is being shared
1605  */
1606 static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
1607 {
1608         struct fs_struct *fs = current->fs;
1609
1610         if ((unshare_flags & CLONE_FS) &&
1611             (fs && atomic_read(&fs->count) > 1)) {
1612                 *new_fsp = __copy_fs_struct(current->fs);
1613                 if (!*new_fsp)
1614                         return -ENOMEM;
1615         }
1616
1617         return 0;
1618 }
1619
1620 /*
1621  * Unsharing of sighand is not supported yet
1622  */
1623 static int unshare_sighand(unsigned long unshare_flags, struct sighand_struct **new_sighp)
1624 {
1625         struct sighand_struct *sigh = current->sighand;
1626
1627         if ((unshare_flags & CLONE_SIGHAND) && atomic_read(&sigh->count) > 1)
1628                 return -EINVAL;
1629
1630         return 0;
1631 }
1632
1633 /*
1634  * Unshare vm if it is being shared
1635  */
1636 static int unshare_vm(unsigned long unshare_flags, struct mm_struct **new_mmp)
1637 {
1638         struct mm_struct *mm = current->mm;
1639
1640         if ((unshare_flags & CLONE_VM) &&
1641             (mm && atomic_read(&mm->mm_users) > 1)) {
1642                 return -EINVAL;
1643         }
1644
1645         return 0;
1646 }
1647
1648 /*
1649  * Unshare file descriptor table if it is being shared
1650  */
1651 static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
1652 {
1653         struct files_struct *fd = current->files;
1654         int error = 0;
1655
1656         if ((unshare_flags & CLONE_FILES) &&
1657             (fd && atomic_read(&fd->count) > 1)) {
1658                 *new_fdp = dup_fd(fd, &error);
1659                 if (!*new_fdp)
1660                         return error;
1661         }
1662
1663         return 0;
1664 }
1665
1666 /*
1667  * Unsharing of semundo for tasks created with CLONE_SYSVSEM is not
1668  * supported yet
1669  */
1670 static int unshare_semundo(unsigned long unshare_flags, struct sem_undo_list **new_ulistp)
1671 {
1672         if (unshare_flags & CLONE_SYSVSEM)
1673                 return -EINVAL;
1674
1675         return 0;
1676 }
1677
1678 /*
1679  * unshare allows a process to 'unshare' part of the process
1680  * context which was originally shared using clone.  copy_*
1681  * functions used by do_fork() cannot be used here directly
1682  * because they modify an inactive task_struct that is being
1683  * constructed. Here we are modifying the current, active
1684  * task_struct.
1685  */
1686 asmlinkage long sys_unshare(unsigned long unshare_flags)
1687 {
1688         int err = 0;
1689         struct fs_struct *fs, *new_fs = NULL;
1690         struct sighand_struct *new_sigh = NULL;
1691         struct mm_struct *mm, *new_mm = NULL, *active_mm = NULL;
1692         struct files_struct *fd, *new_fd = NULL;
1693         struct sem_undo_list *new_ulist = NULL;
1694         struct nsproxy *new_nsproxy = NULL;
1695
1696         check_unshare_flags(&unshare_flags);
1697
1698         /* Return -EINVAL for all unsupported flags */
1699         err = -EINVAL;
1700         if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
1701                                 CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
1702                                 CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWUSER|
1703                                 CLONE_NEWNET))
1704                 goto bad_unshare_out;
1705
1706         if ((err = unshare_thread(unshare_flags)))
1707                 goto bad_unshare_out;
1708         if ((err = unshare_fs(unshare_flags, &new_fs)))
1709                 goto bad_unshare_cleanup_thread;
1710         if ((err = unshare_sighand(unshare_flags, &new_sigh)))
1711                 goto bad_unshare_cleanup_fs;
1712         if ((err = unshare_vm(unshare_flags, &new_mm)))
1713                 goto bad_unshare_cleanup_sigh;
1714         if ((err = unshare_fd(unshare_flags, &new_fd)))
1715                 goto bad_unshare_cleanup_vm;
1716         if ((err = unshare_semundo(unshare_flags, &new_ulist)))
1717                 goto bad_unshare_cleanup_fd;
1718         if ((err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
1719                         new_fs)))
1720                 goto bad_unshare_cleanup_semundo;
1721
1722         if (new_fs || new_mm || new_fd || new_ulist || new_nsproxy) {
1723
1724                 if (new_nsproxy) {
1725                         switch_task_namespaces(current, new_nsproxy);
1726                         new_nsproxy = NULL;
1727                 }
1728
1729                 task_lock(current);
1730
1731                 if (new_fs) {
1732                         fs = current->fs;
1733                         current->fs = new_fs;
1734                         new_fs = fs;
1735                 }
1736
1737                 if (new_mm) {
1738                         mm = current->mm;
1739                         active_mm = current->active_mm;
1740                         current->mm = new_mm;
1741                         current->active_mm = new_mm;
1742                         activate_mm(active_mm, new_mm);
1743                         new_mm = mm;
1744                 }
1745
1746                 if (new_fd) {
1747                         fd = current->files;
1748                         current->files = new_fd;
1749                         new_fd = fd;
1750                 }
1751
1752                 task_unlock(current);
1753         }
1754
1755         if (new_nsproxy)
1756                 put_nsproxy(new_nsproxy);
1757
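        /*
         * Note that success falls through here as well: after the swaps
         * above, the new_* pointers hold the *displaced* old structures
         * (or NULL), so the cleanup labels below release exactly what
         * this task no longer references.
         */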
1758 bad_unshare_cleanup_semundo:
1759 bad_unshare_cleanup_fd:
1760         if (new_fd)
1761                 put_files_struct(new_fd);
1762
1763 bad_unshare_cleanup_vm:
1764         if (new_mm)
1765                 mmput(new_mm);
1766
1767 bad_unshare_cleanup_sigh:
1768         if (new_sigh)
1769                 if (atomic_dec_and_test(&new_sigh->count))
1770                         kmem_cache_free(sighand_cachep, new_sigh);
1771
1772 bad_unshare_cleanup_fs:
1773         if (new_fs)
1774                 put_fs_struct(new_fs);
1775
1776 bad_unshare_cleanup_thread:
1777 bad_unshare_out:
1778         return err;
1779 }
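
/*
 * Illustrative userspace sketch, assuming the glibc unshare(2) wrapper
 * (_GNU_SOURCE, <sched.h>): give the calling process a private mount
 * namespace.  CLONE_FS is implied by check_unshare_flags() above, and
 * CLONE_NEWNS requires CAP_SYS_ADMIN.
 *
 *	if (unshare(CLONE_NEWNS) == -1)
 *		perror("unshare(CLONE_NEWNS)");
 *	// mount(2)/umount(2) in this process are now invisible to
 *	// tasks remaining in the original mount namespace
 */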
1780
1781 /*
1782  *      Helper to unshare the files of the current task.
1783  *      We don't want to expose copy_files internals to
1784  *      the exec layer of the kernel.
1785  */
1786
1787 int unshare_files(struct files_struct **displaced)
1788 {
1789         struct task_struct *task = current;
1790         struct files_struct *copy = NULL;
1791         int error;
1792
1793         error = unshare_fd(CLONE_FILES, &copy);
1794         if (error || !copy) {
1795                 *displaced = NULL;
1796                 return error;
1797         }
1798         *displaced = task->files;
1799         task_lock(task);
1800         task->files = copy;
1801         task_unlock(task);
1802         return 0;
1803 }
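
/*
 * A sketch of the intended call pattern (the real caller is the exec
 * path): 'displaced' receives the old descriptor table, if any, and
 * the caller must drop it with put_files_struct() when done:
 *
 *	struct files_struct *displaced;
 *	int err = unshare_files(&displaced);
 *
 *	if (err)
 *		return err;
 *	... work with a now-private descriptor table ...
 *	if (displaced)
 *		put_files_struct(displaced);
 */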