1 /*
2  *  linux/kernel/fork.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  */
6
7 /*
8  *  'fork.c' contains the help-routines for the 'fork' system call
9  * (see also entry.S and others).
10  * Fork is rather simple, once you get the hang of it, but the memory
11  * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
12  */
13
14 #include <linux/slab.h>
15 #include <linux/init.h>
16 #include <linux/unistd.h>
17 #include <linux/module.h>
18 #include <linux/vmalloc.h>
19 #include <linux/completion.h>
20 #include <linux/mnt_namespace.h>
21 #include <linux/personality.h>
22 #include <linux/mempolicy.h>
23 #include <linux/sem.h>
24 #include <linux/file.h>
25 #include <linux/fdtable.h>
26 #include <linux/iocontext.h>
27 #include <linux/key.h>
28 #include <linux/binfmts.h>
29 #include <linux/mman.h>
30 #include <linux/mmu_notifier.h>
31 #include <linux/fs.h>
32 #include <linux/nsproxy.h>
33 #include <linux/capability.h>
34 #include <linux/cpu.h>
35 #include <linux/cgroup.h>
36 #include <linux/security.h>
37 #include <linux/hugetlb.h>
38 #include <linux/swap.h>
39 #include <linux/syscalls.h>
40 #include <linux/jiffies.h>
41 #include <linux/tracehook.h>
42 #include <linux/futex.h>
43 #include <linux/compat.h>
44 #include <linux/task_io_accounting_ops.h>
45 #include <linux/rcupdate.h>
46 #include <linux/ptrace.h>
47 #include <linux/mount.h>
48 #include <linux/audit.h>
49 #include <linux/memcontrol.h>
50 #include <linux/ftrace.h>
51 #include <linux/profile.h>
52 #include <linux/rmap.h>
53 #include <linux/acct.h>
54 #include <linux/tsacct_kern.h>
55 #include <linux/cn_proc.h>
56 #include <linux/freezer.h>
57 #include <linux/delayacct.h>
58 #include <linux/taskstats_kern.h>
59 #include <linux/random.h>
60 #include <linux/tty.h>
61 #include <linux/proc_fs.h>
62 #include <linux/blkdev.h>
63 #include <linux/fs_struct.h>
64 #include <trace/sched.h>
65 #include <linux/magic.h>
66
67 #include <asm/pgtable.h>
68 #include <asm/pgalloc.h>
69 #include <asm/uaccess.h>
70 #include <asm/mmu_context.h>
71 #include <asm/cacheflush.h>
72 #include <asm/tlbflush.h>
73
74 /*
75  * Counters protected by write_lock_irq(&tasklist_lock)
76  */
77 unsigned long total_forks;      /* Handle normal Linux uptimes. */
78 int nr_threads;                 /* The idle threads do not count.. */
79
80 int max_threads;                /* tunable limit on nr_threads */
81
82 DEFINE_PER_CPU(unsigned long, process_counts) = 0;
83
84 __cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */
85
86 DEFINE_TRACE(sched_process_fork);
87
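/*
 * Approximate number of processes in the system: sum the per-CPU
 * process_counts.  Only online CPUs are scanned, so the result is a
 * snapshot rather than an exact count.
 */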
88 int nr_processes(void)
89 {
90         int cpu;
91         int total = 0;
92
93         for_each_online_cpu(cpu)
94                 total += per_cpu(process_counts, cpu);
95
96         return total;
97 }
98
99 #ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
100 # define alloc_task_struct()    kmem_cache_alloc(task_struct_cachep, GFP_KERNEL)
101 # define free_task_struct(tsk)  kmem_cache_free(task_struct_cachep, (tsk))
102 static struct kmem_cache *task_struct_cachep;
103 #endif
104
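/*
 * Default thread_info allocator: the thread_info and the kernel stack live
 * in a single THREAD_SIZE_ORDER page allocation.  CONFIG_DEBUG_STACK_USAGE
 * zeroes the pages so that unused stack space can be measured later.
 */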
105 #ifndef __HAVE_ARCH_THREAD_INFO_ALLOCATOR
106 static inline struct thread_info *alloc_thread_info(struct task_struct *tsk)
107 {
108 #ifdef CONFIG_DEBUG_STACK_USAGE
109         gfp_t mask = GFP_KERNEL | __GFP_ZERO;
110 #else
111         gfp_t mask = GFP_KERNEL;
112 #endif
113         return (struct thread_info *)__get_free_pages(mask, THREAD_SIZE_ORDER);
114 }
115
116 static inline void free_thread_info(struct thread_info *ti)
117 {
118         free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
119 }
120 #endif
121
122 /* SLAB cache for signal_struct structures (tsk->signal) */
123 static struct kmem_cache *signal_cachep;
124
125 /* SLAB cache for sighand_struct structures (tsk->sighand) */
126 struct kmem_cache *sighand_cachep;
127
128 /* SLAB cache for files_struct structures (tsk->files) */
129 struct kmem_cache *files_cachep;
130
131 /* SLAB cache for fs_struct structures (tsk->fs) */
132 struct kmem_cache *fs_cachep;
133
134 /* SLAB cache for vm_area_struct structures */
135 struct kmem_cache *vm_area_cachep;
136
137 /* SLAB cache for mm_struct structures (tsk->mm) */
138 static struct kmem_cache *mm_cachep;
139
140 void free_task(struct task_struct *tsk)
141 {
142         prop_local_destroy_single(&tsk->dirties);
143         free_thread_info(tsk->stack);
144         rt_mutex_debug_task_free(tsk);
145         ftrace_graph_exit_task(tsk);
146         free_task_struct(tsk);
147 }
148 EXPORT_SYMBOL(free_task);
149
150 void __put_task_struct(struct task_struct *tsk)
151 {
152         WARN_ON(!tsk->exit_state);
153         WARN_ON(atomic_read(&tsk->usage));
154         WARN_ON(tsk == current);
155
156         put_cred(tsk->real_cred);
157         put_cred(tsk->cred);
158         delayacct_tsk_free(tsk);
159
160         if (!profile_handoff_task(tsk))
161                 free_task(tsk);
162 }
163
164 /*
165  * macro override instead of weak attribute alias, to work around
166  * gcc 4.1.0 and 4.1.1 bugs with weak attribute and empty functions.
167  */
168 #ifndef arch_task_cache_init
169 #define arch_task_cache_init()
170 #endif
171
172 void __init fork_init(unsigned long mempages)
173 {
174 #ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
175 #ifndef ARCH_MIN_TASKALIGN
176 #define ARCH_MIN_TASKALIGN      L1_CACHE_BYTES
177 #endif
178         /* create a slab on which task_structs can be allocated */
179         task_struct_cachep =
180                 kmem_cache_create("task_struct", sizeof(struct task_struct),
181                         ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL);
182 #endif
183
184         /* do the arch specific task caches init */
185         arch_task_cache_init();
186
187         /*
188          * The default maximum number of threads is set to a safe
189          * value: the thread structures can take up at most one
190          * eighth of memory.
191          */
192         max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE);
193
194         /*
195          * We need to allow at least 20 threads to boot a system.
196          */
197         if (max_threads < 20)
198                 max_threads = 20;
199
200         init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
201         init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
202         init_task.signal->rlim[RLIMIT_SIGPENDING] =
203                 init_task.signal->rlim[RLIMIT_NPROC];
204 }
205
206 int __attribute__((weak)) arch_dup_task_struct(struct task_struct *dst,
207                                                struct task_struct *src)
208 {
209         *dst = *src;
210         return 0;
211 }
212
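/*
 * Allocate a task_struct and kernel stack for the child and copy the
 * parent's into them.  The new task starts with a usage count of two:
 * one reference for the child itself and one for release_task().
 */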
213 static struct task_struct *dup_task_struct(struct task_struct *orig)
214 {
215         struct task_struct *tsk;
216         struct thread_info *ti;
217         unsigned long *stackend;
218
219         int err;
220
221         prepare_to_copy(orig);
222
223         tsk = alloc_task_struct();
224         if (!tsk)
225                 return NULL;
226
227         ti = alloc_thread_info(tsk);
228         if (!ti) {
229                 free_task_struct(tsk);
230                 return NULL;
231         }
232
233         err = arch_dup_task_struct(tsk, orig);
234         if (err)
235                 goto out;
236
237         tsk->stack = ti;
238
239         err = prop_local_init_single(&tsk->dirties);
240         if (err)
241                 goto out;
242
243         setup_thread_stack(tsk, orig);
244         stackend = end_of_stack(tsk);
245         *stackend = STACK_END_MAGIC;    /* for overflow detection */
246
247 #ifdef CONFIG_CC_STACKPROTECTOR
248         tsk->stack_canary = get_random_int();
249 #endif
250
251         /* One for us, one for whoever does the "release_task()" (usually parent) */
252         atomic_set(&tsk->usage,2);
253         atomic_set(&tsk->fs_excl, 0);
254 #ifdef CONFIG_BLK_DEV_IO_TRACE
255         tsk->btrace_seq = 0;
256 #endif
257         tsk->splice_pipe = NULL;
258         return tsk;
259
260 out:
261         free_thread_info(ti);
262         free_task_struct(tsk);
263         return NULL;
264 }
265
266 #ifdef CONFIG_MMU
267 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
268 {
269         struct vm_area_struct *mpnt, *tmp, **pprev;
270         struct rb_node **rb_link, *rb_parent;
271         int retval;
272         unsigned long charge;
273         struct mempolicy *pol;
274
275         down_write(&oldmm->mmap_sem);
276         flush_cache_dup_mm(oldmm);
277         /*
278          * Not linked in yet - no deadlock potential:
279          */
280         down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);
281
282         mm->locked_vm = 0;
283         mm->mmap = NULL;
284         mm->mmap_cache = NULL;
285         mm->free_area_cache = oldmm->mmap_base;
286         mm->cached_hole_size = ~0UL;
287         mm->map_count = 0;
288         cpumask_clear(mm_cpumask(mm));
289         mm->mm_rb = RB_ROOT;
290         rb_link = &mm->mm_rb.rb_node;
291         rb_parent = NULL;
292         pprev = &mm->mmap;
293
294         for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
295                 struct file *file;
296
297                 if (mpnt->vm_flags & VM_DONTCOPY) {
298                         long pages = vma_pages(mpnt);
299                         mm->total_vm -= pages;
300                         vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
301                                                                 -pages);
302                         continue;
303                 }
304                 charge = 0;
305                 if (mpnt->vm_flags & VM_ACCOUNT) {
306                         unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
307                         if (security_vm_enough_memory(len))
308                                 goto fail_nomem;
309                         charge = len;
310                 }
311                 tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
312                 if (!tmp)
313                         goto fail_nomem;
314                 *tmp = *mpnt;
315                 pol = mpol_dup(vma_policy(mpnt));
316                 retval = PTR_ERR(pol);
317                 if (IS_ERR(pol))
318                         goto fail_nomem_policy;
319                 vma_set_policy(tmp, pol);
320                 tmp->vm_flags &= ~VM_LOCKED;
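                /* mlock state is not inherited: locked_vm was zeroed above */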
321                 tmp->vm_mm = mm;
322                 tmp->vm_next = NULL;
323                 anon_vma_link(tmp);
324                 file = tmp->vm_file;
325                 if (file) {
326                         struct inode *inode = file->f_path.dentry->d_inode;
327                         struct address_space *mapping = file->f_mapping;
328
329                         get_file(file);
330                         if (tmp->vm_flags & VM_DENYWRITE)
331                                 atomic_dec(&inode->i_writecount);
332                         spin_lock(&mapping->i_mmap_lock);
333                         if (tmp->vm_flags & VM_SHARED)
334                                 mapping->i_mmap_writable++;
335                         tmp->vm_truncate_count = mpnt->vm_truncate_count;
336                         flush_dcache_mmap_lock(mapping);
337                         /* insert tmp into the share list, just after mpnt */
338                         vma_prio_tree_add(tmp, mpnt);
339                         flush_dcache_mmap_unlock(mapping);
340                         spin_unlock(&mapping->i_mmap_lock);
341                 }
342
343                 /*
344                  * Clear hugetlb-related page reserves for children. This only
345                  * affects MAP_PRIVATE mappings. Faults generated by the child
346                  * are not guaranteed to succeed, even if read-only
347                  */
348                 if (is_vm_hugetlb_page(tmp))
349                         reset_vma_resv_huge_pages(tmp);
350
351                 /*
352                  * Link in the new vma and copy the page table entries.
353                  */
354                 *pprev = tmp;
355                 pprev = &tmp->vm_next;
356
357                 __vma_link_rb(mm, tmp, rb_link, rb_parent);
358                 rb_link = &tmp->vm_rb.rb_right;
359                 rb_parent = &tmp->vm_rb;
360
361                 mm->map_count++;
362                 retval = copy_page_range(mm, oldmm, mpnt);
363
364                 if (tmp->vm_ops && tmp->vm_ops->open)
365                         tmp->vm_ops->open(tmp);
366
367                 if (retval)
368                         goto out;
369         }
370         /* a new mm has just been created */
371         arch_dup_mmap(oldmm, mm);
372         retval = 0;
373 out:
374         up_write(&mm->mmap_sem);
375         flush_tlb_mm(oldmm);
376         up_write(&oldmm->mmap_sem);
377         return retval;
378 fail_nomem_policy:
379         kmem_cache_free(vm_area_cachep, tmp);
380 fail_nomem:
381         retval = -ENOMEM;
382         vm_unacct_memory(charge);
383         goto out;
384 }
385
386 static inline int mm_alloc_pgd(struct mm_struct * mm)
387 {
388         mm->pgd = pgd_alloc(mm);
389         if (unlikely(!mm->pgd))
390                 return -ENOMEM;
391         return 0;
392 }
393
394 static inline void mm_free_pgd(struct mm_struct * mm)
395 {
396         pgd_free(mm, mm->pgd);
397 }
398 #else
399 #define dup_mmap(mm, oldmm)     (0)
400 #define mm_alloc_pgd(mm)        (0)
401 #define mm_free_pgd(mm)
402 #endif /* CONFIG_MMU */
403
404 __cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);
405
406 #define allocate_mm()   (kmem_cache_alloc(mm_cachep, GFP_KERNEL))
407 #define free_mm(mm)     (kmem_cache_free(mm_cachep, (mm)))
408
409 static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;
410
411 static int __init coredump_filter_setup(char *s)
412 {
413         default_dump_filter =
414                 (simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) &
415                 MMF_DUMP_FILTER_MASK;
416         return 1;
417 }
418
419 __setup("coredump_filter=", coredump_filter_setup);
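/*
 * default_dump_filter is only used in mm_init() when the task creating the
 * mm has no mm of its own (e.g. a kernel thread); every other new mm
 * inherits its parent's mm->flags instead.
 */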
420
421 #include <linux/init_task.h>
422
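/*
 * Common initialisation for a new mm_struct: reference counts, locks, RSS
 * counters and the page directory.  Returns NULL (and frees the mm) if the
 * pgd cannot be allocated.
 */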
423 static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
424 {
425         atomic_set(&mm->mm_users, 1);
426         atomic_set(&mm->mm_count, 1);
427         init_rwsem(&mm->mmap_sem);
428         INIT_LIST_HEAD(&mm->mmlist);
429         mm->flags = (current->mm) ? current->mm->flags : default_dump_filter;
430         mm->core_state = NULL;
431         mm->nr_ptes = 0;
432         set_mm_counter(mm, file_rss, 0);
433         set_mm_counter(mm, anon_rss, 0);
434         spin_lock_init(&mm->page_table_lock);
435         spin_lock_init(&mm->ioctx_lock);
436         INIT_HLIST_HEAD(&mm->ioctx_list);
437         mm->free_area_cache = TASK_UNMAPPED_BASE;
438         mm->cached_hole_size = ~0UL;
439         mm_init_owner(mm, p);
440
441         if (likely(!mm_alloc_pgd(mm))) {
442                 mm->def_flags = 0;
443                 mmu_notifier_mm_init(mm);
444                 return mm;
445         }
446
447         free_mm(mm);
448         return NULL;
449 }
450
451 /*
452  * Allocate and initialize an mm_struct.
453  */
454 struct mm_struct * mm_alloc(void)
455 {
456         struct mm_struct * mm;
457
458         mm = allocate_mm();
459         if (mm) {
460                 memset(mm, 0, sizeof(*mm));
461                 mm = mm_init(mm, current);
462         }
463         return mm;
464 }
465
466 /*
467  * Called when the last reference to the mm
468  * is dropped: either by a lazy thread or by
469  * mmput. Free the page directory and the mm.
470  */
471 void __mmdrop(struct mm_struct *mm)
472 {
473         BUG_ON(mm == &init_mm);
474         mm_free_pgd(mm);
475         destroy_context(mm);
476         mmu_notifier_mm_destroy(mm);
477         free_mm(mm);
478 }
479 EXPORT_SYMBOL_GPL(__mmdrop);
480
481 /*
482  * Decrement the use count and release all resources for an mm.
483  */
484 void mmput(struct mm_struct *mm)
485 {
486         might_sleep();
487
488         if (atomic_dec_and_test(&mm->mm_users)) {
489                 exit_aio(mm);
490                 exit_mmap(mm);
491                 set_mm_exe_file(mm, NULL);
492                 if (!list_empty(&mm->mmlist)) {
493                         spin_lock(&mmlist_lock);
494                         list_del(&mm->mmlist);
495                         spin_unlock(&mmlist_lock);
496                 }
497                 put_swap_token(mm);
498                 mmdrop(mm);
499         }
500 }
501 EXPORT_SYMBOL_GPL(mmput);
502
503 /**
504  * get_task_mm - acquire a reference to the task's mm
505  *
506  * Returns %NULL if the task has no mm, or if PF_KTHREAD is set (meaning
507  * this kernel thread has only transiently adopted a user mm via use_mm,
508  * e.g. to do AIO).  Otherwise returns a reference to the mm after
509  * bumping up its use count.  The caller must release the mm via mmput()
510  * after use.  Typically used by /proc and ptrace.
511  */
512 struct mm_struct *get_task_mm(struct task_struct *task)
513 {
514         struct mm_struct *mm;
515
516         task_lock(task);
517         mm = task->mm;
518         if (mm) {
519                 if (task->flags & PF_KTHREAD)
520                         mm = NULL;
521                 else
522                         atomic_inc(&mm->mm_users);
523         }
524         task_unlock(task);
525         return mm;
526 }
527 EXPORT_SYMBOL_GPL(get_task_mm);
528
529 /* Please note the differences between mmput and mm_release.
530  * mmput is called whenever we stop holding onto a mm_struct,
531  * whether on error or on success.
532  *
533  * mm_release is called after a mm_struct has been removed
534  * from the current process.
535  *
536  * This difference is important for error handling, when we
537  * only half set up a mm_struct for a new process and need to restore
538  * the old one.  Because we mmput the new mm_struct before
539  * restoring the old one. . .
540  * Eric Biederman 10 January 1998
541  */
542 void mm_release(struct task_struct *tsk, struct mm_struct *mm)
543 {
544         struct completion *vfork_done = tsk->vfork_done;
545
546         /* Get rid of any futexes when releasing the mm */
547 #ifdef CONFIG_FUTEX
548         if (unlikely(tsk->robust_list))
549                 exit_robust_list(tsk);
550 #ifdef CONFIG_COMPAT
551         if (unlikely(tsk->compat_robust_list))
552                 compat_exit_robust_list(tsk);
553 #endif
554 #endif
555
556         /* Get rid of any cached register state */
557         deactivate_mm(tsk, mm);
558
559         /* notify parent sleeping on vfork() */
560         if (vfork_done) {
561                 tsk->vfork_done = NULL;
562                 complete(vfork_done);
563         }
564
565         /*
566          * If we're exiting normally, clear a user-space tid field if
567          * requested.  We leave this alone when dying by signal, to leave
568          * the value intact in a core dump, and to save the unnecessary
569          * trouble otherwise.  Userland only wants this done for a sys_exit.
570          */
571         if (tsk->clear_child_tid
572             && !(tsk->flags & PF_SIGNALED)
573             && atomic_read(&mm->mm_users) > 1) {
574                 u32 __user * tidptr = tsk->clear_child_tid;
575                 tsk->clear_child_tid = NULL;
576
577                 /*
578                  * We don't check the error code - if userspace has
579                  * not set up a proper pointer then tough luck.
580                  */
581                 put_user(0, tidptr);
582                 sys_futex(tidptr, FUTEX_WAKE, 1, NULL, NULL, 0);
583         }
584 }
585
586 /*
587  * Allocate a new mm structure and copy contents from the
588  * mm structure of the passed in task structure.
589  */
590 struct mm_struct *dup_mm(struct task_struct *tsk)
591 {
592         struct mm_struct *mm, *oldmm = current->mm;
593         int err;
594
595         if (!oldmm)
596                 return NULL;
597
598         mm = allocate_mm();
599         if (!mm)
600                 goto fail_nomem;
601
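        /*
         * Start from a byte copy of the parent's mm; mm_init() below
         * re-initialises the fields that must not be shared.
         */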
602         memcpy(mm, oldmm, sizeof(*mm));
603
604         /* Initializing for Swap token stuff */
605         mm->token_priority = 0;
606         mm->last_interval = 0;
607
608         if (!mm_init(mm, tsk))
609                 goto fail_nomem;
610
611         if (init_new_context(tsk, mm))
612                 goto fail_nocontext;
613
614         dup_mm_exe_file(oldmm, mm);
615
616         err = dup_mmap(mm, oldmm);
617         if (err)
618                 goto free_pt;
619
620         mm->hiwater_rss = get_mm_rss(mm);
621         mm->hiwater_vm = mm->total_vm;
622
623         return mm;
624
625 free_pt:
626         mmput(mm);
627
628 fail_nomem:
629         return NULL;
630
631 fail_nocontext:
632         /*
633          * If init_new_context() failed, we cannot use mmput() to free the mm
634          * because it calls destroy_context()
635          */
636         mm_free_pgd(mm);
637         free_mm(mm);
638         return NULL;
639 }
640
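/*
 * Set up the child's mm: with CLONE_VM the parent's mm is shared, otherwise
 * it is duplicated via dup_mm().  Kernel threads (no current->mm) get no mm
 * at all here.
 */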
641 static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
642 {
643         struct mm_struct * mm, *oldmm;
644         int retval;
645
646         tsk->min_flt = tsk->maj_flt = 0;
647         tsk->nvcsw = tsk->nivcsw = 0;
648 #ifdef CONFIG_DETECT_HUNG_TASK
649         tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
650 #endif
651
652         tsk->mm = NULL;
653         tsk->active_mm = NULL;
654
655         /*
656          * Are we cloning a kernel thread?
657          *
658          * We need to steal an active VM for that.
659          */
660         oldmm = current->mm;
661         if (!oldmm)
662                 return 0;
663
664         if (clone_flags & CLONE_VM) {
665                 atomic_inc(&oldmm->mm_users);
666                 mm = oldmm;
667                 goto good_mm;
668         }
669
670         retval = -ENOMEM;
671         mm = dup_mm(tsk);
672         if (!mm)
673                 goto fail_nomem;
674
675 good_mm:
676         /* Initializing for Swap token stuff */
677         mm->token_priority = 0;
678         mm->last_interval = 0;
679
680         tsk->mm = mm;
681         tsk->active_mm = mm;
682         return 0;
683
684 fail_nomem:
685         return retval;
686 }
687
688 static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
689 {
690         struct fs_struct *fs = current->fs;
691         if (clone_flags & CLONE_FS) {
692                 /* tsk->fs is already what we want */
693                 write_lock(&fs->lock);
694                 if (fs->in_exec) {
695                         write_unlock(&fs->lock);
696                         return -EAGAIN;
697                 }
698                 fs->users++;
699                 write_unlock(&fs->lock);
700                 return 0;
701         }
702         tsk->fs = copy_fs_struct(fs);
703         if (!tsk->fs)
704                 return -ENOMEM;
705         return 0;
706 }
707
708 static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
709 {
710         struct files_struct *oldf, *newf;
711         int error = 0;
712
713         /*
714          * A background process may not have any files ...
715          */
716         oldf = current->files;
717         if (!oldf)
718                 goto out;
719
720         if (clone_flags & CLONE_FILES) {
721                 atomic_inc(&oldf->count);
722                 goto out;
723         }
724
725         newf = dup_fd(oldf, &error);
726         if (!newf)
727                 goto out;
728
729         tsk->files = newf;
730         error = 0;
731 out:
732         return error;
733 }
734
735 static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
736 {
737 #ifdef CONFIG_BLOCK
738         struct io_context *ioc = current->io_context;
739
740         if (!ioc)
741                 return 0;
742         /*
743          * Share io context with parent, if CLONE_IO is set
744          */
745         if (clone_flags & CLONE_IO) {
746                 tsk->io_context = ioc_task_link(ioc);
747                 if (unlikely(!tsk->io_context))
748                         return -ENOMEM;
749         } else if (ioprio_valid(ioc->ioprio)) {
750                 tsk->io_context = alloc_io_context(GFP_KERNEL, -1);
751                 if (unlikely(!tsk->io_context))
752                         return -ENOMEM;
753
754                 tsk->io_context->ioprio = ioc->ioprio;
755         }
756 #endif
757         return 0;
758 }
759
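/*
 * With CLONE_SIGHAND (which CLONE_THREAD implies) the signal handler table
 * is shared by reference; otherwise the child gets its own copy of the
 * parent's sigaction array.
 */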
760 static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
761 {
762         struct sighand_struct *sig;
763
764         if (clone_flags & CLONE_SIGHAND) {
765                 atomic_inc(&current->sighand->count);
766                 return 0;
767         }
768         sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
769         rcu_assign_pointer(tsk->sighand, sig);
770         if (!sig)
771                 return -ENOMEM;
772         atomic_set(&sig->count, 1);
773         memcpy(sig->action, current->sighand->action, sizeof(sig->action));
774         return 0;
775 }
776
777 void __cleanup_sighand(struct sighand_struct *sighand)
778 {
779         if (atomic_dec_and_test(&sighand->count))
780                 kmem_cache_free(sighand_cachep, sighand);
781 }
782
783
784 /*
785  * Initialize POSIX timer handling for a thread group.
786  */
787 static void posix_cpu_timers_init_group(struct signal_struct *sig)
788 {
789         /* Thread group counters. */
790         thread_group_cputime_init(sig);
791
792         /* Expiration times and increments. */
793         sig->it_virt_expires = cputime_zero;
794         sig->it_virt_incr = cputime_zero;
795         sig->it_prof_expires = cputime_zero;
796         sig->it_prof_incr = cputime_zero;
797
798         /* Cached expiration times. */
799         sig->cputime_expires.prof_exp = cputime_zero;
800         sig->cputime_expires.virt_exp = cputime_zero;
801         sig->cputime_expires.sched_exp = 0;
802
803         /* The timer lists. */
804         INIT_LIST_HEAD(&sig->cpu_timers[0]);
805         INIT_LIST_HEAD(&sig->cpu_timers[1]);
806         INIT_LIST_HEAD(&sig->cpu_timers[2]);
807 }
808
809 static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
810 {
811         struct signal_struct *sig;
812
813         if (clone_flags & CLONE_THREAD) {
814                 atomic_inc(&current->signal->count);
815                 atomic_inc(&current->signal->live);
816                 return 0;
817         }
818         sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
819
820         if (sig)
821                 posix_cpu_timers_init_group(sig);
822
823         tsk->signal = sig;
824         if (!sig)
825                 return -ENOMEM;
826
827         atomic_set(&sig->count, 1);
828         atomic_set(&sig->live, 1);
829         init_waitqueue_head(&sig->wait_chldexit);
830         sig->flags = 0;
831         if (clone_flags & CLONE_NEWPID)
832                 sig->flags |= SIGNAL_UNKILLABLE;
833         sig->group_exit_code = 0;
834         sig->group_exit_task = NULL;
835         sig->group_stop_count = 0;
836         sig->curr_target = tsk;
837         init_sigpending(&sig->shared_pending);
838         INIT_LIST_HEAD(&sig->posix_timers);
839
840         hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
841         sig->it_real_incr.tv64 = 0;
842         sig->real_timer.function = it_real_fn;
843
844         sig->leader = 0;        /* session leadership doesn't inherit */
845         sig->tty_old_pgrp = NULL;
846         sig->tty = NULL;
847
848         sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
849         sig->gtime = cputime_zero;
850         sig->cgtime = cputime_zero;
851         sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
852         sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
853         sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
854         task_io_accounting_init(&sig->ioac);
855         sig->sum_sched_runtime = 0;
856         taskstats_tgid_init(sig);
857
858         task_lock(current->group_leader);
859         memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
860         task_unlock(current->group_leader);
861
862         acct_init_pacct(&sig->pacct);
863
864         tty_audit_fork(sig);
865
866         return 0;
867 }
868
869 void __cleanup_signal(struct signal_struct *sig)
870 {
871         thread_group_cputime_free(sig);
872         tty_kref_put(sig->tty);
873         kmem_cache_free(signal_cachep, sig);
874 }
875
876 static void cleanup_signal(struct task_struct *tsk)
877 {
878         struct signal_struct *sig = tsk->signal;
879
880         atomic_dec(&sig->live);
881
882         if (atomic_dec_and_test(&sig->count))
883                 __cleanup_signal(sig);
884 }
885
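/*
 * Set up the child's flags: it has not exec'd yet (PF_FORKNOEXEC), any
 * PF_SUPERPRIV inherited from the parent is cleared, and PF_STARTING is
 * set until do_fork() lets the child run.
 */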
886 static void copy_flags(unsigned long clone_flags, struct task_struct *p)
887 {
888         unsigned long new_flags = p->flags;
889
890         new_flags &= ~PF_SUPERPRIV;
891         new_flags |= PF_FORKNOEXEC;
892         new_flags |= PF_STARTING;
893         p->flags = new_flags;
894         clear_freeze_flag(p);
895 }
896
897 SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
898 {
899         current->clear_child_tid = tidptr;
900
901         return task_pid_vnr(current);
902 }
903
904 static void rt_mutex_init_task(struct task_struct *p)
905 {
906         spin_lock_init(&p->pi_lock);
907 #ifdef CONFIG_RT_MUTEXES
908         plist_head_init(&p->pi_waiters, &p->pi_lock);
909         p->pi_blocked_on = NULL;
910 #endif
911 }
912
913 #ifdef CONFIG_MM_OWNER
914 void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
915 {
916         mm->owner = p;
917 }
918 #endif /* CONFIG_MM_OWNER */
919
920 /*
921  * Initialize POSIX timer handling for a single task.
922  */
923 static void posix_cpu_timers_init(struct task_struct *tsk)
924 {
925         tsk->cputime_expires.prof_exp = cputime_zero;
926         tsk->cputime_expires.virt_exp = cputime_zero;
927         tsk->cputime_expires.sched_exp = 0;
928         INIT_LIST_HEAD(&tsk->cpu_timers[0]);
929         INIT_LIST_HEAD(&tsk->cpu_timers[1]);
930         INIT_LIST_HEAD(&tsk->cpu_timers[2]);
931 }
932
933 /*
934  * This creates a new process as a copy of the old one,
935  * but does not actually start it yet.
936  *
937  * It copies the registers, and all the appropriate
938  * parts of the process environment (as per the clone
939  * flags). The actual kick-off is left to the caller.
940  */
941 static struct task_struct *copy_process(unsigned long clone_flags,
942                                         unsigned long stack_start,
943                                         struct pt_regs *regs,
944                                         unsigned long stack_size,
945                                         int __user *child_tidptr,
946                                         struct pid *pid,
947                                         int trace)
948 {
949         int retval;
950         struct task_struct *p;
951         int cgroup_callbacks_done = 0;
952
953         if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
954                 return ERR_PTR(-EINVAL);
955
956         /*
957          * Thread groups must share signals as well, and detached threads
958          * can only be started up within the thread group.
959          */
960         if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
961                 return ERR_PTR(-EINVAL);
962
963         /*
964          * Shared signal handlers imply shared VM. By way of the above,
965          * thread groups also imply shared VM. Blocking this case allows
966          * for various simplifications in other code.
967          */
968         if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
969                 return ERR_PTR(-EINVAL);
970
971         retval = security_task_create(clone_flags);
972         if (retval)
973                 goto fork_out;
974
975         retval = -ENOMEM;
976         p = dup_task_struct(current);
977         if (!p)
978                 goto fork_out;
979
980         rt_mutex_init_task(p);
981
982 #ifdef CONFIG_PROVE_LOCKING
983         DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
984         DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
985 #endif
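        /*
         * Enforce RLIMIT_NPROC against the real user's process count;
         * CAP_SYS_ADMIN, CAP_SYS_RESOURCE and INIT_USER are exempt.
         */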
986         retval = -EAGAIN;
987         if (atomic_read(&p->real_cred->user->processes) >=
988                         p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
989                 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
990                     p->real_cred->user != INIT_USER)
991                         goto bad_fork_free;
992         }
993
994         retval = copy_creds(p, clone_flags);
995         if (retval < 0)
996                 goto bad_fork_free;
997
998         /*
999          * If multiple threads are within copy_process(), then this check
1000          * triggers too late. This doesn't hurt, the check is only there
1001          * to stop root fork bombs.
1002          */
1003         retval = -EAGAIN;
1004         if (nr_threads >= max_threads)
1005                 goto bad_fork_cleanup_count;
1006
1007         if (!try_module_get(task_thread_info(p)->exec_domain->module))
1008                 goto bad_fork_cleanup_count;
1009
1010         if (p->binfmt && !try_module_get(p->binfmt->module))
1011                 goto bad_fork_cleanup_put_domain;
1012
1013         p->did_exec = 0;
1014         delayacct_tsk_init(p);  /* Must remain after dup_task_struct() */
1015         copy_flags(clone_flags, p);
1016         INIT_LIST_HEAD(&p->children);
1017         INIT_LIST_HEAD(&p->sibling);
1018 #ifdef CONFIG_PREEMPT_RCU
1019         p->rcu_read_lock_nesting = 0;
1020         p->rcu_flipctr_idx = 0;
1021 #endif /* #ifdef CONFIG_PREEMPT_RCU */
1022         p->vfork_done = NULL;
1023         spin_lock_init(&p->alloc_lock);
1024
1025         clear_tsk_thread_flag(p, TIF_SIGPENDING);
1026         init_sigpending(&p->pending);
1027
1028         p->utime = cputime_zero;
1029         p->stime = cputime_zero;
1030         p->gtime = cputime_zero;
1031         p->utimescaled = cputime_zero;
1032         p->stimescaled = cputime_zero;
1033         p->prev_utime = cputime_zero;
1034         p->prev_stime = cputime_zero;
1035
1036         p->default_timer_slack_ns = current->timer_slack_ns;
1037
1038         task_io_accounting_init(&p->ioac);
1039         acct_clear_integrals(p);
1040
1041         posix_cpu_timers_init(p);
1042
1043         p->lock_depth = -1;             /* -1 = no lock */
1044         do_posix_clock_monotonic_gettime(&p->start_time);
1045         p->real_start_time = p->start_time;
1046         monotonic_to_bootbased(&p->real_start_time);
1047         p->io_context = NULL;
1048         p->audit_context = NULL;
1049         cgroup_fork(p);
1050 #ifdef CONFIG_NUMA
1051         p->mempolicy = mpol_dup(p->mempolicy);
1052         if (IS_ERR(p->mempolicy)) {
1053                 retval = PTR_ERR(p->mempolicy);
1054                 p->mempolicy = NULL;
1055                 goto bad_fork_cleanup_cgroup;
1056         }
1057         mpol_fix_fork_child_flag(p);
1058 #endif
1059 #ifdef CONFIG_TRACE_IRQFLAGS
1060         p->irq_events = 0;
1061 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
1062         p->hardirqs_enabled = 1;
1063 #else
1064         p->hardirqs_enabled = 0;
1065 #endif
1066         p->hardirq_enable_ip = 0;
1067         p->hardirq_enable_event = 0;
1068         p->hardirq_disable_ip = _THIS_IP_;
1069         p->hardirq_disable_event = 0;
1070         p->softirqs_enabled = 1;
1071         p->softirq_enable_ip = _THIS_IP_;
1072         p->softirq_enable_event = 0;
1073         p->softirq_disable_ip = 0;
1074         p->softirq_disable_event = 0;
1075         p->hardirq_context = 0;
1076         p->softirq_context = 0;
1077 #endif
1078 #ifdef CONFIG_LOCKDEP
1079         p->lockdep_depth = 0; /* no locks held yet */
1080         p->curr_chain_key = 0;
1081         p->lockdep_recursion = 0;
1082 #endif
1083
1084 #ifdef CONFIG_DEBUG_MUTEXES
1085         p->blocked_on = NULL; /* not blocked yet */
1086 #endif
1087         if (unlikely(current->ptrace))
1088                 ptrace_fork(p, clone_flags);
1089
1090         /* Perform scheduler related setup. Assign this task to a CPU. */
1091         sched_fork(p, clone_flags);
1092
1093         if ((retval = audit_alloc(p)))
1094                 goto bad_fork_cleanup_policy;
1095         /* copy all the process information */
1096         if ((retval = copy_semundo(clone_flags, p)))
1097                 goto bad_fork_cleanup_audit;
1098         if ((retval = copy_files(clone_flags, p)))
1099                 goto bad_fork_cleanup_semundo;
1100         if ((retval = copy_fs(clone_flags, p)))
1101                 goto bad_fork_cleanup_files;
1102         if ((retval = copy_sighand(clone_flags, p)))
1103                 goto bad_fork_cleanup_fs;
1104         if ((retval = copy_signal(clone_flags, p)))
1105                 goto bad_fork_cleanup_sighand;
1106         if ((retval = copy_mm(clone_flags, p)))
1107                 goto bad_fork_cleanup_signal;
1108         if ((retval = copy_namespaces(clone_flags, p)))
1109                 goto bad_fork_cleanup_mm;
1110         if ((retval = copy_io(clone_flags, p)))
1111                 goto bad_fork_cleanup_namespaces;
1112         retval = copy_thread(clone_flags, stack_start, stack_size, p, regs);
1113         if (retval)
1114                 goto bad_fork_cleanup_io;
1115
1116         if (pid != &init_struct_pid) {
1117                 retval = -ENOMEM;
1118                 pid = alloc_pid(p->nsproxy->pid_ns);
1119                 if (!pid)
1120                         goto bad_fork_cleanup_io;
1121
1122                 if (clone_flags & CLONE_NEWPID) {
1123                         retval = pid_ns_prepare_proc(p->nsproxy->pid_ns);
1124                         if (retval < 0)
1125                                 goto bad_fork_free_pid;
1126                 }
1127         }
1128
1129         ftrace_graph_init_task(p);
1130
1131         p->pid = pid_nr(pid);
1132         p->tgid = p->pid;
1133         if (clone_flags & CLONE_THREAD)
1134                 p->tgid = current->tgid;
1135
1136         if (current->nsproxy != p->nsproxy) {
1137                 retval = ns_cgroup_clone(p, pid);
1138                 if (retval)
1139                         goto bad_fork_free_graph;
1140         }
1141
1142         p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
1143         /*
1144          * Clear TID on mm_release()?
1145          */
1146         p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;
1147 #ifdef CONFIG_FUTEX
1148         p->robust_list = NULL;
1149 #ifdef CONFIG_COMPAT
1150         p->compat_robust_list = NULL;
1151 #endif
1152         INIT_LIST_HEAD(&p->pi_state_list);
1153         p->pi_state_cache = NULL;
1154 #endif
1155         /*
1156          * sigaltstack should be cleared when sharing the same VM
1157          */
1158         if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
1159                 p->sas_ss_sp = p->sas_ss_size = 0;
1160
1161         /*
1162          * Syscall tracing should be turned off in the child regardless
1163          * of CLONE_PTRACE.
1164          */
1165         clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
1166 #ifdef TIF_SYSCALL_EMU
1167         clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
1168 #endif
1169         clear_all_latency_tracing(p);
1170
1171         /* ok, now we should be set up.. */
1172         p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
1173         p->pdeath_signal = 0;
1174         p->exit_state = 0;
1175
1176         /*
1177          * Ok, make it visible to the rest of the system.
1178          * We don't wake it up yet.
1179          */
1180         p->group_leader = p;
1181         INIT_LIST_HEAD(&p->thread_group);
1182
1183         /* Now that the task is set up, run cgroup callbacks if
1184          * necessary. We need to run them before the task is visible
1185          * on the tasklist. */
1186         cgroup_fork_callbacks(p);
1187         cgroup_callbacks_done = 1;
1188
1189         /* Need tasklist lock for parent etc handling! */
1190         write_lock_irq(&tasklist_lock);
1191
1192         /*
1193          * The task hasn't been attached yet, so its cpus_allowed mask will
1194          * not be changed, nor will its assigned CPU.
1195          *
1196          * The cpus_allowed mask of the parent may have changed after it was
1197          * copied the first time - so re-copy it here, then check the child's
1198          * CPU to ensure it is on a valid CPU (and if not, just force it back
1199          * to the parent's CPU). This avoids a lot of nasty races.
1200          */
1201         p->cpus_allowed = current->cpus_allowed;
1202         p->rt.nr_cpus_allowed = current->rt.nr_cpus_allowed;
1203         if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
1204                         !cpu_online(task_cpu(p))))
1205                 set_task_cpu(p, smp_processor_id());
1206
1207         /* CLONE_PARENT re-uses the old parent */
1208         if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
1209                 p->real_parent = current->real_parent;
1210                 p->parent_exec_id = current->parent_exec_id;
1211         } else {
1212                 p->real_parent = current;
1213                 p->parent_exec_id = current->self_exec_id;
1214         }
1215
1216         spin_lock(&current->sighand->siglock);
1217
1218         /*
1219          * Process group and session signals need to be delivered to just the
1220          * parent before the fork or both the parent and the child after the
1221          * fork. Restart if a signal comes in before we add the new process to
1222          * its process group.
1223          * A fatal signal pending means that current will exit, so the new
1224          * thread can't slip out of an OOM kill (or normal SIGKILL).
1225          */
1226         recalc_sigpending();
1227         if (signal_pending(current)) {
1228                 spin_unlock(&current->sighand->siglock);
1229                 write_unlock_irq(&tasklist_lock);
1230                 retval = -ERESTARTNOINTR;
1231                 goto bad_fork_free_graph;
1232         }
1233
1234         if (clone_flags & CLONE_THREAD) {
1235                 p->group_leader = current->group_leader;
1236                 list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
1237         }
1238
1239         if (likely(p->pid)) {
1240                 list_add_tail(&p->sibling, &p->real_parent->children);
1241                 tracehook_finish_clone(p, clone_flags, trace);
1242
1243                 if (thread_group_leader(p)) {
1244                         if (clone_flags & CLONE_NEWPID)
1245                                 p->nsproxy->pid_ns->child_reaper = p;
1246
1247                         p->signal->leader_pid = pid;
1248                         tty_kref_put(p->signal->tty);
1249                         p->signal->tty = tty_kref_get(current->signal->tty);
1250                         attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
1251                         attach_pid(p, PIDTYPE_SID, task_session(current));
1252                         list_add_tail_rcu(&p->tasks, &init_task.tasks);
1253                         __get_cpu_var(process_counts)++;
1254                 }
1255                 attach_pid(p, PIDTYPE_PID, pid);
1256                 nr_threads++;
1257         }
1258
1259         total_forks++;
1260         spin_unlock(&current->sighand->siglock);
1261         write_unlock_irq(&tasklist_lock);
1262         proc_fork_connector(p);
1263         cgroup_post_fork(p);
1264         return p;
1265
1266 bad_fork_free_graph:
1267         ftrace_graph_exit_task(p);
1268 bad_fork_free_pid:
1269         if (pid != &init_struct_pid)
1270                 free_pid(pid);
1271 bad_fork_cleanup_io:
1272         put_io_context(p->io_context);
1273 bad_fork_cleanup_namespaces:
1274         exit_task_namespaces(p);
1275 bad_fork_cleanup_mm:
1276         if (p->mm)
1277                 mmput(p->mm);
1278 bad_fork_cleanup_signal:
1279         cleanup_signal(p);
1280 bad_fork_cleanup_sighand:
1281         __cleanup_sighand(p->sighand);
1282 bad_fork_cleanup_fs:
1283         exit_fs(p); /* blocking */
1284 bad_fork_cleanup_files:
1285         exit_files(p); /* blocking */
1286 bad_fork_cleanup_semundo:
1287         exit_sem(p);
1288 bad_fork_cleanup_audit:
1289         audit_free(p);
1290 bad_fork_cleanup_policy:
1291 #ifdef CONFIG_NUMA
1292         mpol_put(p->mempolicy);
1293 bad_fork_cleanup_cgroup:
1294 #endif
1295         cgroup_exit(p, cgroup_callbacks_done);
1296         delayacct_tsk_free(p);
1297         if (p->binfmt)
1298                 module_put(p->binfmt->module);
1299 bad_fork_cleanup_put_domain:
1300         module_put(task_thread_info(p)->exec_domain->module);
1301 bad_fork_cleanup_count:
1302         atomic_dec(&p->cred->user->processes);
1303         put_cred(p->real_cred);
1304         put_cred(p->cred);
1305 bad_fork_free:
1306         free_task(p);
1307 fork_out:
1308         return ERR_PTR(retval);
1309 }
1310
1311 noinline struct pt_regs * __cpuinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
1312 {
1313         memset(regs, 0, sizeof(struct pt_regs));
1314         return regs;
1315 }
1316
1317 struct task_struct * __cpuinit fork_idle(int cpu)
1318 {
1319         struct task_struct *task;
1320         struct pt_regs regs;
1321
1322         task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL,
1323                             &init_struct_pid, 0);
1324         if (!IS_ERR(task))
1325                 init_idle(task, cpu);
1326
1327         return task;
1328 }
1329
1330 /*
1331  *  Ok, this is the main fork-routine.
1332  *
1333  * It copies the process, and if successful kick-starts
1334  * it and waits for it to finish using the VM if required.
1335  */
1336 long do_fork(unsigned long clone_flags,
1337               unsigned long stack_start,
1338               struct pt_regs *regs,
1339               unsigned long stack_size,
1340               int __user *parent_tidptr,
1341               int __user *child_tidptr)
1342 {
1343         struct task_struct *p;
1344         int trace = 0;
1345         long nr;
1346
1347         /*
1348          * Do some preliminary argument and permissions checking before we
1349          * actually start allocating stuff
1350          */
1351         if (clone_flags & CLONE_NEWUSER) {
1352                 if (clone_flags & CLONE_THREAD)
1353                         return -EINVAL;
1354                 /* hopefully this check will go away when userns support is
1355                  * complete
1356                  */
1357                 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
1358                                 !capable(CAP_SETGID))
1359                         return -EPERM;
1360         }
1361
1362         /*
1363          * We hope to recycle these flags after 2.6.26
1364          */
1365         if (unlikely(clone_flags & CLONE_STOPPED)) {
1366                 static int __read_mostly count = 100;
1367
1368                 if (count > 0 && printk_ratelimit()) {
1369                         char comm[TASK_COMM_LEN];
1370
1371                         count--;
1372                         printk(KERN_INFO "fork(): process `%s' used deprecated "
1373                                         "clone flags 0x%lx\n",
1374                                 get_task_comm(comm, current),
1375                                 clone_flags & CLONE_STOPPED);
1376                 }
1377         }
1378
1379         /*
1380          * When called from kernel_thread, don't do user tracing stuff.
1381          */
1382         if (likely(user_mode(regs)))
1383                 trace = tracehook_prepare_clone(clone_flags);
1384
1385         p = copy_process(clone_flags, stack_start, regs, stack_size,
1386                          child_tidptr, NULL, trace);
1387         /*
1388          * Do this prior waking up the new thread - the thread pointer
1389          * might get invalid after that point, if the thread exits quickly.
1390          */
1391         if (!IS_ERR(p)) {
1392                 struct completion vfork;
1393
1394                 trace_sched_process_fork(current, p);
1395
1396                 nr = task_pid_vnr(p);
1397
1398                 if (clone_flags & CLONE_PARENT_SETTID)
1399                         put_user(nr, parent_tidptr);
1400
1401                 if (clone_flags & CLONE_VFORK) {
1402                         p->vfork_done = &vfork;
1403                         init_completion(&vfork);
1404                 }
1405
1406                 audit_finish_fork(p);
1407                 tracehook_report_clone(trace, regs, clone_flags, nr, p);
1408
1409                 /*
1410                  * We set PF_STARTING at creation in case tracing wants to
1411                  * use this to distinguish a fully live task from one that
1412                  * hasn't gotten to tracehook_report_clone() yet.  Now we
1413                  * clear it and set the child going.
1414                  */
1415                 p->flags &= ~PF_STARTING;
1416
1417                 if (unlikely(clone_flags & CLONE_STOPPED)) {
1418                         /*
1419                          * We'll start up with an immediate SIGSTOP.
1420                          */
1421                         sigaddset(&p->pending.signal, SIGSTOP);
1422                         set_tsk_thread_flag(p, TIF_SIGPENDING);
1423                         __set_task_state(p, TASK_STOPPED);
1424                 } else {
1425                         wake_up_new_task(p, clone_flags);
1426                 }
1427
1428                 tracehook_report_clone_complete(trace, regs,
1429                                                 clone_flags, nr, p);
1430
1431                 if (clone_flags & CLONE_VFORK) {
1432                         freezer_do_not_count();
1433                         wait_for_completion(&vfork);
1434                         freezer_count();
1435                         tracehook_report_vfork_done(p, nr);
1436                 }
1437         } else {
1438                 nr = PTR_ERR(p);
1439         }
1440         return nr;
1441 }
1442
1443 #ifndef ARCH_MIN_MMSTRUCT_ALIGN
1444 #define ARCH_MIN_MMSTRUCT_ALIGN 0
1445 #endif
1446
1447 static void sighand_ctor(void *data)
1448 {
1449         struct sighand_struct *sighand = data;
1450
1451         spin_lock_init(&sighand->siglock);
1452         init_waitqueue_head(&sighand->signalfd_wqh);
1453 }
1454
1455 void __init proc_caches_init(void)
1456 {
1457         sighand_cachep = kmem_cache_create("sighand_cache",
1458                         sizeof(struct sighand_struct), 0,
1459                         SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU,
1460                         sighand_ctor);
1461         signal_cachep = kmem_cache_create("signal_cache",
1462                         sizeof(struct signal_struct), 0,
1463                         SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
1464         files_cachep = kmem_cache_create("files_cache",
1465                         sizeof(struct files_struct), 0,
1466                         SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
1467         fs_cachep = kmem_cache_create("fs_cache",
1468                         sizeof(struct fs_struct), 0,
1469                         SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
1470         mm_cachep = kmem_cache_create("mm_struct",
1471                         sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
1472                         SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
1473         vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
1474         mmap_init();
1475 }
1476
1477 /*
1478  * Check constraints on flags passed to the unshare system call and
1479  * force unsharing of additional process context as appropriate.
1480  */
1481 static void check_unshare_flags(unsigned long *flags_ptr)
1482 {
1483         /*
1484          * If unsharing a thread from a thread group, must also
1485          * unshare vm.
1486          */
1487         if (*flags_ptr & CLONE_THREAD)
1488                 *flags_ptr |= CLONE_VM;
1489
1490         /*
1491          * If unsharing vm, must also unshare signal handlers.
1492          */
1493         if (*flags_ptr & CLONE_VM)
1494                 *flags_ptr |= CLONE_SIGHAND;
1495
1496         /*
1497          * If unsharing signal handlers and the task was created
1498          * using CLONE_THREAD, then must unshare the thread
1499          */
1500         if ((*flags_ptr & CLONE_SIGHAND) &&
1501             (atomic_read(&current->signal->count) > 1))
1502                 *flags_ptr |= CLONE_THREAD;
1503
1504         /*
1505          * If unsharing namespace, must also unshare filesystem information.
1506          */
1507         if (*flags_ptr & CLONE_NEWNS)
1508                 *flags_ptr |= CLONE_FS;
1509 }
1510
1511 /*
1512  * Unsharing of tasks created with CLONE_THREAD is not supported yet
1513  */
1514 static int unshare_thread(unsigned long unshare_flags)
1515 {
1516         if (unshare_flags & CLONE_THREAD)
1517                 return -EINVAL;
1518
1519         return 0;
1520 }
1521
1522 /*
1523  * Unshare the filesystem structure if it is being shared
1524  */
1525 static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
1526 {
1527         struct fs_struct *fs = current->fs;
1528
1529         if (!(unshare_flags & CLONE_FS) || !fs)
1530                 return 0;
1531
1532         /* don't need the lock here; in the worst case we'll do a useless copy */
1533         if (fs->users == 1)
1534                 return 0;
1535
1536         *new_fsp = copy_fs_struct(fs);
1537         if (!*new_fsp)
1538                 return -ENOMEM;
1539
1540         return 0;
1541 }
1542
1543 /*
1544  * Unsharing of sighand is not supported yet
1545  */
1546 static int unshare_sighand(unsigned long unshare_flags, struct sighand_struct **new_sighp)
1547 {
1548         struct sighand_struct *sigh = current->sighand;
1549
1550         if ((unshare_flags & CLONE_SIGHAND) && atomic_read(&sigh->count) > 1)
1551                 return -EINVAL;
1552         else
1553                 return 0;
1554 }
1555
1556 /*
1557  * Unshare vm if it is being shared
1558  */
1559 static int unshare_vm(unsigned long unshare_flags, struct mm_struct **new_mmp)
1560 {
1561         struct mm_struct *mm = current->mm;
1562
1563         if ((unshare_flags & CLONE_VM) &&
1564             (mm && atomic_read(&mm->mm_users) > 1)) {
1565                 return -EINVAL;
1566         }
1567
1568         return 0;
1569 }
1570
1571 /*
1572  * Unshare file descriptor table if it is being shared
1573  */
1574 static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
1575 {
1576         struct files_struct *fd = current->files;
1577         int error = 0;
1578
1579         if ((unshare_flags & CLONE_FILES) &&
1580             (fd && atomic_read(&fd->count) > 1)) {
1581                 *new_fdp = dup_fd(fd, &error);
1582                 if (!*new_fdp)
1583                         return error;
1584         }
1585
1586         return 0;
1587 }
1588
1589 /*
1590  * unshare allows a process to 'unshare' part of the process
1591  * context which was originally shared using clone.  copy_*
1592  * functions used by do_fork() cannot be used here directly
1593  * because they modify an inactive task_struct that is being
1594  * constructed. Here we are modifying the current, active,
1595  * task_struct.
1596  */
1597 SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
1598 {
1599         int err = 0;
1600         struct fs_struct *fs, *new_fs = NULL;
1601         struct sighand_struct *new_sigh = NULL;
1602         struct mm_struct *mm, *new_mm = NULL, *active_mm = NULL;
1603         struct files_struct *fd, *new_fd = NULL;
1604         struct nsproxy *new_nsproxy = NULL;
1605         int do_sysvsem = 0;
1606
1607         check_unshare_flags(&unshare_flags);
1608
1609         /* Return -EINVAL for all unsupported flags */
1610         err = -EINVAL;
1611         if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
1612                                 CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
1613                                 CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET))
1614                 goto bad_unshare_out;
1615
1616         /*
1617          * CLONE_NEWIPC must also detach from the undolist: after switching
1618          * to a new ipc namespace, the semaphore arrays from the old
1619          * namespace are unreachable.
1620          */
1621         if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
1622                 do_sysvsem = 1;
1623         if ((err = unshare_thread(unshare_flags)))
1624                 goto bad_unshare_out;
1625         if ((err = unshare_fs(unshare_flags, &new_fs)))
1626                 goto bad_unshare_cleanup_thread;
1627         if ((err = unshare_sighand(unshare_flags, &new_sigh)))
1628                 goto bad_unshare_cleanup_fs;
1629         if ((err = unshare_vm(unshare_flags, &new_mm)))
1630                 goto bad_unshare_cleanup_sigh;
1631         if ((err = unshare_fd(unshare_flags, &new_fd)))
1632                 goto bad_unshare_cleanup_vm;
1633         if ((err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
1634                         new_fs)))
1635                 goto bad_unshare_cleanup_fd;
1636
1637         if (new_fs ||  new_mm || new_fd || do_sysvsem || new_nsproxy) {
1638                 if (do_sysvsem) {
1639                         /*
1640                          * CLONE_SYSVSEM is equivalent to sys_exit().
1641                          */
1642                         exit_sem(current);
1643                 }
1644
1645                 if (new_nsproxy) {
1646                         switch_task_namespaces(current, new_nsproxy);
1647                         new_nsproxy = NULL;
1648                 }
1649
1650                 task_lock(current);
1651
1652                 if (new_fs) {
1653                         fs = current->fs;
1654                         write_lock(&fs->lock);
1655                         current->fs = new_fs;
1656                         if (--fs->users)
1657                                 new_fs = NULL;
1658                         else
1659                                 new_fs = fs;
1660                         write_unlock(&fs->lock);
1661                 }
1662
1663                 if (new_mm) {
1664                         mm = current->mm;
1665                         active_mm = current->active_mm;
1666                         current->mm = new_mm;
1667                         current->active_mm = new_mm;
1668                         activate_mm(active_mm, new_mm);
1669                         new_mm = mm;
1670                 }
1671
1672                 if (new_fd) {
1673                         fd = current->files;
1674                         current->files = new_fd;
1675                         new_fd = fd;
1676                 }
1677
1678                 task_unlock(current);
1679         }
1680
1681         if (new_nsproxy)
1682                 put_nsproxy(new_nsproxy);
1683
1684 bad_unshare_cleanup_fd:
1685         if (new_fd)
1686                 put_files_struct(new_fd);
1687
1688 bad_unshare_cleanup_vm:
1689         if (new_mm)
1690                 mmput(new_mm);
1691
1692 bad_unshare_cleanup_sigh:
1693         if (new_sigh)
1694                 if (atomic_dec_and_test(&new_sigh->count))
1695                         kmem_cache_free(sighand_cachep, new_sigh);
1696
1697 bad_unshare_cleanup_fs:
1698         if (new_fs)
1699                 free_fs_struct(new_fs);
1700
1701 bad_unshare_cleanup_thread:
1702 bad_unshare_out:
1703         return err;
1704 }
1705
1706 /*
1707  *      Helper to unshare the files of the current task.
1708  *      We don't want to expose copy_files internals to
1709  *      the exec layer of the kernel.
1710  */
1711
1712 int unshare_files(struct files_struct **displaced)
1713 {
1714         struct task_struct *task = current;
1715         struct files_struct *copy = NULL;
1716         int error;
1717
1718         error = unshare_fd(CLONE_FILES, &copy);
1719         if (error || !copy) {
1720                 *displaced = NULL;
1721                 return error;
1722         }
1723         *displaced = task->files;
1724         task_lock(task);
1725         task->files = copy;
1726         task_unlock(task);
1727         return 0;
1728 }