Merge sys_clone()/sys_unshare() nsproxy and namespace handling
/*
 *  linux/kernel/fork.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 *  'fork.c' contains the help-routines for the 'fork' system call
 * (see also entry.S and others).
 * Fork is rather simple, once you get the hang of it, but the memory
 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
 */

#include <linux/slab.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/mnt_namespace.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>
#include <linux/random.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/*
 * Counters protected by write_lock_irq(&tasklist_lock):
 */
unsigned long total_forks;      /* Handle normal Linux uptimes. */
int nr_threads;                 /* The idle threads do not count.. */

int max_threads;                /* tunable limit on nr_threads */

DEFINE_PER_CPU(unsigned long, process_counts) = 0;

__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */

int nr_processes(void)
{
        int cpu;
        int total = 0;

        for_each_online_cpu(cpu)
                total += per_cpu(process_counts, cpu);

        return total;
}

#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
# define alloc_task_struct()    kmem_cache_alloc(task_struct_cachep, GFP_KERNEL)
# define free_task_struct(tsk)  kmem_cache_free(task_struct_cachep, (tsk))
static struct kmem_cache *task_struct_cachep;
#endif

/* SLAB cache for signal_struct structures (tsk->signal) */
static struct kmem_cache *signal_cachep;

/* SLAB cache for sighand_struct structures (tsk->sighand) */
struct kmem_cache *sighand_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
struct kmem_cache *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
struct kmem_cache *fs_cachep;

/* SLAB cache for vm_area_struct structures */
struct kmem_cache *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;

void free_task(struct task_struct *tsk)
{
        free_thread_info(tsk->thread_info);
        rt_mutex_debug_task_free(tsk);
        free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);

void __put_task_struct(struct task_struct *tsk)
{
        WARN_ON(!(tsk->exit_state & (EXIT_DEAD | EXIT_ZOMBIE)));
        WARN_ON(atomic_read(&tsk->usage));
        WARN_ON(tsk == current);

        security_task_free(tsk);
        free_uid(tsk->user);
        put_group_info(tsk->group_info);
        delayacct_tsk_free(tsk);

        if (!profile_handoff_task(tsk))
                free_task(tsk);
}

void __init fork_init(unsigned long mempages)
{
#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN      L1_CACHE_BYTES
#endif
        /* create a slab on which task_structs can be allocated */
        task_struct_cachep =
                kmem_cache_create("task_struct", sizeof(struct task_struct),
                        ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL, NULL);
#endif

        /*
         * The default maximum number of threads is set to a safe
         * value: the thread structures can take up at most half
         * of memory.
         */
        max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE);

        /*
         * We need to allow at least 20 threads to boot a system.
         */
        if (max_threads < 20)
                max_threads = 20;

        init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
        init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
        init_task.signal->rlim[RLIMIT_SIGPENDING] =
                init_task.signal->rlim[RLIMIT_NPROC];
}
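
/*
 * Worked example of the sizing above (illustrative, assuming 4KB
 * pages and THREAD_SIZE == 8KB): with 512MB of RAM, mempages is
 * 131072 and THREAD_SIZE / PAGE_SIZE is 2, so
 *
 *      max_threads = 131072 / (8 * 2) = 8192
 *
 * and RLIMIT_NPROC then defaults to max_threads/2 = 4096 processes.
 */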

static struct task_struct *dup_task_struct(struct task_struct *orig)
{
        struct task_struct *tsk;
        struct thread_info *ti;

        prepare_to_copy(orig);

        tsk = alloc_task_struct();
        if (!tsk)
                return NULL;

        ti = alloc_thread_info(tsk);
        if (!ti) {
                free_task_struct(tsk);
                return NULL;
        }

        *tsk = *orig;
        tsk->thread_info = ti;
        setup_thread_stack(tsk, orig);

#ifdef CONFIG_CC_STACKPROTECTOR
        tsk->stack_canary = get_random_int();
#endif

        /* One for us, one for whoever does the "release_task()" (usually parent) */
        atomic_set(&tsk->usage,2);
        atomic_set(&tsk->fs_excl, 0);
#ifdef CONFIG_BLK_DEV_IO_TRACE
        tsk->btrace_seq = 0;
#endif
        tsk->splice_pipe = NULL;
        return tsk;
}

#ifdef CONFIG_MMU
static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
        struct vm_area_struct *mpnt, *tmp, **pprev;
        struct rb_node **rb_link, *rb_parent;
        int retval;
        unsigned long charge;
        struct mempolicy *pol;

        down_write(&oldmm->mmap_sem);
        flush_cache_dup_mm(oldmm);
        /*
         * Not linked in yet - no deadlock potential:
         */
        down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);

        mm->locked_vm = 0;
        mm->mmap = NULL;
        mm->mmap_cache = NULL;
        mm->free_area_cache = oldmm->mmap_base;
        mm->cached_hole_size = ~0UL;
        mm->map_count = 0;
        cpus_clear(mm->cpu_vm_mask);
        mm->mm_rb = RB_ROOT;
        rb_link = &mm->mm_rb.rb_node;
        rb_parent = NULL;
        pprev = &mm->mmap;

        for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
                struct file *file;

                if (mpnt->vm_flags & VM_DONTCOPY) {
                        long pages = vma_pages(mpnt);
                        mm->total_vm -= pages;
                        vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
                                                                -pages);
                        continue;
                }
                charge = 0;
                if (mpnt->vm_flags & VM_ACCOUNT) {
                        unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
                        if (security_vm_enough_memory(len))
                                goto fail_nomem;
                        charge = len;
                }
                tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
                if (!tmp)
                        goto fail_nomem;
                *tmp = *mpnt;
                pol = mpol_copy(vma_policy(mpnt));
                retval = PTR_ERR(pol);
                if (IS_ERR(pol))
                        goto fail_nomem_policy;
                vma_set_policy(tmp, pol);
                tmp->vm_flags &= ~VM_LOCKED;
                tmp->vm_mm = mm;
                tmp->vm_next = NULL;
                anon_vma_link(tmp);
                file = tmp->vm_file;
                if (file) {
                        struct inode *inode = file->f_path.dentry->d_inode;
                        get_file(file);
                        if (tmp->vm_flags & VM_DENYWRITE)
                                atomic_dec(&inode->i_writecount);

                        /* insert tmp into the share list, just after mpnt */
                        spin_lock(&file->f_mapping->i_mmap_lock);
                        tmp->vm_truncate_count = mpnt->vm_truncate_count;
                        flush_dcache_mmap_lock(file->f_mapping);
                        vma_prio_tree_add(tmp, mpnt);
                        flush_dcache_mmap_unlock(file->f_mapping);
                        spin_unlock(&file->f_mapping->i_mmap_lock);
                }

                /*
                 * Link in the new vma and copy the page table entries.
                 */
                *pprev = tmp;
                pprev = &tmp->vm_next;

                __vma_link_rb(mm, tmp, rb_link, rb_parent);
                rb_link = &tmp->vm_rb.rb_right;
                rb_parent = &tmp->vm_rb;

                mm->map_count++;
                retval = copy_page_range(mm, oldmm, mpnt);

                if (tmp->vm_ops && tmp->vm_ops->open)
                        tmp->vm_ops->open(tmp);

                if (retval)
                        goto out;
        }
        /* a new mm has just been created */
        arch_dup_mmap(oldmm, mm);
        retval = 0;
out:
        up_write(&mm->mmap_sem);
        flush_tlb_mm(oldmm);
        up_write(&oldmm->mmap_sem);
        return retval;
fail_nomem_policy:
        kmem_cache_free(vm_area_cachep, tmp);
fail_nomem:
        retval = -ENOMEM;
        vm_unacct_memory(charge);
        goto out;
}
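
/*
 * The VM_DONTCOPY branch above is what gives madvise(MADV_DONTFORK)
 * its effect: marked ranges are skipped entirely here, so the child
 * never maps them.  Illustrative userspace sketch (error handling
 * omitted):
 *
 *      p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *      madvise(p, len, MADV_DONTFORK);   // sets VM_DONTCOPY on the vma
 *      if (fork() == 0) {
 *              // touching p here now faults: the vma was not copied
 *      }
 */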

static inline int mm_alloc_pgd(struct mm_struct * mm)
{
        mm->pgd = pgd_alloc(mm);
        if (unlikely(!mm->pgd))
                return -ENOMEM;
        return 0;
}

static inline void mm_free_pgd(struct mm_struct * mm)
{
        pgd_free(mm->pgd);
}
#else
#define dup_mmap(mm, oldmm)     (0)
#define mm_alloc_pgd(mm)        (0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */

 __cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);

#define allocate_mm()   (kmem_cache_alloc(mm_cachep, GFP_KERNEL))
#define free_mm(mm)     (kmem_cache_free(mm_cachep, (mm)))

#include <linux/init_task.h>

static struct mm_struct * mm_init(struct mm_struct * mm)
{
        atomic_set(&mm->mm_users, 1);
        atomic_set(&mm->mm_count, 1);
        init_rwsem(&mm->mmap_sem);
        INIT_LIST_HEAD(&mm->mmlist);
        mm->core_waiters = 0;
        mm->nr_ptes = 0;
        set_mm_counter(mm, file_rss, 0);
        set_mm_counter(mm, anon_rss, 0);
        spin_lock_init(&mm->page_table_lock);
        rwlock_init(&mm->ioctx_list_lock);
        mm->ioctx_list = NULL;
        mm->free_area_cache = TASK_UNMAPPED_BASE;
        mm->cached_hole_size = ~0UL;

        if (likely(!mm_alloc_pgd(mm))) {
                mm->def_flags = 0;
                return mm;
        }
        free_mm(mm);
        return NULL;
}

/*
 * Allocate and initialize an mm_struct.
 */
struct mm_struct * mm_alloc(void)
{
        struct mm_struct * mm;

        mm = allocate_mm();
        if (mm) {
                memset(mm, 0, sizeof(*mm));
                mm = mm_init(mm);
        }
        return mm;
}

/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
void fastcall __mmdrop(struct mm_struct *mm)
{
        BUG_ON(mm == &init_mm);
        mm_free_pgd(mm);
        destroy_context(mm);
        free_mm(mm);
}

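/*
 * Two counters protect an mm: mm_users counts users of the address
 * space itself (threads sharing it, callers of get_task_mm()), while
 * mm_count counts references to the mm_struct - one held on behalf of
 * all mm_users collectively, plus one for each lazy-TLB kernel thread
 * that still points at it via ->active_mm.  A sketch of the
 * struct-only reference pattern (illustrative):
 *
 *      atomic_inc(&mm->mm_count);  // pin the mm_struct, not the mappings
 *      ...inspect mm, without touching its address space...
 *      mmdrop(mm);                 // frees via __mmdrop() on last ref
 */
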
/*
 * Decrement the use count and release all resources for an mm.
 */
void mmput(struct mm_struct *mm)
{
        might_sleep();

        if (atomic_dec_and_test(&mm->mm_users)) {
                exit_aio(mm);
                exit_mmap(mm);
                if (!list_empty(&mm->mmlist)) {
                        spin_lock(&mmlist_lock);
                        list_del(&mm->mmlist);
                        spin_unlock(&mmlist_lock);
                }
                put_swap_token(mm);
                mmdrop(mm);
        }
}
EXPORT_SYMBOL_GPL(mmput);

/**
 * get_task_mm - acquire a reference to the task's mm
 *
 * Returns %NULL if the task has no mm, or if PF_BORROWED_MM is set
 * (meaning this kernel workthread has only transiently adopted a user
 * mm with use_mm, e.g. to do its AIO).  Otherwise returns the mm with
 * its use count bumped.  The caller must release the mm via mmput()
 * after use.  Typically used by /proc and ptrace.
 */
struct mm_struct *get_task_mm(struct task_struct *task)
{
        struct mm_struct *mm;

        task_lock(task);
        mm = task->mm;
        if (mm) {
                if (task->flags & PF_BORROWED_MM)
                        mm = NULL;
                else
                        atomic_inc(&mm->mm_users);
        }
        task_unlock(task);
        return mm;
}
EXPORT_SYMBOL_GPL(get_task_mm);
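
/*
 * Typical caller pattern (kernel side, as used by /proc; illustrative,
 * error handling trimmed):
 *
 *      struct mm_struct *mm = get_task_mm(task);
 *      if (mm) {
 *              down_read(&mm->mmap_sem);
 *              ...walk mm->mmap safely...
 *              up_read(&mm->mmap_sem);
 *              mmput(mm);
 *      }
 */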

/* Please note the differences between mmput and mm_release.
 * mmput is called whenever we stop holding onto a mm_struct,
 * on error and success alike.
 *
 * mm_release is called after a mm_struct has been removed
 * from the current process.
 *
 * This difference is important for error handling, when we
 * only half set up a mm_struct for a new process and need to restore
 * the old one.  Because we mmput the new mm_struct before
 * restoring the old one...
 * Eric Biederman 10 January 1998
 */
void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
        struct completion *vfork_done = tsk->vfork_done;

        /* Get rid of any cached register state */
        deactivate_mm(tsk, mm);

        /* notify parent sleeping on vfork() */
        if (vfork_done) {
                tsk->vfork_done = NULL;
                complete(vfork_done);
        }

        /*
         * If we're exiting normally, clear a user-space tid field if
         * requested.  We leave this alone when dying by signal, to leave
         * the value intact in a core dump, and to save the unnecessary
         * trouble otherwise.  Userland only wants this done for a sys_exit.
         */
        if (tsk->clear_child_tid
            && !(tsk->flags & PF_SIGNALED)
            && atomic_read(&mm->mm_users) > 1) {
                u32 __user * tidptr = tsk->clear_child_tid;
                tsk->clear_child_tid = NULL;

                /*
                 * We don't check the error code - if userspace has
                 * not set up a proper pointer then tough luck.
                 */
                put_user(0, tidptr);
                sys_futex(tidptr, FUTEX_WAKE, 1, NULL, NULL, 0);
        }
}

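/*
 * The clear_child_tid handshake above is what makes thread join cheap
 * in userspace.  Roughly what NPTL does (illustrative sketch, raw
 * syscalls, error handling omitted):
 *
 *      pid_t ctid;
 *      clone(fn, stack, CLONE_VM | CLONE_CHILD_SETTID |
 *            CLONE_CHILD_CLEARTID | SIGCHLD, arg, NULL, NULL, &ctid);
 *      while (ctid != 0)       // kernel zeroes ctid and FUTEX_WAKEs it
 *              syscall(SYS_futex, &ctid, FUTEX_WAIT, ctid, NULL, NULL, 0);
 */
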
/*
 * Allocate a new mm structure and copy contents from the
 * mm structure of the passed in task structure.
 */
static struct mm_struct *dup_mm(struct task_struct *tsk)
{
        struct mm_struct *mm, *oldmm = current->mm;
        int err;

        if (!oldmm)
                return NULL;

        mm = allocate_mm();
        if (!mm)
                goto fail_nomem;

        memcpy(mm, oldmm, sizeof(*mm));

        /* Initializing for Swap token stuff */
        mm->token_priority = 0;
        mm->last_interval = 0;

        if (!mm_init(mm))
                goto fail_nomem;

        if (init_new_context(tsk, mm))
                goto fail_nocontext;

        err = dup_mmap(mm, oldmm);
        if (err)
                goto free_pt;

        mm->hiwater_rss = get_mm_rss(mm);
        mm->hiwater_vm = mm->total_vm;

        return mm;

free_pt:
        mmput(mm);

fail_nomem:
        return NULL;

fail_nocontext:
        /*
         * If init_new_context() failed, we cannot use mmput() to free the mm
         * because it calls destroy_context()
         */
        mm_free_pgd(mm);
        free_mm(mm);
        return NULL;
}

static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
{
        struct mm_struct * mm, *oldmm;
        int retval;

        tsk->min_flt = tsk->maj_flt = 0;
        tsk->nvcsw = tsk->nivcsw = 0;

        tsk->mm = NULL;
        tsk->active_mm = NULL;

        /*
         * Are we cloning a kernel thread?
         *
         * We need to steal an active VM for that..
         */
        oldmm = current->mm;
        if (!oldmm)
                return 0;

        if (clone_flags & CLONE_VM) {
                atomic_inc(&oldmm->mm_users);
                mm = oldmm;
                goto good_mm;
        }

        retval = -ENOMEM;
        mm = dup_mm(tsk);
        if (!mm)
                goto fail_nomem;

good_mm:
        /* Initializing for Swap token stuff */
        mm->token_priority = 0;
        mm->last_interval = 0;

        tsk->mm = mm;
        tsk->active_mm = mm;
        return 0;

fail_nomem:
        return retval;
}

static inline struct fs_struct *__copy_fs_struct(struct fs_struct *old)
{
        struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
        /* We don't need to lock fs - think why ;-) */
        if (fs) {
                atomic_set(&fs->count, 1);
                rwlock_init(&fs->lock);
                fs->umask = old->umask;
                read_lock(&old->lock);
                fs->rootmnt = mntget(old->rootmnt);
                fs->root = dget(old->root);
                fs->pwdmnt = mntget(old->pwdmnt);
                fs->pwd = dget(old->pwd);
                if (old->altroot) {
                        fs->altrootmnt = mntget(old->altrootmnt);
                        fs->altroot = dget(old->altroot);
                } else {
                        fs->altrootmnt = NULL;
                        fs->altroot = NULL;
                }
                read_unlock(&old->lock);
        }
        return fs;
}

struct fs_struct *copy_fs_struct(struct fs_struct *old)
{
        return __copy_fs_struct(old);
}

EXPORT_SYMBOL_GPL(copy_fs_struct);

static inline int copy_fs(unsigned long clone_flags, struct task_struct * tsk)
{
        if (clone_flags & CLONE_FS) {
                atomic_inc(&current->fs->count);
                return 0;
        }
        tsk->fs = __copy_fs_struct(current->fs);
        if (!tsk->fs)
                return -ENOMEM;
        return 0;
}

static int count_open_files(struct fdtable *fdt)
{
        int size = fdt->max_fds;
        int i;

        /* Find the last open fd */
        for (i = size/(8*sizeof(long)); i > 0; ) {
                if (fdt->open_fds->fds_bits[--i])
                        break;
        }
        i = (i+1) * 8 * sizeof(long);
        return i;
}

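/*
 * Worked example (illustrative, 64-bit longs): with max_fds == 256 the
 * open_fds bitmap spans four longs.  If the highest open fd is 70, the
 * scan above stops at word 1 (fds 64-127) and returns (1+1)*64 = 128:
 * the result is always rounded up to a whole word, never the exact
 * highest fd + 1.
 */
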
static struct files_struct *alloc_files(void)
{
        struct files_struct *newf;
        struct fdtable *fdt;

        newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
        if (!newf)
                goto out;

        atomic_set(&newf->count, 1);

        spin_lock_init(&newf->file_lock);
        newf->next_fd = 0;
        fdt = &newf->fdtab;
        fdt->max_fds = NR_OPEN_DEFAULT;
        fdt->close_on_exec = (fd_set *)&newf->close_on_exec_init;
        fdt->open_fds = (fd_set *)&newf->open_fds_init;
        fdt->fd = &newf->fd_array[0];
        INIT_RCU_HEAD(&fdt->rcu);
        fdt->next = NULL;
        rcu_assign_pointer(newf->fdt, fdt);
out:
        return newf;
}

/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
static struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
{
        struct files_struct *newf;
        struct file **old_fds, **new_fds;
        int open_files, size, i;
        struct fdtable *old_fdt, *new_fdt;

        *errorp = -ENOMEM;
        newf = alloc_files();
        if (!newf)
                goto out;

        spin_lock(&oldf->file_lock);
        old_fdt = files_fdtable(oldf);
        new_fdt = files_fdtable(newf);
        open_files = count_open_files(old_fdt);

        /*
         * Check whether we need to allocate a larger fd array and fd set.
         * Note: we're not a clone task, so the open count won't change.
         */
        if (open_files > new_fdt->max_fds) {
                new_fdt->max_fds = 0;
                spin_unlock(&oldf->file_lock);
                spin_lock(&newf->file_lock);
                *errorp = expand_files(newf, open_files-1);
                spin_unlock(&newf->file_lock);
                if (*errorp < 0)
                        goto out_release;
                new_fdt = files_fdtable(newf);
                /*
                 * Reacquire the oldf lock and a pointer to its fd table;
                 * it may have grown a new, bigger fd table in the
                 * meantime, and we need the latest pointer.
                 */
                spin_lock(&oldf->file_lock);
                old_fdt = files_fdtable(oldf);
        }

        old_fds = old_fdt->fd;
        new_fds = new_fdt->fd;

        memcpy(new_fdt->open_fds->fds_bits,
                old_fdt->open_fds->fds_bits, open_files/8);
        memcpy(new_fdt->close_on_exec->fds_bits,
                old_fdt->close_on_exec->fds_bits, open_files/8);

        for (i = open_files; i != 0; i--) {
                struct file *f = *old_fds++;
                if (f) {
                        get_file(f);
                } else {
                        /*
                         * The fd may be claimed in the fd bitmap but not yet
                         * instantiated in the files array if a sibling thread
                         * is partway through open().  So make sure that this
                         * fd is available to the new process.
                         */
                        FD_CLR(open_files - i, new_fdt->open_fds);
                }
                rcu_assign_pointer(*new_fds++, f);
        }
        spin_unlock(&oldf->file_lock);

        /* compute the remainder to be cleared */
        size = (new_fdt->max_fds - open_files) * sizeof(struct file *);

        /* This is long word aligned thus could use an optimized version */
        memset(new_fds, 0, size);

        if (new_fdt->max_fds > open_files) {
                int left = (new_fdt->max_fds-open_files)/8;
                int start = open_files / (8 * sizeof(unsigned long));

                memset(&new_fdt->open_fds->fds_bits[start], 0, left);
                memset(&new_fdt->close_on_exec->fds_bits[start], 0, left);
        }

        return newf;

out_release:
        kmem_cache_free(files_cachep, newf);
out:
        return NULL;
}

static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
{
        struct files_struct *oldf, *newf;
        int error = 0;

        /*
         * A background process may not have any files ...
         */
        oldf = current->files;
        if (!oldf)
                goto out;

        if (clone_flags & CLONE_FILES) {
                atomic_inc(&oldf->count);
                goto out;
        }

        /*
         * Note: we may be using current for both targets (See exec.c)
         * This works because we cache current->files (old) as oldf. Don't
         * break this.
         */
        tsk->files = NULL;
        newf = dup_fd(oldf, &error);
        if (!newf)
                goto out;

        tsk->files = newf;
        error = 0;
out:
        return error;
}

/*
 *      Helper to unshare the files of the current task.
 *      We don't want to expose copy_files internals to
 *      the exec layer of the kernel.
 */

int unshare_files(void)
{
        struct files_struct *files  = current->files;
        int rc;

        BUG_ON(!files);

        /* This can race, but the race merely causes us to copy when
           we don't need to, and then drop the copy. */
        if (atomic_read(&files->count) == 1) {
                atomic_inc(&files->count);
                return 0;
        }
        rc = copy_files(0, current);
        if (rc)
                current->files = files;
        return rc;
}

EXPORT_SYMBOL(unshare_files);

static inline int copy_sighand(unsigned long clone_flags, struct task_struct * tsk)
{
        struct sighand_struct *sig;

        if (clone_flags & (CLONE_SIGHAND | CLONE_THREAD)) {
                atomic_inc(&current->sighand->count);
                return 0;
        }
        sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
        rcu_assign_pointer(tsk->sighand, sig);
        if (!sig)
                return -ENOMEM;
        atomic_set(&sig->count, 1);
        memcpy(sig->action, current->sighand->action, sizeof(sig->action));
        return 0;
}

void __cleanup_sighand(struct sighand_struct *sighand)
{
        if (atomic_dec_and_test(&sighand->count))
                kmem_cache_free(sighand_cachep, sighand);
}

static inline int copy_signal(unsigned long clone_flags, struct task_struct * tsk)
{
        struct signal_struct *sig;
        int ret;

        if (clone_flags & CLONE_THREAD) {
                atomic_inc(&current->signal->count);
                atomic_inc(&current->signal->live);
                return 0;
        }
        sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
        tsk->signal = sig;
        if (!sig)
                return -ENOMEM;

        ret = copy_thread_group_keys(tsk);
        if (ret < 0) {
                kmem_cache_free(signal_cachep, sig);
                return ret;
        }

        atomic_set(&sig->count, 1);
        atomic_set(&sig->live, 1);
        init_waitqueue_head(&sig->wait_chldexit);
        sig->flags = 0;
        sig->group_exit_code = 0;
        sig->group_exit_task = NULL;
        sig->group_stop_count = 0;
        sig->curr_target = NULL;
        init_sigpending(&sig->shared_pending);
        INIT_LIST_HEAD(&sig->posix_timers);

        hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        sig->it_real_incr.tv64 = 0;
        sig->real_timer.function = it_real_fn;
        sig->tsk = tsk;

        sig->it_virt_expires = cputime_zero;
        sig->it_virt_incr = cputime_zero;
        sig->it_prof_expires = cputime_zero;
        sig->it_prof_incr = cputime_zero;

        sig->leader = 0;        /* session leadership doesn't inherit */
        sig->tty_old_pgrp = NULL;

        sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
        sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
        sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
        sig->sched_time = 0;
        INIT_LIST_HEAD(&sig->cpu_timers[0]);
        INIT_LIST_HEAD(&sig->cpu_timers[1]);
        INIT_LIST_HEAD(&sig->cpu_timers[2]);
        taskstats_tgid_init(sig);

        task_lock(current->group_leader);
        memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
        task_unlock(current->group_leader);

        if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
                /*
                 * New sole thread in the process gets an expiry time
                 * of the whole CPU time limit.
                 */
                tsk->it_prof_expires =
                        secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
        }
        acct_init_pacct(&sig->pacct);

        return 0;
}

void __cleanup_signal(struct signal_struct *sig)
{
        exit_thread_group_keys(sig);
        kmem_cache_free(signal_cachep, sig);
}

static inline void cleanup_signal(struct task_struct *tsk)
{
        struct signal_struct *sig = tsk->signal;

        atomic_dec(&sig->live);

        if (atomic_dec_and_test(&sig->count))
                __cleanup_signal(sig);
}

static inline void copy_flags(unsigned long clone_flags, struct task_struct *p)
{
        unsigned long new_flags = p->flags;

        new_flags &= ~(PF_SUPERPRIV | PF_NOFREEZE);
        new_flags |= PF_FORKNOEXEC;
        if (!(clone_flags & CLONE_PTRACE))
                p->ptrace = 0;
        p->flags = new_flags;
}

asmlinkage long sys_set_tid_address(int __user *tidptr)
{
        current->clear_child_tid = tidptr;

        return current->pid;
}

static inline void rt_mutex_init_task(struct task_struct *p)
{
        spin_lock_init(&p->pi_lock);
#ifdef CONFIG_RT_MUTEXES
        plist_head_init(&p->pi_waiters, &p->pi_lock);
        p->pi_blocked_on = NULL;
#endif
}

/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static struct task_struct *copy_process(unsigned long clone_flags,
                                        unsigned long stack_start,
                                        struct pt_regs *regs,
                                        unsigned long stack_size,
                                        int __user *parent_tidptr,
                                        int __user *child_tidptr,
                                        int pid)
{
        int retval;
        struct task_struct *p = NULL;

        if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
                return ERR_PTR(-EINVAL);

        /*
         * Thread groups must share signals as well, and detached threads
         * can only be started up within the thread group.
         */
        if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
                return ERR_PTR(-EINVAL);

        /*
         * Shared signal handlers imply shared VM. By way of the above,
         * thread groups also imply shared VM. Blocking this case allows
         * for various simplifications in other code.
         */
        if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
                return ERR_PTR(-EINVAL);

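        /*
         * Concrete consequences of the checks above (illustrative):
         * clone(CLONE_NEWNS|CLONE_FS), clone(CLONE_THREAD) without
         * CLONE_SIGHAND, and clone(CLONE_SIGHAND) without CLONE_VM all
         * fail with -EINVAL, while NPTL's pthread_create() passes
         * CLONE_VM|CLONE_FS|CLONE_FILES|CLONE_SIGHAND|CLONE_THREAD|...
         * and satisfies all three invariants.
         */
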
        retval = security_task_create(clone_flags);
        if (retval)
                goto fork_out;

        retval = -ENOMEM;
        p = dup_task_struct(current);
        if (!p)
                goto fork_out;

        rt_mutex_init_task(p);

#ifdef CONFIG_TRACE_IRQFLAGS
        DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
        DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
        retval = -EAGAIN;
        if (atomic_read(&p->user->processes) >=
                        p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
                if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
                                p->user != &root_user)
                        goto bad_fork_free;
        }

        atomic_inc(&p->user->__count);
        atomic_inc(&p->user->processes);
        get_group_info(p->group_info);

        /*
         * If multiple threads are within copy_process(), then this check
         * triggers too late. This doesn't hurt, the check is only there
         * to stop root fork bombs.
         */
        if (nr_threads >= max_threads)
                goto bad_fork_cleanup_count;

        if (!try_module_get(task_thread_info(p)->exec_domain->module))
                goto bad_fork_cleanup_count;

        if (p->binfmt && !try_module_get(p->binfmt->module))
                goto bad_fork_cleanup_put_domain;

        p->did_exec = 0;
        delayacct_tsk_init(p);  /* Must remain after dup_task_struct() */
        copy_flags(clone_flags, p);
        p->pid = pid;
        retval = -EFAULT;
        if (clone_flags & CLONE_PARENT_SETTID)
                if (put_user(p->pid, parent_tidptr))
                        goto bad_fork_cleanup_delays_binfmt;

        INIT_LIST_HEAD(&p->children);
        INIT_LIST_HEAD(&p->sibling);
        p->vfork_done = NULL;
        spin_lock_init(&p->alloc_lock);

        clear_tsk_thread_flag(p, TIF_SIGPENDING);
        init_sigpending(&p->pending);

        p->utime = cputime_zero;
        p->stime = cputime_zero;
        p->sched_time = 0;
#ifdef CONFIG_TASK_XACCT
        p->rchar = 0;           /* I/O counter: bytes read */
        p->wchar = 0;           /* I/O counter: bytes written */
        p->syscr = 0;           /* I/O counter: read syscalls */
        p->syscw = 0;           /* I/O counter: write syscalls */
#endif
        task_io_accounting_init(p);
        acct_clear_integrals(p);

        p->it_virt_expires = cputime_zero;
        p->it_prof_expires = cputime_zero;
        p->it_sched_expires = 0;
        INIT_LIST_HEAD(&p->cpu_timers[0]);
        INIT_LIST_HEAD(&p->cpu_timers[1]);
        INIT_LIST_HEAD(&p->cpu_timers[2]);

        p->lock_depth = -1;             /* -1 = no lock */
        do_posix_clock_monotonic_gettime(&p->start_time);
        p->security = NULL;
        p->io_context = NULL;
        p->io_wait = NULL;
        p->audit_context = NULL;
        cpuset_fork(p);
#ifdef CONFIG_NUMA
        p->mempolicy = mpol_copy(p->mempolicy);
        if (IS_ERR(p->mempolicy)) {
                retval = PTR_ERR(p->mempolicy);
                p->mempolicy = NULL;
                goto bad_fork_cleanup_cpuset;
        }
        mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
        p->irq_events = 0;
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
        p->hardirqs_enabled = 1;
#else
        p->hardirqs_enabled = 0;
#endif
        p->hardirq_enable_ip = 0;
        p->hardirq_enable_event = 0;
        p->hardirq_disable_ip = _THIS_IP_;
        p->hardirq_disable_event = 0;
        p->softirqs_enabled = 1;
        p->softirq_enable_ip = _THIS_IP_;
        p->softirq_enable_event = 0;
        p->softirq_disable_ip = 0;
        p->softirq_disable_event = 0;
        p->hardirq_context = 0;
        p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
        p->lockdep_depth = 0; /* no locks held yet */
        p->curr_chain_key = 0;
        p->lockdep_recursion = 0;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
        p->blocked_on = NULL; /* not blocked yet */
#endif

        p->tgid = p->pid;
        if (clone_flags & CLONE_THREAD)
                p->tgid = current->tgid;

        if ((retval = security_task_alloc(p)))
                goto bad_fork_cleanup_policy;
        if ((retval = audit_alloc(p)))
                goto bad_fork_cleanup_security;
        /* copy all the process information */
        if ((retval = copy_semundo(clone_flags, p)))
                goto bad_fork_cleanup_audit;
        if ((retval = copy_files(clone_flags, p)))
                goto bad_fork_cleanup_semundo;
        if ((retval = copy_fs(clone_flags, p)))
                goto bad_fork_cleanup_files;
        if ((retval = copy_sighand(clone_flags, p)))
                goto bad_fork_cleanup_fs;
        if ((retval = copy_signal(clone_flags, p)))
                goto bad_fork_cleanup_sighand;
        if ((retval = copy_mm(clone_flags, p)))
                goto bad_fork_cleanup_signal;
        if ((retval = copy_keys(clone_flags, p)))
                goto bad_fork_cleanup_mm;
        if ((retval = copy_namespaces(clone_flags, p)))
                goto bad_fork_cleanup_keys;
        retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
        if (retval)
                goto bad_fork_cleanup_namespaces;

        p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
        /*
         * Clear TID on mm_release()?
         */
        p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
        p->robust_list = NULL;
#ifdef CONFIG_COMPAT
        p->compat_robust_list = NULL;
#endif
        INIT_LIST_HEAD(&p->pi_state_list);
        p->pi_state_cache = NULL;

        /*
         * sigaltstack should be cleared when sharing the same VM
         */
        if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
                p->sas_ss_sp = p->sas_ss_size = 0;

        /*
         * Syscall tracing should be turned off in the child regardless
         * of CLONE_PTRACE.
         */
        clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
        clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif

        /* Our parent execution domain becomes the current domain.
           These must match for thread signalling to apply. */
        p->parent_exec_id = p->self_exec_id;

        /* ok, now we should be set up.. */
        p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
        p->pdeath_signal = 0;
        p->exit_state = 0;

        /*
         * Ok, make it visible to the rest of the system.
         * We don't wake it up yet.
         */
        p->group_leader = p;
        INIT_LIST_HEAD(&p->thread_group);
        INIT_LIST_HEAD(&p->ptrace_children);
        INIT_LIST_HEAD(&p->ptrace_list);

        /* Perform scheduler related setup. Assign this task to a CPU. */
        sched_fork(p, clone_flags);

        /* Need tasklist lock for parent etc handling! */
        write_lock_irq(&tasklist_lock);

        /* for sys_ioprio_set(IOPRIO_WHO_PGRP) */
        p->ioprio = current->ioprio;

        /*
         * The task hasn't been attached yet, so its cpus_allowed mask will
         * not be changed, nor will its assigned CPU.
         *
         * The cpus_allowed mask of the parent may have changed after it was
         * copied the first time - so re-copy it here, then check the child's
         * CPU to ensure it is on a valid CPU (and if not, just force it back
         * to the parent's CPU). This avoids a lot of nasty races.
         */
        p->cpus_allowed = current->cpus_allowed;
        if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
                        !cpu_online(task_cpu(p))))
                set_task_cpu(p, smp_processor_id());

        /* CLONE_PARENT re-uses the old parent */
        if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
                p->real_parent = current->real_parent;
        else
                p->real_parent = current;
        p->parent = p->real_parent;

        spin_lock(&current->sighand->siglock);

        /*
         * Process group and session signals need to be delivered to just the
         * parent before the fork or both the parent and the child after the
         * fork. Restart if a signal comes in before we add the new process to
         * its process group.
         * A fatal signal pending means that current will exit, so the new
         * thread can't slip out of an OOM kill (or normal SIGKILL).
         */
        recalc_sigpending();
        if (signal_pending(current)) {
                spin_unlock(&current->sighand->siglock);
                write_unlock_irq(&tasklist_lock);
                retval = -ERESTARTNOINTR;
                goto bad_fork_cleanup_namespaces;
        }

        if (clone_flags & CLONE_THREAD) {
                p->group_leader = current->group_leader;
                list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);

                if (!cputime_eq(current->signal->it_virt_expires,
                                cputime_zero) ||
                    !cputime_eq(current->signal->it_prof_expires,
                                cputime_zero) ||
                    current->signal->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY ||
                    !list_empty(&current->signal->cpu_timers[0]) ||
                    !list_empty(&current->signal->cpu_timers[1]) ||
                    !list_empty(&current->signal->cpu_timers[2])) {
                        /*
                         * Have child wake up on its first tick to check
                         * for process CPU timers.
                         */
                        p->it_prof_expires = jiffies_to_cputime(1);
                }
        }

        if (likely(p->pid)) {
                add_parent(p);
                if (unlikely(p->ptrace & PT_PTRACED))
                        __ptrace_link(p, current->parent);

                if (thread_group_leader(p)) {
                        p->signal->tty = current->signal->tty;
                        p->signal->pgrp = process_group(current);
                        set_signal_session(p->signal, process_session(current));
                        attach_pid(p, PIDTYPE_PGID, process_group(p));
                        attach_pid(p, PIDTYPE_SID, process_session(p));

                        list_add_tail_rcu(&p->tasks, &init_task.tasks);
                        __get_cpu_var(process_counts)++;
                }
                attach_pid(p, PIDTYPE_PID, p->pid);
                nr_threads++;
        }

        total_forks++;
        spin_unlock(&current->sighand->siglock);
        write_unlock_irq(&tasklist_lock);
        proc_fork_connector(p);
        return p;

bad_fork_cleanup_namespaces:
        exit_task_namespaces(p);
bad_fork_cleanup_keys:
        exit_keys(p);
bad_fork_cleanup_mm:
        if (p->mm)
                mmput(p->mm);
bad_fork_cleanup_signal:
        cleanup_signal(p);
bad_fork_cleanup_sighand:
        __cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
        exit_fs(p); /* blocking */
bad_fork_cleanup_files:
        exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
        exit_sem(p);
bad_fork_cleanup_audit:
        audit_free(p);
bad_fork_cleanup_security:
        security_task_free(p);
bad_fork_cleanup_policy:
#ifdef CONFIG_NUMA
        mpol_free(p->mempolicy);
bad_fork_cleanup_cpuset:
#endif
        cpuset_exit(p);
bad_fork_cleanup_delays_binfmt:
        delayacct_tsk_free(p);
        if (p->binfmt)
                module_put(p->binfmt->module);
bad_fork_cleanup_put_domain:
        module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
        put_group_info(p->group_info);
        atomic_dec(&p->user->processes);
        free_uid(p->user);
bad_fork_free:
        free_task(p);
fork_out:
        return ERR_PTR(retval);
}

noinline struct pt_regs * __devinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
{
        memset(regs, 0, sizeof(struct pt_regs));
        return regs;
}

struct task_struct * __cpuinit fork_idle(int cpu)
{
        struct task_struct *task;
        struct pt_regs regs;

        task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL, NULL, 0);
        if (!IS_ERR(task))
                init_idle(task, cpu);

        return task;
}

static inline int fork_traceflag(unsigned clone_flags)
{
        if (clone_flags & CLONE_UNTRACED)
                return 0;
        else if (clone_flags & CLONE_VFORK) {
                if (current->ptrace & PT_TRACE_VFORK)
                        return PTRACE_EVENT_VFORK;
        } else if ((clone_flags & CSIGNAL) != SIGCHLD) {
                if (current->ptrace & PT_TRACE_CLONE)
                        return PTRACE_EVENT_CLONE;
        } else if (current->ptrace & PT_TRACE_FORK)
                return PTRACE_EVENT_FORK;

        return 0;
}

/*
 *  Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
long do_fork(unsigned long clone_flags,
              unsigned long stack_start,
              struct pt_regs *regs,
              unsigned long stack_size,
              int __user *parent_tidptr,
              int __user *child_tidptr)
{
        struct task_struct *p;
        int trace = 0;
        struct pid *pid = alloc_pid();
        long nr;

        if (!pid)
                return -EAGAIN;
        nr = pid->nr;
        if (unlikely(current->ptrace)) {
                trace = fork_traceflag(clone_flags);
                if (trace)
                        clone_flags |= CLONE_PTRACE;
        }

        p = copy_process(clone_flags, stack_start, regs, stack_size, parent_tidptr, child_tidptr, nr);
        /*
         * Do this prior to waking up the new thread - the thread pointer
         * might get invalid after that point, if the thread exits quickly.
         */
        if (!IS_ERR(p)) {
                struct completion vfork;

                if (clone_flags & CLONE_VFORK) {
                        p->vfork_done = &vfork;
                        init_completion(&vfork);
                }

                if ((p->ptrace & PT_PTRACED) || (clone_flags & CLONE_STOPPED)) {
                        /*
                         * We'll start up with an immediate SIGSTOP.
                         */
                        sigaddset(&p->pending.signal, SIGSTOP);
                        set_tsk_thread_flag(p, TIF_SIGPENDING);
                }

                if (!(clone_flags & CLONE_STOPPED))
                        wake_up_new_task(p, clone_flags);
                else
                        p->state = TASK_STOPPED;

                if (unlikely(trace)) {
                        current->ptrace_message = nr;
                        ptrace_notify((trace << 8) | SIGTRAP);
                }

                if (clone_flags & CLONE_VFORK) {
                        wait_for_completion(&vfork);
                        if (unlikely(current->ptrace & PT_TRACE_VFORK_DONE)) {
                                current->ptrace_message = nr;
                                ptrace_notify((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP);
                        }
                }
        } else {
                free_pid(pid);
                nr = PTR_ERR(p);
        }
        return nr;
}
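
/*
 * Architecture syscall entry points are thin wrappers around do_fork().
 * On i386, for instance (illustrative; see arch/i386/kernel/process.c):
 *
 *      sys_fork:  do_fork(SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
 *      sys_vfork: do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD,
 *                         regs.esp, &regs, 0, NULL, NULL);
 *      sys_clone: do_fork(clone_flags, newsp, &regs, 0,
 *                         parent_tidptr, child_tidptr);
 */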

#ifndef ARCH_MIN_MMSTRUCT_ALIGN
#define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif

static void sighand_ctor(void *data, struct kmem_cache *cachep, unsigned long flags)
{
        struct sighand_struct *sighand = data;

        if (flags & SLAB_CTOR_CONSTRUCTOR)
                spin_lock_init(&sighand->siglock);
}

void __init proc_caches_init(void)
{
        sighand_cachep = kmem_cache_create("sighand_cache",
                        sizeof(struct sighand_struct), 0,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU,
                        sighand_ctor, NULL);
        signal_cachep = kmem_cache_create("signal_cache",
                        sizeof(struct signal_struct), 0,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
        files_cachep = kmem_cache_create("files_cache",
                        sizeof(struct files_struct), 0,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
        fs_cachep = kmem_cache_create("fs_cache",
                        sizeof(struct fs_struct), 0,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
        vm_area_cachep = kmem_cache_create("vm_area_struct",
                        sizeof(struct vm_area_struct), 0,
                        SLAB_PANIC, NULL, NULL);
        mm_cachep = kmem_cache_create("mm_struct",
                        sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
}


/*
 * Check constraints on flags passed to the unshare system call and
 * force unsharing of additional process context as appropriate.
 */
static inline void check_unshare_flags(unsigned long *flags_ptr)
{
        /*
         * If unsharing a thread from a thread group, must also
         * unshare vm.
         */
        if (*flags_ptr & CLONE_THREAD)
                *flags_ptr |= CLONE_VM;

        /*
         * If unsharing vm, must also unshare signal handlers.
         */
        if (*flags_ptr & CLONE_VM)
                *flags_ptr |= CLONE_SIGHAND;

        /*
         * If unsharing signal handlers while the task still shares them
         * with other CLONE_THREAD tasks, the thread must be unshared too.
         */
        if ((*flags_ptr & CLONE_SIGHAND) &&
            (atomic_read(&current->signal->count) > 1))
                *flags_ptr |= CLONE_THREAD;

        /*
         * If unsharing namespace, must also unshare filesystem information.
         */
        if (*flags_ptr & CLONE_NEWNS)
                *flags_ptr |= CLONE_FS;
}
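
/*
 * Example of the forced propagation above: unshare(CLONE_NEWNS)
 * actually proceeds with CLONE_NEWNS|CLONE_FS, so a new mount
 * namespace always comes with a private fs_struct (root, cwd) rather
 * than one still shared with other tasks.
 */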

/*
 * Unsharing of tasks created with CLONE_THREAD is not supported yet
 */
static int unshare_thread(unsigned long unshare_flags)
{
        if (unshare_flags & CLONE_THREAD)
                return -EINVAL;

        return 0;
}

/*
 * Unshare the filesystem structure if it is being shared
 */
static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
{
        struct fs_struct *fs = current->fs;

        if ((unshare_flags & CLONE_FS) &&
            (fs && atomic_read(&fs->count) > 1)) {
                *new_fsp = __copy_fs_struct(current->fs);
                if (!*new_fsp)
                        return -ENOMEM;
        }

        return 0;
}

/*
 * Unsharing of sighand is not supported yet
 */
static int unshare_sighand(unsigned long unshare_flags, struct sighand_struct **new_sighp)
{
        struct sighand_struct *sigh = current->sighand;

        if ((unshare_flags & CLONE_SIGHAND) && atomic_read(&sigh->count) > 1)
                return -EINVAL;
        else
                return 0;
}

/*
 * Unshare vm if it is being shared
 */
static int unshare_vm(unsigned long unshare_flags, struct mm_struct **new_mmp)
{
        struct mm_struct *mm = current->mm;

        if ((unshare_flags & CLONE_VM) &&
            (mm && atomic_read(&mm->mm_users) > 1)) {
                return -EINVAL;
        }

        return 0;
}

/*
 * Unshare file descriptor table if it is being shared
 */
static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
{
        struct files_struct *fd = current->files;
        int error = 0;

        if ((unshare_flags & CLONE_FILES) &&
            (fd && atomic_read(&fd->count) > 1)) {
                *new_fdp = dup_fd(fd, &error);
                if (!*new_fdp)
                        return error;
        }

        return 0;
}

/*
 * Unsharing of semundo for tasks created with CLONE_SYSVSEM is not
 * supported yet
 */
static int unshare_semundo(unsigned long unshare_flags, struct sem_undo_list **new_ulistp)
{
        if (unshare_flags & CLONE_SYSVSEM)
                return -EINVAL;

        return 0;
}

/*
 * unshare allows a process to 'unshare' part of the process
 * context which was originally shared using clone.  copy_*
 * functions used by do_fork() cannot be used here directly
 * because they modify an inactive task_struct that is being
 * constructed. Here we are modifying the current, active,
 * task_struct.
 */
asmlinkage long sys_unshare(unsigned long unshare_flags)
{
        int err = 0;
        struct fs_struct *fs, *new_fs = NULL;
        struct sighand_struct *new_sigh = NULL;
        struct mm_struct *mm, *new_mm = NULL, *active_mm = NULL;
        struct files_struct *fd, *new_fd = NULL;
        struct sem_undo_list *new_ulist = NULL;
        struct nsproxy *new_nsproxy = NULL, *old_nsproxy = NULL;

        check_unshare_flags(&unshare_flags);

        /* Return -EINVAL for all unsupported flags */
        err = -EINVAL;
        if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
                                CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
                                CLONE_NEWUTS|CLONE_NEWIPC))
                goto bad_unshare_out;

        if ((err = unshare_thread(unshare_flags)))
                goto bad_unshare_out;
        if ((err = unshare_fs(unshare_flags, &new_fs)))
                goto bad_unshare_cleanup_thread;
        if ((err = unshare_sighand(unshare_flags, &new_sigh)))
                goto bad_unshare_cleanup_fs;
        if ((err = unshare_vm(unshare_flags, &new_mm)))
                goto bad_unshare_cleanup_sigh;
        if ((err = unshare_fd(unshare_flags, &new_fd)))
                goto bad_unshare_cleanup_vm;
        if ((err = unshare_semundo(unshare_flags, &new_ulist)))
                goto bad_unshare_cleanup_fd;
        if ((err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
                        new_fs)))
                goto bad_unshare_cleanup_semundo;

        if (new_fs || new_mm || new_fd || new_ulist || new_nsproxy) {

                task_lock(current);

                if (new_nsproxy) {
                        old_nsproxy = current->nsproxy;
                        current->nsproxy = new_nsproxy;
                        new_nsproxy = old_nsproxy;
                }

                if (new_fs) {
                        fs = current->fs;
                        current->fs = new_fs;
                        new_fs = fs;
                }

                if (new_mm) {
                        mm = current->mm;
                        active_mm = current->active_mm;
                        current->mm = new_mm;
                        current->active_mm = new_mm;
                        activate_mm(active_mm, new_mm);
                        new_mm = mm;
                }

                if (new_fd) {
                        fd = current->files;
                        current->files = new_fd;
                        new_fd = fd;
                }

                task_unlock(current);
        }

        if (new_nsproxy)
                put_nsproxy(new_nsproxy);

bad_unshare_cleanup_semundo:
bad_unshare_cleanup_fd:
        if (new_fd)
                put_files_struct(new_fd);

bad_unshare_cleanup_vm:
        if (new_mm)
                mmput(new_mm);

bad_unshare_cleanup_sigh:
        if (new_sigh)
                if (atomic_dec_and_test(&new_sigh->count))
                        kmem_cache_free(sighand_cachep, new_sigh);

bad_unshare_cleanup_fs:
        if (new_fs)
                put_fs_struct(new_fs);

bad_unshare_cleanup_thread:
bad_unshare_out:
        return err;
}
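
/*
 * Illustrative userspace use (error handling omitted): give the
 * calling process a private mount namespace, then a private fd table:
 *
 *      unshare(CLONE_NEWNS);   // check_unshare_flags() adds CLONE_FS
 *      unshare(CLONE_FILES);
 *
 * Flags such as CLONE_THREAD or CLONE_SYSVSEM still fail with -EINVAL
 * here, per the unshare_* helpers above.
 */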