]> nv-tegra.nvidia Code Review - linux-3.10.git/blob - arch/um/kernel/tt/process_kern.c
Merge with /home/shaggy/git/linus-clean/
[linux-3.10.git] / arch / um / kernel / tt / process_kern.c
1 /* 
2  * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
3  * Licensed under the GPL
4  */
5
6 #include "linux/sched.h"
7 #include "linux/signal.h"
8 #include "linux/kernel.h"
9 #include "linux/interrupt.h"
10 #include "linux/ptrace.h"
11 #include "asm/system.h"
12 #include "asm/pgalloc.h"
13 #include "asm/ptrace.h"
14 #include "asm/tlbflush.h"
15 #include "irq_user.h"
16 #include "signal_user.h"
17 #include "kern_util.h"
18 #include "user_util.h"
19 #include "os.h"
20 #include "kern.h"
21 #include "sigcontext.h"
22 #include "time_user.h"
23 #include "mem_user.h"
24 #include "tlb.h"
25 #include "mode.h"
26 #include "init.h"
27 #include "tt.h"
28
/*
 * switch_to_tt - context switch for tt (tracing thread) mode.
 *
 * In tt mode every task is backed by its own host process.  Switching
 * means waking the incoming task's host process (a one-byte write to its
 * switch_pipe) and then putting ourselves to sleep (a blocking read on
 * our own switch_pipe) until somebody schedules us back in.
 *
 * Returns the task we most recently switched away from
 * (current->thread.prev_sched).
 */
void *switch_to_tt(void *prev, void *next, void *last)
{
        struct task_struct *from, *to, *prev_sched;
        unsigned long flags;
        int err, vtalrm, alrm, prof, cpu;
        char c;

        from = prev;
        to = next;

        to->thread.prev_sched = from;

        /* NOTE(review): interrupts appear to be forwarded only when
         * switching on CPU 0 - presumably CPU 0 owns the external
         * interrupt sources; confirm against the irq forwarding code. */
        cpu = from->thread_info->cpu;
        if(cpu == 0)
                forward_interrupts(to->thread.mode.tt.extern_pid);
#ifdef CONFIG_SMP
        forward_ipi(cpu_data[cpu].ipi_pipe[0], to->thread.mode.tt.extern_pid);
#endif
        local_irq_save(flags);

        /* Block the timer/profiling signals across the switch; their old
         * states are restored just before returning. */
        vtalrm = change_sig(SIGVTALRM, 0);
        alrm = change_sig(SIGALRM, 0);
        prof = change_sig(SIGPROF, 0);

        /* Any SIGIO pending here now belongs to the incoming task's host
         * process. */
        forward_pending_sigio(to->thread.mode.tt.extern_pid);

        c = 0;
        set_current(to);

        /* Wake the incoming task - its host process is sleeping in a read
         * on the other end of this pipe. */
        err = os_write_file(to->thread.mode.tt.switch_pipe[1], &c, sizeof(c));
        if(err != sizeof(c))
                panic("write of switch_pipe failed, err = %d", -err);

        /* switch_pipe[0] == -1 flags a released task (see
         * release_thread_tt); its host process must not keep running. */
        if(from->thread.mode.tt.switch_pipe[0] == -1)
                os_kill_process(os_getpid(), 0);

        /* Sleep until another task schedules us back in. */
        err = os_read_file(from->thread.mode.tt.switch_pipe[0], &c, sizeof(c));
        if(err != sizeof(c))
                panic("read of switch_pipe failed, errno = %d", -err);

        /* If the process that we have just scheduled away from has exited,
         * then it needs to be killed here.  The reason is that, even though
         * it will kill itself when it next runs, that may be too late.  Its
         * stack will be freed, possibly before then, and if that happens,
         * we have a use-after-free situation.  So, it gets killed here
         * in case it has not already killed itself.
         */
        prev_sched = current->thread.prev_sched;
        if(prev_sched->thread.mode.tt.switch_pipe[0] == -1)
                os_kill_process(prev_sched->thread.mode.tt.extern_pid, 1);

        /* Restore the timer/profiling signal states saved above. */
        change_sig(SIGVTALRM, vtalrm);
        change_sig(SIGALRM, alrm);
        change_sig(SIGPROF, prof);

        arch_switch();

        flush_tlb_all();
        local_irq_restore(flags);

        return(current->thread.prev_sched);
}
91
92 void release_thread_tt(struct task_struct *task)
93 {
94         int pid = task->thread.mode.tt.extern_pid;
95
96         /*
97          * We first have to kill the other process, before
98          * closing its switch_pipe. Else it might wake up
99          * and receive "EOF" before we could kill it.
100          */
101         if(os_getpid() != pid)
102                 os_kill_process(pid, 0);
103
104         os_close_file(task->thread.mode.tt.switch_pipe[0]);
105         os_close_file(task->thread.mode.tt.switch_pipe[1]);
106         /* use switch_pipe as flag: thread is released */
107         task->thread.mode.tt.switch_pipe[0] = -1;
108 }
109
/*
 * Park a freshly created host process until the scheduler writes its
 * wakeup byte down the given switch_pipe read end.
 */
void suspend_new_thread(int fd)
{
        char wakeup;
        int n;

        os_stop_process(os_getpid());

        n = os_read_file(fd, &wakeup, sizeof(wakeup));
        if (n != sizeof(wakeup))
                panic("read failed in suspend_new_thread, err = %d", -n);
}
120
121 void schedule_tail(task_t *prev);
122
/*
 * new_thread_handler - SIGUSR1 handler in which a new kernel thread
 * starts running for real.
 *
 * Entered on the thread's proper kernel stack (arranged by
 * new_thread_proc).  Records the sigcontext location, waits to be
 * scheduled in for the first time, finishes setup, then runs the
 * requested thread function.
 */
static void new_thread_handler(int sig)
{
        unsigned long disable;
        int (*fn)(void *);
        void *arg;

        /* The function and argument were stashed in the request slot by
         * copy_thread_tt. */
        fn = current->thread.request.u.thread.proc;
        arg = current->thread.request.u.thread.arg;

        /* Record where the host sigcontext lives - assumed to sit on the
         * stack just above the handler's argument.  NOTE(review): this
         * layout is arch/libc dependent - confirm. */
        UPT_SC(&current->thread.regs.regs) = (void *) (&sig + 1);
        /* new_thread_proc blocked signals with local_irq_disable(), and
         * that mask was captured into this sigcontext; clear the timer/IO
         * signals from the saved mask so they are unblocked again after
         * sigreturn (see the comment in new_thread_proc). */
        disable = (1 << (SIGVTALRM - 1)) | (1 << (SIGALRM - 1)) |
                (1 << (SIGIO - 1)) | (1 << (SIGPROF - 1));
        SC_SIGMASK(UPT_SC(&current->thread.regs.regs)) &= ~disable;

        /* Sleep until the scheduler first switches to this thread. */
        suspend_new_thread(current->thread.mode.tt.switch_pipe[0]);

        force_flush_all();
        if(current->thread.prev_sched != NULL)
                schedule_tail(current->thread.prev_sched);
        current->thread.prev_sched = NULL;

        init_new_thread_signals(1);
        enable_timer();
        /* The temporary stack used while creating this thread is no
         * longer needed. */
        free_page(current->thread.temp_stack);
        set_cmdline("(kernel thread)");

        change_sig(SIGUSR1, 1);
        change_sig(SIGVTALRM, 1);
        change_sig(SIGPROF, 1);
        local_irq_enable();
        if(!run_kernel_thread(fn, arg, &current->thread.exec_buf))
                do_exit(0);

        /* XXX No set_user_mode here because a newly execed process will
         * immediately segfault on its non-existent IP, coming straight back
         * to the signal handler, which will call set_user_mode on its way
         * out.  This should probably change since it's confusing.
         */
}
162
/*
 * new_thread_proc - first code run by the host process backing a new
 * kernel thread.  Moves the thread onto its real stack by arranging for
 * a self-delivered SIGUSR1 to enter new_thread_handler there.
 */
static int new_thread_proc(void *stack)
{
        /* local_irq_disable is needed to block out signals until this thread is
         * properly scheduled.  Otherwise, the tracing thread will get mighty
         * upset about any signals that arrive before that.
         * This has the complication that it sets the saved signal mask in
         * the sigcontext to block signals.  This gets restored when this
         * thread (or a descendant, since they get a copy of this sigcontext)
         * returns to userspace.
         * So, this is compensated for elsewhere.
         * XXX There is still a small window until local_irq_disable() actually
         * finishes where signals are possible - shouldn't be a problem in
         * practice since SIGIO hasn't been forwarded here yet, and the
         * local_irq_disable should finish before a SIGVTALRM has time to be
         * delivered.
         */

        local_irq_disable();
        init_new_thread_stack(stack, new_thread_handler);
        /* Queue a SIGUSR1 for ourselves, then unblock it so its handler
         * (new_thread_handler) runs on the stack set up above. */
        os_usr1_process(os_getpid());
        change_sig(SIGUSR1, 1);
        return(0);
}
186
187 /* Signal masking - signals are blocked at the start of fork_tramp.  They
188  * are re-enabled when finish_fork_handler is entered by fork_tramp hitting
189  * itself with a SIGUSR1.  set_user_mode has to be run with SIGUSR1 off,
190  * so it is blocked before it's called.  They are re-enabled on sigreturn
191  * despite the fact that they were blocked when the SIGUSR1 was issued because
192  * copy_thread copies the parent's sigcontext, including the signal mask
193  * onto the signal frame.
194  */
195
/*
 * finish_fork_handler - SIGUSR1 handler in which a newly forked task
 * completes its setup.
 *
 * Entered from fork_tramp on the new task's kernel stack.  Waits for the
 * first schedule-in, restores memory protections, and frees the
 * temporary creation stack.
 */
void finish_fork_handler(int sig)
{
        /* Record where the host sigcontext lives - assumed to sit on the
         * stack just above the handler's argument.  NOTE(review):
         * arch/libc dependent - confirm. */
        UPT_SC(&current->thread.regs.regs) = (void *) (&sig + 1);
        /* Sleep until the scheduler first switches to this task. */
        suspend_new_thread(current->thread.mode.tt.switch_pipe[0]);

        force_flush_all();
        if(current->thread.prev_sched != NULL)
                schedule_tail(current->thread.prev_sched);
        current->thread.prev_sched = NULL;

        enable_timer();
        change_sig(SIGVTALRM, 1);
        local_irq_enable();
        /* A fork (as opposed to a CLONE_VM clone sharing the mm) got its
         * own copy of the address space, so its kernel area must be
         * protected again. */
        if(current->mm != current->parent->mm)
                protect_memory(uml_reserved, high_physmem - uml_reserved, 1, 
                               1, 0, 1);
        task_protections((unsigned long) current_thread);

        free_page(current->thread.temp_stack);
        local_irq_disable();
        /* set_user_mode has to run with SIGUSR1 blocked (see the comment
         * block above this function). */
        change_sig(SIGUSR1, 0);
        set_user_mode(current);
}
219
/*
 * fork_tramp - first code run by the host process backing a forked task.
 *
 * Runs on the temporary stack handed over by copy_thread_tt; moves the
 * task onto its real stack by arranging for a self-delivered SIGUSR1 to
 * enter finish_fork_handler there.
 */
int fork_tramp(void *stack)
{
        local_irq_disable();
        arch_init_thread();
        init_new_thread_stack(stack, finish_fork_handler);

        /* Queue a SIGUSR1 for ourselves, then unblock it so its handler
         * (finish_fork_handler) runs on the stack set up above. */
        os_usr1_process(os_getpid());
        change_sig(SIGUSR1, 1);
        return(0);
}
230
231 int copy_thread_tt(int nr, unsigned long clone_flags, unsigned long sp,
232                    unsigned long stack_top, struct task_struct * p, 
233                    struct pt_regs *regs)
234 {
235         int (*tramp)(void *);
236         int new_pid, err;
237         unsigned long stack;
238         
239         if(current->thread.forking)
240                 tramp = fork_tramp;
241         else {
242                 tramp = new_thread_proc;
243                 p->thread.request.u.thread = current->thread.request.u.thread;
244         }
245
246         err = os_pipe(p->thread.mode.tt.switch_pipe, 1, 1);
247         if(err < 0){
248                 printk("copy_thread : pipe failed, err = %d\n", -err);
249                 return(err);
250         }
251
252         stack = alloc_stack(0, 0);
253         if(stack == 0){
254                 printk(KERN_ERR "copy_thread : failed to allocate "
255                        "temporary stack\n");
256                 return(-ENOMEM);
257         }
258
259         clone_flags &= CLONE_VM;
260         p->thread.temp_stack = stack;
261         new_pid = start_fork_tramp(p->thread_info, stack, clone_flags, tramp);
262         if(new_pid < 0){
263                 printk(KERN_ERR "copy_thread : clone failed - errno = %d\n", 
264                        -new_pid);
265                 return(new_pid);
266         }
267
268         if(current->thread.forking){
269                 sc_to_sc(UPT_SC(&p->thread.regs.regs), 
270                          UPT_SC(&current->thread.regs.regs));
271                 SC_SET_SYSCALL_RETURN(UPT_SC(&p->thread.regs.regs), 0);
272                 if(sp != 0) SC_SP(UPT_SC(&p->thread.regs.regs)) = sp;
273         }
274         p->thread.mode.tt.extern_pid = new_pid;
275
276         current->thread.request.op = OP_FORK;
277         current->thread.request.u.fork.pid = new_pid;
278         os_usr1_process(os_getpid());
279
280         /* Enable the signal and then disable it to ensure that it is handled
281          * here, and nowhere else.
282          */
283         change_sig(SIGUSR1, 1);
284
285         change_sig(SIGUSR1, 0);
286         err = 0;
287         return(err);
288 }
289
/*
 * reboot_tt - file an OP_REBOOT request and raise SIGUSR1 so the tracing
 * thread picks the request up (see do_proc_op for the request protocol).
 */
void reboot_tt(void)
{
        current->thread.request.op = OP_REBOOT;
        os_usr1_process(os_getpid());
        /* Unblock SIGUSR1 so the queued signal is actually delivered. */
        change_sig(SIGUSR1, 1);
}
296
/*
 * halt_tt - file an OP_HALT request and raise SIGUSR1 so the tracing
 * thread picks the request up (see do_proc_op for the request protocol).
 */
void halt_tt(void)
{
        current->thread.request.op = OP_HALT;
        os_usr1_process(os_getpid());
        /* Unblock SIGUSR1 so the queued signal is actually delivered. */
        change_sig(SIGUSR1, 1);
}
303
304 void kill_off_processes_tt(void)
305 {
306         struct task_struct *p;
307         int me;
308
309         me = os_getpid();
310         for_each_process(p){
311                 if(p->thread.mode.tt.extern_pid != me) 
312                         os_kill_process(p->thread.mode.tt.extern_pid, 0);
313         }
314         if(init_task.thread.mode.tt.extern_pid != me) 
315                 os_kill_process(init_task.thread.mode.tt.extern_pid, 0);
316 }
317
/*
 * initial_thread_cb_tt - run (*proc)(arg) in the tracing thread.
 *
 * If the caller already is the tracing thread, the callback is invoked
 * directly.  Otherwise an OP_CB request is filed and SIGUSR1 raised so
 * the tracing thread services it (see do_proc_op); the enable/disable
 * pair ensures the signal is handled right here and nowhere else, as in
 * copy_thread_tt.
 */
void initial_thread_cb_tt(void (*proc)(void *), void *arg)
{
        if(os_getpid() == tracing_pid){
                (*proc)(arg);
        }
        else {
                current->thread.request.op = OP_CB;
                current->thread.request.u.cb.proc = proc;
                current->thread.request.u.cb.arg = arg;
                os_usr1_process(os_getpid());
                change_sig(SIGUSR1, 1);

                change_sig(SIGUSR1, 0);
        }
}
333
334 int do_proc_op(void *t, int proc_id)
335 {
336         struct task_struct *task;
337         struct thread_struct *thread;
338         int op, pid;
339
340         task = t;
341         thread = &task->thread;
342         op = thread->request.op;
343         switch(op){
344         case OP_NONE:
345         case OP_TRACE_ON:
346                 break;
347         case OP_EXEC:
348                 pid = thread->request.u.exec.pid;
349                 do_exec(thread->mode.tt.extern_pid, pid);
350                 thread->mode.tt.extern_pid = pid;
351                 cpu_tasks[task->thread_info->cpu].pid = pid;
352                 break;
353         case OP_FORK:
354                 attach_process(thread->request.u.fork.pid);
355                 break;
356         case OP_CB:
357                 (*thread->request.u.cb.proc)(thread->request.u.cb.arg);
358                 break;
359         case OP_REBOOT:
360         case OP_HALT:
361                 break;
362         default:
363                 tracer_panic("Bad op in do_proc_op");
364                 break;
365         }
366         thread->request.op = OP_NONE;
367         return(op);
368 }
369
/* In tt mode the idle task just runs the generic idle loop. */
void init_idle_tt(void)
{
        default_idle();
}
374
375 extern void start_kernel(void);
376
/*
 * start_kernel_proc - entry point of the host process that runs the UML
 * kernel proper; registers itself as CPU 0 and calls start_kernel().
 */
static int start_kernel_proc(void *unused)
{
        int pid;

        block_signals();
        pid = os_getpid();

        cpu_tasks[0].pid = pid;
        cpu_tasks[0].task = current;
#ifdef CONFIG_SMP
        cpu_online_map = cpumask_of_cpu(0);
#endif
        /* NOTE(review): with the 'debug' switch, stop here - presumably
         * so a debugger can be attached before boot; confirm. */
        if(debug) os_stop_process(pid);
        start_kernel();
        /* start_kernel() should not return; keep the compiler happy. */
        return(0);
}
393
394 void set_tracing(void *task, int tracing)
395 {
396         ((struct task_struct *) task)->thread.mode.tt.tracing = tracing;
397 }
398
399 int is_tracing(void *t)
400 {
401         return (((struct task_struct *) t)->thread.mode.tt.tracing);
402 }
403
404 int set_user_mode(void *t)
405 {
406         struct task_struct *task;
407
408         task = t ? t : current;
409         if(task->thread.mode.tt.tracing) 
410                 return(1);
411         task->thread.request.op = OP_TRACE_ON;
412         os_usr1_process(os_getpid());
413         return(0);
414 }
415
416 void set_init_pid(int pid)
417 {
418         int err;
419
420         init_task.thread.mode.tt.extern_pid = pid;
421         err = os_pipe(init_task.thread.mode.tt.switch_pipe, 1, 1);
422         if(err)
423                 panic("Can't create switch pipe for init_task, errno = %d",
424                       -err);
425 }
426
427 int start_uml_tt(void)
428 {
429         void *sp;
430         int pages;
431
432         pages = (1 << CONFIG_KERNEL_STACK_ORDER);
433         sp = (void *) ((unsigned long) init_task.thread_info) +
434                 pages * PAGE_SIZE - sizeof(unsigned long);
435         return(tracer(start_kernel_proc, sp));
436 }
437
438 int external_pid_tt(struct task_struct *task)
439 {
440         return(task->thread.mode.tt.extern_pid);
441 }
442
443 int thread_pid_tt(struct task_struct *task)
444 {
445         return(task->thread.mode.tt.extern_pid);
446 }
447
448 int is_valid_pid(int pid)
449 {
450         struct task_struct *task;
451
452         read_lock(&tasklist_lock);
453         for_each_process(task){
454                 if(task->thread.mode.tt.extern_pid == pid){
455                         read_unlock(&tasklist_lock);
456                         return(1);
457                 }
458         }
459         read_unlock(&tasklist_lock);
460         return(0);
461 }
462
463 /*
464  * Overrides for Emacs so that we follow Linus's tabbing style.
465  * Emacs will notice this stuff at the end of the file and automatically
466  * adjust the settings for this buffer only.  This must remain at the end
467  * of the file.
468  * ---------------------------------------------------------------------------
469  * Local variables:
470  * c-file-style: "linux"
471  * End:
472  */