/*
 * Copyright (C) 2005,2006,2007,2008,2009,2010,2011 Imagination Technologies
 *
 * This file contains the architecture-dependent parts of process handling.
 *
 */

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/reboot.h>
#include <linux/elfcore.h>
#include <linux/fs.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/mman.h>
#include <linux/pm.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/smp.h>
#include <asm/core_reg.h>
#include <asm/user_gateway.h>
#include <asm/tcm.h>
#include <asm/traps.h>
#include <asm/switch_to.h>

/*
 * Wait for the next interrupt and enable local interrupts
 */
void arch_cpu_idle(void)
{
        int tmp;

        /*
         * Quickly jump straight into the interrupt entry point without actually
         * triggering an interrupt. When TXSTATI gets read the processor will
         * block until an interrupt is triggered.
         */
        asm volatile (/* Switch into ISTAT mode */
                      "RTH\n\t"
                      /* Enable local interrupts */
                      "MOV      TXMASKI, %1\n\t"
                      /*
                       * We can't directly "SWAP PC, PCX", so we swap via a
                       * temporary. Essentially we do:
                       *  PCX_new = 1f (the place to continue execution)
                       *  PC = PCX_old
                       */
                      "ADD      %0, CPC0, #(1f-.)\n\t"
                      "SWAP     PCX, %0\n\t"
                      "MOV      PC, %0\n"
                      /* Continue execution here with interrupts enabled */
                      "1:"
                      : "=a" (tmp)
                      : "r" (get_trigger_mask()));
}

#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
        cpu_die();
}
#endif

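/*
 * Power-off and reset hooks. pm_power_off is the generic kernel hook;
 * soc_restart and soc_halt may be filled in by SoC/board code so the
 * machine_*() handlers below can hand control to platform firmware
 * before the hardware thread is halted via hard_processor_halt().
 */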
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);

void (*soc_restart)(char *cmd);
void (*soc_halt)(void);

void machine_restart(char *cmd)
{
        if (soc_restart)
                soc_restart(cmd);
        hard_processor_halt(HALT_OK);
}

void machine_halt(void)
{
        if (soc_halt)
                soc_halt();
        smp_send_stop();
        hard_processor_halt(HALT_OK);
}

void machine_power_off(void)
{
        if (pm_power_off)
                pm_power_off();
        smp_send_stop();
        hard_processor_halt(HALT_OK);
}

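/* Condition code flag bits in the saved ctx.Flags word */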
#define FLAG_Z 0x8
#define FLAG_N 0x4
#define FLAG_O 0x2
#define FLAG_C 0x1

void show_regs(struct pt_regs *regs)
{
        int i;
        const char *AX0_names[] = {"A0StP", "A0FrP"};
        const char *AX1_names[] = {"A1GbP", "A1LbP"};

        const char *DX0_names[] = {
                "D0Re0",
                "D0Ar6",
                "D0Ar4",
                "D0Ar2",
                "D0FrT",
                "D0.5 ",
                "D0.6 ",
                "D0.7 "
        };

        const char *DX1_names[] = {
                "D1Re0",
                "D1Ar5",
                "D1Ar3",
                "D1Ar1",
                "D1RtP",
                "D1.5 ",
                "D1.6 ",
                "D1.7 "
        };

        pr_info(" pt_regs @ %p\n", regs);
        pr_info(" SaveMask = 0x%04hx\n", regs->ctx.SaveMask);
        pr_info(" Flags = 0x%04hx (%c%c%c%c)\n", regs->ctx.Flags,
                regs->ctx.Flags & FLAG_Z ? 'Z' : 'z',
                regs->ctx.Flags & FLAG_N ? 'N' : 'n',
                regs->ctx.Flags & FLAG_O ? 'O' : 'o',
                regs->ctx.Flags & FLAG_C ? 'C' : 'c');
        pr_info(" TXRPT = 0x%08x\n", regs->ctx.CurrRPT);
        pr_info(" PC = 0x%08x\n", regs->ctx.CurrPC);

        /* AX regs */
        for (i = 0; i < 2; i++) {
                pr_info(" %s = 0x%08x    ",
                        AX0_names[i],
                        regs->ctx.AX[i].U0);
                printk(" %s = 0x%08x\n",
                        AX1_names[i],
                        regs->ctx.AX[i].U1);
        }

        if (regs->ctx.SaveMask & TBICTX_XEXT_BIT)
                pr_warn(" Extended state present - AX2.[01] will be WRONG\n");

        /* Special place with AXx.2 */
        pr_info(" A0.2  = 0x%08x    ",
                regs->ctx.Ext.AX2.U0);
        printk(" A1.2  = 0x%08x\n",
                regs->ctx.Ext.AX2.U1);

        /* 'extended' AX regs (nominally, just AXx.3) */
        for (i = 0; i < (TBICTX_AX_REGS - 3); i++) {
                pr_info(" A0.%d  = 0x%08x    ", i + 3, regs->ctx.AX3[i].U0);
                printk(" A1.%d  = 0x%08x\n", i + 3, regs->ctx.AX3[i].U1);
        }

        for (i = 0; i < 8; i++) {
                pr_info(" %s = 0x%08x    ", DX0_names[i], regs->ctx.DX[i].U0);
                printk(" %s = 0x%08x\n", DX1_names[i], regs->ctx.DX[i].U1);
        }

        show_trace(NULL, (unsigned long *)regs->ctx.AX[0].U0, regs);
}

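/*
 * Set up the architecture-specific thread state for a new task.
 * For kernel threads (PF_KTHREAD), 'usp' carries the thread function and
 * 'arg' its argument; for user threads the parent's register context is
 * duplicated, the child's stack pointer and return value are set up, and
 * any FPU/DSP extended context is cloned.
 */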
int copy_thread(unsigned long clone_flags, unsigned long usp,
                unsigned long arg, struct task_struct *tsk)
{
        struct pt_regs *childregs = task_pt_regs(tsk);
        void *kernel_context = ((void *) childregs +
                                sizeof(struct pt_regs));
        unsigned long global_base;

        BUG_ON(((unsigned long)childregs) & 0x7);
        BUG_ON(((unsigned long)kernel_context) & 0x7);

        memset(&tsk->thread.kernel_context, 0,
                        sizeof(tsk->thread.kernel_context));

        tsk->thread.kernel_context = __TBISwitchInit(kernel_context,
                                                     ret_from_fork,
                                                     0, 0);

        if (unlikely(tsk->flags & PF_KTHREAD)) {
                /*
                 * Make sure we don't leak any kernel data to the child's
                 * regs in case this kernel thread becomes a userspace
                 * thread in the future.
                 */
                memset(childregs, 0, sizeof(struct pt_regs));

                global_base = __core_reg_get(A1GbP);
                childregs->ctx.AX[0].U1 = (unsigned long) global_base;
                childregs->ctx.AX[0].U0 = (unsigned long) kernel_context;
                /* Set D1Ar1=arg and D1RtP=usp (fn) */
                childregs->ctx.DX[4].U1 = usp;
                childregs->ctx.DX[3].U1 = arg;
                tsk->thread.int_depth = 2;
                return 0;
        }
        /*
         * Get a pointer to where the new child's register block should have
         * been pushed.
         * The Meta's stack grows upwards, and the context is the first
         * thing to be pushed by TBX (phew)
         */
        *childregs = *current_pt_regs();
        /* Set the correct stack for the clone mode */
        if (usp)
                childregs->ctx.AX[0].U0 = ALIGN(usp, 8);
        tsk->thread.int_depth = 1;

        /* set return value for child process */
        childregs->ctx.DX[0].U0 = 0;

        /* The TLS pointer is passed as an argument to sys_clone. */
        if (clone_flags & CLONE_SETTLS)
                tsk->thread.tls_ptr =
                                (__force void __user *)childregs->ctx.DX[1].U1;

#ifdef CONFIG_METAG_FPU
        if (tsk->thread.fpu_context) {
                struct meta_fpu_context *ctx;

                ctx = kmemdup(tsk->thread.fpu_context,
                              sizeof(struct meta_fpu_context), GFP_ATOMIC);
                tsk->thread.fpu_context = ctx;
        }
#endif

#ifdef CONFIG_METAG_DSP
        if (tsk->thread.dsp_context) {
                struct meta_ext_context *ctx;
                int i;

                ctx = kmemdup(tsk->thread.dsp_context,
                              sizeof(struct meta_ext_context), GFP_ATOMIC);
                for (i = 0; i < 2; i++)
                        ctx->ram[i] = kmemdup(ctx->ram[i], ctx->ram_sz[i],
                                              GFP_ATOMIC);
                tsk->thread.dsp_context = ctx;
        }
#endif

        return 0;
}

#ifdef CONFIG_METAG_FPU
static void alloc_fpu_context(struct thread_struct *thread)
{
        thread->fpu_context = kzalloc(sizeof(struct meta_fpu_context),
                                      GFP_ATOMIC);
}

static void clear_fpu(struct thread_struct *thread)
{
        thread->user_flags &= ~TBICTX_FPAC_BIT;
        kfree(thread->fpu_context);
        thread->fpu_context = NULL;
}
#else
static void clear_fpu(struct thread_struct *thread)
{
}
#endif

#ifdef CONFIG_METAG_DSP
static void clear_dsp(struct thread_struct *thread)
{
        if (thread->dsp_context) {
                kfree(thread->dsp_context->ram[0]);
                kfree(thread->dsp_context->ram[1]);

                kfree(thread->dsp_context);

                thread->dsp_context = NULL;
        }

        __core_reg_set(D0.8, 0);
}
#else
static void clear_dsp(struct thread_struct *thread)
{
}
#endif

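/*
 * Switch execution from 'prev' to 'next': lazily save prev's FPU state
 * if it was in use, switch kernel contexts via __TBISwitch(), then
 * restore the incoming thread's TLS pointer.
 */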
struct task_struct *__sched __switch_to(struct task_struct *prev,
                                        struct task_struct *next)
{
        TBIRES to, from;

        to.Switch.pCtx = next->thread.kernel_context;
        to.Switch.pPara = prev;

#ifdef CONFIG_METAG_FPU
        if (prev->thread.user_flags & TBICTX_FPAC_BIT) {
                struct pt_regs *regs = task_pt_regs(prev);
                TBIRES state;

                state.Sig.SaveMask = prev->thread.user_flags;
                state.Sig.pCtx = &regs->ctx;

                if (!prev->thread.fpu_context)
                        alloc_fpu_context(&prev->thread);
                if (prev->thread.fpu_context)
                        __TBICtxFPUSave(state, prev->thread.fpu_context);
        }
        /*
         * Force a restore of the FPU context next time this process is
         * scheduled.
         */
        if (prev->thread.fpu_context)
                prev->thread.fpu_context->needs_restore = true;
#endif

        from = __TBISwitch(to, &prev->thread.kernel_context);

        /* Restore TLS pointer for this process. */
        set_gateway_tls(current->thread.tls_ptr);

        return (struct task_struct *) from.Switch.pPara;
}

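/*
 * Called on exec: throw away any FPU/DSP state inherited from the old
 * program image so the new one starts with a clean extended context.
 */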
void flush_thread(void)
{
        clear_fpu(&current->thread);
        clear_dsp(&current->thread);
}

/*
 * Free current thread data structures etc.
 */
void exit_thread(void)
{
        clear_fpu(&current->thread);
        clear_dsp(&current->thread);
}

/*
 * TODO: unwind the kernel stack here to determine where we went to
 * sleep.
 */
unsigned long get_wchan(struct task_struct *p)
{
        return 0;
}

int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
        /* Returning 0 indicates that the FPU state was not stored (as it was
         * not in use) */
        return 0;
}

#ifdef CONFIG_METAG_USER_TCM

#define ELF_MIN_ALIGN   PAGE_SIZE

#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))

#define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)

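/*
 * Map an ELF segment for a user binary, diverting it into TCM (tightly
 * coupled memory) when the requested address carries a valid TCM tag.
 * The mapping logic mirrors the generic elf_map() in fs/binfmt_elf.c.
 */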
unsigned long __metag_elf_map(struct file *filep, unsigned long addr,
                              struct elf_phdr *eppnt, int prot, int type,
                              unsigned long total_size)
{
        unsigned long map_addr, size;
        unsigned long page_off = ELF_PAGEOFFSET(eppnt->p_vaddr);
        unsigned long raw_size = eppnt->p_filesz + page_off;
        unsigned long off = eppnt->p_offset - page_off;
        unsigned int tcm_tag;

        addr = ELF_PAGESTART(addr);
        size = ELF_PAGEALIGN(raw_size);

        /* mmap() will return -EINVAL if given a zero size, but a
         * segment with zero filesize is perfectly valid */
        if (!size)
                return addr;

        tcm_tag = tcm_lookup_tag(addr);

        if (tcm_tag != TCM_INVALID_TAG)
                type &= ~MAP_FIXED;

        /*
         * total_size is the size of the ELF (interpreter) image.
         * The _first_ mmap needs to know the full size, otherwise
         * randomization might put this image into an overlapping
         * position with the ELF binary image. (since size < total_size)
         * So we first map the 'big' image - and unmap the remainder at
         * the end. (which unmap is needed for ELF images with holes.)
         */
        if (total_size) {
                total_size = ELF_PAGEALIGN(total_size);
                map_addr = vm_mmap(filep, addr, total_size, prot, type, off);
                if (!BAD_ADDR(map_addr))
                        vm_munmap(map_addr+size, total_size-size);
        } else
                map_addr = vm_mmap(filep, addr, size, prot, type, off);

        if (!BAD_ADDR(map_addr) && tcm_tag != TCM_INVALID_TAG) {
                struct tcm_allocation *tcm;
                unsigned long tcm_addr;

                tcm = kmalloc(sizeof(*tcm), GFP_KERNEL);
                if (!tcm)
                        return -ENOMEM;

                tcm_addr = tcm_alloc(tcm_tag, raw_size);
                if (tcm_addr != addr) {
                        kfree(tcm);
                        return -ENOMEM;
                }

                tcm->tag = tcm_tag;
                tcm->addr = tcm_addr;
                tcm->size = raw_size;

                list_add(&tcm->list, &current->mm->context.tcm);

                eppnt->p_vaddr = map_addr;
                if (copy_from_user((void *) addr, (void __user *) map_addr,
                                   raw_size))
                        return -EFAULT;
        }

        return map_addr;
}
#endif
438 #endif