]> nv-tegra.nvidia Code Review - linux-3.10.git/blobdiff - arch/blackfin/mach-common/entry.S
Blackfin: bf54x: drop unused pm gpio handling
[linux-3.10.git] / arch / blackfin / mach-common / entry.S
index a9b15aaf5254487acd9c811c295168bb6df89968..f96933f48a7fd933c9fce9df72ef182cf66c5cce 100644 (file)
@@ -1,32 +1,11 @@
 /*
- * File:         arch/blackfin/mach-common/entry.S
- * Based on:
- * Author:       Linus Torvalds
+ * Contains the system-call and fault low-level handling routines.
+ * This also contains the timer-interrupt handler, as well as all
+ * interrupts and faults that can result in a task-switch.
  *
- * Created:      ?
- * Description:  contains the system-call and fault low-level handling routines.
- *               This also contains the timer-interrupt handler, as well as all
- *               interrupts and faults that can result in a task-switch.
+ * Copyright 2005-2009 Analog Devices Inc.
  *
- * Modified:
- *               Copyright 2004-2006 Analog Devices Inc.
- *
- * Bugs:         Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ * Licensed under the GPL-2 or later.
  */
 
 /* NOTE: This code handles signal-recognition, which happens every time
@@ -289,7 +268,7 @@ ENTRY(_handle_bad_cplb)
        /* To get here, we just tried and failed to change a CPLB
         * so, handle things in trap_c (C code), by lowering to
         * IRQ5, just like we normally do. Since this is not a
-        * "normal" return path, we have a do alot of stuff to
+        * "normal" return path, we have to do a lot of stuff to
         * the stack to get ready so, we can fall through - we
         * need to make a CPLB exception look like a normal exception
         */
@@ -426,7 +405,7 @@ ENTRY(_double_fault)
 
        r0 = sp;        /* stack frame pt_regs pointer argument ==> r0 */
        SP += -12;
-       call _double_fault_c;
+       pseudo_long_call _double_fault_c, p5;
        SP += 12;
 .L_double_fault_panic:
         JUMP .L_double_fault_panic
@@ -468,7 +447,7 @@ ENTRY(_exception_to_level5)
 
        r0 = sp;        /* stack frame pt_regs pointer argument ==> r0 */
        SP += -12;
-       call _trap_c;
+       pseudo_long_call _trap_c, p4;
        SP += 12;
 
        /* If interrupts were off during the exception (IPEND[4] = 1), turn them off
@@ -503,6 +482,8 @@ ENTRY(_trap) /* Exception: 4th entry into system event table(supervisor mode)*/
        [--sp] = ASTAT;
        [--sp] = (R7:6,P5:4);
 
+       ANOMALY_283_315_WORKAROUND(p5, r7)
+
 #ifdef CONFIG_EXACT_HWERR
        /* Make sure all pending read/writes complete. This will ensure any
         * accesses which could cause hardware errors completes, and signal
@@ -513,15 +494,6 @@ ENTRY(_trap) /* Exception: 4th entry into system event table(supervisor mode)*/
        ssync;
 #endif
 
-#if ANOMALY_05000283 || ANOMALY_05000315
-       cc = r7 == r7;
-       p5.h = HI(CHIPID);
-       p5.l = LO(CHIPID);
-       if cc jump 1f;
-       r7.l = W[p5];
-1:
-#endif
-
 #ifdef CONFIG_DEBUG_DOUBLEFAULT
        /*
         * Save these registers, as they are only valid in exception context
@@ -579,7 +551,7 @@ ENTRY(_kernel_execve)
        p0 = sp;
        sp += -16;
        [sp + 12] = p0;
-       call _do_execve;
+       pseudo_long_call _do_execve, p5;
        SP += 16;
        cc = r0 == 0;
        if ! cc jump .Lexecve_failed;
@@ -643,7 +615,7 @@ ENTRY(_system_call)
 #ifdef CONFIG_IPIPE
        r0 = sp;
        SP += -12;
-       call ___ipipe_syscall_root;
+       pseudo_long_call ___ipipe_syscall_root, p0;
        SP += 12;
        cc = r0 == 1;
        if cc jump .Lsyscall_really_exit;
@@ -654,13 +626,6 @@ ENTRY(_system_call)
        p0 = [sp + PT_ORIG_P0];
 #endif /* CONFIG_IPIPE */
 
-       /* Check the System Call */
-       r7 = __NR_syscall;
-       /* System call number is passed in P0 */
-       r6 = p0;
-       cc = r6 < r7;
-       if ! cc jump .Lbadsys;
-
        /* are we tracing syscalls?*/
        r7 = sp;
        r6.l = lo(ALIGN_PAGE_MASK);
@@ -670,6 +635,14 @@ ENTRY(_system_call)
        r7 = [p2+TI_FLAGS];
        CC = BITTST(r7,TIF_SYSCALL_TRACE);
        if CC JUMP _sys_trace;
+       CC = BITTST(r7,TIF_SINGLESTEP);
+       if CC JUMP _sys_trace;
+
+       /* Make sure the system call # is valid */
+       p4 = __NR_syscall;
+       /* System call number is passed in P0 */
+       cc = p4 <= p0;
+       if cc jump .Lbadsys;
 
        /* Execute the appropriate system call */
 
@@ -719,7 +692,7 @@ ENTRY(_system_call)
        [--sp] = reti;
        SP += 4; /* don't merge with next insn to keep the pattern obvious */
        SP += -12;
-       call ___ipipe_sync_root;
+       pseudo_long_call ___ipipe_sync_root, p4;
        SP += 12;
        jump .Lresume_userspace_1;
 .Lsyscall_no_irqsync:
@@ -732,7 +705,7 @@ ENTRY(_system_call)
        sp += 4;
 
        SP += -12;
-       call _schedule;
+       pseudo_long_call _schedule, p4;
        SP += 12;
 
        jump .Lresume_userspace_1;
@@ -741,6 +714,8 @@ ENTRY(_system_call)
        cc = BITTST(r7, TIF_RESTORE_SIGMASK);
        if cc jump .Lsyscall_do_signals;
        cc = BITTST(r7, TIF_SIGPENDING);
+       if cc jump .Lsyscall_do_signals;
+       cc = BITTST(r7, TIF_NOTIFY_RESUME);
        if !cc jump .Lsyscall_really_exit;
 .Lsyscall_do_signals:
        /* Reenable interrupts.  */
@@ -749,7 +724,7 @@ ENTRY(_system_call)
 
        r0 = sp;
        SP += -12;
-       call _do_signal;
+       pseudo_long_call _do_notify_resume, p5;
        SP += 12;
 
 .Lsyscall_really_exit:
@@ -762,11 +737,17 @@ ENDPROC(_system_call)
  * this symbol need not be global anyways, so ...
  */
 _sys_trace:
-       call _syscall_trace;
-
-       /* Execute the appropriate system call */
+       r0 = sp;
+       pseudo_long_call _syscall_trace_enter, p5;
 
+       /* Make sure the system call # is valid */
        p4 = [SP + PT_P0];
+       p3 = __NR_syscall;
+       cc = p3 <= p4;
+       r0 = -ENOSYS;
+       if cc jump .Lsys_trace_badsys;
+
+       /* Execute the appropriate system call */
        p5.l = _sys_call_table;
        p5.h = _sys_call_table;
        p5 = p5 + (p4 << 2);
@@ -784,9 +765,11 @@ _sys_trace:
        SP += -12;
        call (p5);
        SP += 24;
+.Lsys_trace_badsys:
        [sp + PT_R0] = r0;
 
-       call _syscall_trace;
+       r0 = sp;
+       pseudo_long_call _syscall_trace_leave, p5;
        jump .Lresume_userspace;
 ENDPROC(_sys_trace)
 
@@ -834,7 +817,7 @@ _new_old_task:
        rets = [sp++];
 
        /*
-        * When we come out of resume, r0 carries "old" task, becuase we are
+        * When we come out of resume, r0 carries "old" task, because we are
         * in "new" task.
         */
        rts;
@@ -842,8 +825,8 @@ ENDPROC(_resume)
 
 ENTRY(_ret_from_exception)
 #ifdef CONFIG_IPIPE
-       p2.l = _per_cpu__ipipe_percpu_domain;
-       p2.h = _per_cpu__ipipe_percpu_domain;
+       p2.l = _ipipe_percpu_domain;
+       p2.h = _ipipe_percpu_domain;
        r0.l = _ipipe_root;
        r0.h = _ipipe_root;
        r2 = [p2];
@@ -906,11 +889,80 @@ ENTRY(_ret_from_exception)
        rts;
 ENDPROC(_ret_from_exception)
 
+#if defined(CONFIG_PREEMPT)
+
+ENTRY(_up_to_irq14)
+#if ANOMALY_05000281 || ANOMALY_05000461
+       r0.l = lo(SAFE_USER_INSTRUCTION);
+       r0.h = hi(SAFE_USER_INSTRUCTION);
+       reti = r0;
+#endif
+
+#ifdef CONFIG_DEBUG_HWERR
+       /* enable irq14 & hwerr interrupt, until we transition to _evt_evt14 */
+       r0 = (EVT_IVG14 | EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
+#else
+       /* Only enable irq14 interrupt, until we transition to _evt_evt14 */
+       r0 = (EVT_IVG14 | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
+#endif
+       sti r0;
+
+       p0.l = lo(EVT14);
+       p0.h = hi(EVT14);
+       p1.l = _evt_up_evt14;
+       p1.h = _evt_up_evt14;
+       [p0] = p1;
+       csync;
+
+       raise 14;
+1:
+       jump 1b;
+ENDPROC(_up_to_irq14)
+
+ENTRY(_evt_up_evt14)
+#ifdef CONFIG_DEBUG_HWERR
+       r0 = (EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
+       sti r0;
+#else
+       cli r0;
+#endif
+#ifdef CONFIG_TRACE_IRQFLAGS
+       [--sp] = rets;
+       sp += -12;
+       call _trace_hardirqs_off;
+       sp += 12;
+       rets = [sp++];
+#endif
+       [--sp] = RETI;
+       SP += 4;
+
+       /* restore normal evt14 */
+       p0.l = lo(EVT14);
+       p0.h = hi(EVT14);
+       p1.l = _evt_evt14;
+       p1.h = _evt_evt14;
+       [p0] = p1;
+       csync;
+
+       rts;
+ENDPROC(_evt_up_evt14)
+
+#endif
+
 #ifdef CONFIG_IPIPE
 
 _resume_kernel_from_int:
+       r1 = LO(~0x8000) (Z);
+       r1 = r0 & r1;
+       r0 = 1;
+       r0 = r1 - r0;
+       r2 = r1 & r0;
+       cc = r2 == 0;
+       /* Sync the root stage only from the outer interrupt level. */
+       if !cc jump .Lnosync;
        r0.l = ___ipipe_sync_root;
        r0.h = ___ipipe_sync_root;
+       [--sp] = reti;
        [--sp] = rets;
        [--sp] = ( r7:4, p5:3 );
        SP += -12;
@@ -918,9 +970,57 @@ _resume_kernel_from_int:
        SP += 12;
        ( r7:4, p5:3 ) = [sp++];
        rets = [sp++];
+       reti = [sp++];
+.Lnosync:
        rts
+#elif defined(CONFIG_PREEMPT)
+
+_resume_kernel_from_int:
+       /* check preempt_count */
+       r7 = sp;
+       r4.l = lo(ALIGN_PAGE_MASK);
+       r4.h = hi(ALIGN_PAGE_MASK);
+       r7 = r7 & r4;
+       p5 = r7;
+       r7 = [p5 + TI_PREEMPT];
+       cc = r7 == 0x0;
+       if !cc jump .Lreturn_to_kernel;
+.Lneed_schedule:
+       r7 = [p5 + TI_FLAGS];
+       r4.l = lo(_TIF_WORK_MASK);
+       r4.h = hi(_TIF_WORK_MASK);
+       r7 =  r7 & r4;
+       cc = BITTST(r7, TIF_NEED_RESCHED);
+       if !cc jump .Lreturn_to_kernel;
+       /*
+        * let the schedule be done at level 15, otherwise the scheduled
+        * process will run at a high level and block low level interrupts
+        */
+       r6 = reti;  /* save reti */
+       r5.l = .Lkernel_schedule;
+       r5.h = .Lkernel_schedule;
+       reti = r5;
+       rti;
+.Lkernel_schedule:
+       [--sp] = rets;
+       sp += -12;
+       pseudo_long_call _preempt_schedule_irq, p4;
+       sp += 12;
+       rets = [sp++];
+
+       [--sp] = rets;
+       sp += -12;
+       /* up to irq14 so that reti after restore_all can return to irq15 (kernel) */
+       pseudo_long_call _up_to_irq14, p4;
+       sp += 12;
+       rets = [sp++];
+
+       reti = r6; /* restore reti so that origin process can return to interrupted point */
+
+       jump .Lneed_schedule;
 #else
-#define _resume_kernel_from_int         2f
+
+#define _resume_kernel_from_int        .Lreturn_to_kernel
 #endif
 
 ENTRY(_return_from_int)
@@ -930,7 +1030,7 @@ ENTRY(_return_from_int)
        p2.h = hi(ILAT);
        r0 = [p2];
        cc = bittst (r0, EVT_IVG15_P);
-       if cc jump 2f;
+       if cc jump .Lreturn_to_kernel;
 
        /* if not return to user mode, get out */
        p2.l = lo(IPEND);
@@ -962,7 +1062,7 @@ ENTRY(_return_from_int)
        STI r0;
        raise 15;       /* raise evt15 to do signal or reschedule */
        rti;
-2:
+.Lreturn_to_kernel:
        rts;
 ENDPROC(_return_from_int)
 
@@ -991,6 +1091,13 @@ ENTRY(_evt_evt14)
        sti r0;
 #else
        cli r0;
+#endif
+#ifdef CONFIG_TRACE_IRQFLAGS
+       [--sp] = rets;
+       sp += -12;
+       call _trace_hardirqs_off;
+       sp += 12;
+       rets = [sp++];
 #endif
        [--sp] = RETI;
        SP += 4;
@@ -1015,6 +1122,14 @@ ENTRY(_schedule_and_signal_from_int)
        p1 = rets;
        [sp + PT_RESERVED] = p1;
 
+#ifdef CONFIG_TRACE_IRQFLAGS
+       /* trace_hardirqs_on() checks if all irqs are disabled. But here IRQ 15
+        * is turned on, so disable all irqs. */
+       cli r0;
+       sp += -12;
+       call _trace_hardirqs_on;
+       sp += 12;
+#endif
 #ifdef CONFIG_SMP
        GET_PDA(p0, r0);        /* Fetch current PDA (can't migrate to other CPU here) */
        r0 = [p0 + PDA_IRQFLAGS];
@@ -1033,7 +1148,8 @@ ENTRY(_schedule_and_signal_from_int)
 
        r0 = sp;
        sp += -12;
-       call _finish_atomic_sections;
+
+       pseudo_long_call _finish_atomic_sections, p5;
        sp += 12;
        jump.s .Lresume_userspace;
 ENDPROC(_schedule_and_signal_from_int)
@@ -1134,14 +1250,7 @@ ENTRY(_early_trap)
        SAVE_ALL_SYS
        trace_buffer_stop(p0,r0);
 
-#if ANOMALY_05000283 || ANOMALY_05000315
-       cc = r5 == r5;
-       p4.h = HI(CHIPID);
-       p4.l = LO(CHIPID);
-       if cc jump 1f;
-       r5.l = W[p4];
-1:
-#endif
+       ANOMALY_283_315_WORKAROUND(p4, r5)
 
        /* Turn caches off, to ensure we don't get double exceptions */
 
@@ -1390,7 +1499,7 @@ ENTRY(_sys_call_table)
        .long _sys_newuname
        .long _sys_ni_syscall   /* old sys_modify_ldt */
        .long _sys_adjtimex
-       .long _sys_ni_syscall   /* 125 */ /* sys_mprotect */
+       .long _sys_mprotect     /* 125 */
        .long _sys_ni_syscall   /* old sys_sigprocmask */
        .long _sys_ni_syscall   /* old "creat_module" */
        .long _sys_init_module
@@ -1409,16 +1518,16 @@ ENTRY(_sys_call_table)
        .long _sys_getdents
        .long _sys_ni_syscall   /* sys_select */
        .long _sys_flock
-       .long _sys_ni_syscall   /* sys_msync */
+       .long _sys_msync
        .long _sys_readv                /* 145 */
        .long _sys_writev
        .long _sys_getsid
        .long _sys_fdatasync
        .long _sys_sysctl
-       .long _sys_ni_syscall   /* 150 */ /* sys_mlock */
-       .long _sys_ni_syscall   /* sys_munlock */
-       .long _sys_ni_syscall   /* sys_mlockall */
-       .long _sys_ni_syscall   /* sys_munlockall */
+       .long _sys_mlock        /* 150 */
+       .long _sys_munlock
+       .long _sys_mlockall
+       .long _sys_munlockall
        .long _sys_sched_setparam
        .long _sys_sched_getparam /* 155 */
        .long _sys_sched_setscheduler
@@ -1457,7 +1566,7 @@ ENTRY(_sys_call_table)
        .long _sys_ni_syscall   /* streams2 */
        .long _sys_vfork                /* 190 */
        .long _sys_getrlimit
-       .long _sys_mmap2
+       .long _sys_mmap_pgoff
        .long _sys_truncate64
        .long _sys_ftruncate64
        .long _sys_stat64       /* 195 */
@@ -1483,8 +1592,8 @@ ENTRY(_sys_call_table)
        .long _sys_setfsuid     /* 215 */
        .long _sys_setfsgid
        .long _sys_pivot_root
-       .long _sys_ni_syscall   /* sys_mincore */
-       .long _sys_ni_syscall   /* sys_madvise */
+       .long _sys_mincore
+       .long _sys_madvise
        .long _sys_getdents64   /* 220 */
        .long _sys_fcntl64
        .long _sys_ni_syscall   /* reserved for TUX */
@@ -1540,7 +1649,7 @@ ENTRY(_sys_call_table)
        .long _sys_utimes
        .long _sys_fadvise64_64
        .long _sys_ni_syscall /* vserver */
-       .long _sys_ni_syscall /* 275, mbind */
+       .long _sys_mbind        /* 275 */
        .long _sys_ni_syscall /* get_mempolicy */
        .long _sys_ni_syscall /* set_mempolicy */
        .long _sys_mq_open
@@ -1634,7 +1743,16 @@ ENTRY(_sys_call_table)
        .long _sys_preadv
        .long _sys_pwritev
        .long _sys_rt_tgsigqueueinfo
-       .long _sys_perf_counter_open
+       .long _sys_perf_event_open
+       .long _sys_recvmmsg             /* 370 */
+       .long _sys_fanotify_init
+       .long _sys_fanotify_mark
+       .long _sys_prlimit64
+       .long _sys_cacheflush
+       .long _sys_name_to_handle_at    /* 375 */
+       .long _sys_open_by_handle_at
+       .long _sys_clock_adjtime
+       .long _sys_syncfs
 
        .rept NR_syscalls-(.-_sys_call_table)/4
        .long _sys_ni_syscall