Merge branch 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
[linux-3.10.git] / arch/x86/kernel/vsyscall_64.c
index 78f2250..9a907a6 100644
@@ -2,6 +2,8 @@
  *  Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
  *  Copyright 2003 Andi Kleen, SuSE Labs.
  *
+ *  [ NOTE: this mechanism is now deprecated in favor of the vDSO. ]
+ *
  *  Thanks to hpa@transmeta.com for some useful hints.
  *  Special thanks to Ingo Molnar for his early experience with
  *  a different vsyscall implementation for Linux/IA32 and for the name.
  *  vsyscalls. One vsyscall can reserve more than 1 slot to avoid
  *  jumping out of line if necessary. We cannot add more with this
  *  mechanism because older kernels won't return -ENOSYS.
- *  If we want more than four we need a vDSO.
  *
- *  Note: the concept clashes with user mode linux. If you use UML and
- *  want per guest time just set the kernel.vsyscall64 sysctl to 0.
+ *  Note: the concept clashes with user mode linux.  UML users should
+ *  use the vDSO.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/time.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/seqlock.h>
 #include <linux/jiffies.h>
 #include <linux/sysctl.h>
-#include <linux/clocksource.h>
+#include <linux/topology.h>
+#include <linux/timekeeper_internal.h>
 #include <linux/getcpu.h>
 #include <linux/cpu.h>
 #include <linux/smp.h>
 #include <linux/notifier.h>
+#include <linux/syscalls.h>
+#include <linux/ratelimit.h>
 
 #include <asm/vsyscall.h>
 #include <asm/pgtable.h>
+#include <asm/compat.h>
 #include <asm/page.h>
 #include <asm/unistd.h>
 #include <asm/fixmap.h>
 #include <asm/desc.h>
 #include <asm/topology.h>
 #include <asm/vgtod.h>
+#include <asm/traps.h>
 
-#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
-#define __syscall_clobber "r11","rcx","memory"
-#define __pa_vsymbol(x)                        \
-       ({unsigned long v;              \
-       extern char __vsyscall_0;       \
-         asm("" : "=r" (v) : "0" (x)); \
-         ((v - VSYSCALL_START) + __pa_symbol(&__vsyscall_0)); })
+#define CREATE_TRACE_POINTS
+#include "vsyscall_trace.h"
 
-/*
- * vsyscall_gtod_data contains data that is :
- * - readonly from vsyscalls
- * - written by timer interrupt or sysctl (/proc/sys/kernel/vsyscall64)
- * Try to keep this structure as small as possible to avoid cache line ping pongs
- */
-int __vgetcpu_mode __section_vgetcpu_mode;
+DEFINE_VVAR(int, vgetcpu_mode);
+DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
 
-struct vsyscall_gtod_data __vsyscall_gtod_data __section_vsyscall_gtod_data =
+static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
+
+static int __init vsyscall_setup(char *str)
 {
-       .lock = SEQLOCK_UNLOCKED,
-       .sysctl_enabled = 1,
-};
+       if (str) {
+               if (!strcmp("emulate", str))
+                       vsyscall_mode = EMULATE;
+               else if (!strcmp("native", str))
+                       vsyscall_mode = NATIVE;
+               else if (!strcmp("none", str))
+                       vsyscall_mode = NONE;
+               else
+                       return -EINVAL;
+
+               return 0;
+       }
+
+       return -EINVAL;
+}
+early_param("vsyscall", vsyscall_setup);
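
The three strings parsed above are the kernel's documented "vsyscall=" boot parameter: "emulate" (the default set above) traps each call into the kernel and emulates it, "native" keeps the legacy page executable for speed, and "none" refuses the calls outright. Booting with vsyscall=none, for instance, makes any program that still jumps into the legacy page die with the "vsyscall attempted with vsyscall=none" warning emitted further down.
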
 
 void update_vsyscall_tz(void)
 {
-       unsigned long flags;
-
-       write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
-       /* sys_tz has changed */
        vsyscall_gtod_data.sys_tz = sys_tz;
-       write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
 }
 
-void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
+void update_vsyscall(struct timekeeper *tk)
 {
-       unsigned long flags;
+       struct vsyscall_gtod_data *vdata = &vsyscall_gtod_data;
+
+       write_seqcount_begin(&vdata->seq);
 
-       write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
        /* copy vsyscall data */
-       vsyscall_gtod_data.clock.vread = clock->vread;
-       vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
-       vsyscall_gtod_data.clock.mask = clock->mask;
-       vsyscall_gtod_data.clock.mult = clock->mult;
-       vsyscall_gtod_data.clock.shift = clock->shift;
-       vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
-       vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
-       vsyscall_gtod_data.wall_to_monotonic = wall_to_monotonic;
-       write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
-}
+       vdata->clock.vclock_mode        = tk->clock->archdata.vclock_mode;
+       vdata->clock.cycle_last         = tk->clock->cycle_last;
+       vdata->clock.mask               = tk->clock->mask;
+       vdata->clock.mult               = tk->mult;
+       vdata->clock.shift              = tk->shift;
+
+       vdata->wall_time_sec            = tk->xtime_sec;
+       vdata->wall_time_snsec          = tk->xtime_nsec;
+
+       vdata->monotonic_time_sec       = tk->xtime_sec
+                                       + tk->wall_to_monotonic.tv_sec;
+       vdata->monotonic_time_snsec     = tk->xtime_nsec
+                                       + (tk->wall_to_monotonic.tv_nsec
+                                               << tk->shift);
+       while (vdata->monotonic_time_snsec >=
+                                       (((u64)NSEC_PER_SEC) << tk->shift)) {
+               vdata->monotonic_time_snsec -=
+                                       ((u64)NSEC_PER_SEC) << tk->shift;
+               vdata->monotonic_time_sec++;
+       }
 
-/* RED-PEN may want to re-add seq locking, but then the variable should be
- * write-once.
- */
-static __always_inline void do_get_tz(struct timezone * tz)
-{
-       *tz = __vsyscall_gtod_data.sys_tz;
+       vdata->wall_time_coarse.tv_sec  = tk->xtime_sec;
+       vdata->wall_time_coarse.tv_nsec = (long)(tk->xtime_nsec >> tk->shift);
+
+       vdata->monotonic_time_coarse    = timespec_add(vdata->wall_time_coarse,
+                                                       tk->wall_to_monotonic);
+
+       write_seqcount_end(&vdata->seq);
 }
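
A note on units: the _snsec fields hold "shifted nanoseconds" (nanoseconds scaled by 2^shift), the timekeeper's fixed-point format, which is why the loop above subtracts NSEC_PER_SEC << tk->shift rather than plain NSEC_PER_SEC. A stand-alone sketch of the same carry step, with invented values for the shift and the accumulated count:

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL

    int main(void)
    {
        unsigned int shift = 8;                  /* hypothetical clocksource shift */
        uint64_t sec = 1000;
        uint64_t snsec = 1500000000ULL << shift; /* 1.5 s in shifted ns */

        /* same normalization as update_vsyscall(): carry whole seconds */
        while (snsec >= (NSEC_PER_SEC << shift)) {
            snsec -= NSEC_PER_SEC << shift;
            sec++;
        }

        /* prints sec=1001 nsec=500000000 */
        printf("sec=%llu nsec=%llu\n", (unsigned long long)sec,
               (unsigned long long)(snsec >> shift));
        return 0;
    }
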
 
-static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
+static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
+                             const char *message)
 {
-       int ret;
-       asm volatile("vsysc2: syscall"
-               : "=a" (ret)
-               : "0" (__NR_gettimeofday),"D" (tv),"S" (tz)
-               : __syscall_clobber );
-       return ret;
+       if (!show_unhandled_signals)
+               return;
+
+       pr_notice_ratelimited("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n",
+                             level, current->comm, task_pid_nr(current),
+                             message, regs->ip, regs->cs,
+                             regs->sp, regs->ax, regs->si, regs->di);
 }
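
When show_unhandled_signals is set, the ratelimited notice above produces one diagnostic line per offending task. A hypothetical example of the resulting log line (every value is invented for illustration):

    a.out[1234] vsyscall fault (exploit attempt?) ip:ffffffffff600000 cs:33 sp:7fff2a3b4c50 ax:ffffffffffffffda si:0 di:10
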
 
-static __always_inline long time_syscall(long *t)
+static int addr_to_vsyscall_nr(unsigned long addr)
 {
-       long secs;
-       asm volatile("vsysc1: syscall"
-               : "=a" (secs)
-               : "0" (__NR_time),"D" (t) : __syscall_clobber);
-       return secs;
+       int nr;
+
+       if ((addr & ~0xC00UL) != VSYSCALL_START)
+               return -EINVAL;
+
+       nr = (addr & 0xC00UL) >> 10;
+       if (nr >= 3)
+               return -EINVAL;
+
+       return nr;
 }
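
The masking above decodes the legacy layout: the page sits at the fixed address 0xffffffffff600000 (VSYSCALL_START) and each entry occupies a 1024-byte slot, so offset 0x000 is gettimeofday, 0x400 is time, and 0x800 is getcpu. A minimal user-space sketch that calls the time slot directly (x86-64 only, and only while the kernel still provides the page, natively or emulated):

    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
        /* fixed address of the legacy vsyscall time() entry (slot 1) */
        time_t (*vtime)(time_t *) = (time_t (*)(time_t *))0xffffffffff600400UL;

        printf("vsyscall time() = %ld\n", (long)vtime(NULL));
        return 0;
    }
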
 
-static __always_inline void do_vgettimeofday(struct timeval * tv)
+static bool write_ok_or_segv(unsigned long ptr, size_t size)
 {
-       cycle_t now, base, mask, cycle_delta;
-       unsigned seq;
-       unsigned long mult, shift, nsec;
-       cycle_t (*vread)(void);
-       do {
-               seq = read_seqbegin(&__vsyscall_gtod_data.lock);
-
-               vread = __vsyscall_gtod_data.clock.vread;
-               if (unlikely(!__vsyscall_gtod_data.sysctl_enabled || !vread)) {
-                       gettimeofday(tv,NULL);
-                       return;
-               }
-               now = vread();
-               base = __vsyscall_gtod_data.clock.cycle_last;
-               mask = __vsyscall_gtod_data.clock.mask;
-               mult = __vsyscall_gtod_data.clock.mult;
-               shift = __vsyscall_gtod_data.clock.shift;
-
-               tv->tv_sec = __vsyscall_gtod_data.wall_time_sec;
-               nsec = __vsyscall_gtod_data.wall_time_nsec;
-       } while (read_seqretry(&__vsyscall_gtod_data.lock, seq));
-
-       /* calculate interval: */
-       cycle_delta = (now - base) & mask;
-       /* convert to nsecs: */
-       nsec += (cycle_delta * mult) >> shift;
-
-       while (nsec >= NSEC_PER_SEC) {
-               tv->tv_sec += 1;
-               nsec -= NSEC_PER_SEC;
+       /*
+        * XXX: if access_ok, get_user, and put_user handled
+        * sig_on_uaccess_error, this could go away.
+        */
+
+       if (!access_ok(VERIFY_WRITE, (void __user *)ptr, size)) {
+               siginfo_t info;
+               struct thread_struct *thread = &current->thread;
+
+               thread->error_code      = 6;  /* user fault, no page, write */
+               thread->cr2             = ptr;
+               thread->trap_nr         = X86_TRAP_PF;
+
+               memset(&info, 0, sizeof(info));
+               info.si_signo           = SIGSEGV;
+               info.si_errno           = 0;
+               info.si_code            = SEGV_MAPERR;
+               info.si_addr            = (void __user *)ptr;
+
+               force_sig_info(SIGSEGV, &info, current);
+               return false;
+       } else {
+               return true;
        }
-       tv->tv_usec = nsec / NSEC_PER_USEC;
 }
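
Note that write_ok_or_segv() raises SIGSEGV itself instead of letting the emulated call return -EFAULT, so a bad pointer kills the caller exactly as the real, executable page would have. A sketch of a probe that should die with SIGSEGV under emulation (same fixed-address assumption as the example above):

    #include <sys/time.h>

    int main(void)
    {
        int (*vgtod)(struct timeval *, struct timezone *) =
            (int (*)(struct timeval *, struct timezone *))0xffffffffff600000UL;

        /* a low, normally unmapped address: it passes access_ok(), but the
         * store inside sys_gettimeofday() faults, and sig_on_uaccess_error
         * turns that fault into SIGSEGV rather than -EFAULT */
        return vgtod((struct timeval *)16, NULL);
    }
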
 
-int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz)
+bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
 {
-       if (tv)
-               do_vgettimeofday(tv);
-       if (tz)
-               do_get_tz(tz);
-       return 0;
-}
+       struct task_struct *tsk;
+       unsigned long caller;
+       int vsyscall_nr, syscall_nr, tmp;
+       int prev_sig_on_uaccess_error;
+       long ret;
+
+       /*
+        * No point in checking CS -- the only way to get here is a user mode
+        * trap to a high address, which means that we're in 64-bit user code.
+        */
+
+       WARN_ON_ONCE(address != regs->ip);
+
+       if (vsyscall_mode == NONE) {
+               warn_bad_vsyscall(KERN_INFO, regs,
+                                 "vsyscall attempted with vsyscall=none");
+               return false;
+       }
 
-/* This will break when the xtime seconds get inaccurate, but that is
- * unlikely */
-time_t __vsyscall(1) vtime(time_t *t)
-{
-       struct timeval tv;
-       time_t result;
-       if (unlikely(!__vsyscall_gtod_data.sysctl_enabled))
-               return time_syscall(t);
-
-       vgettimeofday(&tv, NULL);
-       result = tv.tv_sec;
-       if (t)
-               *t = result;
-       return result;
-}
+       vsyscall_nr = addr_to_vsyscall_nr(address);
 
-/* Fast way to get current CPU and node.
-   This helps to do per node and per CPU caches in user space.
-   The result is not guaranteed without CPU affinity, but usually
-   works out because the scheduler tries to keep a thread on the same
-   CPU.
+       trace_emulate_vsyscall(vsyscall_nr);
 
-   tcache must point to a two element sized long array.
-   All arguments can be NULL. */
-long __vsyscall(2)
-vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
-{
-       unsigned int dummy, p;
-       unsigned long j = 0;
-
-       /* Fast cache - only recompute value once per jiffies and avoid
-          relatively costly rdtscp/cpuid otherwise.
-          This works because the scheduler usually keeps the process
-          on the same CPU and this syscall doesn't guarantee its
-          results anyway.
-          We do this here because otherwise user space would do it on
-          its own in a likely inferior way (no access to jiffies).
-          If you don't like it pass NULL. */
-       if (tcache && tcache->blob[0] == (j = __jiffies)) {
-               p = tcache->blob[1];
-       } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
-               /* Load per CPU data from RDTSCP */
-               rdtscp(dummy, dummy, p);
-       } else {
-               /* Load per CPU data from GDT */
-               asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
+       if (vsyscall_nr < 0) {
+               warn_bad_vsyscall(KERN_WARNING, regs,
+                                 "misaligned vsyscall (exploit attempt or buggy program) -- look up the vsyscall kernel parameter if you need a workaround");
+               goto sigsegv;
        }
-       if (tcache) {
-               tcache->blob[0] = j;
-               tcache->blob[1] = p;
+
+       if (get_user(caller, (unsigned long __user *)regs->sp) != 0) {
+               warn_bad_vsyscall(KERN_WARNING, regs,
+                                 "vsyscall with bad stack (exploit attempt?)");
+               goto sigsegv;
        }
-       if (cpu)
-               *cpu = p & 0xfff;
-       if (node)
-               *node = p >> 12;
-       return 0;
-}
 
-long __vsyscall(3) venosys_1(void)
-{
-       return -ENOSYS;
-}
+       tsk = current;
+
+       /*
+        * Check for access_ok violations and find the syscall nr.
+        *
+        * NULL is a valid user pointer (in the access_ok sense) on 32-bit and
+        * 64-bit, so we don't need to special-case it here.  For all the
+        * vsyscalls, NULL means "don't write anything" not "write it at
+        * address 0".
+        */
+       switch (vsyscall_nr) {
+       case 0:
+               if (!write_ok_or_segv(regs->di, sizeof(struct timeval)) ||
+                   !write_ok_or_segv(regs->si, sizeof(struct timezone))) {
+                       ret = -EFAULT;
+                       goto check_fault;
+               }
+
+               syscall_nr = __NR_gettimeofday;
+               break;
 
-#ifdef CONFIG_SYSCTL
+       case 1:
+               if (!write_ok_or_segv(regs->di, sizeof(time_t))) {
+                       ret = -EFAULT;
+                       goto check_fault;
+               }
 
-#define SYSCALL 0x050f
-#define NOP2    0x9090
+               syscall_nr = __NR_time;
+               break;
 
-/*
- * NOP out syscall in vsyscall page when not needed.
- */
-static int vsyscall_sysctl_change(ctl_table *ctl, int write, struct file * filp,
-                        void __user *buffer, size_t *lenp, loff_t *ppos)
-{
-       extern u16 vsysc1, vsysc2;
-       u16 __iomem *map1;
-       u16 __iomem *map2;
-       int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
-       if (!write)
-               return ret;
-       /* gcc has some trouble with __va(__pa()), so just do it this
-          way. */
-       map1 = ioremap(__pa_vsymbol(&vsysc1), 2);
-       if (!map1)
-               return -ENOMEM;
-       map2 = ioremap(__pa_vsymbol(&vsysc2), 2);
-       if (!map2) {
-               ret = -ENOMEM;
-               goto out;
+       case 2:
+               if (!write_ok_or_segv(regs->di, sizeof(unsigned)) ||
+                   !write_ok_or_segv(regs->si, sizeof(unsigned))) {
+                       ret = -EFAULT;
+                       goto check_fault;
+               }
+
+               syscall_nr = __NR_getcpu;
+               break;
        }
-       if (!vsyscall_gtod_data.sysctl_enabled) {
-               writew(SYSCALL, map1);
-               writew(SYSCALL, map2);
-       } else {
-               writew(NOP2, map1);
-               writew(NOP2, map2);
+
+       /*
+        * Handle seccomp.  regs->ip must be the original value.
+        * See seccomp_send_sigsys and Documentation/prctl/seccomp_filter.txt.
+        *
+        * We could optimize the seccomp disabled case, but performance
+        * here doesn't matter.
+        */
+       regs->orig_ax = syscall_nr;
+       regs->ax = -ENOSYS;
+       tmp = secure_computing(syscall_nr);
+       if ((!tmp && regs->orig_ax != syscall_nr) || regs->ip != address) {
+               warn_bad_vsyscall(KERN_DEBUG, regs,
+                                 "seccomp tried to change syscall nr or ip");
+               do_exit(SIGSYS);
+       }
+       if (tmp)
+               goto do_ret;  /* skip requested */
+
+       /*
+        * With a real vsyscall, page faults cause SIGSEGV.  We want to
+        * preserve that behavior to make writing exploits harder.
+        */
+       prev_sig_on_uaccess_error = current_thread_info()->sig_on_uaccess_error;
+       current_thread_info()->sig_on_uaccess_error = 1;
+
+       ret = -EFAULT;
+       switch (vsyscall_nr) {
+       case 0:
+               ret = sys_gettimeofday(
+                       (struct timeval __user *)regs->di,
+                       (struct timezone __user *)regs->si);
+               break;
+
+       case 1:
+               ret = sys_time((time_t __user *)regs->di);
+               break;
+
+       case 2:
+               ret = sys_getcpu((unsigned __user *)regs->di,
+                                (unsigned __user *)regs->si,
+                                NULL);
+               break;
        }
-       iounmap(map2);
-out:
-       iounmap(map1);
-       return ret;
-}
 
-static ctl_table kernel_table2[] = {
-       { .procname = "vsyscall64",
-         .data = &vsyscall_gtod_data.sysctl_enabled, .maxlen = sizeof(int),
-         .mode = 0644,
-         .proc_handler = vsyscall_sysctl_change },
-       {}
-};
+       current_thread_info()->sig_on_uaccess_error = prev_sig_on_uaccess_error;
 
-static ctl_table kernel_root_table2[] = {
-       { .ctl_name = CTL_KERN, .procname = "kernel", .mode = 0555,
-         .child = kernel_table2 },
-       {}
-};
+check_fault:
+       if (ret == -EFAULT) {
+               /* Bad news -- userspace fed a bad pointer to a vsyscall. */
+               warn_bad_vsyscall(KERN_INFO, regs,
+                                 "vsyscall fault (exploit attempt?)");
 
-#endif
+               /*
+                * If we failed to generate a signal for any reason,
+                * generate one here.  (This should be impossible.)
+                */
+               if (WARN_ON_ONCE(!sigismember(&tsk->pending.signal, SIGBUS) &&
+                                !sigismember(&tsk->pending.signal, SIGSEGV)))
+                       goto sigsegv;
 
-/* Assume __initcall executes before all user space. Hopefully kmod
-   doesn't violate that. We'll find out if it does. */
+               return true;  /* Don't emulate the ret. */
+       }
+
+       regs->ax = ret;
+
+do_ret:
+       /* Emulate a ret instruction. */
+       regs->ip = caller;
+       regs->sp += 8;
+       return true;
+
+sigsegv:
+       force_sig(SIGSEGV, current);
+       return true;
+}
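
The tail of emulate_vsyscall() replays what the hardware would have done on the real page: caller was fetched from the top of the user stack with get_user() earlier, and do_ret then performs the equivalent of the page's closing ret. Spelled out as comments:

    /*
     * The user executed:  call <vsyscall slot>  -- pushes the return address,
     *                                              then faults on the NX page.
     * do_ret fakes:       ret                   -- regs->ip = caller;
     *                                              regs->sp += 8;
     */
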
+
+/*
+ * Assume __initcall executes before all user space. Hopefully kmod
+ * doesn't violate that. We'll find out if it does.
+ */
 static void __cpuinit vsyscall_set_cpu(int cpu)
 {
-       unsigned long *d;
+       unsigned long d;
        unsigned long node = 0;
 #ifdef CONFIG_NUMA
        node = cpu_to_node(cpu);
@@ -294,14 +341,16 @@ static void __cpuinit vsyscall_set_cpu(int cpu)
        if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP))
                write_rdtscp_aux((node << 12) | cpu);
 
-       /* Store cpu number in limit so that it can be loaded quickly
-          in user space in vgetcpu.
-          12 bits for the CPU and 8 bits for the node. */
-       d = (unsigned long *)(cpu_gdt(cpu) + GDT_ENTRY_PER_CPU);
-       *d = 0x0f40000000000ULL;
-       *d |= cpu;
-       *d |= (node & 0xf) << 12;
-       *d |= (node >> 4) << 48;
+       /*
+        * Store cpu number in limit so that it can be loaded quickly
+        * in user space in vgetcpu. (12 bits for the CPU and 8 bits for the node)
+        */
+       d = 0x0f40000000000ULL;
+       d |= cpu;
+       d |= (node & 0xf) << 12;
+       d |= (node >> 4) << 48;
+
+       write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
 }
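
User space reads the cpu/node pair packed into this segment limit back with a single lsl instruction, exactly as the removed vgetcpu() above did. A minimal sketch; the selector value is an assumption spelled out below (GDT_ENTRY_PER_CPU = 15 with RPL 3, i.e. 15 * 8 + 3 = 0x7b):

    #include <stdio.h>

    /* assumed: __PER_CPU_SEG == GDT_ENTRY_PER_CPU * 8 + 3 == 0x7b */
    #define PER_CPU_SEL 0x7b

    int main(void)
    {
        unsigned int p, sel = PER_CPU_SEL;

        /* load the segment limit that vsyscall_set_cpu() wrote */
        asm("lsl %1, %0" : "=r" (p) : "r" (sel));
        printf("cpu=%u node=%u\n", p & 0xfff, p >> 12);
        return 0;
    }
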
 
 static void __cpuinit cpu_vsyscall_init(void *arg)
@@ -314,34 +363,40 @@ static int __cpuinit
 cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
 {
        long cpu = (long)arg;
+
        if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
-               smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 0, 1);
+               smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 1);
+
        return NOTIFY_DONE;
 }
 
-static void __init map_vsyscall(void)
+void __init map_vsyscall(void)
 {
-       extern char __vsyscall_0;
-       unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);
-
-       /* Note that VSYSCALL_MAPPED_PAGES must agree with the code below. */
-       __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
+       extern char __vsyscall_page;
+       unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
+       extern char __vvar_page;
+       unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
+
+       __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
+                    vsyscall_mode == NATIVE
+                    ? PAGE_KERNEL_VSYSCALL
+                    : PAGE_KERNEL_VVAR);
+       BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
+                    (unsigned long)VSYSCALL_START);
+
+       __set_fixmap(VVAR_PAGE, physaddr_vvar_page, PAGE_KERNEL_VVAR);
+       BUILD_BUG_ON((unsigned long)__fix_to_virt(VVAR_PAGE) !=
+                    (unsigned long)VVAR_ADDRESS);
 }
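
The protection choice is the whole trick: PAGE_KERNEL_VSYSCALL leaves the fixmap user-executable (native mode), while PAGE_KERNEL_VVAR maps it user-readable but non-executable, so a jump into it page-faults and lands in emulate_vsyscall(). Plain reads keep working either way; a quick sketch:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned char buf[16];

        /* the page stays mapped readable in both native and emulate modes */
        memcpy(buf, (const void *)0xffffffffff600000UL, sizeof(buf));
        printf("first bytes: %02x %02x %02x\n", buf[0], buf[1], buf[2]);
        return 0;
    }
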
 
 static int __init vsyscall_init(void)
 {
-       BUG_ON(((unsigned long) &vgettimeofday !=
-                       VSYSCALL_ADDR(__NR_vgettimeofday)));
-       BUG_ON((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime));
-       BUG_ON((VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE)));
-       BUG_ON((unsigned long) &vgetcpu != VSYSCALL_ADDR(__NR_vgetcpu));
-       map_vsyscall();
-#ifdef CONFIG_SYSCTL
-       register_sysctl_table(kernel_root_table2);
-#endif
-       on_each_cpu(cpu_vsyscall_init, NULL, 0, 1);
-       hotcpu_notifier(cpu_vsyscall_notifier, 0);
+       BUG_ON(VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE));
+
+       on_each_cpu(cpu_vsyscall_init, NULL, 1);
+       /* notifier priority > KVM */
+       hotcpu_notifier(cpu_vsyscall_notifier, 30);
+
        return 0;
 }
-
 __initcall(vsyscall_init);