Merge commit 'linus/master' into merge-linus
Arjan van de Ven [Fri, 17 Oct 2008 16:20:26 +0000 (09:20 -0700)]
Conflicts:

arch/x86/kvm/i8254.c

30 files changed:
arch/alpha/kernel/osf_sys.c
arch/ia64/kvm/kvm-ia64.c
arch/powerpc/oprofile/cell/spu_profiler.c
arch/x86/kvm/i8254.c
arch/x86/kvm/lapic.c
drivers/cpuidle/cpuidle.c
drivers/s390/crypto/ap_bus.c
fs/compat.c
fs/select.c
fs/timerfd.c
include/linux/hrtimer.h
include/linux/init_task.h
include/linux/poll.h
include/linux/prctl.h
include/linux/sched.h
include/linux/thread_info.h
include/linux/time.h
kernel/fork.c
kernel/futex.c
kernel/hrtimer.c
kernel/posix-timers.c
kernel/rtmutex.c
kernel/sched.c
kernel/sys.c
kernel/time.c
kernel/time/ntp.c
kernel/time/tick-sched.c
kernel/time/timer_list.c
net/sched/sch_cbq.c
sound/drivers/pcsp/pcsp_lib.c

diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 8509dad..8e19acb 100644
@@ -986,10 +986,12 @@ asmlinkage int
 osf_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp,
           struct timeval32 __user *tvp)
 {
-       s64 timeout = MAX_SCHEDULE_TIMEOUT;
+       struct timespec end_time, *to = NULL;
        if (tvp) {
                time_t sec, usec;
 
+               to = &end_time;
+
                if (!access_ok(VERIFY_READ, tvp, sizeof(*tvp))
                    || __get_user(sec, &tvp->tv_sec)
                    || __get_user(usec, &tvp->tv_usec)) {
@@ -999,14 +1001,13 @@ osf_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp,
                if (sec < 0 || usec < 0)
                        return -EINVAL;
 
-               if ((unsigned long) sec < MAX_SELECT_SECONDS) {
-                       timeout = (usec + 1000000/HZ - 1) / (1000000/HZ);
-                       timeout += sec * (unsigned long) HZ;
-               }
+               if (poll_select_set_timeout(to, sec, usec * NSEC_PER_USEC))
+                       return -EINVAL;         
+
        }
 
        /* OSF does not copy back the remaining time.  */
-       return core_sys_select(n, inp, outp, exp, &timeout);
+       return core_sys_select(n, inp, outp, exp, to);
 }
 
 struct rusage32 {
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index c0699f0..a312c9e 100644
@@ -1114,7 +1114,7 @@ static void kvm_migrate_hlt_timer(struct kvm_vcpu *vcpu)
        struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
 
        if (hrtimer_cancel(p_ht))
-               hrtimer_start(p_ht, p_ht->expires, HRTIMER_MODE_ABS);
+               hrtimer_start_expires(p_ht, HRTIMER_MODE_ABS);
 }
 
 static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data)
diff --git a/arch/powerpc/oprofile/cell/spu_profiler.c b/arch/powerpc/oprofile/cell/spu_profiler.c
index 380d7e2..02ffe06 100644
@@ -196,7 +196,7 @@ int start_spu_profiling(unsigned int cycles_reset)
        pr_debug("timer resolution: %lu\n", TICK_NSEC);
        kt = ktime_set(0, profiling_interval);
        hrtimer_init(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-       timer.expires = kt;
+       hrtimer_set_expires(&timer, kt);
        timer.function = profile_spus;
 
        /* Allocate arrays for collecting SPU PC samples */
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 634132a..11c6725 100644
@@ -204,10 +204,10 @@ static int __pit_timer_fn(struct kvm_kpit_state *ps)
        if (vcpu0 && waitqueue_active(&vcpu0->wq))
                wake_up_interruptible(&vcpu0->wq);
 
-       pt->timer.expires = ktime_add_ns(pt->timer.expires, pt->period);
-       pt->scheduled = ktime_to_ns(pt->timer.expires);
+       hrtimer_add_expires_ns(&pt->timer, pt->period);
+       pt->scheduled = hrtimer_get_expires_ns(&pt->timer);
        if (pt->period)
-               ps->channels[0].count_load_time = pt->timer.expires;
+               ps->channels[0].count_load_time = hrtimer_get_expires(&pt->timer);
 
        return (pt->period == 0 ? 0 : 1);
 }
@@ -257,7 +257,7 @@ void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
 
        timer = &pit->pit_state.pit_timer.timer;
        if (hrtimer_cancel(timer))
-               hrtimer_start(timer, timer->expires, HRTIMER_MODE_ABS);
+               hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
 }
 
 static void destroy_pit_timer(struct kvm_kpit_timer *pt)
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 6571926..0fc3cab 100644
@@ -946,9 +946,7 @@ static int __apic_timer_fn(struct kvm_lapic *apic)
 
        if (apic_lvtt_period(apic)) {
                result = 1;
-               apic->timer.dev.expires = ktime_add_ns(
-                                       apic->timer.dev.expires,
-                                       apic->timer.period);
+               hrtimer_add_expires_ns(&apic->timer.dev, apic->timer.period);
        }
        return result;
 }
@@ -1117,7 +1115,7 @@ void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
 
        timer = &apic->timer.dev;
        if (hrtimer_cancel(timer))
-               hrtimer_start(timer, timer->expires, HRTIMER_MODE_ABS);
+               hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
 }
 
 void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 5ce07b5..2e31484 100644
@@ -16,6 +16,7 @@
 #include <linux/cpu.h>
 #include <linux/cpuidle.h>
 #include <linux/ktime.h>
+#include <linux/hrtimer.h>
 
 #include "cpuidle.h"
 
@@ -60,6 +61,12 @@ static void cpuidle_idle_call(void)
                return;
        }
 
+       /*
+        * Run any timers that can be run now, before
+        * calculating the idle duration etc.
+        */
+       hrtimer_peek_ahead_timers();
+
        /* ask the governor for the next state */
        next_state = cpuidle_curr_governor->select(dev);
        if (need_resched())
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 326db1e..e3fe683 100644
@@ -659,9 +659,9 @@ static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
        hr_time = ktime_set(0, poll_timeout);
 
        if (!hrtimer_is_queued(&ap_poll_timer) ||
-           !hrtimer_forward(&ap_poll_timer, ap_poll_timer.expires, hr_time)) {
-               ap_poll_timer.expires = hr_time;
-               hrtimer_start(&ap_poll_timer, hr_time, HRTIMER_MODE_ABS);
+           !hrtimer_forward(&ap_poll_timer, hrtimer_get_expires(&ap_poll_timer), hr_time)) {
+               hrtimer_set_expires(&ap_poll_timer, hr_time);
+               hrtimer_start_expires(&ap_poll_timer, HRTIMER_MODE_ABS);
        }
        return count;
 }
diff --git a/fs/compat.c b/fs/compat.c
index 5f9ec44..3b58c32 100644
@@ -1475,6 +1475,57 @@ out_ret:
 
 #define __COMPAT_NFDBITS       (8 * sizeof(compat_ulong_t))
 
+static int poll_select_copy_remaining(struct timespec *end_time, void __user *p,
+                                     int timeval, int ret)
+{
+       struct timespec ts;
+
+       if (!p)
+               return ret;
+
+       if (current->personality & STICKY_TIMEOUTS)
+               goto sticky;
+
+       /* No update for zero timeout */
+       if (!end_time->tv_sec && !end_time->tv_nsec)
+               return ret;
+
+       ktime_get_ts(&ts);
+       ts = timespec_sub(*end_time, ts);
+       if (ts.tv_sec < 0)
+               ts.tv_sec = ts.tv_nsec = 0;
+
+       if (timeval) {
+               struct compat_timeval rtv;
+
+               rtv.tv_sec = ts.tv_sec;
+               rtv.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
+
+               if (!copy_to_user(p, &rtv, sizeof(rtv)))
+                       return ret;
+       } else {
+               struct compat_timespec rts;
+
+               rts.tv_sec = ts.tv_sec;
+               rts.tv_nsec = ts.tv_nsec;
+
+               if (!copy_to_user(p, &rts, sizeof(rts)))
+                       return ret;
+       }
+       /*
+        * If an application puts its timeval in read-only memory, we
+        * don't want the Linux-specific update to the timeval to
+        * cause a fault after the select has completed
+        * successfully. However, because we're not updating the
+        * timeval, we can't restart the system call.
+        */
+
+sticky:
+       if (ret == -ERESTARTNOHAND)
+               ret = -EINTR;
+       return ret;
+}
+
 /*
  * Ooo, nasty.  We need here to frob 32-bit unsigned longs to
  * 64-bit unsigned longs.
@@ -1556,7 +1607,8 @@ int compat_set_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
        ((unsigned long) (MAX_SCHEDULE_TIMEOUT / HZ)-1)
 
 int compat_core_sys_select(int n, compat_ulong_t __user *inp,
-       compat_ulong_t __user *outp, compat_ulong_t __user *exp, s64 *timeout)
+       compat_ulong_t __user *outp, compat_ulong_t __user *exp,
+       struct timespec *end_time)
 {
        fd_set_bits fds;
        void *bits;
@@ -1603,7 +1655,7 @@ int compat_core_sys_select(int n, compat_ulong_t __user *inp,
        zero_fd_set(n, fds.res_out);
        zero_fd_set(n, fds.res_ex);
 
-       ret = do_select(n, &fds, timeout);
+       ret = do_select(n, &fds, end_time);
 
        if (ret < 0)
                goto out;
@@ -1629,7 +1681,7 @@ asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp,
        compat_ulong_t __user *outp, compat_ulong_t __user *exp,
        struct compat_timeval __user *tvp)
 {
-       s64 timeout = -1;
+       struct timespec end_time, *to = NULL;
        struct compat_timeval tv;
        int ret;
 
@@ -1637,43 +1689,14 @@ asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp,
                if (copy_from_user(&tv, tvp, sizeof(tv)))
                        return -EFAULT;
 
-               if (tv.tv_sec < 0 || tv.tv_usec < 0)
+               to = &end_time;
+               if (poll_select_set_timeout(to, tv.tv_sec,
+                                           tv.tv_usec * NSEC_PER_USEC))
                        return -EINVAL;
-
-               /* Cast to u64 to make GCC stop complaining */
-               if ((u64)tv.tv_sec >= (u64)MAX_INT64_SECONDS)
-                       timeout = -1;   /* infinite */
-               else {
-                       timeout = DIV_ROUND_UP(tv.tv_usec, 1000000/HZ);
-                       timeout += tv.tv_sec * HZ;
-               }
        }
 
-       ret = compat_core_sys_select(n, inp, outp, exp, &timeout);
-
-       if (tvp) {
-               struct compat_timeval rtv;
-
-               if (current->personality & STICKY_TIMEOUTS)
-                       goto sticky;
-               rtv.tv_usec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ));
-               rtv.tv_sec = timeout;
-               if (compat_timeval_compare(&rtv, &tv) >= 0)
-                       rtv = tv;
-               if (copy_to_user(tvp, &rtv, sizeof(rtv))) {
-sticky:
-                       /*
-                        * If an application puts its timeval in read-only
-                        * memory, we don't want the Linux-specific update to
-                        * the timeval to cause a fault after the select has
-                        * completed successfully. However, because we're not
-                        * updating the timeval, we can't restart the system
-                        * call.
-                        */
-                       if (ret == -ERESTARTNOHAND)
-                               ret = -EINTR;
-               }
-       }
+       ret = compat_core_sys_select(n, inp, outp, exp, to);
+       ret = poll_select_copy_remaining(&end_time, tvp, 1, ret);
 
        return ret;
 }
@@ -1686,15 +1709,16 @@ asmlinkage long compat_sys_pselect7(int n, compat_ulong_t __user *inp,
 {
        compat_sigset_t ss32;
        sigset_t ksigmask, sigsaved;
-       s64 timeout = MAX_SCHEDULE_TIMEOUT;
        struct compat_timespec ts;
+       struct timespec end_time, *to = NULL;
        int ret;
 
        if (tsp) {
                if (copy_from_user(&ts, tsp, sizeof(ts)))
                        return -EFAULT;
 
-               if (ts.tv_sec < 0 || ts.tv_nsec < 0)
+               to = &end_time;
+               if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
                        return -EINVAL;
        }
 
@@ -1709,51 +1733,8 @@ asmlinkage long compat_sys_pselect7(int n, compat_ulong_t __user *inp,
                sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
        }
 
-       do {
-               if (tsp) {
-                       if ((unsigned long)ts.tv_sec < MAX_SELECT_SECONDS) {
-                               timeout = DIV_ROUND_UP(ts.tv_nsec, 1000000000/HZ);
-                               timeout += ts.tv_sec * (unsigned long)HZ;
-                               ts.tv_sec = 0;
-                               ts.tv_nsec = 0;
-                       } else {
-                               ts.tv_sec -= MAX_SELECT_SECONDS;
-                               timeout = MAX_SELECT_SECONDS * HZ;
-                       }
-               }
-
-               ret = compat_core_sys_select(n, inp, outp, exp, &timeout);
-
-       } while (!ret && !timeout && tsp && (ts.tv_sec || ts.tv_nsec));
-
-       if (tsp) {
-               struct compat_timespec rts;
-
-               if (current->personality & STICKY_TIMEOUTS)
-                       goto sticky;
-
-               rts.tv_sec = timeout / HZ;
-               rts.tv_nsec = (timeout % HZ) * (NSEC_PER_SEC/HZ);
-               if (rts.tv_nsec >= NSEC_PER_SEC) {
-                       rts.tv_sec++;
-                       rts.tv_nsec -= NSEC_PER_SEC;
-               }
-               if (compat_timespec_compare(&rts, &ts) >= 0)
-                       rts = ts;
-               if (copy_to_user(tsp, &rts, sizeof(rts))) {
-sticky:
-                       /*
-                        * If an application puts its timeval in read-only
-                        * memory, we don't want the Linux-specific update to
-                        * the timeval to cause a fault after the select has
-                        * completed successfully. However, because we're not
-                        * updating the timeval, we can't restart the system
-                        * call.
-                        */
-                       if (ret == -ERESTARTNOHAND)
-                               ret = -EINTR;
-               }
-       }
+       ret = compat_core_sys_select(n, inp, outp, exp, to);
+       ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);
 
        if (ret == -ERESTARTNOHAND) {
                /*
@@ -1798,18 +1779,16 @@ asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds,
        compat_sigset_t ss32;
        sigset_t ksigmask, sigsaved;
        struct compat_timespec ts;
-       s64 timeout = -1;
+       struct timespec end_time, *to = NULL;
        int ret;
 
        if (tsp) {
                if (copy_from_user(&ts, tsp, sizeof(ts)))
                        return -EFAULT;
 
-               /* We assume that ts.tv_sec is always lower than
-                  the number of seconds that can be expressed in
-                  an s64. Otherwise the compiler bitches at us */
-               timeout = DIV_ROUND_UP(ts.tv_nsec, 1000000000/HZ);
-               timeout += ts.tv_sec * HZ;
+               to = &end_time;
+               if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
+                       return -EINVAL;
        }
 
        if (sigmask) {
@@ -1823,7 +1802,7 @@ asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds,
                sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
        }
 
-       ret = do_sys_poll(ufds, nfds, &timeout);
+       ret = do_sys_poll(ufds, nfds, to);
 
        /* We can restart this syscall, usually */
        if (ret == -EINTR) {
@@ -1841,31 +1820,7 @@ asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds,
        } else if (sigmask)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);
 
-       if (tsp && timeout >= 0) {
-               struct compat_timespec rts;
-
-               if (current->personality & STICKY_TIMEOUTS)
-                       goto sticky;
-               /* Yes, we know it's actually an s64, but it's also positive. */
-               rts.tv_nsec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ)) *
-                                       1000;
-               rts.tv_sec = timeout;
-               if (compat_timespec_compare(&rts, &ts) >= 0)
-                       rts = ts;
-               if (copy_to_user(tsp, &rts, sizeof(rts))) {
-sticky:
-                       /*
-                        * If an application puts its timeval in read-only
-                        * memory, we don't want the Linux-specific update to
-                        * the timeval to cause a fault after the select has
-                        * completed successfully. However, because we're not
-                        * updating the timeval, we can't restart the system
-                        * call.
-                        */
-                       if (ret == -ERESTARTNOHAND && timeout >= 0)
-                               ret = -EINTR;
-               }
-       }
+       ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);
 
        return ret;
 }
diff --git a/fs/select.c b/fs/select.c
index da0e882..448e440 100644
 #include <linux/fdtable.h>
 #include <linux/fs.h>
 #include <linux/rcupdate.h>
+#include <linux/hrtimer.h>
 
 #include <asm/uaccess.h>
 
+
+/*
+ * Estimate expected accuracy in ns from a timeval.
+ *
+ * After quite a bit of churning around, we've settled on
+ * a simple thing of taking 0.1% of the timeout as the
+ * slack, with a cap of 100 msec.
+ * "nice" tasks get a 0.5% slack instead.
+ *
+ * Consider this comment an open invitation to come up with even
+ * better solutions.
+ */
+
+static long __estimate_accuracy(struct timespec *tv)
+{
+       long slack;
+       int divfactor = 1000;
+
+       if (task_nice(current) > 0)
+               divfactor = divfactor / 5;
+
+       slack = tv->tv_nsec / divfactor;
+       slack += tv->tv_sec * (NSEC_PER_SEC/divfactor);
+
+       if (slack > 100 * NSEC_PER_MSEC)
+               slack =  100 * NSEC_PER_MSEC;
+
+       if (slack < 0)
+               slack = 0;
+       return slack;
+}
+
+static long estimate_accuracy(struct timespec *tv)
+{
+       unsigned long ret;
+       struct timespec now;
+
+       /*
+        * Realtime tasks get a slack of 0 for obvious reasons.
+        */
+
+       if (rt_task(current))
+               return 0;
+
+       ktime_get_ts(&now);
+       now = timespec_sub(*tv, now);
+       ret = __estimate_accuracy(&now);
+       if (ret < current->timer_slack_ns)
+               return current->timer_slack_ns;
+       return ret;
+}
+
+
+
 struct poll_table_page {
        struct poll_table_page * next;
        struct poll_table_entry * entry;
@@ -130,6 +185,79 @@ static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
        add_wait_queue(wait_address, &entry->wait);
 }
 
+/**
+ * poll_select_set_timeout - helper function to set up the timeout value
+ * @to:                pointer to timespec variable for the final timeout
+ * @sec:       seconds (from user space)
+ * @nsec:      nanoseconds (from user space)
+ *
+ * Note, we do not use a timespec for the user space value here. That
+ * way we can use the function for timeval and compat interfaces as well.
+ *
+ * Returns -EINVAL if sec/nsec are not normalized. Otherwise 0.
+ */
+int poll_select_set_timeout(struct timespec *to, long sec, long nsec)
+{
+       struct timespec ts = {.tv_sec = sec, .tv_nsec = nsec};
+
+       if (!timespec_valid(&ts))
+               return -EINVAL;
+
+       /* Optimize for the zero timeout value here */
+       if (!sec && !nsec) {
+               to->tv_sec = to->tv_nsec = 0;
+       } else {
+               ktime_get_ts(to);
+               *to = timespec_add_safe(*to, ts);
+       }
+       return 0;
+}
+
+static int poll_select_copy_remaining(struct timespec *end_time, void __user *p,
+                                     int timeval, int ret)
+{
+       struct timespec rts;
+       struct timeval rtv;
+
+       if (!p)
+               return ret;
+
+       if (current->personality & STICKY_TIMEOUTS)
+               goto sticky;
+
+       /* No update for zero timeout */
+       if (!end_time->tv_sec && !end_time->tv_nsec)
+               return ret;
+
+       ktime_get_ts(&rts);
+       rts = timespec_sub(*end_time, rts);
+       if (rts.tv_sec < 0)
+               rts.tv_sec = rts.tv_nsec = 0;
+
+       if (timeval) {
+               rtv.tv_sec = rts.tv_sec;
+               rtv.tv_usec = rts.tv_nsec / NSEC_PER_USEC;
+
+               if (!copy_to_user(p, &rtv, sizeof(rtv)))
+                       return ret;
+
+       } else if (!copy_to_user(p, &rts, sizeof(rts)))
+               return ret;
+
+       /*
+        * If an application puts its timeval in read-only memory, we
+        * don't want the Linux-specific update to the timeval to
+        * cause a fault after the select has completed
+        * successfully. However, because we're not updating the
+        * timeval, we can't restart the system call.
+        */
+
+sticky:
+       if (ret == -ERESTARTNOHAND)
+               ret = -EINTR;
+       return ret;
+}
+
 #define FDS_IN(fds, n)         (fds->in + n)
 #define FDS_OUT(fds, n)                (fds->out + n)
 #define FDS_EX(fds, n)         (fds->ex + n)
@@ -182,11 +310,13 @@ get_max:
 #define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
 #define POLLEX_SET (POLLPRI)
 
-int do_select(int n, fd_set_bits *fds, s64 *timeout)
+int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
 {
+       ktime_t expire, *to = NULL;
        struct poll_wqueues table;
        poll_table *wait;
-       int retval, i;
+       int retval, i, timed_out = 0;
+       unsigned long slack = 0;
 
        rcu_read_lock();
        retval = max_select_fd(n, fds);
@@ -198,12 +328,17 @@ int do_select(int n, fd_set_bits *fds, s64 *timeout)
 
        poll_initwait(&table);
        wait = &table.pt;
-       if (!*timeout)
+       if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
                wait = NULL;
+               timed_out = 1;
+       }
+
+       if (end_time && !timed_out)
+               slack = estimate_accuracy(end_time);
+
        retval = 0;
        for (;;) {
                unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;
-               long __timeout;
 
                set_current_state(TASK_INTERRUPTIBLE);
 
@@ -259,27 +394,25 @@ int do_select(int n, fd_set_bits *fds, s64 *timeout)
                        cond_resched();
                }
                wait = NULL;
-               if (retval || !*timeout || signal_pending(current))
+               if (retval || timed_out || signal_pending(current))
                        break;
                if (table.error) {
                        retval = table.error;
                        break;
                }
 
-               if (*timeout < 0) {
-                       /* Wait indefinitely */
-                       __timeout = MAX_SCHEDULE_TIMEOUT;
-               } else if (unlikely(*timeout >= (s64)MAX_SCHEDULE_TIMEOUT - 1)) {
-                       /* Wait for longer than MAX_SCHEDULE_TIMEOUT. Do it in a loop */
-                       __timeout = MAX_SCHEDULE_TIMEOUT - 1;
-                       *timeout -= __timeout;
-               } else {
-                       __timeout = *timeout;
-                       *timeout = 0;
+               /*
+                * If this is the first loop and we have a timeout
+                * given, then we convert to ktime_t and set the to
+                * pointer to the expiry value.
+                */
+               if (end_time && !to) {
+                       expire = timespec_to_ktime(*end_time);
+                       to = &expire;
                }
-               __timeout = schedule_timeout(__timeout);
-               if (*timeout >= 0)
-                       *timeout += __timeout;
+
+               if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS))
+                       timed_out = 1;
        }
        __set_current_state(TASK_RUNNING);
 
@@ -300,7 +433,7 @@ int do_select(int n, fd_set_bits *fds, s64 *timeout)
        ((unsigned long) (MAX_SCHEDULE_TIMEOUT / HZ)-1)
 
 int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
-                          fd_set __user *exp, s64 *timeout)
+                          fd_set __user *exp, struct timespec *end_time)
 {
        fd_set_bits fds;
        void *bits;
@@ -351,7 +484,7 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
        zero_fd_set(n, fds.res_out);
        zero_fd_set(n, fds.res_ex);
 
-       ret = do_select(n, &fds, timeout);
+       ret = do_select(n, &fds, end_time);
 
        if (ret < 0)
                goto out;
@@ -377,7 +510,7 @@ out_nofds:
 asmlinkage long sys_select(int n, fd_set __user *inp, fd_set __user *outp,
                        fd_set __user *exp, struct timeval __user *tvp)
 {
-       s64 timeout = -1;
+       struct timespec end_time, *to = NULL;
        struct timeval tv;
        int ret;
 
@@ -385,43 +518,14 @@ asmlinkage long sys_select(int n, fd_set __user *inp, fd_set __user *outp,
                if (copy_from_user(&tv, tvp, sizeof(tv)))
                        return -EFAULT;
 
-               if (tv.tv_sec < 0 || tv.tv_usec < 0)
+               to = &end_time;
+               if (poll_select_set_timeout(to, tv.tv_sec,
+                                           tv.tv_usec * NSEC_PER_USEC))
                        return -EINVAL;
-
-               /* Cast to u64 to make GCC stop complaining */
-               if ((u64)tv.tv_sec >= (u64)MAX_INT64_SECONDS)
-                       timeout = -1;   /* infinite */
-               else {
-                       timeout = DIV_ROUND_UP(tv.tv_usec, USEC_PER_SEC/HZ);
-                       timeout += tv.tv_sec * HZ;
-               }
        }
 
-       ret = core_sys_select(n, inp, outp, exp, &timeout);
-
-       if (tvp) {
-               struct timeval rtv;
-
-               if (current->personality & STICKY_TIMEOUTS)
-                       goto sticky;
-               rtv.tv_usec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ));
-               rtv.tv_sec = timeout;
-               if (timeval_compare(&rtv, &tv) >= 0)
-                       rtv = tv;
-               if (copy_to_user(tvp, &rtv, sizeof(rtv))) {
-sticky:
-                       /*
-                        * If an application puts its timeval in read-only
-                        * memory, we don't want the Linux-specific update to
-                        * the timeval to cause a fault after the select has
-                        * completed successfully. However, because we're not
-                        * updating the timeval, we can't restart the system
-                        * call.
-                        */
-                       if (ret == -ERESTARTNOHAND)
-                               ret = -EINTR;
-               }
-       }
+       ret = core_sys_select(n, inp, outp, exp, to);
+       ret = poll_select_copy_remaining(&end_time, tvp, 1, ret);
 
        return ret;
 }
@@ -431,25 +535,17 @@ asmlinkage long sys_pselect7(int n, fd_set __user *inp, fd_set __user *outp,
                fd_set __user *exp, struct timespec __user *tsp,
                const sigset_t __user *sigmask, size_t sigsetsize)
 {
-       s64 timeout = MAX_SCHEDULE_TIMEOUT;
        sigset_t ksigmask, sigsaved;
-       struct timespec ts;
+       struct timespec ts, end_time, *to = NULL;
        int ret;
 
        if (tsp) {
                if (copy_from_user(&ts, tsp, sizeof(ts)))
                        return -EFAULT;
 
-               if (ts.tv_sec < 0 || ts.tv_nsec < 0)
+               to = &end_time;
+               if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
                        return -EINVAL;
-
-               /* Cast to u64 to make GCC stop complaining */
-               if ((u64)ts.tv_sec >= (u64)MAX_INT64_SECONDS)
-                       timeout = -1;   /* infinite */
-               else {
-                       timeout = DIV_ROUND_UP(ts.tv_nsec, NSEC_PER_SEC/HZ);
-                       timeout += ts.tv_sec * HZ;
-               }
        }
 
        if (sigmask) {
@@ -463,32 +559,8 @@ asmlinkage long sys_pselect7(int n, fd_set __user *inp, fd_set __user *outp,
                sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
        }
 
-       ret = core_sys_select(n, inp, outp, exp, &timeout);
-
-       if (tsp) {
-               struct timespec rts;
-
-               if (current->personality & STICKY_TIMEOUTS)
-                       goto sticky;
-               rts.tv_nsec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ)) *
-                                               1000;
-               rts.tv_sec = timeout;
-               if (timespec_compare(&rts, &ts) >= 0)
-                       rts = ts;
-               if (copy_to_user(tsp, &rts, sizeof(rts))) {
-sticky:
-                       /*
-                        * If an application puts its timeval in read-only
-                        * memory, we don't want the Linux-specific update to
-                        * the timeval to cause a fault after the select has
-                        * completed successfully. However, because we're not
-                        * updating the timeval, we can't restart the system
-                        * call.
-                        */
-                       if (ret == -ERESTARTNOHAND)
-                               ret = -EINTR;
-               }
-       }
+       ret = core_sys_select(n, inp, outp, exp, &end_time);
+       ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);
 
        if (ret == -ERESTARTNOHAND) {
                /*
@@ -574,18 +646,24 @@ static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait)
 }
 
 static int do_poll(unsigned int nfds,  struct poll_list *list,
-                  struct poll_wqueues *wait, s64 *timeout)
+                  struct poll_wqueues *wait, struct timespec *end_time)
 {
-       int count = 0;
        poll_table* pt = &wait->pt;
+       ktime_t expire, *to = NULL;
+       int timed_out = 0, count = 0;
+       unsigned long slack = 0;
 
        /* Optimise the no-wait case */
-       if (!(*timeout))
+       if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
                pt = NULL;
+               timed_out = 1;
+       }
+
+       if (end_time && !timed_out)
+               slack = estimate_accuracy(end_time);
 
        for (;;) {
                struct poll_list *walk;
-               long __timeout;
 
                set_current_state(TASK_INTERRUPTIBLE);
                for (walk = list; walk != NULL; walk = walk->next) {
@@ -617,27 +695,21 @@ static int do_poll(unsigned int nfds,  struct poll_list *list,
                        if (signal_pending(current))
                                count = -EINTR;
                }
-               if (count || !*timeout)
+               if (count || timed_out)
                        break;
 
-               if (*timeout < 0) {
-                       /* Wait indefinitely */
-                       __timeout = MAX_SCHEDULE_TIMEOUT;
-               } else if (unlikely(*timeout >= (s64)MAX_SCHEDULE_TIMEOUT-1)) {
-                       /*
-                        * Wait for longer than MAX_SCHEDULE_TIMEOUT. Do it in
-                        * a loop
-                        */
-                       __timeout = MAX_SCHEDULE_TIMEOUT - 1;
-                       *timeout -= __timeout;
-               } else {
-                       __timeout = *timeout;
-                       *timeout = 0;
+               /*
+                * If this is the first loop and we have a timeout
+                * given, then we convert to ktime_t and set the to
+                * pointer to the expiry value.
+                */
+               if (end_time && !to) {
+                       expire = timespec_to_ktime(*end_time);
+                       to = &expire;
                }
 
-               __timeout = schedule_timeout(__timeout);
-               if (*timeout >= 0)
-                       *timeout += __timeout;
+               if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS))
+                       timed_out = 1;
        }
        __set_current_state(TASK_RUNNING);
        return count;
@@ -646,7 +718,8 @@ static int do_poll(unsigned int nfds,  struct poll_list *list,
 #define N_STACK_PPS ((sizeof(stack_pps) - sizeof(struct poll_list))  / \
                        sizeof(struct pollfd))
 
-int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds, s64 *timeout)
+int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
+               struct timespec *end_time)
 {
        struct poll_wqueues table;
        int err = -EFAULT, fdcount, len, size;
@@ -686,7 +759,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds, s64 *timeout)
        }
 
        poll_initwait(&table);
-       fdcount = do_poll(nfds, head, &table, timeout);
+       fdcount = do_poll(nfds, head, &table, end_time);
        poll_freewait(&table);
 
        for (walk = head; walk; walk = walk->next) {
@@ -712,16 +785,21 @@ out_fds:
 
 static long do_restart_poll(struct restart_block *restart_block)
 {
-       struct pollfd __user *ufds = (struct pollfd __user*)restart_block->arg0;
-       int nfds = restart_block->arg1;
-       s64 timeout = ((s64)restart_block->arg3<<32) | (s64)restart_block->arg2;
+       struct pollfd __user *ufds = restart_block->poll.ufds;
+       int nfds = restart_block->poll.nfds;
+       struct timespec *to = NULL, end_time;
        int ret;
 
-       ret = do_sys_poll(ufds, nfds, &timeout);
+       if (restart_block->poll.has_timeout) {
+               end_time.tv_sec = restart_block->poll.tv_sec;
+               end_time.tv_nsec = restart_block->poll.tv_nsec;
+               to = &end_time;
+       }
+
+       ret = do_sys_poll(ufds, nfds, to);
+
        if (ret == -EINTR) {
                restart_block->fn = do_restart_poll;
-               restart_block->arg2 = timeout & 0xFFFFFFFF;
-               restart_block->arg3 = (u64)timeout >> 32;
                ret = -ERESTART_RESTARTBLOCK;
        }
        return ret;
@@ -730,31 +808,32 @@ static long do_restart_poll(struct restart_block *restart_block)
 asmlinkage long sys_poll(struct pollfd __user *ufds, unsigned int nfds,
                        long timeout_msecs)
 {
-       s64 timeout_jiffies;
+       struct timespec end_time, *to = NULL;
        int ret;
 
-       if (timeout_msecs > 0) {
-#if HZ > 1000
-               /* We can only overflow if HZ > 1000 */
-               if (timeout_msecs / 1000 > (s64)0x7fffffffffffffffULL / (s64)HZ)
-                       timeout_jiffies = -1;
-               else
-#endif
-                       timeout_jiffies = msecs_to_jiffies(timeout_msecs) + 1;
-       } else {
-               /* Infinite (< 0) or no (0) timeout */
-               timeout_jiffies = timeout_msecs;
+       if (timeout_msecs >= 0) {
+               to = &end_time;
+               poll_select_set_timeout(to, timeout_msecs / MSEC_PER_SEC,
+                       NSEC_PER_MSEC * (timeout_msecs % MSEC_PER_SEC));
        }
 
-       ret = do_sys_poll(ufds, nfds, &timeout_jiffies);
+       ret = do_sys_poll(ufds, nfds, to);
+
        if (ret == -EINTR) {
                struct restart_block *restart_block;
+
                restart_block = &current_thread_info()->restart_block;
                restart_block->fn = do_restart_poll;
-               restart_block->arg0 = (unsigned long)ufds;
-               restart_block->arg1 = nfds;
-               restart_block->arg2 = timeout_jiffies & 0xFFFFFFFF;
-               restart_block->arg3 = (u64)timeout_jiffies >> 32;
+               restart_block->poll.ufds = ufds;
+               restart_block->poll.nfds = nfds;
+
+               if (timeout_msecs >= 0) {
+                       restart_block->poll.tv_sec = end_time.tv_sec;
+                       restart_block->poll.tv_nsec = end_time.tv_nsec;
+                       restart_block->poll.has_timeout = 1;
+               } else
+                       restart_block->poll.has_timeout = 0;
+
                ret = -ERESTART_RESTARTBLOCK;
        }
        return ret;
@@ -766,21 +845,16 @@ asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds,
        size_t sigsetsize)
 {
        sigset_t ksigmask, sigsaved;
-       struct timespec ts;
-       s64 timeout = -1;
+       struct timespec ts, end_time, *to = NULL;
        int ret;
 
        if (tsp) {
                if (copy_from_user(&ts, tsp, sizeof(ts)))
                        return -EFAULT;
 
-               /* Cast to u64 to make GCC stop complaining */
-               if ((u64)ts.tv_sec >= (u64)MAX_INT64_SECONDS)
-                       timeout = -1;   /* infinite */
-               else {
-                       timeout = DIV_ROUND_UP(ts.tv_nsec, NSEC_PER_SEC/HZ);
-                       timeout += ts.tv_sec * HZ;
-               }
+               to = &end_time;
+               if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
+                       return -EINVAL;
        }
 
        if (sigmask) {
@@ -794,7 +868,7 @@ asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds,
                sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
        }
 
-       ret = do_sys_poll(ufds, nfds, &timeout);
+       ret = do_sys_poll(ufds, nfds, to);
 
        /* We can restart this syscall, usually */
        if (ret == -EINTR) {
@@ -812,31 +886,7 @@ asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds,
        } else if (sigmask)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);
 
-       if (tsp && timeout >= 0) {
-               struct timespec rts;
-
-               if (current->personality & STICKY_TIMEOUTS)
-                       goto sticky;
-               /* Yes, we know it's actually an s64, but it's also positive. */
-               rts.tv_nsec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ)) *
-                                               1000;
-               rts.tv_sec = timeout;
-               if (timespec_compare(&rts, &ts) >= 0)
-                       rts = ts;
-               if (copy_to_user(tsp, &rts, sizeof(rts))) {
-               sticky:
-                       /*
-                        * If an application puts its timeval in read-only
-                        * memory, we don't want the Linux-specific update to
-                        * the timeval to cause a fault after the select has
-                        * completed successfully. However, because we're not
-                        * updating the timeval, we can't restart the system
-                        * call.
-                        */
-                       if (ret == -ERESTARTNOHAND && timeout >= 0)
-                               ret = -EINTR;
-               }
-       }
+       ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);
 
        return ret;
 }
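
To make the slack estimate added at the top of fs/select.c concrete: a normal task selecting with 250 msec left until end_time gets 250 msec / 1000 = 250 usec of slack, a positively niced task gets 250 msec / 200 = 1.25 msec, the estimate is capped at 100 msec, anything below the task's timer_slack_ns (50 usec by default, see the init_task.h hunk below) is raised to that value, and realtime tasks always get zero slack.
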
diff --git a/fs/timerfd.c b/fs/timerfd.c
index c502c60..0862f0e 100644
@@ -52,11 +52,9 @@ static enum hrtimer_restart timerfd_tmrproc(struct hrtimer *htmr)
 
 static ktime_t timerfd_get_remaining(struct timerfd_ctx *ctx)
 {
-       ktime_t now, remaining;
-
-       now = ctx->tmr.base->get_time();
-       remaining = ktime_sub(ctx->tmr.expires, now);
+       ktime_t remaining;
 
+       remaining = hrtimer_expires_remaining(&ctx->tmr);
        return remaining.tv64 < 0 ? ktime_set(0, 0): remaining;
 }
 
@@ -74,7 +72,7 @@ static void timerfd_setup(struct timerfd_ctx *ctx, int flags,
        ctx->ticks = 0;
        ctx->tintv = timespec_to_ktime(ktmr->it_interval);
        hrtimer_init(&ctx->tmr, ctx->clockid, htmode);
-       ctx->tmr.expires = texp;
+       hrtimer_set_expires(&ctx->tmr, texp);
        ctx->tmr.function = timerfd_tmrproc;
        if (texp.tv64 != 0)
                hrtimer_start(&ctx->tmr, texp, htmode);
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 2f245fe..cb25c1c 100644
@@ -20,6 +20,8 @@
 #include <linux/init.h>
 #include <linux/list.h>
 #include <linux/wait.h>
+#include <linux/percpu.h>
+
 
 struct hrtimer_clock_base;
 struct hrtimer_cpu_base;
@@ -121,7 +123,8 @@ enum hrtimer_cb_mode {
  */
 struct hrtimer {
        struct rb_node                  node;
-       ktime_t                         expires;
+       ktime_t                         _expires;
+       ktime_t                         _softexpires;
        enum hrtimer_restart            (*function)(struct hrtimer *);
        struct hrtimer_clock_base       *base;
        unsigned long                   state;
@@ -207,6 +210,71 @@ struct hrtimer_cpu_base {
 #endif
 };
 
+static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
+{
+       timer->_expires = time;
+       timer->_softexpires = time;
+}
+
+static inline void hrtimer_set_expires_range(struct hrtimer *timer, ktime_t time, ktime_t delta)
+{
+       timer->_softexpires = time;
+       timer->_expires = ktime_add_safe(time, delta);
+}
+
+static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, unsigned long delta)
+{
+       timer->_softexpires = time;
+       timer->_expires = ktime_add_safe(time, ns_to_ktime(delta));
+}
+
+static inline void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64)
+{
+       timer->_expires.tv64 = tv64;
+       timer->_softexpires.tv64 = tv64;
+}
+
+static inline void hrtimer_add_expires(struct hrtimer *timer, ktime_t time)
+{
+       timer->_expires = ktime_add_safe(timer->_expires, time);
+       timer->_softexpires = ktime_add_safe(timer->_softexpires, time);
+}
+
+static inline void hrtimer_add_expires_ns(struct hrtimer *timer, unsigned long ns)
+{
+       timer->_expires = ktime_add_ns(timer->_expires, ns);
+       timer->_softexpires = ktime_add_ns(timer->_softexpires, ns);
+}
+
+static inline ktime_t hrtimer_get_expires(const struct hrtimer *timer)
+{
+       return timer->_expires;
+}
+
+static inline ktime_t hrtimer_get_softexpires(const struct hrtimer *timer)
+{
+       return timer->_softexpires;
+}
+
+static inline s64 hrtimer_get_expires_tv64(const struct hrtimer *timer)
+{
+       return timer->_expires.tv64;
+}
+static inline s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer)
+{
+       return timer->_softexpires.tv64;
+}
+
+static inline s64 hrtimer_get_expires_ns(const struct hrtimer *timer)
+{
+       return ktime_to_ns(timer->_expires);
+}
+
+static inline ktime_t hrtimer_expires_remaining(const struct hrtimer *timer)
+{
+       return ktime_sub(timer->_expires, timer->base->get_time());
+}
+
 #ifdef CONFIG_HIGH_RES_TIMERS
 struct clock_event_device;
 
@@ -227,6 +295,8 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer)
        return timer->base->cpu_base->hres_active;
 }
 
+extern void hrtimer_peek_ahead_timers(void);
+
 /*
  * The resolution of the clocks. The resolution value is returned in
  * the clock_getres() system call to give application programmers an
@@ -249,6 +319,7 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer)
  * is expired in the next softirq when the clock was advanced.
  */
 static inline void clock_was_set(void) { }
+static inline void hrtimer_peek_ahead_timers(void) { }
 
 static inline void hres_timers_resume(void) { }
 
@@ -270,6 +341,10 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer)
 extern ktime_t ktime_get(void);
 extern ktime_t ktime_get_real(void);
 
+
+DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
+
+
 /* Exported timer functions: */
 
 /* Initialize timers: */
@@ -294,12 +369,25 @@ static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { }
 /* Basic timer operations: */
 extern int hrtimer_start(struct hrtimer *timer, ktime_t tim,
                         const enum hrtimer_mode mode);
+extern int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+                       unsigned long range_ns, const enum hrtimer_mode mode);
 extern int hrtimer_cancel(struct hrtimer *timer);
 extern int hrtimer_try_to_cancel(struct hrtimer *timer);
 
+static inline int hrtimer_start_expires(struct hrtimer *timer,
+                                               enum hrtimer_mode mode)
+{
+       unsigned long delta;
+       ktime_t soft, hard;
+       soft = hrtimer_get_softexpires(timer);
+       hard = hrtimer_get_expires(timer);
+       delta = ktime_to_ns(ktime_sub(hard, soft));
+       return hrtimer_start_range_ns(timer, soft, delta, mode);
+}
+
 static inline int hrtimer_restart(struct hrtimer *timer)
 {
-       return hrtimer_start(timer, timer->expires, HRTIMER_MODE_ABS);
+       return hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
 }
 
 /* Query timers: */
@@ -356,6 +444,10 @@ extern long hrtimer_nanosleep_restart(struct restart_block *restart_block);
 extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
                                 struct task_struct *tsk);
 
+extern int schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
+                                               const enum hrtimer_mode mode);
+extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
+
 /* Soft interrupt function to run the hrtimer queues: */
 extern void hrtimer_run_queues(void);
 extern void hrtimer_run_pending(void);
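
As an illustration only (a hypothetical caller, not part of this diff), the new expiry accessors and hrtimer_start_expires() declared above are meant to be combined roughly as follows; the futex and nanosleep changes further down follow the same pattern:

    /*
     * Arm 'timer' at the absolute time 'expiry', allowing the hard expiry
     * to be deferred by up to 'slack_ns' nanoseconds so it can be
     * coalesced with other timers.
     */
    static void arm_timer_with_slack(struct hrtimer *timer, ktime_t expiry,
                                     unsigned long slack_ns)
    {
            hrtimer_set_expires_range_ns(timer, expiry, slack_ns);
            hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
    }
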
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 021d8e7..23fd890 100644
@@ -170,6 +170,7 @@ extern struct group_info init_groups;
        .cpu_timers     = INIT_CPU_TIMERS(tsk.cpu_timers),              \
        .fs_excl        = ATOMIC_INIT(0),                               \
        .pi_lock        = __SPIN_LOCK_UNLOCKED(tsk.pi_lock),            \
+       .timer_slack_ns = 50000, /* 50 usec default slack */            \
        .pids = {                                                       \
                [PIDTYPE_PID]  = INIT_PID_LINK(PIDTYPE_PID),            \
                [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID),           \
diff --git a/include/linux/poll.h b/include/linux/poll.h
index ef45382..badd98a 100644
@@ -114,11 +114,13 @@ void zero_fd_set(unsigned long nr, unsigned long *fdset)
 
 #define MAX_INT64_SECONDS (((s64)(~((u64)0)>>1)/HZ)-1)
 
-extern int do_select(int n, fd_set_bits *fds, s64 *timeout);
+extern int do_select(int n, fd_set_bits *fds, struct timespec *end_time);
 extern int do_sys_poll(struct pollfd __user * ufds, unsigned int nfds,
-                      s64 *timeout);
+                      struct timespec *end_time);
 extern int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
-                          fd_set __user *exp, s64 *timeout);
+                          fd_set __user *exp, struct timespec *end_time);
+
+extern int poll_select_set_timeout(struct timespec *to, long sec, long nsec);
 
 #endif /* KERNEL */
 
diff --git a/include/linux/prctl.h b/include/linux/prctl.h
index 5ad7919..48d887e 100644
 #define PR_GET_SECUREBITS 27
 #define PR_SET_SECUREBITS 28
 
+/*
+ * Get/set the timerslack as used by poll/select/nanosleep
+ * A value of 0 means "use default"
+ */
+#define PR_SET_TIMERSLACK 29
+#define PR_GET_TIMERSLACK 30
+
 #endif /* _LINUX_PRCTL_H */
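
For illustration, a minimal userspace sketch (not part of this merge) exercising the two new prctl values; it assumes the kernel/sys.c handler (not shown in the hunks above) returns the current slack in nanoseconds from PR_GET_TIMERSLACK:

    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_SET_TIMERSLACK
    #define PR_SET_TIMERSLACK 29
    #define PR_GET_TIMERSLACK 30
    #endif

    int main(void)
    {
            /* Request 1 msec of timer slack; the argument is in nanoseconds.
             * A value of 0 means "use default", per the comment above. */
            if (prctl(PR_SET_TIMERSLACK, 1000000UL, 0, 0, 0))
                    perror("PR_SET_TIMERSLACK");
            printf("timer slack: %ld ns\n",
                   (long)prctl(PR_GET_TIMERSLACK, 0, 0, 0, 0));
            return 0;
    }
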
diff --git a/include/linux/sched.h b/include/linux/sched.h
index c226c7b..de53c10 100644
@@ -1304,6 +1304,12 @@ struct task_struct {
        int latency_record_count;
        struct latency_record latency_record[LT_SAVECOUNT];
 #endif
+       /*
+        * Time slack values; these are used to round up poll() and
+        * select() etc. timeout values. These are in nanoseconds.
+        */
+       unsigned long timer_slack_ns;
+       unsigned long default_timer_slack_ns;
 };
 
 /*
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 38a5647..e6b820f 100644
@@ -38,6 +38,14 @@ struct restart_block {
 #endif
                        u64 expires;
                } nanosleep;
+               /* For poll */
+               struct {
+                       struct pollfd __user *ufds;
+                       int nfds;
+                       int has_timeout;
+                       unsigned long tv_sec;
+                       unsigned long tv_nsec;
+               } poll;
        };
 };
 
diff --git a/include/linux/time.h b/include/linux/time.h
index 51e883d..c911ef6 100644
@@ -40,6 +40,8 @@ extern struct timezone sys_tz;
 #define NSEC_PER_SEC   1000000000L
 #define FSEC_PER_SEC   1000000000000000L
 
+#define TIME_T_MAX     (time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1)
+
 static inline int timespec_equal(const struct timespec *a,
                                  const struct timespec *b)
 {
@@ -74,6 +76,8 @@ extern unsigned long mktime(const unsigned int year, const unsigned int mon,
                            const unsigned int min, const unsigned int sec);
 
 extern void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec);
+extern struct timespec timespec_add_safe(const struct timespec lhs,
+                                        const struct timespec rhs);
 
 /*
  * sub = lhs - rhs, in normalized form
diff --git a/kernel/fork.c b/kernel/fork.c
index 30de644..37b3e15 100644
@@ -989,6 +989,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        p->prev_utime = cputime_zero;
        p->prev_stime = cputime_zero;
 
+       p->default_timer_slack_ns = current->timer_slack_ns;
+
 #ifdef CONFIG_DETECT_SOFTLOCKUP
        p->last_switch_count = 0;
        p->last_switch_timestamp = 0;
diff --git a/kernel/futex.c b/kernel/futex.c
index 7d1136e..8af1002 100644
@@ -1296,13 +1296,16 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
                if (!abs_time)
                        schedule();
                else {
+                       unsigned long slack;
+                       slack = current->timer_slack_ns;
+                       if (rt_task(current))
+                               slack = 0;
                        hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC,
                                                HRTIMER_MODE_ABS);
                        hrtimer_init_sleeper(&t, current);
-                       t.timer.expires = *abs_time;
+                       hrtimer_set_expires_range_ns(&t.timer, *abs_time, slack);
 
-                       hrtimer_start(&t.timer, t.timer.expires,
-                                               HRTIMER_MODE_ABS);
+                       hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS);
                        if (!hrtimer_active(&t.timer))
                                t.task = NULL;
 
@@ -1404,7 +1407,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
                hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
                                      HRTIMER_MODE_ABS);
                hrtimer_init_sleeper(to, current);
-               to->timer.expires = *time;
+               hrtimer_set_expires(&to->timer, *time);
        }
 
        q.pi_state = NULL;
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index cdec83e..51ee90b 100644
@@ -517,7 +517,7 @@ static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base)
                if (!base->first)
                        continue;
                timer = rb_entry(base->first, struct hrtimer, node);
-               expires = ktime_sub(timer->expires, base->offset);
+               expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
                if (expires.tv64 < cpu_base->expires_next.tv64)
                        cpu_base->expires_next = expires;
        }
@@ -539,10 +539,10 @@ static int hrtimer_reprogram(struct hrtimer *timer,
                             struct hrtimer_clock_base *base)
 {
        ktime_t *expires_next = &__get_cpu_var(hrtimer_bases).expires_next;
-       ktime_t expires = ktime_sub(timer->expires, base->offset);
+       ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
        int res;
 
-       WARN_ON_ONCE(timer->expires.tv64 < 0);
+       WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);
 
        /*
         * When the callback is running, we do not reprogram the clock event
@@ -795,7 +795,7 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
        u64 orun = 1;
        ktime_t delta;
 
-       delta = ktime_sub(now, timer->expires);
+       delta = ktime_sub(now, hrtimer_get_expires(timer));
 
        if (delta.tv64 < 0)
                return 0;
@@ -807,8 +807,8 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
                s64 incr = ktime_to_ns(interval);
 
                orun = ktime_divns(delta, incr);
-               timer->expires = ktime_add_ns(timer->expires, incr * orun);
-               if (timer->expires.tv64 > now.tv64)
+               hrtimer_add_expires_ns(timer, incr * orun);
+               if (hrtimer_get_expires_tv64(timer) > now.tv64)
                        return orun;
                /*
                 * This (and the ktime_add() below) is the
@@ -816,7 +816,7 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
                 */
                orun++;
        }
-       timer->expires = ktime_add_safe(timer->expires, interval);
+       hrtimer_add_expires(timer, interval);
 
        return orun;
 }
@@ -848,7 +848,8 @@ static void enqueue_hrtimer(struct hrtimer *timer,
                 * We dont care about collisions. Nodes with
                 * the same expiry time stay together.
                 */
-               if (timer->expires.tv64 < entry->expires.tv64) {
+               if (hrtimer_get_expires_tv64(timer) <
+                               hrtimer_get_expires_tv64(entry)) {
                        link = &(*link)->rb_left;
                } else {
                        link = &(*link)->rb_right;
@@ -945,9 +946,10 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
 }
 
 /**
- * hrtimer_start - (re)start an relative timer on the current CPU
+ * hrtimer_start_range_ns - (re)start a relative timer on the current CPU
  * @timer:     the timer to be added
  * @tim:       expiry time
+ * @delta_ns:  "slack" range for the timer
  * @mode:      expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL)
  *
  * Returns:
@@ -955,7 +957,8 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
  *  1 when the timer was active
  */
 int
-hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
+hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_ns,
+                       const enum hrtimer_mode mode)
 {
        struct hrtimer_clock_base *base, *new_base;
        unsigned long flags;
@@ -983,7 +986,7 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
 #endif
        }
 
-       timer->expires = tim;
+       hrtimer_set_expires_range_ns(timer, tim, delta_ns);
 
        timer_stats_hrtimer_set_start_info(timer);
 
@@ -1016,8 +1019,26 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
 
        return ret;
 }
+EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
+
+/**
+ * hrtimer_start - (re)start a relative timer on the current CPU
+ * @timer:     the timer to be added
+ * @tim:       expiry time
+ * @mode:      expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL)
+ *
+ * Returns:
+ *  0 on success
+ *  1 when the timer was active
+ */
+int
+hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
+{
+       return hrtimer_start_range_ns(timer, tim, 0, mode);
+}
 EXPORT_SYMBOL_GPL(hrtimer_start);
 
+
 /**
  * hrtimer_try_to_cancel - try to deactivate a timer
  * @timer:     hrtimer to stop
@@ -1077,7 +1098,7 @@ ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
        ktime_t rem;
 
        base = lock_hrtimer_base(timer, &flags);
-       rem = ktime_sub(timer->expires, base->get_time());
+       rem = hrtimer_expires_remaining(timer);
        unlock_hrtimer_base(timer, &flags);
 
        return rem;
@@ -1109,7 +1130,7 @@ ktime_t hrtimer_get_next_event(void)
                                continue;
 
                        timer = rb_entry(base->first, struct hrtimer, node);
-                       delta.tv64 = timer->expires.tv64;
+                       delta.tv64 = hrtimer_get_expires_tv64(timer);
                        delta = ktime_sub(delta, base->get_time());
                        if (delta.tv64 < mindelta.tv64)
                                mindelta.tv64 = delta.tv64;
@@ -1310,10 +1331,23 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 
                        timer = rb_entry(node, struct hrtimer, node);
 
-                       if (basenow.tv64 < timer->expires.tv64) {
+                       /*
+                        * The immediate goal for using the softexpires is
+                        * minimizing wakeups, not running timers at the
+                        * earliest interrupt after their soft expiration.
+                        * This allows us to avoid using a Priority Search
+                        * Tree, which can answer a stabbing query for
+                        * overlapping intervals, and instead use the simple
+                        * BST we already have.
+                        * We don't add extra wakeups by delaying timers that
+                        * are right-of a not yet expired timer, because that
+                        * timer will have to trigger a wakeup anyway.
+                        */
+
+                       if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) {
                                ktime_t expires;
 
-                               expires = ktime_sub(timer->expires,
+                               expires = ktime_sub(hrtimer_get_expires(timer),
                                                    base->offset);
                                if (expires.tv64 < expires_next.tv64)
                                        expires_next = expires;
@@ -1349,6 +1383,36 @@ void hrtimer_interrupt(struct clock_event_device *dev)
                raise_softirq(HRTIMER_SOFTIRQ);
 }
 
+/**
+ * hrtimer_peek_ahead_timers - run soft-expired timers now
+ *
+ * hrtimer_peek_ahead_timers will peek at the timer queue of
+ * the current cpu and check whether any timers have passed
+ * their soft expiry time. If so, they are run immediately and
+ * then removed from the timer queue.
+ *
+ */
+void hrtimer_peek_ahead_timers(void)
+{
+       unsigned long flags;
+       struct tick_device *td;
+       struct clock_event_device *dev;
+
+       if (!hrtimer_hres_active())
+               return;
+
+       local_irq_save(flags);
+       td = &__get_cpu_var(tick_cpu_device);
+       if (!td)
+               goto out;
+       dev = td->evtdev;
+       if (!dev)
+               goto out;
+       hrtimer_interrupt(dev);
+out:
+       local_irq_restore(flags);
+}
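The helper takes no arguments and simply replays the hrtimer interrupt path for the local CPU, so a call site only has to be a context in which hrtimer_interrupt() may safely run; the high-resolution check is done internally. A purely illustrative call site (the surrounding function is hypothetical):

        /* illustrative only: flush soft-expired timers before a long poll */
        static void my_poll_prepare(void)
        {
                hrtimer_peek_ahead_timers();
                /* ... continue with the poll/idle work ... */
        }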
+
 static void run_hrtimer_softirq(struct softirq_action *h)
 {
        run_hrtimer_pending(&__get_cpu_var(hrtimer_bases));
@@ -1416,7 +1480,8 @@ void hrtimer_run_queues(void)
                        struct hrtimer *timer;
 
                        timer = rb_entry(node, struct hrtimer, node);
-                       if (base->softirq_time.tv64 <= timer->expires.tv64)
+                       if (base->softirq_time.tv64 <=
+                                       hrtimer_get_expires_tv64(timer))
                                break;
 
                        if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
@@ -1464,7 +1529,7 @@ static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mod
 
        do {
                set_current_state(TASK_INTERRUPTIBLE);
-               hrtimer_start(&t->timer, t->timer.expires, mode);
+               hrtimer_start_expires(&t->timer, mode);
                if (!hrtimer_active(&t->timer))
                        t->task = NULL;
 
@@ -1486,7 +1551,7 @@ static int update_rmtp(struct hrtimer *timer, struct timespec __user *rmtp)
        struct timespec rmt;
        ktime_t rem;
 
-       rem = ktime_sub(timer->expires, timer->base->get_time());
+       rem = hrtimer_expires_remaining(timer);
        if (rem.tv64 <= 0)
                return 0;
        rmt = ktime_to_timespec(rem);
@@ -1505,7 +1570,7 @@ long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
 
        hrtimer_init_on_stack(&t.timer, restart->nanosleep.index,
                                HRTIMER_MODE_ABS);
-       t.timer.expires.tv64 = restart->nanosleep.expires;
+       hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
 
        if (do_nanosleep(&t, HRTIMER_MODE_ABS))
                goto out;
@@ -1530,9 +1595,14 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
        struct restart_block *restart;
        struct hrtimer_sleeper t;
        int ret = 0;
+       unsigned long slack;
+
+       slack = current->timer_slack_ns;
+       if (rt_task(current))
+               slack = 0;
 
        hrtimer_init_on_stack(&t.timer, clockid, mode);
-       t.timer.expires = timespec_to_ktime(*rqtp);
+       hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
        if (do_nanosleep(&t, mode))
                goto out;
 
@@ -1552,7 +1622,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
        restart->fn = hrtimer_nanosleep_restart;
        restart->nanosleep.index = t.timer.base->index;
        restart->nanosleep.rmtp = rmtp;
-       restart->nanosleep.expires = t.timer.expires.tv64;
+       restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);
 
        ret = -ERESTART_RESTARTBLOCK;
 out:
@@ -1753,3 +1823,103 @@ void __init hrtimers_init(void)
 #endif
 }
 
+/**
+ * schedule_hrtimeout_range - sleep until timeout
+ * @expires:   timeout value (ktime_t)
+ * @delta:     slack in expires timeout (ktime_t)
+ * @mode:      timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
+ *
+ * Make the current task sleep until the given expiry time has
+ * elapsed. The routine will return immediately unless
+ * the current task state has been set (see set_current_state()).
+ *
+ * The @delta argument gives the kernel the freedom to schedule the
+ * actual wakeup to a time that is both power and performance friendly.
+ * The kernel gives the normal best effort behavior for "@expires+@delta",
+ * but may fire the timer earlier, though never earlier than @expires.
+ *
+ * You can set the task state as follows -
+ *
+ * %TASK_UNINTERRUPTIBLE - at least the requested @expires time is
+ * guaranteed to pass before the routine returns.
+ *
+ * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
+ * delivered to the current task.
+ *
+ * The current task state is guaranteed to be TASK_RUNNING when this
+ * routine returns.
+ *
+ * Returns 0 when the timer has expired, otherwise -EINTR
+ */
+int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
+                              const enum hrtimer_mode mode)
+{
+       struct hrtimer_sleeper t;
+
+       /*
+        * Optimize when a zero timeout value is given. It does not
+        * matter whether this is an absolute or a relative time.
+        */
+       if (expires && !expires->tv64) {
+               __set_current_state(TASK_RUNNING);
+               return 0;
+       }
+
+       /*
+        * A NULL parameter means "infinite"
+        */
+       if (!expires) {
+               schedule();
+               __set_current_state(TASK_RUNNING);
+               return -EINTR;
+       }
+
+       hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, mode);
+       hrtimer_set_expires_range_ns(&t.timer, *expires, delta);
+
+       hrtimer_init_sleeper(&t, current);
+
+       hrtimer_start_expires(&t.timer, mode);
+       if (!hrtimer_active(&t.timer))
+               t.task = NULL;
+
+       if (likely(t.task))
+               schedule();
+
+       hrtimer_cancel(&t.timer);
+       destroy_hrtimer_on_stack(&t.timer);
+
+       __set_current_state(TASK_RUNNING);
+
+       return !t.task ? 0 : -EINTR;
+}
+EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);
+
+/**
+ * schedule_hrtimeout - sleep until timeout
+ * @expires:   timeout value (ktime_t)
+ * @mode:      timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
+ *
+ * Make the current task sleep until the given expiry time has
+ * elapsed. The routine will return immediately unless
+ * the current task state has been set (see set_current_state()).
+ *
+ * You can set the task state as follows -
+ *
+ * %TASK_UNINTERRUPTIBLE - at least the requested @expires time is
+ * guaranteed to pass before the routine returns.
+ *
+ * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
+ * delivered to the current task.
+ *
+ * The current task state is guaranteed to be TASK_RUNNING when this
+ * routine returns.
+ *
+ * Returns 0 when the timer has expired, otherwise -EINTR
+ */
+int __sched schedule_hrtimeout(ktime_t *expires,
+                              const enum hrtimer_mode mode)
+{
+       return schedule_hrtimeout_range(expires, 0, mode);
+}
+EXPORT_SYMBOL_GPL(schedule_hrtimeout);
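A hedged usage sketch of the two new sleep primitives in driver-style code (the 2 ms delay, the 100 us slack, and the error handling are illustrative):

        ktime_t delay = ktime_set(0, 2 * NSEC_PER_MSEC);

        /* interruptible sleep of ~2 ms, allowing up to 100 us of coalescing */
        set_current_state(TASK_INTERRUPTIBLE);
        if (schedule_hrtimeout_range(&delay, 100 * NSEC_PER_USEC,
                                     HRTIMER_MODE_REL) == -EINTR)
                return -ERESTARTSYS;

        /* the zero-slack wrapper behaves like a plain hrtimer sleep */
        set_current_state(TASK_UNINTERRUPTIBLE);
        schedule_hrtimeout(&delay, HRTIMER_MODE_REL);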
index 5131e54..ee20458 100644 (file)
@@ -668,7 +668,7 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
            (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
                timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv);
 
-       remaining = ktime_sub(timer->expires, now);
+       remaining = ktime_sub(hrtimer_get_expires(timer), now);
        /* Return 0 only, when the timer is expired and not pending */
        if (remaining.tv64 <= 0) {
                /*
@@ -762,7 +762,7 @@ common_timer_set(struct k_itimer *timr, int flags,
        hrtimer_init(&timr->it.real.timer, timr->it_clock, mode);
        timr->it.real.timer.function = posix_timer_fn;
 
-       timer->expires = timespec_to_ktime(new_setting->it_value);
+       hrtimer_set_expires(timer, timespec_to_ktime(new_setting->it_value));
 
        /* Convert interval */
        timr->it.real.interval = timespec_to_ktime(new_setting->it_interval);
@@ -771,14 +771,12 @@ common_timer_set(struct k_itimer *timr, int flags,
        if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) {
                /* Setup correct expiry time for relative timers */
                if (mode == HRTIMER_MODE_REL) {
-                       timer->expires =
-                               ktime_add_safe(timer->expires,
-                                              timer->base->get_time());
+                       hrtimer_add_expires(timer, timer->base->get_time());
                }
                return 0;
        }
 
-       hrtimer_start(timer, timer->expires, mode);
+       hrtimer_start_expires(timer, mode);
        return 0;
 }
 
index 6522ae5..69d9cb9 100644 (file)
@@ -631,8 +631,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 
        /* Setup the timer, when timeout != NULL */
        if (unlikely(timeout)) {
-               hrtimer_start(&timeout->timer, timeout->timer.expires,
-                             HRTIMER_MODE_ABS);
+               hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
                if (!hrtimer_active(&timeout->timer))
                        timeout->task = NULL;
        }
index 6f23059..eb3c729 100644 (file)
@@ -226,9 +226,8 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
 
                now = hrtimer_cb_get_time(&rt_b->rt_period_timer);
                hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period);
-               hrtimer_start(&rt_b->rt_period_timer,
-                             rt_b->rt_period_timer.expires,
-                             HRTIMER_MODE_ABS);
+               hrtimer_start_expires(&rt_b->rt_period_timer,
+                               HRTIMER_MODE_ABS);
        }
        spin_unlock(&rt_b->rt_runtime_lock);
 }
@@ -1063,7 +1062,7 @@ static void hrtick_start(struct rq *rq, u64 delay)
        struct hrtimer *timer = &rq->hrtick_timer;
        ktime_t time = ktime_add_ns(timer->base->get_time(), delay);
 
-       timer->expires = time;
+       hrtimer_set_expires(timer, time);
 
        if (rq == this_rq()) {
                hrtimer_restart(timer);
index 0bc8fa3..fc71f99 100644 (file)
@@ -1739,6 +1739,16 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
                case PR_SET_TSC:
                        error = SET_TSC_CTL(arg2);
                        break;
+               case PR_GET_TIMERSLACK:
+                       error = current->timer_slack_ns;
+                       break;
+               case PR_SET_TIMERSLACK:
+                       if (arg2 <= 0)
+                               current->timer_slack_ns =
+                                       current->default_timer_slack_ns;
+                       else
+                               current->timer_slack_ns = arg2;
+                       break;
                default:
                        error = -EINVAL;
                        break;
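From userspace, the new prctl pair reads and writes the calling task's timer slack in nanoseconds, and passing 0 to PR_SET_TIMERSLACK restores the task's default. A small illustrative program (error handling trimmed; the constants come from <linux/prctl.h> and may need to be defined by hand on older headers):

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_TIMERSLACK
#define PR_SET_TIMERSLACK 29
#define PR_GET_TIMERSLACK 30
#endif

int main(void)
{
        long slack = prctl(PR_GET_TIMERSLACK, 0, 0, 0, 0);

        printf("current timer slack: %ld ns\n", slack);

        /* allow the kernel to defer this task's timers by up to 1 ms */
        prctl(PR_SET_TIMERSLACK, 1000000, 0, 0, 0);

        /* 0 restores the task's default slack */
        prctl(PR_SET_TIMERSLACK, 0, 0, 0, 0);
        return 0;
}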
index 6a08660..d63a433 100644 (file)
@@ -669,3 +669,21 @@ EXPORT_SYMBOL(get_jiffies_64);
 #endif
 
 EXPORT_SYMBOL(jiffies);
+
+/*
+ * Add two timespec values and do a safety check for overflow.
+ * It's assumed that both values are valid (>= 0)
+ */
+struct timespec timespec_add_safe(const struct timespec lhs,
+                                 const struct timespec rhs)
+{
+       struct timespec res;
+
+       set_normalized_timespec(&res, lhs.tv_sec + rhs.tv_sec,
+                               lhs.tv_nsec + rhs.tv_nsec);
+
+       if (res.tv_sec < lhs.tv_sec || res.tv_sec < rhs.tv_sec)
+               res.tv_sec = TIME_T_MAX;
+
+       return res;
+}
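An illustration of the normalization and the overflow clamp (TIME_T_MAX comes from the same series; the values are made up):

        struct timespec a = { .tv_sec = 5, .tv_nsec = 900000000 };
        struct timespec b = { .tv_sec = 1, .tv_nsec = 300000000 };
        struct timespec c = timespec_add_safe(a, b);
        /* c = { 7, 200000000 }: the nanoseconds carried into the seconds */

        struct timespec big = { .tv_sec = TIME_T_MAX, .tv_nsec = 0 };
        struct timespec d   = timespec_add_safe(big, b);
        /* d.tv_sec wrapped below TIME_T_MAX, so it is clamped to TIME_T_MAX */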
index 1ad46f3..9c114b7 100644 (file)
@@ -142,8 +142,7 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
                time_state = TIME_OOP;
                printk(KERN_NOTICE "Clock: "
                       "inserting leap second 23:59:60 UTC\n");
-               leap_timer.expires = ktime_add_ns(leap_timer.expires,
-                                                 NSEC_PER_SEC);
+               hrtimer_add_expires_ns(&leap_timer, NSEC_PER_SEC);
                res = HRTIMER_RESTART;
                break;
        case TIME_DEL:
index b711ffc..a547be1 100644 (file)
@@ -300,7 +300,7 @@ void tick_nohz_stop_sched_tick(int inidle)
                                goto out;
                        }
 
-                       ts->idle_tick = ts->sched_timer.expires;
+                       ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
                        ts->tick_stopped = 1;
                        ts->idle_jiffies = last_jiffies;
                        rcu_enter_nohz();
@@ -431,21 +431,21 @@ void tick_nohz_restart_sched_tick(void)
        ts->tick_stopped  = 0;
        ts->idle_exittime = now;
        hrtimer_cancel(&ts->sched_timer);
-       ts->sched_timer.expires = ts->idle_tick;
+       hrtimer_set_expires(&ts->sched_timer, ts->idle_tick);
 
        while (1) {
                /* Forward the time to expire in the future */
                hrtimer_forward(&ts->sched_timer, now, tick_period);
 
                if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
-                       hrtimer_start(&ts->sched_timer,
-                                     ts->sched_timer.expires,
+                       hrtimer_start_expires(&ts->sched_timer,
                                      HRTIMER_MODE_ABS);
                        /* Check, if the timer was already in the past */
                        if (hrtimer_active(&ts->sched_timer))
                                break;
                } else {
-                       if (!tick_program_event(ts->sched_timer.expires, 0))
+                       if (!tick_program_event(
+                               hrtimer_get_expires(&ts->sched_timer), 0))
                                break;
                }
                /* Update jiffies and reread time */
@@ -458,7 +458,7 @@ void tick_nohz_restart_sched_tick(void)
 static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now)
 {
        hrtimer_forward(&ts->sched_timer, now, tick_period);
-       return tick_program_event(ts->sched_timer.expires, 0);
+       return tick_program_event(hrtimer_get_expires(&ts->sched_timer), 0);
 }
 
 /*
@@ -541,7 +541,7 @@ static void tick_nohz_switch_to_nohz(void)
        next = tick_init_jiffy_update();
 
        for (;;) {
-               ts->sched_timer.expires = next;
+               hrtimer_set_expires(&ts->sched_timer, next);
                if (!tick_program_event(next, 0))
                        break;
                next = ktime_add(next, tick_period);
@@ -637,16 +637,15 @@ void tick_setup_sched_timer(void)
        ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
 
        /* Get the next period (per cpu) */
-       ts->sched_timer.expires = tick_init_jiffy_update();
+       hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
        offset = ktime_to_ns(tick_period) >> 1;
        do_div(offset, num_possible_cpus());
        offset *= smp_processor_id();
-       ts->sched_timer.expires = ktime_add_ns(ts->sched_timer.expires, offset);
+       hrtimer_add_expires_ns(&ts->sched_timer, offset);
 
        for (;;) {
                hrtimer_forward(&ts->sched_timer, now, tick_period);
-               hrtimer_start(&ts->sched_timer, ts->sched_timer.expires,
-                             HRTIMER_MODE_ABS);
+               hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS);
                /* Check, if the timer was already in the past */
                if (hrtimer_active(&ts->sched_timer))
                        break;
index a40e20f..122ee75 100644 (file)
@@ -65,9 +65,11 @@ print_timer(struct seq_file *m, struct hrtimer *timer, int idx, u64 now)
        SEQ_printf(m, ", %s/%d", tmp, timer->start_pid);
 #endif
        SEQ_printf(m, "\n");
-       SEQ_printf(m, " # expires at %Lu nsecs [in %Ld nsecs]\n",
-               (unsigned long long)ktime_to_ns(timer->expires),
-               (long long)(ktime_to_ns(timer->expires) - now));
+       SEQ_printf(m, " # expires at %Lu-%Lu nsecs [in %Ld to %Ld nsecs]\n",
+               (unsigned long long)ktime_to_ns(hrtimer_get_softexpires(timer)),
+               (unsigned long long)ktime_to_ns(hrtimer_get_expires(timer)),
+               (long long)(ktime_to_ns(hrtimer_get_softexpires(timer)) - now),
+               (long long)(ktime_to_ns(hrtimer_get_expires(timer)) - now));
 }
 
 static void
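With the range printing, an entry in /proc/timer_list now shows both bounds of a timer; a made-up example of the new line format (here with 50 us of slack):

 # expires at 115000213000-115000263000 nsecs [in 49300 to 99300 nsecs]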
index 8b06fa9..03e389e 100644 (file)
@@ -545,9 +545,10 @@ static void cbq_ovl_delay(struct cbq_class *cl)
                        expires = ktime_set(0, 0);
                        expires = ktime_add_ns(expires, PSCHED_US2NS(sched));
                        if (hrtimer_try_to_cancel(&q->delay_timer) &&
-                           ktime_to_ns(ktime_sub(q->delay_timer.expires,
-                                                 expires)) > 0)
-                               q->delay_timer.expires = expires;
+                           ktime_to_ns(ktime_sub(
+                                       hrtimer_get_expires(&q->delay_timer),
+                                       expires)) > 0)
+                               hrtimer_set_expires(&q->delay_timer, expires);
                        hrtimer_restart(&q->delay_timer);
                        cl->delayed = 1;
                        cl->xstats.overactions++;
index e341f3f..1f42e40 100644 (file)
@@ -34,7 +34,7 @@ enum hrtimer_restart pcsp_do_timer(struct hrtimer *handle)
                chip->thalf = 0;
                if (!atomic_read(&chip->timer_active))
                        return HRTIMER_NORESTART;
-               hrtimer_forward(&chip->timer, chip->timer.expires,
+               hrtimer_forward(&chip->timer, hrtimer_get_expires(&chip->timer),
                                ktime_set(0, chip->ns_rem));
                return HRTIMER_RESTART;
        }
@@ -118,7 +118,8 @@ enum hrtimer_restart pcsp_do_timer(struct hrtimer *handle)
        chip->ns_rem = PCSP_PERIOD_NS();
        ns = (chip->thalf ? PCSP_CALC_NS(timer_cnt) : chip->ns_rem);
        chip->ns_rem -= ns;
-       hrtimer_forward(&chip->timer, chip->timer.expires, ktime_set(0, ns));
+       hrtimer_forward(&chip->timer, hrtimer_get_expires(&chip->timer),
+                                                       ktime_set(0, ns));
        return HRTIMER_RESTART;
 
 exit_nr_unlock2: