irq: enable suspended EARLY_RESUME irqs forcefully if not resumed
[linux-3.10.git] / kernel / hung_task.c
index 481ca8b..6df6149 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -13,7 +13,7 @@
 #include <linux/freezer.h>
 #include <linux/kthread.h>
 #include <linux/lockdep.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/sysctl.h>
 
 /*
@@ -33,8 +33,7 @@ unsigned long __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT;
 /*
  * Zero means infinite timeout - no checking done:
  */
-unsigned long __read_mostly sysctl_hung_task_timeout_secs = 120;
-static unsigned long __read_mostly hung_task_poll_jiffies;
+unsigned long __read_mostly sysctl_hung_task_timeout_secs = CONFIG_DEFAULT_HUNG_TASK_TIMEOUT;
 
 unsigned long __read_mostly sysctl_hung_task_warnings = 10;
 
@@ -69,33 +68,29 @@ static struct notifier_block panic_block = {
        .notifier_call = hung_task_panic,
 };
 
-/*
- * Returns seconds, approximately.  We don't need nanosecond
- * resolution, and we don't need to waste time with a big divide when
- * 2^30ns == 1.074s.
- */
-static unsigned long get_timestamp(void)
-{
-       int this_cpu = raw_smp_processor_id();
-
-       return cpu_clock(this_cpu) >> 30LL;  /* 2^30 ~= 10^9 */
-}
-
-static void check_hung_task(struct task_struct *t, unsigned long now,
-                           unsigned long timeout)
+static void check_hung_task(struct task_struct *t, unsigned long timeout)
 {
        unsigned long switch_count = t->nvcsw + t->nivcsw;
 
-       if (t->flags & PF_FROZEN)
+       /*
+        * Ensure the task is not frozen.
+        * Also, skip vfork and any other user process that the freezer should skip.
+        */
+       if (unlikely(t->flags & (PF_FROZEN | PF_FREEZER_SKIP)))
+               return;
+
+       /*
+        * When a freshly created task is scheduled once and changes its state
+        * to TASK_UNINTERRUPTIBLE without ever having been switched out, it
+        * mustn't be checked.
+        */
+       if (unlikely(!switch_count))
                return;
 
-       if (switch_count != t->last_switch_count || !t->last_switch_timestamp) {
+       if (switch_count != t->last_switch_count) {
                t->last_switch_count = switch_count;
-               t->last_switch_timestamp = now;
                return;
        }
-       if ((long)(now - t->last_switch_timestamp) < timeout)
-               return;
        if (!sysctl_hung_task_warnings)
                return;
        sysctl_hung_task_warnings--;
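
The rewritten check above drops the per-task timestamp entirely: a task is
reported only when its nvcsw + nivcsw sum is unchanged between two consecutive
watchdog passes, i.e. it has not been scheduled out even once during a full
timeout interval. A minimal userspace model of that heuristic follows; it is
illustrative only, not kernel code, and the struct and function names are
invented for the example.

    #include <stdbool.h>
    #include <stdio.h>

    struct task_model {
            unsigned long nvcsw, nivcsw;      /* context-switch counters */
            unsigned long last_switch_count;  /* snapshot from the previous pass */
    };

    /* One watchdog pass over a task currently in TASK_UNINTERRUPTIBLE. */
    static bool looks_hung(struct task_model *t)
    {
            unsigned long switch_count = t->nvcsw + t->nivcsw;

            if (switch_count != t->last_switch_count) {
                    t->last_switch_count = switch_count;  /* it ran; re-arm and wait */
                    return false;
            }
            return true;  /* no switches for an entire timeout interval */
    }

    int main(void)
    {
            struct task_model t = { .nvcsw = 3, .nivcsw = 1 };

            printf("pass 1: %d\n", looks_hung(&t));  /* 0: snapshot taken */
            printf("pass 2: %d\n", looks_hung(&t));  /* 1: unchanged since last pass */
            return 0;
    }
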
@@ -109,13 +104,14 @@ static void check_hung_task(struct task_struct *t, unsigned long now,
        printk(KERN_ERR "\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
                        " disables this message.\n");
        sched_show_task(t);
-       __debug_show_held_locks(t);
+       debug_show_held_locks(t);
 
-       t->last_switch_timestamp = now;
        touch_nmi_watchdog();
 
-       if (sysctl_hung_task_panic)
+       if (sysctl_hung_task_panic) {
+               trigger_all_cpu_backtrace();
                panic("hung_task: blocked tasks");
+       }
 }
 
 /*
@@ -123,17 +119,22 @@ static void check_hung_task(struct task_struct *t, unsigned long now,
  * periodically exit the critical section and enter a new one.
  *
  * For preemptible RCU it is sufficient to call rcu_read_unlock in order
- * exit the grace period. For classic RCU, a reschedule is required.
+ * to exit the grace period. For classic RCU, a reschedule is required.
  */
-static void rcu_lock_break(struct task_struct *g, struct task_struct *t)
+static bool rcu_lock_break(struct task_struct *g, struct task_struct *t)
 {
+       bool can_cont;
+
        get_task_struct(g);
        get_task_struct(t);
        rcu_read_unlock();
        cond_resched();
        rcu_read_lock();
+       can_cont = pid_alive(g) && pid_alive(t);
        put_task_struct(t);
        put_task_struct(g);
+
+       return can_cont;
 }
 
 /*
@@ -145,7 +146,6 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
 {
        int max_count = sysctl_hung_task_check_count;
        int batch_count = HUNG_TASK_BATCHING;
-       unsigned long now = get_timestamp();
        struct task_struct *g, *t;
 
        /*
@@ -155,50 +155,43 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
        if (test_taint(TAINT_DIE) || did_panic)
                return;
 
-       read_lock(&tasklist_lock);
+       rcu_read_lock();
        do_each_thread(g, t) {
-               if (!--max_count)
+               if (!max_count--)
                        goto unlock;
                if (!--batch_count) {
                        batch_count = HUNG_TASK_BATCHING;
-                       rcu_lock_break(g, t);
-                       /* Exit if t or g was unhashed during refresh. */
-                       if (t->state == TASK_DEAD || g->state == TASK_DEAD)
+                       if (!rcu_lock_break(g, t))
                                goto unlock;
                }
                /* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */
                if (t->state == TASK_UNINTERRUPTIBLE)
-                       check_hung_task(t, now, timeout);
+                       check_hung_task(t, timeout);
        } while_each_thread(g, t);
  unlock:
-       read_unlock(&tasklist_lock);
+       rcu_read_unlock();
 }
 
-static void update_poll_jiffies(void)
+static unsigned long timeout_jiffies(unsigned long timeout)
 {
        /* timeout of 0 will disable the watchdog */
-       if (sysctl_hung_task_timeout_secs == 0)
-               hung_task_poll_jiffies = MAX_SCHEDULE_TIMEOUT;
-       else
-               hung_task_poll_jiffies = sysctl_hung_task_timeout_secs * HZ / 2;
+       return timeout ? timeout * HZ : MAX_SCHEDULE_TIMEOUT;
 }
 
 /*
  * Process updating of timeout sysctl
  */
 int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
-                                 struct file *filp, void __user *buffer,
+                                 void __user *buffer,
                                  size_t *lenp, loff_t *ppos)
 {
        int ret;
 
-       ret = proc_doulongvec_minmax(table, write, filp, buffer, lenp, ppos);
+       ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 
        if (ret || !write)
                goto out;
 
-       update_poll_jiffies();
-
        wake_up_process(watchdog_task);
 
  out:
@@ -211,20 +204,14 @@ int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
 static int watchdog(void *dummy)
 {
        set_user_nice(current, 0);
-       update_poll_jiffies();
 
        for ( ; ; ) {
-               unsigned long timeout;
+               unsigned long timeout = sysctl_hung_task_timeout_secs;
 
-               while (schedule_timeout_interruptible(hung_task_poll_jiffies));
+               while (schedule_timeout_interruptible(timeout_jiffies(timeout)))
+                       timeout = sysctl_hung_task_timeout_secs;
 
-               /*
-                * Need to cache timeout here to avoid timeout being set
-                * to 0 via sysctl while inside check_hung_*_tasks().
-                */
-               timeout = sysctl_hung_task_timeout_secs;
-               if (timeout)
-                       check_hung_uninterruptible_tasks(timeout);
+               check_hung_uninterruptible_tasks(timeout);
        }
 
        return 0;
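
The watchdog loop relies on schedule_timeout_interruptible() returning a
nonzero remaining-jiffies count when the thread is woken before the timeout
expires, which is what the wake_up_process() in the proc handler causes after
a sysctl write: the loop then re-reads sysctl_hung_task_timeout_secs and
restarts the sleep, so a new value takes effect immediately. A rough userspace
analogue of that behaviour is sketched below; SIGUSR1 stands in for the
wakeup, and the "0 disables checking" case that timeout_jiffies() maps to
MAX_SCHEDULE_TIMEOUT is omitted.

    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <time.h>

    static volatile sig_atomic_t timeout_secs = 120;  /* stands in for the sysctl */

    static void on_update(int sig)
    {
            (void)sig;
            timeout_secs = 10;  /* pretend the sysctl was just lowered */
    }

    int main(void)
    {
            struct sigaction sa;

            memset(&sa, 0, sizeof(sa));
            sa.sa_handler = on_update;  /* no SA_RESTART: nanosleep returns EINTR */
            sigaction(SIGUSR1, &sa, NULL);

            for (;;) {
                    struct timespec ts = { .tv_sec = timeout_secs, .tv_nsec = 0 };

                    /*
                     * Like the schedule_timeout_interruptible() loop above: if
                     * the sleep is cut short, re-read the timeout and start
                     * over, so the updated value is honoured right away.
                     */
                    while (nanosleep(&ts, NULL) != 0) {
                            ts.tv_sec = timeout_secs;
                            ts.tv_nsec = 0;
                    }

                    printf("checking for hung tasks (timeout=%ld)\n",
                           (long)timeout_secs);
            }
            return 0;
    }
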