diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index ca198a797bfab87691616f05da95c16912bd105b..467ca72f1657a3db824cdb460d8684b65955f8d7 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -31,9 +31,9 @@
 /*
  * Ease the printing of nsec fields:
  */
-static long long nsec_high(long long nsec)
+static long long nsec_high(unsigned long long nsec)
 {
-       if (nsec < 0) {
+       if ((long long)nsec < 0) {
                nsec = -nsec;
                do_div(nsec, 1000000);
                return -nsec;
@@ -43,9 +43,9 @@ static long long nsec_high(long long nsec)
        return nsec;
 }
 
-static unsigned long nsec_low(long long nsec)
+static unsigned long nsec_low(unsigned long long nsec)
 {
-       if (nsec < 0)
+       if ((long long)nsec < 0)
                nsec = -nsec;
 
        return do_div(nsec, 1000000);
@@ -53,6 +53,40 @@ static unsigned long nsec_low(long long nsec)
 
 #define SPLIT_NS(x) nsec_high(x), nsec_low(x)
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+static void print_cfs_group_stats(struct seq_file *m, int cpu,
+               struct task_group *tg)
+{
+       struct sched_entity *se = tg->se[cpu];
+       if (!se)
+               return;
+
+#define P(F) \
+       SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)F)
+#define PN(F) \
+       SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
+
+       PN(se->exec_start);
+       PN(se->vruntime);
+       PN(se->sum_exec_runtime);
+#ifdef CONFIG_SCHEDSTATS
+       PN(se->wait_start);
+       PN(se->sleep_start);
+       PN(se->block_start);
+       PN(se->sleep_max);
+       PN(se->block_max);
+       PN(se->exec_max);
+       PN(se->slice_max);
+       PN(se->wait_max);
+       PN(se->wait_sum);
+       P(se->wait_count);
+#endif
+       P(se->load.weight);
+#undef PN
+#undef P
+}
+#endif
+
 static void
 print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
 {
@@ -67,14 +101,24 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
                (long long)(p->nvcsw + p->nivcsw),
                p->prio);
 #ifdef CONFIG_SCHEDSTATS
-       SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld\n",
+       SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
                SPLIT_NS(p->se.vruntime),
                SPLIT_NS(p->se.sum_exec_runtime),
                SPLIT_NS(p->se.sum_sleep_runtime));
 #else
-       SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld\n",
+       SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld",
                0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
 #endif
+
+#ifdef CONFIG_CGROUP_SCHED
+       {
+               char path[64];
+
+               cgroup_path(task_group(p)->css.cgroup, path, sizeof(path));
+               SEQ_printf(m, " %s", path);
+       }
+#endif
+       SEQ_printf(m, "\n");
 }
 
 static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
@@ -101,6 +145,19 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
        read_unlock_irqrestore(&tasklist_lock, flags);
 }
 
+#if defined(CONFIG_CGROUP_SCHED) && \
+       (defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED))
+static void task_group_path(struct task_group *tg, char *buf, int buflen)
+{
+       /* may be NULL if the underlying cgroup isn't fully-created yet */
+       if (!tg->css.cgroup) {
+               buf[0] = '\0';
+               return;
+       }
+       cgroup_path(tg->css.cgroup, buf, buflen);
+}
+#endif
+
 void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 {
        s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
@@ -109,8 +166,21 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
        struct sched_entity *last;
        unsigned long flags;
 
-       SEQ_printf(m, "\ncfs_rq\n");
+#if defined(CONFIG_CGROUP_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED)
+       char path[128];
+       struct task_group *tg = cfs_rq->tg;
 
+       task_group_path(tg, path, sizeof(path));
+
+       SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, path);
+#elif defined(CONFIG_USER_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED)
+       {
+               uid_t uid = cfs_rq->tg->uid;
+               SEQ_printf(m, "\ncfs_rq[%d] for UID: %u\n", cpu, uid);
+       }
+#else
+       SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
+#endif
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
                        SPLIT_NS(cfs_rq->exec_clock));
 
@@ -120,7 +190,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
        last = __pick_last_entity(cfs_rq);
        if (last)
                max_vruntime = last->vruntime;
-       min_vruntime = rq->cfs.min_vruntime;
+       min_vruntime = cfs_rq->min_vruntime;
        rq0_min_vruntime = per_cpu(runqueues, 0).cfs.min_vruntime;
        spin_unlock_irqrestore(&rq->lock, flags);
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
@@ -137,12 +207,43 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
                        SPLIT_NS(spread0));
        SEQ_printf(m, "  .%-30s: %ld\n", "nr_running", cfs_rq->nr_running);
        SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
-#ifdef CONFIG_SCHEDSTATS
-       SEQ_printf(m, "  .%-30s: %d\n", "bkl_count",
-                       rq->bkl_count);
-#endif
-       SEQ_printf(m, "  .%-30s: %ld\n", "nr_spread_over",
+
+       SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
                        cfs_rq->nr_spread_over);
+#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_SMP
+       SEQ_printf(m, "  .%-30s: %lu\n", "shares", cfs_rq->shares);
+#endif
+       print_cfs_group_stats(m, cpu, cfs_rq->tg);
+#endif
+}
+
+void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
+{
+#if defined(CONFIG_CGROUP_SCHED) && defined(CONFIG_RT_GROUP_SCHED)
+       char path[128];
+       struct task_group *tg = rt_rq->tg;
+
+       task_group_path(tg, path, sizeof(path));
+
+       SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, path);
+#else
+       SEQ_printf(m, "\nrt_rq[%d]:\n", cpu);
+#endif
+
+
+#define P(x) \
+       SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
+#define PN(x) \
+       SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))
+
+       P(rt_nr_running);
+       P(rt_throttled);
+       PN(rt_time);
+       PN(rt_runtime);
+
+#undef PN
+#undef P
 }
 
 static void print_cpu(struct seq_file *m, int cpu)
@@ -171,16 +272,9 @@ static void print_cpu(struct seq_file *m, int cpu)
        P(nr_switches);
        P(nr_load_updates);
        P(nr_uninterruptible);
-       SEQ_printf(m, "  .%-30s: %lu\n", "jiffies", jiffies);
        PN(next_balance);
        P(curr->pid);
        PN(clock);
-       PN(idle_clock);
-       PN(prev_clock_raw);
-       P(clock_warps);
-       P(clock_overflows);
-       P(clock_deep_idle_events);
-       PN(clock_max_delta);
        P(cpu_load[0]);
        P(cpu_load[1]);
        P(cpu_load[2]);
@@ -189,7 +283,24 @@ static void print_cpu(struct seq_file *m, int cpu)
 #undef P
 #undef PN
 
+#ifdef CONFIG_SCHEDSTATS
+#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, rq->n);
+
+       P(yld_count);
+
+       P(sched_switch);
+       P(sched_count);
+       P(sched_goidle);
+
+       P(ttwu_count);
+       P(ttwu_local);
+
+       P(bkl_count);
+
+#undef P
+#endif
        print_cfs_stats(m, cpu);
+       print_rt_stats(m, cpu);
 
        print_rq(m, rq, cpu);
 }
@@ -199,7 +310,7 @@ static int sched_debug_show(struct seq_file *m, void *v)
        u64 now = ktime_to_ns(ktime_get());
        int cpu;
 
-       SEQ_printf(m, "Sched Debug Version: v0.06-v22, %s %.*s\n",
+       SEQ_printf(m, "Sched Debug Version: v0.09, %s %.*s\n",
                init_utsname()->release,
                (int)strcspn(init_utsname()->version, " "),
                init_utsname()->version);
@@ -210,10 +321,10 @@ static int sched_debug_show(struct seq_file *m, void *v)
        SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
 #define PN(x) \
        SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
+       P(jiffies);
        PN(sysctl_sched_latency);
        PN(sysctl_sched_min_granularity);
        PN(sysctl_sched_wakeup_granularity);
-       PN(sysctl_sched_batch_wakeup_granularity);
        PN(sysctl_sched_child_runs_first);
        P(sysctl_sched_features);
 #undef PN
@@ -248,12 +359,9 @@ static int __init init_sched_debug_procfs(void)
 {
        struct proc_dir_entry *pe;
 
-       pe = create_proc_entry("sched_debug", 0644, NULL);
+       pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops);
        if (!pe)
                return -ENOMEM;
-
-       pe->proc_fops = &sched_debug_fops;
-
        return 0;
 }
 
@@ -265,12 +373,10 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
        unsigned long flags;
        int num_threads = 1;
 
-       rcu_read_lock();
        if (lock_task_sighand(p, &flags)) {
                num_threads = atomic_read(&p->signal->count);
                unlock_task_sighand(p, &flags);
        }
-       rcu_read_unlock();
 
        SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid, num_threads);
        SEQ_printf(m,
@@ -287,6 +393,8 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
        PN(se.exec_start);
        PN(se.vruntime);
        PN(se.sum_exec_runtime);
+       PN(se.avg_overlap);
+       PN(se.avg_wakeup);
 
        nr_switches = p->nvcsw + p->nivcsw;
 
@@ -299,6 +407,8 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
        PN(se.exec_max);
        PN(se.slice_max);
        PN(se.wait_max);
+       PN(se.wait_sum);
+       P(se.wait_count);
        P(sched_info.bkl_count);
        P(se.nr_migrations);
        P(se.nr_migrations_cold);
@@ -327,10 +437,12 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
                        avg_atom = -1LL;
 
                avg_per_cpu = p->se.sum_exec_runtime;
-               if (p->se.nr_migrations)
-                       avg_per_cpu = div64_64(avg_per_cpu, p->se.nr_migrations);
-               else
+               if (p->se.nr_migrations) {
+                       avg_per_cpu = div64_u64(avg_per_cpu,
+                                               p->se.nr_migrations);
+               } else {
                        avg_per_cpu = -1LL;
+               }
 
                __PN(avg_atom);
                __PN(avg_per_cpu);
@@ -351,10 +463,11 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 #undef __P
 
        {
+               unsigned int this_cpu = raw_smp_processor_id();
                u64 t0, t1;
 
-               t0 = sched_clock();
-               t1 = sched_clock();
+               t0 = cpu_clock(this_cpu);
+               t1 = cpu_clock(this_cpu);
                SEQ_printf(m, "%-35s:%21Ld\n",
                           "clock-delta", (long long)(t1-t0));
        }
@@ -364,6 +477,8 @@ void proc_sched_set_task(struct task_struct *p)
 {
 #ifdef CONFIG_SCHEDSTATS
        p->se.wait_max                          = 0;
+       p->se.wait_sum                          = 0;
+       p->se.wait_count                        = 0;
        p->se.sleep_max                         = 0;
        p->se.sum_sleep_runtime                 = 0;
        p->se.block_max                         = 0;