sched: latencytop support
[linux-2.6.git] / arch/x86/kernel/stacktrace.c
index 6fa6cf0..55771fd 100644
@@ -33,6 +33,19 @@ static void save_stack_address(void *data, unsigned long addr)
                trace->entries[trace->nr_entries++] = addr;
 }
 
+static void save_stack_address_nosched(void *data, unsigned long addr)
+{
+       struct stack_trace *trace = (struct stack_trace *)data;
+       if (in_sched_functions(addr))
+               return;
+       if (trace->skip > 0) {
+               trace->skip--;
+               return;
+       }
+       if (trace->nr_entries < trace->max_entries)
+               trace->entries[trace->nr_entries++] = addr;
+}
+
 static const struct stacktrace_ops save_stack_ops = {
        .warning = save_stack_warning,
        .warning_symbol = save_stack_warning_symbol,
@@ -40,6 +53,13 @@ static const struct stacktrace_ops save_stack_ops = {
        .address = save_stack_address,
 };
 
+static const struct stacktrace_ops save_stack_ops_nosched = {
+       .warning = save_stack_warning,
+       .warning_symbol = save_stack_warning_symbol,
+       .stack = save_stack_stack,
+       .address = save_stack_address_nosched,
+};
+
 /*
  * Save stack-backtrace addresses into a stack_trace buffer.
  */
@@ -50,3 +70,10 @@ void save_stack_trace(struct stack_trace *trace)
                trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
 EXPORT_SYMBOL(save_stack_trace);
+
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+       dump_trace(tsk, NULL, NULL, &save_stack_ops_nosched, trace);
+       if (trace->nr_entries < trace->max_entries)
+               trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
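
The new save_stack_trace_tsk() walks another task's stack with save_stack_ops_nosched, so frames inside the scheduler (those matched by in_sched_functions()) are skipped and the first recorded entry is the call site that actually blocked; like save_stack_trace(), it terminates the buffer with ULONG_MAX when there is room. Below is a minimal, hypothetical sketch of how a latencytop-style consumer might drive this hook. The helper name record_blocked_trace() and the BACKTRACE_DEPTH constant are illustrative and not part of this patch; only struct stack_trace and save_stack_trace_tsk() come from the kernel API.

	/*
	 * Illustrative only: a latencytop-style consumer of the new hook.
	 * record_blocked_trace() and BACKTRACE_DEPTH are made-up names;
	 * struct stack_trace and save_stack_trace_tsk() are the real API.
	 */
	#include <linux/sched.h>
	#include <linux/stacktrace.h>
	#include <linux/string.h>

	#define BACKTRACE_DEPTH 12

	static void record_blocked_trace(struct task_struct *tsk,
					 unsigned long entries[BACKTRACE_DEPTH])
	{
		struct stack_trace trace;

		memset(&trace, 0, sizeof(trace));
		trace.max_entries = BACKTRACE_DEPTH;
		trace.entries     = entries;
		trace.skip        = 0;	/* scheduler frames are already filtered */
		save_stack_trace_tsk(tsk, &trace);
		/*
		 * entries[] now holds up to BACKTRACE_DEPTH return addresses,
		 * terminated by ULONG_MAX if fewer frames were captured.
		 */
	}

The in-tree latencytop accounting code added in the same series (kernel/latencytop.c) follows essentially this pattern to fill each latency_record's backtrace array.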