include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h
[linux-2.6.git] / arch / sh / kernel / dwarf.c
index 4d8c7bd..a8234b2 100644 (file)
@@ -21,6 +21,8 @@
 #include <linux/mempool.h>
 #include <linux/mm.h>
 #include <linux/elf.h>
+#include <linux/ftrace.h>
+#include <linux/slab.h>
 #include <asm/dwarf.h>
 #include <asm/unwinder.h>
 #include <asm/sections.h>
@@ -38,10 +40,10 @@ static mempool_t *dwarf_frame_pool;
 static struct kmem_cache *dwarf_reg_cachep;
 static mempool_t *dwarf_reg_pool;
 
-static LIST_HEAD(dwarf_cie_list);
+static struct rb_root cie_root;
 static DEFINE_SPINLOCK(dwarf_cie_lock);
 
-static LIST_HEAD(dwarf_fde_list);
+static struct rb_root fde_root;
 static DEFINE_SPINLOCK(dwarf_fde_lock);
 
 static struct dwarf_cie *cached_cie;
@@ -300,7 +302,8 @@ static inline int dwarf_entry_len(char *addr, unsigned long *len)
  */
 static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr)
 {
-       struct dwarf_cie *cie;
+       struct rb_node **rb_node = &cie_root.rb_node;
+       struct dwarf_cie *cie = NULL;
        unsigned long flags;
 
        spin_lock_irqsave(&dwarf_cie_lock, flags);
@@ -314,16 +317,24 @@ static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr)
                goto out;
        }
 
-       list_for_each_entry(cie, &dwarf_cie_list, link) {
-               if (cie->cie_pointer == cie_ptr) {
-                       cached_cie = cie;
-                       break;
+       while (*rb_node) {
+               struct dwarf_cie *cie_tmp;
+
+               cie_tmp = rb_entry(*rb_node, struct dwarf_cie, node);
+               BUG_ON(!cie_tmp);
+
+               if (cie_ptr == cie_tmp->cie_pointer) {
+                       cie = cie_tmp;
+                       cached_cie = cie_tmp;
+                       goto out;
+               } else {
+                       if (cie_ptr < cie_tmp->cie_pointer)
+                               rb_node = &(*rb_node)->rb_left;
+                       else
+                               rb_node = &(*rb_node)->rb_right;
                }
        }
 
-       /* Couldn't find the entry in the list. */
-       if (&cie->link == &dwarf_cie_list)
-               cie = NULL;
 out:
        spin_unlock_irqrestore(&dwarf_cie_lock, flags);
        return cie;
@@ -335,25 +346,34 @@ out:
  */
 struct dwarf_fde *dwarf_lookup_fde(unsigned long pc)
 {
-       struct dwarf_fde *fde;
+       struct rb_node **rb_node = &fde_root.rb_node;
+       struct dwarf_fde *fde = NULL;
        unsigned long flags;
 
        spin_lock_irqsave(&dwarf_fde_lock, flags);
 
-       list_for_each_entry(fde, &dwarf_fde_list, link) {
-               unsigned long start, end;
+       while (*rb_node) {
+               struct dwarf_fde *fde_tmp;
+               unsigned long tmp_start, tmp_end;
 
-               start = fde->initial_location;
-               end = fde->initial_location + fde->address_range;
+               fde_tmp = rb_entry(*rb_node, struct dwarf_fde, node);
+               BUG_ON(!fde_tmp);
 
-               if (pc >= start && pc < end)
-                       break;
-       }
+               tmp_start = fde_tmp->initial_location;
+               tmp_end = fde_tmp->initial_location + fde_tmp->address_range;
 
-       /* Couldn't find the entry in the list. */
-       if (&fde->link == &dwarf_fde_list)
-               fde = NULL;
+               if (pc < tmp_start) {
+                       rb_node = &(*rb_node)->rb_left;
+               } else {
+                       if (pc < tmp_end) {
+                               fde = fde_tmp;
+                               goto out;
+                       } else
+                               rb_node = &(*rb_node)->rb_right;
+               }
+       }
 
+out:
        spin_unlock_irqrestore(&dwarf_fde_lock, flags);
 
        return fde;
@@ -539,6 +559,8 @@ void dwarf_free_frame(struct dwarf_frame *frame)
        mempool_free(frame, dwarf_frame_pool);
 }
 
+extern void ret_from_irq(void);
+
 /**
  *     dwarf_unwind_stack - unwind the stack
  *
@@ -549,8 +571,8 @@ void dwarf_free_frame(struct dwarf_frame *frame)
  *     on the callstack. Each of the lower (older) stack frames are
  *     linked via the "prev" member.
  */
-struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
-                                       struct dwarf_frame *prev)
+struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
+                                      struct dwarf_frame *prev)
 {
        struct dwarf_frame *frame;
        struct dwarf_cie *cie;
@@ -566,9 +588,30 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
         * NOTE: the return address is guaranteed to be setup by the
         * time this function makes its first function call.
         */
-       if (!pc && !prev)
+       if (!pc || !prev)
                pc = (unsigned long)current_text_addr();
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       /*
+        * If our stack has been patched by the function graph tracer
+        * then we might see the address of return_to_handler() where we
+        * expected to find the real return address.
+        */
+       if (pc == (unsigned long)&return_to_handler) {
+               int index = current->curr_ret_stack;
+
+               /*
+                * We currently have no way of tracking how many
+                * return_to_handler()'s we've seen. If there is more
+                * than one patched return address on our stack,
+                * complain loudly.
+                */
+               WARN_ON(index > 0);
+
+               pc = current->ret_stack[index].ret;
+       }
+#endif
+
        frame = mempool_alloc(dwarf_frame_pool, GFP_ATOMIC);
        if (!frame) {
                printk(KERN_ERR "Unable to allocate a dwarf frame\n");
@@ -656,6 +699,24 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
        addr = frame->cfa + reg->addr;
        frame->return_addr = __raw_readl(addr);
 
+       /*
+        * Ah, the joys of unwinding through interrupts.
+        *
+        * Interrupts are tricky - the DWARF info needs to be _really_
+        * accurate and unfortunately I'm seeing a lot of bogus DWARF
+        * info. For example, I've seen interrupts occur in epilogues
+        * just after the frame pointer (r14) had been restored. The
+        * problem was that the DWARF info claimed that the CFA could be
+        * reached by using the value of the frame pointer before it was
+        * restored.
+        *
+        * So until the compiler can be trusted to produce reliable
+        * DWARF info when it really matters, let's stop unwinding once
+        * we've calculated the function that was interrupted.
+        */
+       if (prev && prev->pc == (unsigned long)ret_from_irq)
+               frame->return_addr = 0;
+
        return frame;
 
 bail:
@@ -666,6 +727,8 @@ bail:
 static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
                           unsigned char *end, struct module *mod)
 {
+       struct rb_node **rb_node = &cie_root.rb_node;
+       struct rb_node *parent = *rb_node;
        struct dwarf_cie *cie;
        unsigned long flags;
        int count;
@@ -760,11 +823,30 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
        cie->initial_instructions = p;
        cie->instructions_end = end;
 
-       cie->mod = mod;
-
        /* Add to list */
        spin_lock_irqsave(&dwarf_cie_lock, flags);
-       list_add_tail(&cie->link, &dwarf_cie_list);
+
+       while (*rb_node) {
+               struct dwarf_cie *cie_tmp;
+
+               cie_tmp = rb_entry(*rb_node, struct dwarf_cie, node);
+
+               parent = *rb_node;
+
+               if (cie->cie_pointer < cie_tmp->cie_pointer)
+                       rb_node = &parent->rb_left;
+               else if (cie->cie_pointer >= cie_tmp->cie_pointer)
+                       rb_node = &parent->rb_right;
+               else
+                       WARN_ON(1);
+       }
+
+       rb_link_node(&cie->node, parent, rb_node);
+       rb_insert_color(&cie->node, &cie_root);
+
+       if (mod != NULL)
+               list_add_tail(&cie->link, &mod->arch.cie_list);
+
        spin_unlock_irqrestore(&dwarf_cie_lock, flags);
 
        return 0;
@@ -774,6 +856,8 @@ static int dwarf_parse_fde(void *entry, u32 entry_type,
                           void *start, unsigned long len,
                           unsigned char *end, struct module *mod)
 {
+       struct rb_node **rb_node = &fde_root.rb_node;
+       struct rb_node *parent = *rb_node;
        struct dwarf_fde *fde;
        struct dwarf_cie *cie;
        unsigned long flags;
@@ -821,11 +905,38 @@ static int dwarf_parse_fde(void *entry, u32 entry_type,
        fde->instructions = p;
        fde->end = end;
 
-       fde->mod = mod;
-
        /* Add to list. */
        spin_lock_irqsave(&dwarf_fde_lock, flags);
-       list_add_tail(&fde->link, &dwarf_fde_list);
+
+       while (*rb_node) {
+               struct dwarf_fde *fde_tmp;
+               unsigned long tmp_start, tmp_end;
+               unsigned long start, end;
+
+               fde_tmp = rb_entry(*rb_node, struct dwarf_fde, node);
+
+               start = fde->initial_location;
+               end = fde->initial_location + fde->address_range;
+
+               tmp_start = fde_tmp->initial_location;
+               tmp_end = fde_tmp->initial_location + fde_tmp->address_range;
+
+               parent = *rb_node;
+
+               if (start < tmp_start)
+                       rb_node = &parent->rb_left;
+               else if (start >= tmp_end)
+                       rb_node = &parent->rb_right;
+               else
+                       WARN_ON(1);
+       }
+
+       rb_link_node(&fde->node, parent, rb_node);
+       rb_insert_color(&fde->node, &fde_root);
+
+       if (mod != NULL)
+               list_add_tail(&fde->link, &mod->arch.fde_list);
+
        spin_unlock_irqrestore(&dwarf_fde_lock, flags);
 
        return 0;
@@ -870,19 +981,29 @@ static struct unwinder dwarf_unwinder = {
 
 static void dwarf_unwinder_cleanup(void)
 {
-       struct dwarf_cie *cie;
-       struct dwarf_fde *fde;
+       struct rb_node **fde_rb_node = &fde_root.rb_node;
+       struct rb_node **cie_rb_node = &cie_root.rb_node;
 
        /*
         * Deallocate all the memory allocated for the DWARF unwinder.
         * Traverse all the FDE/CIE lists and remove and free all the
         * memory associated with those data structures.
         */
-       list_for_each_entry(cie, &dwarf_cie_list, link)
-               kfree(cie);
+       while (*fde_rb_node) {
+               struct dwarf_fde *fde;
 
-       list_for_each_entry(fde, &dwarf_fde_list, link)
+               fde = rb_entry(*fde_rb_node, struct dwarf_fde, node);
+               rb_erase(*fde_rb_node, &fde_root);
                kfree(fde);
+       }
+
+       while (*cie_rb_node) {
+               struct dwarf_cie *cie;
+
+               cie = rb_entry(*cie_rb_node, struct dwarf_cie, node);
+               rb_erase(*cie_rb_node, &cie_root);
+               kfree(cie);
+       }
 
        kmem_cache_destroy(dwarf_reg_cachep);
        kmem_cache_destroy(dwarf_frame_cachep);
@@ -982,6 +1103,8 @@ int module_dwarf_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
 
        /* Did we find the .eh_frame section? */
        if (i != hdr->e_shnum) {
+               INIT_LIST_HEAD(&me->arch.cie_list);
+               INIT_LIST_HEAD(&me->arch.fde_list);
                err = dwarf_parse_section((char *)start, (char *)end, me);
                if (err) {
                        printk(KERN_WARNING "%s: failed to parse DWARF info\n",
@@ -1002,38 +1125,26 @@ int module_dwarf_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
  */
 void module_dwarf_cleanup(struct module *mod)
 {
-       struct dwarf_fde *fde;
-       struct dwarf_cie *cie;
+       struct dwarf_fde *fde, *ftmp;
+       struct dwarf_cie *cie, *ctmp;
        unsigned long flags;
 
        spin_lock_irqsave(&dwarf_cie_lock, flags);
 
-again_cie:
-       list_for_each_entry(cie, &dwarf_cie_list, link) {
-               if (cie->mod == mod)
-                       break;
-       }
-
-       if (&cie->link != &dwarf_cie_list) {
+       list_for_each_entry_safe(cie, ctmp, &mod->arch.cie_list, link) {
                list_del(&cie->link);
+               rb_erase(&cie->node, &cie_root);
                kfree(cie);
-               goto again_cie;
        }
 
        spin_unlock_irqrestore(&dwarf_cie_lock, flags);
 
        spin_lock_irqsave(&dwarf_fde_lock, flags);
 
-again_fde:
-       list_for_each_entry(fde, &dwarf_fde_list, link) {
-               if (fde->mod == mod)
-                       break;
-       }
-
-       if (&fde->link != &dwarf_fde_list) {
+       list_for_each_entry_safe(fde, ftmp, &mod->arch.fde_list, link) {
                list_del(&fde->link);
+               rb_erase(&fde->node, &fde_root);
                kfree(fde);
-               goto again_fde;
        }
 
        spin_unlock_irqrestore(&dwarf_fde_lock, flags);
@@ -1052,8 +1163,6 @@ again_fde:
 static int __init dwarf_unwinder_init(void)
 {
        int err;
-       INIT_LIST_HEAD(&dwarf_cie_list);
-       INIT_LIST_HEAD(&dwarf_fde_list);
 
        dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
                        sizeof(struct dwarf_frame), 0,