include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit...
[linux-2.6.git] / arch / sh / kernel / dwarf.c
index 09c6fd7..a8234b2 100644 (file)
  *
  * TODO:
  *     - DWARF64 doesn't work.
+ *     - Registers with DWARF_VAL_OFFSET rules aren't handled properly.
  */
 
 /* #define DEBUG */
 #include <linux/kernel.h>
 #include <linux/io.h>
 #include <linux/list.h>
+#include <linux/mempool.h>
 #include <linux/mm.h>
+#include <linux/elf.h>
+#include <linux/ftrace.h>
+#include <linux/slab.h>
 #include <asm/dwarf.h>
 #include <asm/unwinder.h>
 #include <asm/sections.h>
-#include <asm-generic/unaligned.h>
-#include <asm/dwarf.h>
+#include <asm/unaligned.h>
 #include <asm/stacktrace.h>
 
-static LIST_HEAD(dwarf_cie_list);
-DEFINE_SPINLOCK(dwarf_cie_lock);
+/* Reserve enough memory for two stack frames */
+#define DWARF_FRAME_MIN_REQ    2
+/* ... with 4 registers per frame. */
+#define DWARF_REG_MIN_REQ      (DWARF_FRAME_MIN_REQ * 4)
+
+static struct kmem_cache *dwarf_frame_cachep;
+static mempool_t *dwarf_frame_pool;
+
+static struct kmem_cache *dwarf_reg_cachep;
+static mempool_t *dwarf_reg_pool;
 
-static LIST_HEAD(dwarf_fde_list);
-DEFINE_SPINLOCK(dwarf_fde_lock);
+static struct rb_root cie_root;
+static DEFINE_SPINLOCK(dwarf_cie_lock);
+
+static struct rb_root fde_root;
+static DEFINE_SPINLOCK(dwarf_fde_lock);
 
 static struct dwarf_cie *cached_cie;
 
-/*
- * Figure out whether we need to allocate some dwarf registers. If dwarf
- * registers have already been allocated then we may need to realloc
- * them. "reg" is a register number that we need to be able to access
- * after this call.
+/**
+ *     dwarf_frame_alloc_reg - allocate memory for a DWARF register
+ *     @frame: the DWARF frame whose list of registers we insert on
+ *     @reg_num: the register number
  *
- * Register numbers start at zero, therefore we need to allocate space
- * for "reg" + 1 registers.
+ *     Allocate space for, and initialise, a dwarf reg from
+ *     dwarf_reg_pool and insert it onto the (unsorted) linked-list of
+ *     dwarf registers for @frame.
+ *
+ *     Return the initialised DWARF reg.
  */
-static void dwarf_frame_alloc_regs(struct dwarf_frame *frame,
-                                  unsigned int reg)
+static struct dwarf_reg *dwarf_frame_alloc_reg(struct dwarf_frame *frame,
+                                              unsigned int reg_num)
 {
-       struct dwarf_reg *regs;
-       unsigned int num_regs = reg + 1;
-       size_t new_size;
-       size_t old_size;
-
-       new_size = num_regs * sizeof(*regs);
-       old_size = frame->num_regs * sizeof(*regs);
-
-       /* Fast path: don't allocate any regs if we've already got enough. */
-       if (frame->num_regs >= num_regs)
-               return;
+       struct dwarf_reg *reg;
 
-       regs = kzalloc(new_size, GFP_KERNEL);
-       if (!regs) {
-               printk(KERN_WARNING "Unable to allocate DWARF registers\n");
+       reg = mempool_alloc(dwarf_reg_pool, GFP_ATOMIC);
+       if (!reg) {
+               printk(KERN_WARNING "Unable to allocate a DWARF register\n");
                /*
                 * Let's just bomb hard here, we have no way to
                 * gracefully recover.
                 */
-               BUG();
+               UNWINDER_BUG();
        }
 
-       if (frame->regs) {
-               memcpy(regs, frame->regs, old_size);
-               kfree(frame->regs);
+       reg->number = reg_num;
+       reg->addr = 0;
+       reg->flags = 0;
+
+       list_add(&reg->link, &frame->reg_list);
+
+       return reg;
+}
+
+static void dwarf_frame_free_regs(struct dwarf_frame *frame)
+{
+       struct dwarf_reg *reg, *n;
+
+       list_for_each_entry_safe(reg, n, &frame->reg_list, link) {
+               list_del(&reg->link);
+               mempool_free(reg, dwarf_reg_pool);
+       }
+}
+
+/**
+ *     dwarf_frame_reg - return a DWARF register
+ *     @frame: the DWARF frame to search in for @reg_num
+ *     @reg_num: the register number to search for
+ *
+ *     Lookup and return the dwarf reg @reg_num for this frame. Return
+ *     NULL if @reg_num is an register invalid number.
+ */
+static struct dwarf_reg *dwarf_frame_reg(struct dwarf_frame *frame,
+                                        unsigned int reg_num)
+{
+       struct dwarf_reg *reg;
+
+       list_for_each_entry(reg, &frame->reg_list, link) {
+               if (reg->number == reg_num)
+                       return reg;
        }
 
-       frame->regs = regs;
-       frame->num_regs = num_regs;
+       return NULL;
 }
 
 /**
@@ -87,11 +125,10 @@ static void dwarf_frame_alloc_regs(struct dwarf_frame *frame,
  *     from @src and writing to @dst, because they can be arbitrarily
  *     aligned. Return 'n' - the number of bytes read.
  */
-static inline int dwarf_read_addr(void *src, void *dst)
+static inline int dwarf_read_addr(unsigned long *src, unsigned long *dst)
 {
-       u32 val = __get_unaligned_cpu32(src);
-       __put_unaligned_cpu32(val, dst);
-
+       u32 val = get_unaligned(src);
+       put_unaligned(val, dst);
        return sizeof(unsigned long *);
 }
 
@@ -197,7 +234,7 @@ static int dwarf_read_encoded_value(char *addr, unsigned long *val,
                break;
        default:
                pr_debug("encoding=0x%x\n", (encoding & 0x70));
-               BUG();
+               UNWINDER_BUG();
        }
 
        if ((encoding & 0x07) == 0x00)
@@ -207,12 +244,12 @@ static int dwarf_read_encoded_value(char *addr, unsigned long *val,
        case DW_EH_PE_sdata4:
        case DW_EH_PE_udata4:
                count += 4;
-               decoded_addr += __get_unaligned_cpu32(addr);
+               decoded_addr += get_unaligned((u32 *)addr);
                __raw_writel(decoded_addr, val);
                break;
        default:
                pr_debug("encoding=0x%x\n", encoding);
-               BUG();
+               UNWINDER_BUG();
        }
 
        return count;
@@ -232,7 +269,7 @@ static inline int dwarf_entry_len(char *addr, unsigned long *len)
        u32 initial_len;
        int count;
 
-       initial_len = __get_unaligned_cpu32(addr);
+       initial_len = get_unaligned((u32 *)addr);
        count = 4;
 
        /*
@@ -247,7 +284,7 @@ static inline int dwarf_entry_len(char *addr, unsigned long *len)
                 * compulsory 32-bit length field.
                 */
                if (initial_len == DW_EXT_DWARF64) {
-                       *len = __get_unaligned_cpu64(addr + 4);
+                       *len = get_unaligned((u64 *)addr + 4);
                        count = 12;
                } else {
                        printk(KERN_WARNING "Unknown DWARF extension\n");
@@ -265,7 +302,8 @@ static inline int dwarf_entry_len(char *addr, unsigned long *len)
  */
 static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr)
 {
-       struct dwarf_cie *cie, *n;
+       struct rb_node **rb_node = &cie_root.rb_node;
+       struct dwarf_cie *cie = NULL;
        unsigned long flags;
 
        spin_lock_irqsave(&dwarf_cie_lock, flags);
@@ -279,16 +317,24 @@ static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr)
                goto out;
        }
 
-       list_for_each_entry_safe(cie, n, &dwarf_cie_list, link) {
-               if (cie->cie_pointer == cie_ptr) {
-                       cached_cie = cie;
-                       break;
+       while (*rb_node) {
+               struct dwarf_cie *cie_tmp;
+
+               cie_tmp = rb_entry(*rb_node, struct dwarf_cie, node);
+               BUG_ON(!cie_tmp);
+
+               if (cie_ptr == cie_tmp->cie_pointer) {
+                       cie = cie_tmp;
+                       cached_cie = cie_tmp;
+                       goto out;
+               } else {
+                       if (cie_ptr < cie_tmp->cie_pointer)
+                               rb_node = &(*rb_node)->rb_left;
+                       else
+                               rb_node = &(*rb_node)->rb_right;
                }
        }
 
-       /* Couldn't find the entry in the list. */
-       if (&cie->link == &dwarf_cie_list)
-               cie = NULL;
 out:
        spin_unlock_irqrestore(&dwarf_cie_lock, flags);
        return cie;
@@ -300,24 +346,34 @@ out:
  */
 struct dwarf_fde *dwarf_lookup_fde(unsigned long pc)
 {
+       struct rb_node **rb_node = &fde_root.rb_node;
+       struct dwarf_fde *fde = NULL;
        unsigned long flags;
-       struct dwarf_fde *fde, *n;
 
        spin_lock_irqsave(&dwarf_fde_lock, flags);
-       list_for_each_entry_safe(fde, n, &dwarf_fde_list, link) {
-               unsigned long start, end;
 
-               start = fde->initial_location;
-               end = fde->initial_location + fde->address_range;
+       while (*rb_node) {
+               struct dwarf_fde *fde_tmp;
+               unsigned long tmp_start, tmp_end;
 
-               if (pc >= start && pc < end)
-                       break;
-       }
+               fde_tmp = rb_entry(*rb_node, struct dwarf_fde, node);
+               BUG_ON(!fde_tmp);
+
+               tmp_start = fde_tmp->initial_location;
+               tmp_end = fde_tmp->initial_location + fde_tmp->address_range;
 
-       /* Couldn't find the entry in the list. */
-       if (&fde->link == &dwarf_fde_list)
-               fde = NULL;
+               if (pc < tmp_start) {
+                       rb_node = &(*rb_node)->rb_left;
+               } else {
+                       if (pc < tmp_end) {
+                               fde = fde_tmp;
+                               goto out;
+                       } else
+                               rb_node = &(*rb_node)->rb_right;
+               }
+       }
 
+out:
        spin_unlock_irqrestore(&dwarf_fde_lock, flags);
 
        return fde;
@@ -347,6 +403,7 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start,
        unsigned char insn;
        unsigned char *current_insn;
        unsigned int count, delta, reg, expr_len, offset;
+       struct dwarf_reg *regp;
 
        current_insn = insn_start;
 
@@ -369,9 +426,9 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start,
                        count = dwarf_read_uleb128(current_insn, &offset);
                        current_insn += count;
                        offset *= cie->data_alignment_factor;
-                       dwarf_frame_alloc_regs(frame, reg);
-                       frame->regs[reg].addr = offset;
-                       frame->regs[reg].flags |= DWARF_REG_OFFSET;
+                       regp = dwarf_frame_alloc_reg(frame, reg);
+                       regp->addr = offset;
+                       regp->flags |= DWARF_REG_OFFSET;
                        continue;
                        /* NOTREACHED */
                case DW_CFA_restore:
@@ -392,12 +449,12 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start,
                        frame->pc += delta * cie->code_alignment_factor;
                        break;
                case DW_CFA_advance_loc2:
-                       delta = __get_unaligned_cpu16(current_insn);
+                       delta = get_unaligned((u16 *)current_insn);
                        current_insn += 2;
                        frame->pc += delta * cie->code_alignment_factor;
                        break;
                case DW_CFA_advance_loc4:
-                       delta = __get_unaligned_cpu32(current_insn);
+                       delta = get_unaligned((u32 *)current_insn);
                        current_insn += 4;
                        frame->pc += delta * cie->code_alignment_factor;
                        break;
@@ -415,6 +472,8 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start,
                case DW_CFA_undefined:
                        count = dwarf_read_uleb128(current_insn, &reg);
                        current_insn += count;
+                       regp = dwarf_frame_alloc_reg(frame, reg);
+                       regp->flags |= DWARF_UNDEFINED;
                        break;
                case DW_CFA_def_cfa:
                        count = dwarf_read_uleb128(current_insn,
@@ -453,20 +512,36 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start,
                        count = dwarf_read_leb128(current_insn, &offset);
                        current_insn += count;
                        offset *= cie->data_alignment_factor;
-                       dwarf_frame_alloc_regs(frame, reg);
-                       frame->regs[reg].flags |= DWARF_REG_OFFSET;
-                       frame->regs[reg].addr = offset;
+                       regp = dwarf_frame_alloc_reg(frame, reg);
+                       regp->flags |= DWARF_REG_OFFSET;
+                       regp->addr = offset;
                        break;
                case DW_CFA_val_offset:
                        count = dwarf_read_uleb128(current_insn, &reg);
                        current_insn += count;
                        count = dwarf_read_leb128(current_insn, &offset);
                        offset *= cie->data_alignment_factor;
-                       frame->regs[reg].flags |= DWARF_REG_OFFSET;
-                       frame->regs[reg].addr = offset;
+                       regp = dwarf_frame_alloc_reg(frame, reg);
+                       regp->flags |= DWARF_VAL_OFFSET;
+                       regp->addr = offset;
+                       break;
+               case DW_CFA_GNU_args_size:
+                       count = dwarf_read_uleb128(current_insn, &offset);
+                       current_insn += count;
+                       break;
+               case DW_CFA_GNU_negative_offset_extended:
+                       count = dwarf_read_uleb128(current_insn, &reg);
+                       current_insn += count;
+                       count = dwarf_read_uleb128(current_insn, &offset);
+                       offset *= cie->data_alignment_factor;
+
+                       regp = dwarf_frame_alloc_reg(frame, reg);
+                       regp->flags |= DWARF_REG_OFFSET;
+                       regp->addr = -offset;
                        break;
                default:
                        pr_debug("unhandled DWARF instruction 0x%x\n", insn);
+                       UNWINDER_BUG();
                        break;
                }
        }
@@ -475,7 +550,20 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start,
 }
 
 /**
- *     dwarf_unwind_stack - recursively unwind the stack
+ *     dwarf_free_frame - free the memory allocated for @frame
+ *     @frame: the frame to free
+ */
+void dwarf_free_frame(struct dwarf_frame *frame)
+{
+       dwarf_frame_free_regs(frame);
+       mempool_free(frame, dwarf_frame_pool);
+}
+
+extern void ret_from_irq(void);
+
+/**
+ *     dwarf_unwind_stack - unwind the stack
+ *
  *     @pc: address of the function to unwind
  *     @prev: struct dwarf_frame of the previous stackframe on the callstack
  *
@@ -489,36 +577,57 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
        struct dwarf_frame *frame;
        struct dwarf_cie *cie;
        struct dwarf_fde *fde;
+       struct dwarf_reg *reg;
        unsigned long addr;
-       int i, offset;
 
        /*
-        * If this is the first invocation of this recursive function we
-        * need get the contents of a physical register to get the CFA
-        * in order to begin the virtual unwinding of the stack.
+        * If we're starting at the top of the stack we need to get the
+        * contents of a physical register to get the CFA in order to
+        * begin the virtual unwinding of the stack.
         *
-        * The constant DWARF_ARCH_UNWIND_OFFSET is added to the address of
-        * this function because the return address register
-        * (DWARF_ARCH_RA_REG) will probably not be initialised until a
-        * few instructions into the prologue.
+        * NOTE: the return address is guaranteed to be setup by the
+        * time this function makes its first function call.
         */
-       if (!pc && !prev) {
-               pc = (unsigned long)&dwarf_unwind_stack;
-               pc += DWARF_ARCH_UNWIND_OFFSET;
+       if (!pc || !prev)
+               pc = (unsigned long)current_text_addr();
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       /*
+        * If our stack has been patched by the function graph tracer
+        * then we might see the address of return_to_handler() where we
+        * expected to find the real return address.
+        */
+       if (pc == (unsigned long)&return_to_handler) {
+               int index = current->curr_ret_stack;
+
+               /*
+                * We currently have no way of tracking how many
+                * return_to_handler()'s we've seen. If there is more
+                * than one patched return address on our stack,
+                * complain loudly.
+                */
+               WARN_ON(index > 0);
+
+               pc = current->ret_stack[index].ret;
        }
+#endif
 
-       frame = kzalloc(sizeof(*frame), GFP_KERNEL);
-       if (!frame)
-               return NULL;
+       frame = mempool_alloc(dwarf_frame_pool, GFP_ATOMIC);
+       if (!frame) {
+               printk(KERN_ERR "Unable to allocate a dwarf frame\n");
+               UNWINDER_BUG();
+       }
 
+       INIT_LIST_HEAD(&frame->reg_list);
+       frame->flags = 0;
        frame->prev = prev;
+       frame->return_addr = 0;
 
        fde = dwarf_lookup_fde(pc);
        if (!fde) {
                /*
-                * This is our normal exit path - the one that stops the
-                * recursion. There's two reasons why we might exit
-                * here,
+                * This is our normal exit path. There are two reasons
+                * why we might exit here,
                 *
 *      a) pc has no associated DWARF frame info and so
                 *      we don't know how to unwind this frame. This is
@@ -531,7 +640,7 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
                 *      case above, which sucks because we could print a
                 *      warning here.
                 */
-               return NULL;
+               goto bail;
        }
 
        cie = dwarf_lookup_cie(fde->cie_pointer);
@@ -540,7 +649,8 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
 
        /* CIE initial instructions */
        dwarf_cfa_execute_insns(cie->initial_instructions,
-                               cie->instructions_end, cie, fde, frame, pc);
+                               cie->instructions_end, cie, fde,
+                               frame, pc);
 
        /* FDE instructions */
        dwarf_cfa_execute_insns(fde->instructions, fde->end, cie,
@@ -550,18 +660,19 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
        switch (frame->flags) {
        case DWARF_FRAME_CFA_REG_OFFSET:
                if (prev) {
-                       BUG_ON(!prev->regs[frame->cfa_register].flags);
+                       reg = dwarf_frame_reg(prev, frame->cfa_register);
+                       UNWINDER_BUG_ON(!reg);
+                       UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET);
 
-                       addr = prev->cfa;
-                       addr += prev->regs[frame->cfa_register].addr;
+                       addr = prev->cfa + reg->addr;
                        frame->cfa = __raw_readl(addr);
 
                } else {
                        /*
-                        * Again, this is the first invocation of this
-                        * recurisve function. We need to physically
-                        * read the contents of a register in order to
-                        * get the Canonical Frame Address for this
+                        * Again, we're starting from the top of the
+                        * stack. We need to physically read
+                        * the contents of a register in order to get
+                        * the Canonical Frame Address for this
                         * function.
                         */
                        frame->cfa = dwarf_read_arch_reg(frame->cfa_register);
@@ -570,32 +681,54 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
                frame->cfa += frame->cfa_offset;
                break;
        default:
-               BUG();
+               UNWINDER_BUG();
        }
 
-       /* If we haven't seen the return address reg, we're screwed. */
-       BUG_ON(!frame->regs[DWARF_ARCH_RA_REG].flags);
-
-       for (i = 0; i <= frame->num_regs; i++) {
-               struct dwarf_reg *reg = &frame->regs[i];
+       reg = dwarf_frame_reg(frame, DWARF_ARCH_RA_REG);
 
-               if (!reg->flags)
-                       continue;
+       /*
+        * If we haven't seen the return address register or the return
+        * address column is undefined then we must assume that this is
+        * the end of the callstack.
+        */
+       if (!reg || reg->flags == DWARF_UNDEFINED)
+               goto bail;
 
-               offset = reg->addr;
-               offset += frame->cfa;
-       }
+       UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET);
 
-       addr = frame->cfa + frame->regs[DWARF_ARCH_RA_REG].addr;
+       addr = frame->cfa + reg->addr;
        frame->return_addr = __raw_readl(addr);
 
-       frame->next = dwarf_unwind_stack(frame->return_addr, frame);
+       /*
+        * Ah, the joys of unwinding through interrupts.
+        *
+        * Interrupts are tricky - the DWARF info needs to be _really_
+        * accurate and unfortunately I'm seeing a lot of bogus DWARF
+        * info. For example, I've seen interrupts occur in epilogues
+        * just after the frame pointer (r14) had been restored. The
+        * problem was that the DWARF info claimed that the CFA could be
+        * reached by using the value of the frame pointer before it was
+        * restored.
+        *
+        * So until the compiler can be trusted to produce reliable
+        * DWARF info when it really matters, let's stop unwinding once
+        * we've calculated the function that was interrupted.
+        */
+       if (prev && prev->pc == (unsigned long)ret_from_irq)
+               frame->return_addr = 0;
+
        return frame;
+
+bail:
+       dwarf_free_frame(frame);
+       return NULL;
 }
 
 static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
-                          unsigned char *end)
+                          unsigned char *end, struct module *mod)
 {
+       struct rb_node **rb_node = &cie_root.rb_node;
+       struct rb_node *parent = *rb_node;
        struct dwarf_cie *cie;
        unsigned long flags;
        int count;
@@ -615,7 +748,7 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
        cie->cie_pointer = (unsigned long)entry;
 
        cie->version = *(char *)p++;
-       BUG_ON(cie->version != 1);
+       UNWINDER_BUG_ON(cie->version != 1);
 
        cie->augmentation = p;
        p += strlen(cie->augmentation) + 1;
@@ -645,7 +778,7 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
                count = dwarf_read_uleb128(p, &length);
                p += count;
 
-               BUG_ON((unsigned char *)p > end);
+               UNWINDER_BUG_ON((unsigned char *)p > end);
 
                cie->initial_instructions = p + length;
                cie->augmentation++;
@@ -673,16 +806,16 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
                         * routine in the CIE
                         * augmentation.
                         */
-                       BUG();
+                       UNWINDER_BUG();
                } else if (*cie->augmentation == 'S') {
-                       BUG();
+                       UNWINDER_BUG();
                } else {
                        /*
                         * Unknown augmentation. Assume
                         * 'z' augmentation.
                         */
                        p = cie->initial_instructions;
-                       BUG_ON(!p);
+                       UNWINDER_BUG_ON(!p);
                        break;
                }
        }
@@ -692,15 +825,39 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
 
        /* Add to list */
        spin_lock_irqsave(&dwarf_cie_lock, flags);
-       list_add_tail(&cie->link, &dwarf_cie_list);
+
+       while (*rb_node) {
+               struct dwarf_cie *cie_tmp;
+
+               cie_tmp = rb_entry(*rb_node, struct dwarf_cie, node);
+
+               parent = *rb_node;
+
+               if (cie->cie_pointer < cie_tmp->cie_pointer)
+                       rb_node = &parent->rb_left;
+               else if (cie->cie_pointer >= cie_tmp->cie_pointer)
+                       rb_node = &parent->rb_right;
+               else
+                       WARN_ON(1);
+       }
+
+       rb_link_node(&cie->node, parent, rb_node);
+       rb_insert_color(&cie->node, &cie_root);
+
+       if (mod != NULL)
+               list_add_tail(&cie->link, &mod->arch.cie_list);
+
        spin_unlock_irqrestore(&dwarf_cie_lock, flags);
 
        return 0;
 }
 
 static int dwarf_parse_fde(void *entry, u32 entry_type,
-                          void *start, unsigned long len)
+                          void *start, unsigned long len,
+                          unsigned char *end, struct module *mod)
 {
+       struct rb_node **rb_node = &fde_root.rb_node;
+       struct rb_node *parent = *rb_node;
        struct dwarf_fde *fde;
        struct dwarf_cie *cie;
        unsigned long flags;
@@ -746,28 +903,74 @@ static int dwarf_parse_fde(void *entry, u32 entry_type,
 
        /* Call frame instructions. */
        fde->instructions = p;
-       fde->end = start + len;
+       fde->end = end;
 
        /* Add to list. */
        spin_lock_irqsave(&dwarf_fde_lock, flags);
-       list_add_tail(&fde->link, &dwarf_fde_list);
+
+       while (*rb_node) {
+               struct dwarf_fde *fde_tmp;
+               unsigned long tmp_start, tmp_end;
+               unsigned long start, end;
+
+               fde_tmp = rb_entry(*rb_node, struct dwarf_fde, node);
+
+               start = fde->initial_location;
+               end = fde->initial_location + fde->address_range;
+
+               tmp_start = fde_tmp->initial_location;
+               tmp_end = fde_tmp->initial_location + fde_tmp->address_range;
+
+               parent = *rb_node;
+
+               if (start < tmp_start)
+                       rb_node = &parent->rb_left;
+               else if (start >= tmp_end)
+                       rb_node = &parent->rb_right;
+               else
+                       WARN_ON(1);
+       }
+
+       rb_link_node(&fde->node, parent, rb_node);
+       rb_insert_color(&fde->node, &fde_root);
+
+       if (mod != NULL)
+               list_add_tail(&fde->link, &mod->arch.fde_list);
+
        spin_unlock_irqrestore(&dwarf_fde_lock, flags);
 
        return 0;
 }
 
-static void dwarf_unwinder_dump(struct task_struct *task, struct pt_regs *regs,
+static void dwarf_unwinder_dump(struct task_struct *task,
+                               struct pt_regs *regs,
                                unsigned long *sp,
-                               const struct stacktrace_ops *ops, void *data)
+                               const struct stacktrace_ops *ops,
+                               void *data)
 {
-       struct dwarf_frame *frame;
+       struct dwarf_frame *frame, *_frame;
+       unsigned long return_addr;
+
+       _frame = NULL;
+       return_addr = 0;
+
+       while (1) {
+               frame = dwarf_unwind_stack(return_addr, _frame);
+
+               if (_frame)
+                       dwarf_free_frame(_frame);
 
-       frame = dwarf_unwind_stack(0, NULL);
+               _frame = frame;
 
-       while (frame && frame->return_addr) {
-               ops->address(data, frame->return_addr, 1);
-               frame = frame->next;
+               if (!frame || !frame->return_addr)
+                       break;
+
+               return_addr = frame->return_addr;
+               ops->address(data, return_addr, 1);
        }
+
+       if (frame)
+               dwarf_free_frame(frame);
 }
 
 static struct unwinder dwarf_unwinder = {
@@ -778,51 +981,57 @@ static struct unwinder dwarf_unwinder = {
 
 static void dwarf_unwinder_cleanup(void)
 {
-       struct dwarf_cie *cie, *m;
-       struct dwarf_fde *fde, *n;
-       unsigned long flags;
+       struct rb_node **fde_rb_node = &fde_root.rb_node;
+       struct rb_node **cie_rb_node = &cie_root.rb_node;
 
        /*
         * Deallocate all the memory allocated for the DWARF unwinder.
         * Traverse all the FDE/CIE lists and remove and free all the
         * memory associated with those data structures.
         */
-       spin_lock_irqsave(&dwarf_cie_lock, flags);
-       list_for_each_entry_safe(cie, m, &dwarf_cie_list, link)
-               kfree(cie);
-       spin_unlock_irqrestore(&dwarf_cie_lock, flags);
+       while (*fde_rb_node) {
+               struct dwarf_fde *fde;
 
-       spin_lock_irqsave(&dwarf_fde_lock, flags);
-       list_for_each_entry_safe(fde, n, &dwarf_fde_list, link)
+               fde = rb_entry(*fde_rb_node, struct dwarf_fde, node);
+               rb_erase(*fde_rb_node, &fde_root);
                kfree(fde);
-       spin_unlock_irqrestore(&dwarf_fde_lock, flags);
+       }
+
+       while (*cie_rb_node) {
+               struct dwarf_cie *cie;
+
+               cie = rb_entry(*cie_rb_node, struct dwarf_cie, node);
+               rb_erase(*cie_rb_node, &cie_root);
+               kfree(cie);
+       }
+
+       kmem_cache_destroy(dwarf_reg_cachep);
+       kmem_cache_destroy(dwarf_frame_cachep);
 }
 
 /**
- *     dwarf_unwinder_init - initialise the dwarf unwinder
+ *     dwarf_parse_section - parse DWARF section
+ *     @eh_frame_start: start address of the .eh_frame section
+ *     @eh_frame_end: end address of the .eh_frame section
+ *     @mod: the kernel module containing the .eh_frame section
  *
- *     Build the data structures describing the .dwarf_frame section to
- *     make it easier to lookup CIE and FDE entries. Because the
- *     .eh_frame section is packed as tightly as possible it is not
- *     easy to lookup the FDE for a given PC, so we build a list of FDE
- *     and CIE entries that make it easier.
+ *     Parse the information in a .eh_frame section.
  */
-void dwarf_unwinder_init(void)
+static int dwarf_parse_section(char *eh_frame_start, char *eh_frame_end,
+                              struct module *mod)
 {
        u32 entry_type;
        void *p, *entry;
-       int count, err;
-       unsigned long len;
+       int count, err = 0;
+       unsigned long len = 0;
        unsigned int c_entries, f_entries;
        unsigned char *end;
-       INIT_LIST_HEAD(&dwarf_cie_list);
-       INIT_LIST_HEAD(&dwarf_fde_list);
 
        c_entries = 0;
        f_entries = 0;
-       entry = &__start_eh_frame;
+       entry = eh_frame_start;
 
-       while ((char *)entry < __stop_eh_frame) {
+       while ((char *)entry < eh_frame_end) {
                p = entry;
 
                count = dwarf_entry_len(p, &len);
@@ -834,6 +1043,7 @@ void dwarf_unwinder_init(void)
                         * entry and move to the next one because 'len'
                         * tells us where our next entry is.
                         */
+                       err = -EINVAL;
                        goto out;
                } else
                        p += count;
@@ -841,17 +1051,18 @@ void dwarf_unwinder_init(void)
                /* initial length does not include itself */
                end = p + len;
 
-               entry_type = __get_unaligned_cpu32(p);
+               entry_type = get_unaligned((u32 *)p);
                p += 4;
 
                if (entry_type == DW_EH_FRAME_CIE) {
-                       err = dwarf_parse_cie(entry, p, len, end);
+                       err = dwarf_parse_cie(entry, p, len, end, mod);
                        if (err < 0)
                                goto out;
                        else
                                c_entries++;
                } else {
-                       err = dwarf_parse_fde(entry, entry_type, p, len);
+                       err = dwarf_parse_fde(entry, entry_type, p, len,
+                                             end, mod);
                        if (err < 0)
                                goto out;
                        else
@@ -864,13 +1075,126 @@ void dwarf_unwinder_init(void)
        printk(KERN_INFO "DWARF unwinder initialised: read %u CIEs, %u FDEs\n",
               c_entries, f_entries);
 
+       return 0;
+
+out:
+       return err;
+}
+
+#ifdef CONFIG_MODULES
+/**
+ *	module_dwarf_finalize - parse DWARF info from a loaded module
+ *	@hdr: the ELF header of the module being loaded
+ *	@sechdrs: the module's section header table
+ *	@me: the module containing the (optional) .eh_frame section
+ *
+ *	Look for an allocated .eh_frame section in @me and, if one is
+ *	found, parse it so the module's CIEs and FDEs join the global
+ *	search trees. Returns 0 on success (including the case where the
+ *	module has no .eh_frame section), otherwise a negative error code
+ *	from dwarf_parse_section().
+ */
+int module_dwarf_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
+                         struct module *me)
+{
+       unsigned int i;
+       int err;        /* signed: dwarf_parse_section() returns -ve errno */
+       unsigned long start, end;
+       char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+       start = end = 0;
+
+       /* Section index 0 is always SHN_UNDEF, so start scanning at 1. */
+       for (i = 1; i < hdr->e_shnum; i++) {
+               /* Alloc bit cleared means "ignore it." */
+               if ((sechdrs[i].sh_flags & SHF_ALLOC)
+                   && !strcmp(secstrings+sechdrs[i].sh_name, ".eh_frame")) {
+                       start = sechdrs[i].sh_addr;
+                       end = start + sechdrs[i].sh_size;
+                       break;
+               }
+       }
+
+       /* Did we find the .eh_frame section? */
+       if (i != hdr->e_shnum) {
+               INIT_LIST_HEAD(&me->arch.cie_list);
+               INIT_LIST_HEAD(&me->arch.fde_list);
+               err = dwarf_parse_section((char *)start, (char *)end, me);
+               if (err) {
+                       printk(KERN_WARNING "%s: failed to parse DWARF info\n",
+                              me->name);
+                       return err;
+               }
+       }
+
+       return 0;
+}
+
+/**
+ *	module_dwarf_cleanup - remove FDE/CIEs associated with @mod
+ *	@mod: the module that is being unloaded
+ *
+ *	Remove any FDEs and CIEs from the global lists that came from
+ *	@mod's .eh_frame section because @mod is being unloaded.
+ */
+void module_dwarf_cleanup(struct module *mod)
+{
+       struct dwarf_fde *fde, *ftmp;
+       struct dwarf_cie *cie, *ctmp;
+       unsigned long flags;
+
+       /*
+        * Unlink each of @mod's CIEs from both the per-module list and
+        * the global rb-tree under the CIE lock, so concurrent unwinder
+        * lookups never see a half-removed entry.
+        */
+       spin_lock_irqsave(&dwarf_cie_lock, flags);
+
+       list_for_each_entry_safe(cie, ctmp, &mod->arch.cie_list, link) {
+               list_del(&cie->link);
+               rb_erase(&cie->node, &cie_root);
+               kfree(cie);
+       }
+
+       /*
+        * NOTE(review): cached_cie is not invalidated here; if it points
+        * at one of @mod's CIEs it is left dangling after the kfree()
+        * above — TODO confirm against the lookup path.
+        */
+       spin_unlock_irqrestore(&dwarf_cie_lock, flags);
+
+       /* Same teardown for the module's FDEs, under the FDE lock. */
+       spin_lock_irqsave(&dwarf_fde_lock, flags);
+
+       list_for_each_entry_safe(fde, ftmp, &mod->arch.fde_list, link) {
+               list_del(&fde->link);
+               rb_erase(&fde->node, &fde_root);
+               kfree(fde);
+       }
+
+       spin_unlock_irqrestore(&dwarf_fde_lock, flags);
+}
+#endif /* CONFIG_MODULES */
+
+/**
+ *	dwarf_unwinder_init - initialise the dwarf unwinder
+ *
+ *	Build the data structures describing the .eh_frame section to
+ *	make it easier to lookup CIE and FDE entries. Because the
+ *	.eh_frame section is packed as tightly as possible it is not
+ *	easy to lookup the FDE for a given PC, so we build a list of FDE
+ *	and CIE entries that make it easier.
+ *
+ *	Returns 0 on success, otherwise a negative error code after the
+ *	partially-built state has been torn down.
+ */
+static int __init dwarf_unwinder_init(void)
+{
+       int err;
+
+       /*
+        * SLAB_PANIC makes a failed kmem_cache_create() panic, so the
+        * two caches need no NULL checks here.
+        */
+       dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
+                       sizeof(struct dwarf_frame), 0,
+                       SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
+
+       dwarf_reg_cachep = kmem_cache_create("dwarf_regs",
+                       sizeof(struct dwarf_reg), 0,
+                       SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
+
+       dwarf_frame_pool = mempool_create(DWARF_FRAME_MIN_REQ,
+                                         mempool_alloc_slab,
+                                         mempool_free_slab,
+                                         dwarf_frame_cachep);
+
+       dwarf_reg_pool = mempool_create(DWARF_REG_MIN_REQ,
+                                        mempool_alloc_slab,
+                                        mempool_free_slab,
+                                        dwarf_reg_cachep);
+
+       /* Unlike the SLAB_PANIC caches, mempool_create() can fail. */
+       if (!dwarf_frame_pool || !dwarf_reg_pool) {
+               err = -ENOMEM;
+               goto out;
+       }
+
+       err = dwarf_parse_section(__start_eh_frame, __stop_eh_frame, NULL);
+       if (err)
+               goto out;
+
        err = unwinder_register(&dwarf_unwinder);
        if (err)
                goto out;
 
-       return;
+       return 0;
 
 out:
        printk(KERN_ERR "Failed to initialise DWARF unwinder: %d\n", err);
        dwarf_unwinder_cleanup();
+       /* Propagate the real cause instead of collapsing it to -EINVAL. */
+       return err;
 }
+early_initcall(dwarf_unwinder_init);