30d3b9a3c910fc6c2bf0395fed3c2958f4994597
[linux-3.10.git] / drivers / misc / tegra-profiler / eh_unwind.c
1 /*
 * drivers/misc/tegra-profiler/eh_unwind.c
3  *
4  * Copyright (c) 2014, NVIDIA CORPORATION.  All rights reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  */
16
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
19 /*#pragma message("--- version header: remove for static version ---")*/
20 #include <linux/version.h>
21
22 #include <linux/mm.h>
23 #include <linux/sched.h>
24 #include <linux/slab.h>
25 #include <linux/uaccess.h>
26 #include <linux/err.h>
27 #include <linux/rcupdate.h>
28
29 #include <linux/tegra_profiler.h>
30
31 #include "eh_unwind.h"
32 #include "backtrace.h"
33 #include "comm.h"
34 #include "dwarf_unwind.h"
35
/* Initial capacity (in entries) of the exception-table region array. */
#define QUADD_EXTABS_SIZE       0x100

/* Number of pages spanned by the byte range [a, a + l). */
#define GET_NR_PAGES(a, l) \
	((PAGE_ALIGN((a) + (l)) - ((a) & PAGE_MASK)) / PAGE_SIZE)
40
/* ARM register numbers used as indices into the virtual register set. */
enum regs {
	FP_THUMB = 7,	/* frame pointer in Thumb mode (r7) */
	FP_ARM = 11,	/* frame pointer in ARM mode (r11) */

	SP = 13,	/* stack pointer */
	LR = 14,	/* link register */
	PC = 15		/* program counter */
};
49
/*
 * Snapshot of all registered exception-table regions, kept sorted by
 * vm_start.  A snapshot is published with rcu_assign_pointer(ctx.rd, ...)
 * and retired through call_rcu()/rd_free_rcu(), so lookups only need
 * rcu_read_lock().
 */
struct regions_data {
	struct ex_region_info *entries;	/* sorted array; capacity is "size" */

	unsigned long curr_nr;		/* number of valid entries */
	unsigned long size;		/* allocated capacity of "entries" */

	struct rcu_head rcu;		/* for deferred free of old snapshots */
};
58
/* Module-wide unwinder state (single instance: "ctx" below). */
struct quadd_unwind_ctx {
	struct regions_data *rd;	/* RCU-published region snapshot */

	pid_t pid;			/* owner pid; assigned outside this chunk */
	unsigned long ex_tables_size;	/* total bytes of registered exidx/extab */
	spinlock_t lock;		/* serializes writers of rd */
};
66
/* One .ARM.exidx table entry. */
struct unwind_idx {
	u32 addr_offset;	/* prel31 offset to the function start */
	u32 insn;		/* inline unwind word or prel31 into .ARM.extab */
};
71
/* Register snapshot describing one frame while unwinding. */
struct stackframe {
	unsigned long fp_thumb;	/* r7, Thumb-mode frame pointer */
	unsigned long fp_arm;	/* r11, ARM-mode frame pointer */

	unsigned long sp;	/* stack pointer */
	unsigned long lr;	/* link register */
	unsigned long pc;	/* program counter */
};
80
/* Decoder state while executing a sequence of EHABI unwind opcodes. */
struct unwind_ctrl_block {
	u32 vrs[16];		/* virtual register set */
	const u32 *insn;	/* pointer to the current instr word */
	int entries;		/* number of entries left */
	int byte;		/* current byte in the instr word */
};
87
/*
 * Deferred work item carrying the start address of a VMA whose pages are
 * to be pinned.  NOTE(review): the queueing site and handler are not
 * visible in this chunk — confirm against the rest of the file.
 */
struct pin_pages_work {
	struct work_struct work;
	unsigned long vm_start;
};
92
/* The single global unwinder context; writers serialize on ctx.lock. */
static struct quadd_unwind_ctx ctx;
94
95 static inline int
96 validate_mmap_addr(struct quadd_mmap_area *mmap,
97                    unsigned long addr, unsigned long nbytes)
98 {
99         struct vm_area_struct *vma = mmap->mmap_vma;
100         unsigned long size = vma->vm_end - vma->vm_start;
101         unsigned long data = (unsigned long)mmap->data;
102
103         if (addr & 0x03) {
104                 pr_err_once("%s: error: unaligned address: %#lx, data: %#lx-%#lx, vma: %#lx-%#lx\n",
105                             __func__, addr, data, data + size,
106                        vma->vm_start, vma->vm_end);
107                 return 0;
108         }
109
110         if (addr < data || addr >= data + (size - nbytes)) {
111                 pr_err_once("%s: error: addr: %#lx, data: %#lx-%#lx, vma: %#lx-%#lx\n",
112                             __func__, addr, data, data + size,
113                        vma->vm_start, vma->vm_end);
114                 return 0;
115         }
116
117         return 1;
118 }
119
/*
 * Fetch a value from user memory without sleeping: page faults are
 * disabled around __get_user(), so an unmapped page fails fast instead
 * of being faulted in.  Evaluates to 0 on success or -QUADD_URC_EACCESS.
 */
#define read_user_data(addr, retval)                            \
({                                                              \
	long ret;                                               \
								\
	pagefault_disable();                                    \
	ret = __get_user(retval, addr);                         \
	pagefault_enable();                                     \
								\
	if (ret) {                                              \
		pr_debug("%s: failed for address: %p\n",        \
			 __func__, addr);                       \
		ret = -QUADD_URC_EACCESS;                       \
	}                                                       \
								\
	ret;                                                    \
})
136
137 static inline long
138 read_mmap_data(struct quadd_mmap_area *mmap, const u32 *addr, u32 *retval)
139 {
140         if (!validate_mmap_addr(mmap, (unsigned long)addr, sizeof(u32))) {
141                 *retval = 0;
142                 return -QUADD_URC_EACCESS;
143         }
144
145         *retval = *addr;
146         return 0;
147 }
148
149 static inline unsigned long
150 ex_addr_to_mmap_addr(unsigned long addr,
151                      struct ex_region_info *ri,
152                      int exidx)
153 {
154         unsigned long offset;
155         struct extab_info *ei;
156
157         ei = exidx ? &ri->tabs.exidx : &ri->tabs.extab;
158         offset = addr - ei->addr;
159
160         return ei->mmap_offset + offset + (unsigned long)ri->mmap->data;
161 }
162
163 static inline unsigned long
164 mmap_addr_to_ex_addr(unsigned long addr,
165                      struct ex_region_info *ri,
166                      int exidx)
167 {
168         unsigned long offset;
169         struct extab_info *ei;
170
171         ei = exidx ? &ri->tabs.exidx : &ri->tabs.extab;
172         offset = addr - ei->mmap_offset - (unsigned long)ri->mmap->data;
173
174         return ei->addr + offset;
175 }
176
177 static inline u32
178 prel31_to_addr(const u32 *ptr)
179 {
180         u32 value;
181         s32 offset;
182
183         if (read_user_data(ptr, value))
184                 return 0;
185
186         /* sign-extend to 32 bits */
187         offset = (((s32)value) << 1) >> 1;
188         return (u32)(unsigned long)ptr + offset;
189 }
190
191 static unsigned long
192 mmap_prel31_to_addr(const u32 *ptr, struct ex_region_info *ri,
193                     int is_src_exidx, int is_dst_exidx, int to_mmap)
194 {
195         u32 value, addr;
196         unsigned long addr_res;
197         s32 offset;
198         struct extab_info *ei_src, *ei_dst;
199
200         ei_src = is_src_exidx ? &ri->tabs.exidx : &ri->tabs.extab;
201         ei_dst = is_dst_exidx ? &ri->tabs.exidx : &ri->tabs.extab;
202
203         value = *ptr;
204         offset = (((s32)value) << 1) >> 1;
205
206         addr = mmap_addr_to_ex_addr((unsigned long)ptr, ri, is_src_exidx);
207         addr += offset;
208         addr_res = addr;
209
210         if (to_mmap)
211                 addr_res = ex_addr_to_mmap_addr(addr_res, ri, is_dst_exidx);
212
213         return addr_res;
214 }
215
/*
 * Insert *new_entry into rd->entries keeping ascending vm_start order.
 * Entries with a duplicate vm_start are not inserted.  Returns the number
 * of entries added (0 or 1); the caller updates rd->curr_nr and must
 * guarantee capacity for one extra element (see quadd_unwind_set_extab).
 */
static int
add_ex_region(struct regions_data *rd,
	      struct ex_region_info *new_entry)
{
	unsigned int i_min, i_max, mid;
	struct ex_region_info *array = rd->entries;
	unsigned long size = rd->curr_nr;

	if (!array)
		return 0;

	if (size == 0) {
		/* empty array: trivially insert at slot 0 */
		memcpy(&array[0], new_entry, sizeof(*new_entry));
		return 1;
	} else if (size == 1 && array[0].vm_start == new_entry->vm_start) {
		/* duplicate of the only element */
		return 0;
	}

	i_min = 0;
	i_max = size;

	if (array[0].vm_start > new_entry->vm_start) {
		/* new smallest element: shift everything right, insert front */
		memmove(array + 1, array,
			size * sizeof(*array));
		memcpy(&array[0], new_entry, sizeof(*new_entry));
		return 1;
	} else if (array[size - 1].vm_start < new_entry->vm_start) {
		/* new largest element: append */
		memcpy(&array[size], new_entry, sizeof(*new_entry));
		return 1;
	}

	/*
	 * Lower-bound binary search; the range checks above guarantee the
	 * loop converges to i_max < size.
	 */
	while (i_min < i_max) {
		mid = i_min + (i_max - i_min) / 2;

		if (new_entry->vm_start <= array[mid].vm_start)
			i_max = mid;
		else
			i_min = mid + 1;
	}

	if (array[i_max].vm_start == new_entry->vm_start) {
		/* region already registered */
		return 0;
	} else {
		/* open slot i_max and insert */
		memmove(array + i_max + 1,
			array + i_max,
			(size - i_max) * sizeof(*array));
		memcpy(&array[i_max], new_entry, sizeof(*new_entry));
		return 1;
	}
}
266
267 static int
268 remove_ex_region(struct regions_data *rd,
269                  struct ex_region_info *entry)
270 {
271         unsigned int i_min, i_max, mid;
272         struct ex_region_info *array = rd->entries;
273         unsigned long size = rd->curr_nr;
274
275         if (!array)
276                 return 0;
277
278         if (size == 0)
279                 return 0;
280
281         if (size == 1) {
282                 if (array[0].vm_start == entry->vm_start)
283                         return 1;
284                 else
285                         return 0;
286         }
287
288         if (array[0].vm_start > entry->vm_start)
289                 return 0;
290         else if (array[size - 1].vm_start < entry->vm_start)
291                 return 0;
292
293         i_min = 0;
294         i_max = size;
295
296         while (i_min < i_max) {
297                 mid = i_min + (i_max - i_min) / 2;
298
299                 if (entry->vm_start <= array[mid].vm_start)
300                         i_max = mid;
301                 else
302                         i_min = mid + 1;
303         }
304
305         if (array[i_max].vm_start == entry->vm_start) {
306                 memmove(array + i_max,
307                         array + i_max + 1,
308                         (size - i_max) * sizeof(*array));
309                 return 1;
310         } else {
311                 return 0;
312         }
313 }
314
315 static struct ex_region_info *
316 search_ex_region(struct ex_region_info *array,
317                  unsigned long size,
318                  unsigned long key)
319 {
320         unsigned int i_min, i_max, mid;
321
322         if (size == 0)
323                 return NULL;
324
325         i_min = 0;
326         i_max = size;
327
328         while (i_min < i_max) {
329                 mid = i_min + (i_max - i_min) / 2;
330
331                 if (key <= array[mid].vm_start)
332                         i_max = mid;
333                 else
334                         i_min = mid + 1;
335         }
336
337         if (array[i_max].vm_start == key)
338                 return &array[i_max];
339
340         return NULL;
341 }
342
/*
 * Find the registered region whose vm_start equals key and copy it into
 * *ri.  Uses the RCU-published snapshot, so it is safe against concurrent
 * updates.  Returns 0 on success, -ENOENT when no region matches.
 */
long quadd_search_ex_region(unsigned long key, struct ex_region_info *ri)
{
	struct regions_data *rd;
	struct ex_region_info *ri_p = NULL;

	rcu_read_lock();

	rd = rcu_dereference(ctx.rd);
	if (!rd)
		goto out;

	/*
	 * Copy out while still inside the read-side critical section: the
	 * entry may be freed any time after rcu_read_unlock().
	 */
	ri_p = search_ex_region(rd->entries, rd->curr_nr, key);
	if (ri_p)
		memcpy(ri, ri_p, sizeof(*ri));

out:
	rcu_read_unlock();
	return ri_p ? 0 : -ENOENT;
}
362
363 static struct regions_data *rd_alloc(unsigned long size)
364 {
365         struct regions_data *rd;
366
367         rd = kzalloc(sizeof(*rd), GFP_ATOMIC);
368         if (!rd)
369                 return NULL;
370
371         rd->entries = kzalloc(size * sizeof(*rd->entries), GFP_ATOMIC);
372         if (!rd->entries) {
373                 kfree(rd);
374                 return NULL;
375         }
376
377         rd->size = size;
378         rd->curr_nr = 0;
379
380         return rd;
381 }
382
383 static void rd_free(struct regions_data *rd)
384 {
385         if (rd)
386                 kfree(rd->entries);
387
388         kfree(rd);
389 }
390
391 static void rd_free_rcu(struct rcu_head *rh)
392 {
393         struct regions_data *rd = container_of(rh, struct regions_data, rcu);
394         rd_free(rd);
395 }
396
/*
 * Register the exception tables (.ARM.exidx / .ARM.extab) described by
 * extabs for the memory region backed by mmap.  The region array is
 * copy-on-write: a new (possibly grown) array is built, the region is
 * inserted in sorted order, and the result is RCU-published; the old
 * array is freed after a grace period.
 *
 * Returns 0 on success (also when the region was already registered),
 * -EIO for a wrong mmap type, -ENOMEM on allocation failure.
 */
int quadd_unwind_set_extab(struct quadd_extables *extabs,
			   struct quadd_mmap_area *mmap)
{
	int err = 0;
	unsigned long nr_entries, nr_added, new_size;
	struct ex_region_info ri_entry;
	struct extab_info *ti;
	struct regions_data *rd, *rd_new;
	struct ex_region_info *ex_entry;

	if (mmap->type != QUADD_MMAP_TYPE_EXTABS)
		return -EIO;

	spin_lock(&ctx.lock);

	/*
	 * NOTE(review): writers are serialized by ctx.lock, so
	 * rcu_dereference_protected() would be the precise primitive here.
	 */
	rd = rcu_dereference(ctx.rd);
	if (!rd) {
		pr_warn("%s: warning: rd\n", __func__);
		new_size = QUADD_EXTABS_SIZE;
		nr_entries = 0;
	} else {
		new_size = rd->size;
		nr_entries = rd->curr_nr;
	}

	/* grow capacity by 1.5x when the array is full */
	if (nr_entries >= new_size)
		new_size += new_size >> 1;

	rd_new = rd_alloc(new_size);
	if (IS_ERR_OR_NULL(rd_new)) {
		pr_err("%s: error: rd_alloc\n", __func__);
		err = -ENOMEM;
		goto error_out;
	}

	if (rd && nr_entries)
		memcpy(rd_new->entries, rd->entries,
		       nr_entries * sizeof(*rd->entries));

	rd_new->curr_nr = nr_entries;

	ri_entry.vm_start = extabs->vm_start;
	ri_entry.vm_end = extabs->vm_end;

	ri_entry.mmap = mmap;

	/* tail-function range is filled in later by set_tail_info() */
	ri_entry.tf_start = 0;
	ri_entry.tf_end = 0;

	/*
	 * NOTE(review): ex_tables_size is accounted before the insertion can
	 * fail and is not rolled back on the error paths — confirm intended.
	 */
	ti = &ri_entry.tabs.exidx;
	ti->addr = extabs->exidx.addr;
	ti->length = extabs->exidx.length;
	ti->mmap_offset = extabs->reserved[QUADD_EXT_IDX_EXIDX_OFFSET];
	ctx.ex_tables_size += ti->length;

	ti = &ri_entry.tabs.extab;
	ti->addr = extabs->extab.addr;
	ti->length = extabs->extab.length;
	ti->mmap_offset = extabs->reserved[QUADD_EXT_IDX_EXTAB_OFFSET];
	ctx.ex_tables_size += ti->length;

	nr_added = add_ex_region(rd_new, &ri_entry);
	if (nr_added == 0)
		goto error_free;	/* duplicate region: err is still 0 */

	rd_new->curr_nr += nr_added;

	ex_entry = kzalloc(sizeof(*ex_entry), GFP_ATOMIC);
	if (!ex_entry) {
		err = -ENOMEM;
		goto error_free;
	}
	memcpy(ex_entry, &ri_entry, sizeof(*ex_entry));

	/* remember the region on the mmap so delete_mmap() can clean up */
	INIT_LIST_HEAD(&ex_entry->list);
	list_add_tail(&ex_entry->list, &mmap->ex_entries);

	/* publish the new array; free the old one after a grace period */
	rcu_assign_pointer(ctx.rd, rd_new);

	if (rd)
		call_rcu(&rd->rcu, rd_free_rcu);

	spin_unlock(&ctx.lock);

	return 0;

error_free:
	rd_free(rd_new);
error_out:
	spin_unlock(&ctx.lock);
	return err;
}
489
/*
 * Record the tail-function range [tf_start, tf_end) on the region whose
 * vm_start equals vm_start.  The array is patched copy-on-write and the
 * copy RCU-published, so readers never observe a half-updated entry.
 * Silently does nothing when the list is empty, allocation fails, or the
 * region is not found.
 */
void
quadd_unwind_set_tail_info(unsigned long vm_start,
			   unsigned long tf_start,
			   unsigned long tf_end)
{
	struct ex_region_info *ri;
	unsigned long nr_entries, size;
	struct regions_data *rd, *rd_new;

	spin_lock(&ctx.lock);

	rd = rcu_dereference(ctx.rd);

	if (!rd || rd->curr_nr == 0)
		goto error_out;

	size = rd->size;
	nr_entries = rd->curr_nr;

	/* same capacity: this is a patch, not a growth */
	rd_new = rd_alloc(size);
	if (IS_ERR_OR_NULL(rd_new)) {
		pr_err_once("%s: error: rd_alloc\n", __func__);
		goto error_out;
	}

	memcpy(rd_new->entries, rd->entries,
	       nr_entries * sizeof(*rd->entries));

	rd_new->curr_nr = nr_entries;

	/* patch the private copy, then publish it */
	ri = search_ex_region(rd_new->entries, nr_entries, vm_start);
	if (!ri)
		goto error_free;

	ri->tf_start = tf_start;
	ri->tf_end = tf_end;

	rcu_assign_pointer(ctx.rd, rd_new);

	call_rcu(&rd->rcu, rd_free_rcu);
	spin_unlock(&ctx.lock);

	return;

error_free:
	rd_free(rd_new);

error_out:
	spin_unlock(&ctx.lock);
}
540
541 static int
542 clean_mmap(struct regions_data *rd, struct quadd_mmap_area *mmap, int rm_ext)
543 {
544         int nr_removed = 0;
545         struct ex_region_info *entry, *next;
546
547         if (!rd || !mmap)
548                 return 0;
549
550         list_for_each_entry_safe(entry, next, &mmap->ex_entries, list) {
551                 if (rm_ext)
552                         nr_removed += remove_ex_region(rd, entry);
553
554                 list_del(&entry->list);
555                 kfree(entry);
556         }
557
558         return nr_removed;
559 }
560
/*
 * Unregister every exception-table region attached to mmap.  A pruned
 * copy of the region array is built, the mmap's entries removed from it,
 * and the copy RCU-published; the old array is freed after a grace
 * period.
 */
void quadd_unwind_delete_mmap(struct quadd_mmap_area *mmap)
{
	unsigned long nr_entries, nr_removed, new_size;
	struct regions_data *rd, *rd_new;

	if (!mmap)
		return;

	spin_lock(&ctx.lock);

	rd = rcu_dereference(ctx.rd);
	if (!rd || !rd->curr_nr)
		goto error_out;

	nr_entries = rd->curr_nr;
	new_size = min_t(unsigned long, rd->size, nr_entries);

	rd_new = rd_alloc(new_size);
	if (IS_ERR_OR_NULL(rd_new)) {
		pr_err("%s: error: rd_alloc\n", __func__);
		goto error_out;
	}
	/* NOTE(review): rd_alloc() already set size; redundant but harmless */
	rd_new->size = new_size;
	rd_new->curr_nr = nr_entries;

	memcpy(rd_new->entries, rd->entries,
		nr_entries * sizeof(*rd->entries));

	/* drop this mmap's regions from the copy and free its list entries */
	nr_removed = clean_mmap(rd_new, mmap, 1);
	rd_new->curr_nr -= nr_removed;

	rcu_assign_pointer(ctx.rd, rd_new);
	call_rcu(&rd->rcu, rd_free_rcu);

error_out:
	spin_unlock(&ctx.lock);
}
598
/*
 * Binary-search the mmap'ed copy of the region's .ARM.exidx table for
 * the entry covering addr: the entry with the greatest function start
 * address that is <= addr.  Returns NULL when the table is empty or
 * addr falls outside the range spanned by the table.
 */
static const struct unwind_idx *
unwind_find_idx(struct ex_region_info *ri, u32 addr)
{
	unsigned long length;
	u32 value;
	struct unwind_idx *start;
	struct unwind_idx *stop;
	struct unwind_idx *mid = NULL;
	length = ri->tabs.exidx.length / sizeof(*start);

	if (unlikely(!length))
		return NULL;

	/* the table lives inside the profiler's mmap buffer */
	start = (struct unwind_idx *)((char *)ri->mmap->data +
		ri->tabs.exidx.mmap_offset);
	stop = start + length - 1;

	/* addr below the first entry's function start: not covered */
	value = (u32)mmap_prel31_to_addr(&start->addr_offset, ri, 1, 0, 0);
	if (addr < value)
		return NULL;

	/*
	 * NOTE(review): an addr at or beyond the last entry's function
	 * start is rejected (">="), so the last function in the table is
	 * never matched — confirm this is intended.
	 */
	value = (u32)mmap_prel31_to_addr(&stop->addr_offset, ri, 1, 0, 0);
	if (addr >= value)
		return NULL;

	/* bisection maintaining start's address <= addr < stop's address */
	while (start < stop - 1) {
		mid = start + ((stop - start) >> 1);

		value = (u32)mmap_prel31_to_addr(&mid->addr_offset,
						 ri, 1, 0, 0);

		if (addr < value)
			stop = mid;
		else
			start = mid;
	}

	return start;
}
638
639 static unsigned long
640 unwind_get_byte(struct quadd_mmap_area *mmap,
641                 struct unwind_ctrl_block *ctrl, long *err)
642 {
643         unsigned long ret;
644         u32 insn_word;
645
646         *err = 0;
647
648         if (ctrl->entries <= 0) {
649                 pr_err_once("%s: error: corrupt unwind table\n", __func__);
650                 *err = -QUADD_URC_TBL_IS_CORRUPT;
651                 return 0;
652         }
653
654         *err = read_mmap_data(mmap, ctrl->insn, &insn_word);
655         if (*err < 0)
656                 return 0;
657
658         ret = (insn_word >> (ctrl->byte * 8)) & 0xff;
659
660         if (ctrl->byte == 0) {
661                 ctrl->insn++;
662                 ctrl->entries--;
663                 ctrl->byte = 3;
664         } else
665                 ctrl->byte--;
666
667         return ret;
668 }
669
/*
 * Decode a ULEB128 value from the unwind instruction stream into *ret.
 * Returns the number of bytes consumed, or a negative error code.
 */
static long
read_uleb128(struct quadd_mmap_area *mmap,
	     struct unwind_ctrl_block *ctrl,
	     unsigned long *ret)
{
	unsigned long value = 0;
	unsigned char b;
	int nbytes = 0;
	int shift = 0;
	long err = 0;

	do {
		b = unwind_get_byte(mmap, ctrl, &err);
		if (err < 0)
			return err;

		nbytes++;

		/* low 7 bits are payload; bit 7 flags a continuation */
		value |= (unsigned long)(b & 0x7f) << shift;
		shift += 7;
	} while (b & 0x80);

	*ret = value;

	return nbytes;
}
702
703 /*
704  * Execute the current unwind instruction.
705  */
706 static long
707 unwind_exec_insn(struct quadd_mmap_area *mmap,
708                  struct unwind_ctrl_block *ctrl)
709 {
710         long err;
711         unsigned int i;
712         unsigned long insn = unwind_get_byte(mmap, ctrl, &err);
713
714         if (err < 0)
715                 return err;
716
717         pr_debug("%s: insn = %08lx\n", __func__, insn);
718
719         if ((insn & 0xc0) == 0x00) {
720                 ctrl->vrs[SP] += ((insn & 0x3f) << 2) + 4;
721
722                 pr_debug("CMD_DATA_POP: vsp = vsp + %lu (new: %#x)\n",
723                         ((insn & 0x3f) << 2) + 4, ctrl->vrs[SP]);
724         } else if ((insn & 0xc0) == 0x40) {
725                 ctrl->vrs[SP] -= ((insn & 0x3f) << 2) + 4;
726
727                 pr_debug("CMD_DATA_PUSH: vsp = vsp – %lu (new: %#x)\n",
728                         ((insn & 0x3f) << 2) + 4, ctrl->vrs[SP]);
729         } else if ((insn & 0xf0) == 0x80) {
730                 unsigned long mask;
731                 u32 __user *vsp = (u32 __user *)(unsigned long)ctrl->vrs[SP];
732                 int load_sp, reg = 4;
733
734                 insn = (insn << 8) | unwind_get_byte(mmap, ctrl, &err);
735                 if (err < 0)
736                         return err;
737
738                 mask = insn & 0x0fff;
739                 if (mask == 0) {
740                         pr_debug("CMD_REFUSED: unwind: 'Refuse to unwind' instruction %04lx\n",
741                                    insn);
742                         return -QUADD_URC_REFUSE_TO_UNWIND;
743                 }
744
745                 /* pop R4-R15 according to mask */
746                 load_sp = mask & (1 << (13 - 4));
747                 while (mask) {
748                         if (mask & 1) {
749                                 err = read_user_data(vsp++, ctrl->vrs[reg]);
750                                 if (err < 0)
751                                         return err;
752
753                                 pr_debug("CMD_REG_POP: pop {r%d}\n", reg);
754                         }
755                         mask >>= 1;
756                         reg++;
757                 }
758                 if (!load_sp)
759                         ctrl->vrs[SP] = (unsigned long)vsp;
760
761                 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
762         } else if ((insn & 0xf0) == 0x90 &&
763                    (insn & 0x0d) != 0x0d) {
764                 ctrl->vrs[SP] = ctrl->vrs[insn & 0x0f];
765                 pr_debug("CMD_REG_TO_SP: vsp = {r%lu}\n", insn & 0x0f);
766         } else if ((insn & 0xf0) == 0xa0) {
767                 u32 __user *vsp = (u32 __user *)(unsigned long)ctrl->vrs[SP];
768                 unsigned int reg;
769
770                 /* pop R4-R[4+bbb] */
771                 for (reg = 4; reg <= 4 + (insn & 7); reg++) {
772                         err = read_user_data(vsp++, ctrl->vrs[reg]);
773                         if (err < 0)
774                                 return err;
775
776                         pr_debug("CMD_REG_POP: pop {r%u}\n", reg);
777                 }
778
779                 if (insn & 0x08) {
780                         err = read_user_data(vsp++, ctrl->vrs[14]);
781                         if (err < 0)
782                                 return err;
783
784                         pr_debug("CMD_REG_POP: pop {r14}\n");
785                 }
786
787                 ctrl->vrs[SP] = (u32)(unsigned long)vsp;
788                 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
789         } else if (insn == 0xb0) {
790                 if (ctrl->vrs[PC] == 0)
791                         ctrl->vrs[PC] = ctrl->vrs[LR];
792                 /* no further processing */
793                 ctrl->entries = 0;
794
795                 pr_debug("CMD_FINISH\n");
796         } else if (insn == 0xb1) {
797                 unsigned long mask = unwind_get_byte(mmap, ctrl, &err);
798                 u32 __user *vsp = (u32 __user *)(unsigned long)ctrl->vrs[SP];
799                 int reg = 0;
800
801                 if (err < 0)
802                         return err;
803
804                 if (mask == 0 || mask & 0xf0) {
805                         pr_debug("unwind: Spare encoding %04lx\n",
806                                (insn << 8) | mask);
807                         return -QUADD_URC_SPARE_ENCODING;
808                 }
809
810                 /* pop R0-R3 according to mask */
811                 while (mask) {
812                         if (mask & 1) {
813                                 err = read_user_data(vsp++, ctrl->vrs[reg]);
814                                 if (err < 0)
815                                         return err;
816
817                                 pr_debug("CMD_REG_POP: pop {r%d}\n", reg);
818                         }
819                         mask >>= 1;
820                         reg++;
821                 }
822
823                 ctrl->vrs[SP] = (u32)(unsigned long)vsp;
824                 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
825         } else if (insn == 0xb2) {
826                 long count;
827                 unsigned long uleb128 = 0;
828
829                 count = read_uleb128(mmap, ctrl, &uleb128);
830                 if (count < 0)
831                         return count;
832
833                 if (count == 0)
834                         return -QUADD_URC_TBL_IS_CORRUPT;
835
836                 ctrl->vrs[SP] += 0x204 + (uleb128 << 2);
837
838                 pr_debug("CMD_DATA_POP: vsp = vsp + %lu (%#lx), new vsp: %#x\n",
839                          0x204 + (uleb128 << 2), 0x204 + (uleb128 << 2),
840                          ctrl->vrs[SP]);
841         } else if (insn == 0xb3 || insn == 0xc8 || insn == 0xc9) {
842                 unsigned long data, reg_from, reg_to;
843                 u32 __user *vsp = (u32 __user *)(unsigned long)ctrl->vrs[SP];
844
845                 data = unwind_get_byte(mmap, ctrl, &err);
846                 if (err < 0)
847                         return err;
848
849                 reg_from = (data & 0xf0) >> 4;
850                 reg_to = reg_from + (data & 0x0f);
851
852                 if (insn == 0xc8) {
853                         reg_from += 16;
854                         reg_to += 16;
855                 }
856
857                 for (i = reg_from; i <= reg_to; i++)
858                         vsp += 2;
859
860                 if (insn == 0xb3)
861                         vsp++;
862
863                 ctrl->vrs[SP] = (unsigned long)vsp;
864                 ctrl->vrs[SP] = (u32)(unsigned long)vsp;
865
866                 pr_debug("CMD_VFP_POP (%#lx %#lx): pop {D%lu-D%lu}\n",
867                          insn, data, reg_from, reg_to);
868                 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
869         } else if ((insn & 0xf8) == 0xb8 || (insn & 0xf8) == 0xd0) {
870                 unsigned long reg_to;
871                 unsigned long data = insn & 0x07;
872                 u32 __user *vsp = (u32 __user *)(unsigned long)ctrl->vrs[SP];
873
874                 reg_to = 8 + data;
875
876                 for (i = 8; i <= reg_to; i++)
877                         vsp += 2;
878
879                 if ((insn & 0xf8) == 0xb8)
880                         vsp++;
881
882                 ctrl->vrs[SP] = (u32)(unsigned long)vsp;
883
884                 pr_debug("CMD_VFP_POP (%#lx): pop {D8-D%lu}\n",
885                          insn, reg_to);
886                 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
887         } else {
888                 pr_debug("error: unhandled instruction %02lx\n", insn);
889                 return -QUADD_URC_UNHANDLED_INSTRUCTION;
890         }
891
892         pr_debug("%s: fp_arm: %#x, fp_thumb: %#x, sp: %#x, lr = %#x, pc: %#x\n",
893                  __func__,
894                  ctrl->vrs[FP_ARM], ctrl->vrs[FP_THUMB], ctrl->vrs[SP],
895                  ctrl->vrs[LR], ctrl->vrs[PC]);
896
897         return 0;
898 }
899
/*
 * Unwind a single frame starting with *sp for the symbol at *pc. It
 * updates the *pc and *sp with the new values.
 *
 * Returns 0 on success (frame updated) or a negative -QUADD_URC_* code.
 */
static long
unwind_frame(struct ex_region_info *ri,
	     struct stackframe *frame,
	     struct vm_area_struct *vma_sp,
	     unsigned int *unw_type)
{
	unsigned long high, low;
	const struct unwind_idx *idx;
	struct unwind_ctrl_block ctrl;
	long err = 0;
	u32 val;

	if (!validate_stack_addr(frame->sp, vma_sp, sizeof(u32)))
		return -QUADD_URC_SP_INCORRECT;

	/* only go to a higher address on the stack */
	low = frame->sp;
	high = vma_sp->vm_end;

	pr_debug("pc: %#lx, lr: %#lx, sp:%#lx, low/high: %#lx/%#lx\n",
		frame->pc, frame->lr, frame->sp, low, high);

	/* locate the exidx entry covering frame->pc */
	idx = unwind_find_idx(ri, frame->pc);
	if (IS_ERR_OR_NULL(idx))
		return -QUADD_URC_IDX_NOT_FOUND;

	pr_debug("index was found by pc (%#lx): %p\n", frame->pc, idx);

	/* seed the virtual register set from the current frame */
	ctrl.vrs[FP_THUMB] = frame->fp_thumb;
	ctrl.vrs[FP_ARM] = frame->fp_arm;

	ctrl.vrs[SP] = frame->sp;
	ctrl.vrs[LR] = frame->lr;
	/* PC == 0 means "not yet restored"; checked again below */
	ctrl.vrs[PC] = 0;

	err = read_mmap_data(ri->mmap, &idx->insn, &val);
	if (err < 0)
		return err;

	if (val == 1) {
		/* can't unwind */
		return -QUADD_URC_CANTUNWIND;
	} else if ((val & 0x80000000) == 0) {
		/* prel31 to the unwind table */
		ctrl.insn = (u32 *)(unsigned long)
				mmap_prel31_to_addr(&idx->insn, ri, 1, 0, 1);
		if (!ctrl.insn)
			return -QUADD_URC_EACCESS;
	} else if ((val & 0xff000000) == 0x80000000) {
		/* only personality routine 0 supported in the index */
		ctrl.insn = &idx->insn;
	} else {
		pr_debug("unsupported personality routine %#x in the index at %p\n",
			 val, idx);
		return -QUADD_URC_UNSUPPORTED_PR;
	}

	err = read_mmap_data(ri->mmap, ctrl.insn, &val);
	if (err < 0)
		return err;

	/* check the personality routine */
	if ((val & 0xff000000) == 0x80000000) {
		/* short form: opcodes live in bytes 2..0 of this one word */
		ctrl.byte = 2;
		ctrl.entries = 1;
	} else if ((val & 0xff000000) == 0x81000000) {
		/* long form: byte 2 holds the count of additional words */
		ctrl.byte = 1;
		ctrl.entries = 1 + ((val & 0x00ff0000) >> 16);
	} else {
		pr_debug("unsupported personality routine %#x at %p\n",
			 val, ctrl.insn);
		return -QUADD_URC_UNSUPPORTED_PR;
	}

	/* execute unwind opcodes until the stream is exhausted */
	while (ctrl.entries > 0) {
		err = unwind_exec_insn(ri->mmap, &ctrl);
		if (err < 0)
			return err;

		/* unwound SP must stay aligned and within the stack VMA */
		if (ctrl.vrs[SP] & 0x03 ||
		    ctrl.vrs[SP] < low || ctrl.vrs[SP] >= high)
			return -QUADD_URC_SP_INCORRECT;
	}

	if (ctrl.vrs[PC] == 0) {
		/* PC was not restored from the stack: return address is LR */
		ctrl.vrs[PC] = ctrl.vrs[LR];
		*unw_type = QUADD_UNW_TYPE_LR_UT;
	} else {
		*unw_type = QUADD_UNW_TYPE_UT;
	}

	if (!validate_pc_addr(ctrl.vrs[PC], sizeof(u32)))
		return -QUADD_URC_PC_INCORRECT;

	/* commit the unwound register state back to the caller's frame */
	frame->fp_thumb = ctrl.vrs[FP_THUMB];
	frame->fp_arm = ctrl.vrs[FP_ARM];

	frame->sp = ctrl.vrs[SP];
	frame->lr = ctrl.vrs[LR];
	frame->pc = ctrl.vrs[PC];

	return 0;
}
1007
1008 static void
1009 unwind_backtrace(struct quadd_callchain *cc,
1010                  struct ex_region_info *ri,
1011                  struct stackframe *frame,
1012                  struct vm_area_struct *vma_sp,
1013                  struct task_struct *task)
1014 {
1015         unsigned int unw_type;
1016         struct ex_region_info ri_new;
1017
1018         cc->unw_rc = QUADD_URC_FAILURE;
1019
1020         pr_debug("fp_arm: %#lx, fp_thumb: %#lx, sp: %#lx, lr: %#lx, pc: %#lx\n",
1021                  frame->fp_arm, frame->fp_thumb,
1022                  frame->sp, frame->lr, frame->pc);
1023         pr_debug("vma_sp: %#lx - %#lx, length: %#lx\n",
1024                  vma_sp->vm_start, vma_sp->vm_end,
1025                  vma_sp->vm_end - vma_sp->vm_start);
1026
1027         while (1) {
1028                 long err;
1029                 int nr_added;
1030                 unsigned long where = frame->pc;
1031                 struct vm_area_struct *vma_pc;
1032                 struct mm_struct *mm = task->mm;
1033
1034                 if (!mm)
1035                         break;
1036
1037                 if (!validate_stack_addr(frame->sp, vma_sp, sizeof(u32))) {
1038                         cc->unw_rc = -QUADD_URC_SP_INCORRECT;
1039                         break;
1040                 }
1041
1042                 vma_pc = find_vma(mm, frame->pc);
1043                 if (!vma_pc)
1044                         break;
1045
1046                 if (!is_vma_addr(ri->tabs.exidx.addr, vma_pc, sizeof(u32))) {
1047                         err = quadd_search_ex_region(vma_pc->vm_start, &ri_new);
1048                         if (err) {
1049                                 cc->unw_rc = QUADD_URC_TBL_NOT_EXIST;
1050                                 break;
1051                         }
1052
1053                         ri = &ri_new;
1054                 }
1055
1056                 err = unwind_frame(ri, frame, vma_sp, &unw_type);
1057                 if (err < 0) {
1058                         pr_debug("end unwind, urc: %ld\n", err);
1059                         cc->unw_rc = -err;
1060                         break;
1061                 }
1062
1063                 pr_debug("function at [<%08lx>] from [<%08lx>]\n",
1064                          where, frame->pc);
1065
1066                 cc->curr_sp = frame->sp;
1067                 cc->curr_fp = frame->fp_arm;
1068                 cc->curr_pc = frame->pc;
1069
1070                 nr_added = quadd_callchain_store(cc, frame->pc, unw_type);
1071                 if (nr_added == 0)
1072                         break;
1073         }
1074 }
1075
1076 unsigned int
1077 quadd_aarch32_get_user_callchain_ut(struct pt_regs *regs,
1078                                     struct quadd_callchain *cc,
1079                                     struct task_struct *task)
1080 {
1081         long err;
1082         int nr_prev = cc->nr;
1083         unsigned long ip, sp, lr;
1084         struct vm_area_struct *vma, *vma_sp;
1085         struct mm_struct *mm = task->mm;
1086         struct ex_region_info ri;
1087         struct stackframe frame;
1088
1089         if (!regs || !mm)
1090                 return 0;
1091
1092 #ifdef CONFIG_ARM64
1093         if (!compat_user_mode(regs)) {
1094                 pr_warn_once("user_mode 64: unsupported\n");
1095                 return 0;
1096         }
1097 #endif
1098
1099         if (cc->unw_rc == QUADD_URC_LEVEL_TOO_DEEP)
1100                 return nr_prev;
1101
1102         cc->unw_rc = QUADD_URC_FAILURE;
1103
1104         if (nr_prev > 0) {
1105                 ip = cc->curr_pc;
1106                 sp = cc->curr_sp;
1107                 lr = 0;
1108
1109                 frame.fp_thumb = 0;
1110                 frame.fp_arm = cc->curr_fp;
1111         } else {
1112                 ip = instruction_pointer(regs);
1113                 sp = quadd_user_stack_pointer(regs);
1114                 lr = quadd_user_link_register(regs);
1115
1116 #ifdef CONFIG_ARM64
1117                 frame.fp_thumb = regs->compat_usr(7);
1118                 frame.fp_arm = regs->compat_usr(11);
1119 #else
1120                 frame.fp_thumb = regs->ARM_r7;
1121                 frame.fp_arm = regs->ARM_fp;
1122 #endif
1123         }
1124
1125         frame.pc = ip;
1126         frame.sp = sp;
1127         frame.lr = lr;
1128
1129         vma = find_vma(mm, ip);
1130         if (!vma)
1131                 return 0;
1132
1133         vma_sp = find_vma(mm, sp);
1134         if (!vma_sp)
1135                 return 0;
1136
1137         err = quadd_search_ex_region(vma->vm_start, &ri);
1138         if (err) {
1139                 cc->unw_rc = QUADD_URC_TBL_NOT_EXIST;
1140                 return 0;
1141         }
1142
1143         unwind_backtrace(cc, &ri, &frame, vma_sp, task);
1144
1145         return cc->nr;
1146 }
1147
1148 int
1149 quadd_aarch32_is_ex_entry_exist(struct pt_regs *regs,
1150                                 unsigned long addr,
1151                                 struct task_struct *task)
1152 {
1153         long err;
1154         u32 value;
1155         const struct unwind_idx *idx;
1156         struct ex_region_info ri;
1157         struct vm_area_struct *vma;
1158         struct mm_struct *mm = task->mm;
1159
1160         if (!regs || !mm)
1161                 return 0;
1162
1163         vma = find_vma(mm, addr);
1164         if (!vma)
1165                 return 0;
1166
1167         err = quadd_search_ex_region(vma->vm_start, &ri);
1168         if (err)
1169                 return 0;
1170
1171         idx = unwind_find_idx(&ri, addr);
1172         if (IS_ERR_OR_NULL(idx))
1173                 return 0;
1174
1175         err = read_mmap_data(ri.mmap, &idx->insn, &value);
1176         if (err < 0)
1177                 return 0;
1178
1179         if (value == 1)
1180                 return 0;
1181
1182         return 1;
1183 }
1184
1185 int quadd_unwind_start(struct task_struct *task)
1186 {
1187         int err;
1188         struct regions_data *rd, *rd_old;
1189
1190         rd = rd_alloc(QUADD_EXTABS_SIZE);
1191         if (IS_ERR_OR_NULL(rd)) {
1192                 pr_err("%s: error: rd_alloc\n", __func__);
1193                 return -ENOMEM;
1194         }
1195
1196         err = quadd_dwarf_unwind_start();
1197         if (err) {
1198                 rd_free(rd);
1199                 return err;
1200         }
1201
1202         spin_lock(&ctx.lock);
1203
1204         rd_old = rcu_dereference(ctx.rd);
1205         if (rd_old)
1206                 pr_warn("%s: warning: rd_old\n", __func__);
1207
1208         rcu_assign_pointer(ctx.rd, rd);
1209
1210         if (rd_old)
1211                 call_rcu(&rd_old->rcu, rd_free_rcu);
1212
1213         ctx.pid = task->tgid;
1214
1215         ctx.ex_tables_size = 0;
1216
1217         spin_unlock(&ctx.lock);
1218
1219         return 0;
1220 }
1221
1222 void quadd_unwind_stop(void)
1223 {
1224         int i;
1225         unsigned long nr_entries, size;
1226         struct regions_data *rd;
1227         struct ex_region_info *ri;
1228
1229         quadd_dwarf_unwind_stop();
1230
1231         spin_lock(&ctx.lock);
1232
1233         ctx.pid = 0;
1234
1235         rd = rcu_dereference(ctx.rd);
1236         if (!rd)
1237                 goto out;
1238
1239         nr_entries = rd->curr_nr;
1240         size = rd->size;
1241
1242         for (i = 0; i < nr_entries; i++) {
1243                 ri = &rd->entries[i];
1244                 clean_mmap(rd, ri->mmap, 0);
1245         }
1246
1247         rcu_assign_pointer(ctx.rd, NULL);
1248         call_rcu(&rd->rcu, rd_free_rcu);
1249
1250 out:
1251         spin_unlock(&ctx.lock);
1252         pr_info("exception tables size: %lu bytes\n", ctx.ex_tables_size);
1253 }
1254
1255 int quadd_unwind_init(void)
1256 {
1257         int err;
1258
1259         err = quadd_dwarf_unwind_init();
1260         if (err)
1261                 return err;
1262
1263         spin_lock_init(&ctx.lock);
1264         rcu_assign_pointer(ctx.rd, NULL);
1265         ctx.pid = 0;
1266
1267         return 0;
1268 }
1269
/*
 * Module teardown: stop the session (which queues the regions table for
 * RCU-deferred freeing), then wait for all outstanding call_rcu()
 * callbacks so no freeing work is still in flight when we return.
 */
void quadd_unwind_deinit(void)
{
	quadd_unwind_stop();
	rcu_barrier();
}