/*
 * misc: tegra-profiler: support eh_frame sections
 * (from linux-3.10.git: drivers/misc/tegra-profiler/eh_unwind.c)
 */
1 /*
2  * drivers/misc/tegra-profiler/eh_unwind.c
3  *
4  * Copyright (c) 2015, NVIDIA CORPORATION.  All rights reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  */
16
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
19 #include <linux/mm.h>
20 #include <linux/sched.h>
21 #include <linux/slab.h>
22 #include <linux/uaccess.h>
23 #include <linux/err.h>
24 #include <linux/rcupdate.h>
25
26 #include <linux/tegra_profiler.h>
27
28 #include "eh_unwind.h"
29 #include "backtrace.h"
30 #include "comm.h"
31 #include "dwarf_unwind.h"
32
33 #define QUADD_EXTABS_SIZE       0x100
34
35 #define GET_NR_PAGES(a, l) \
36         ((PAGE_ALIGN((a) + (l)) - ((a) & PAGE_MASK)) / PAGE_SIZE)
37
38 enum regs {
39         FP_THUMB = 7,
40         FP_ARM = 11,
41
42         SP = 13,
43         LR = 14,
44         PC = 15
45 };
46
47 struct regions_data {
48         struct ex_region_info *entries;
49
50         unsigned long curr_nr;
51         unsigned long size;
52
53         struct rcu_head rcu;
54 };
55
56 struct quadd_unwind_ctx {
57         struct regions_data *rd;
58
59         pid_t pid;
60         unsigned long ex_tables_size;
61         spinlock_t lock;
62 };
63
64 struct unwind_idx {
65         u32 addr_offset;
66         u32 insn;
67 };
68
69 struct stackframe {
70         unsigned long fp_thumb;
71         unsigned long fp_arm;
72
73         unsigned long sp;
74         unsigned long lr;
75         unsigned long pc;
76 };
77
78 struct unwind_ctrl_block {
79         u32 vrs[16];            /* virtual register set */
80         const u32 *insn;        /* pointer to the current instr word */
81         int entries;            /* number of entries left */
82         int byte;               /* current byte in the instr word */
83 };
84
85 struct pin_pages_work {
86         struct work_struct work;
87         unsigned long vm_start;
88 };
89
90 static struct quadd_unwind_ctx ctx;
91
92 static inline int
93 validate_mmap_addr(struct quadd_mmap_area *mmap,
94                    unsigned long addr, unsigned long nbytes)
95 {
96         struct vm_area_struct *vma = mmap->mmap_vma;
97         unsigned long size = vma->vm_end - vma->vm_start;
98         unsigned long data = (unsigned long)mmap->data;
99
100         if (addr & 0x03) {
101                 pr_err_once("%s: error: unaligned address: %#lx, data: %#lx-%#lx, vma: %#lx-%#lx\n",
102                             __func__, addr, data, data + size,
103                        vma->vm_start, vma->vm_end);
104                 return 0;
105         }
106
107         if (addr < data || addr >= data + (size - nbytes)) {
108                 pr_err_once("%s: error: addr: %#lx, data: %#lx-%#lx, vma: %#lx-%#lx\n",
109                             __func__, addr, data, data + size,
110                        vma->vm_start, vma->vm_end);
111                 return 0;
112         }
113
114         return 1;
115 }
116
/*
 * Read one value from user space with page faults disabled (the
 * profiler may sample from contexts where faulting is not allowed).
 * Evaluates to 0 on success or -QUADD_URC_EACCESS when __get_user()
 * fails.  NOTE: @addr is evaluated more than once -- do not pass an
 * expression with side effects.
 */
#define read_user_data(addr, retval)                            \
({                                                              \
        long ret;                                               \
                                                                \
        pagefault_disable();                                    \
        ret = __get_user(retval, addr);                         \
        pagefault_enable();                                     \
                                                                \
        if (ret) {                                              \
                pr_debug("%s: failed for address: %p\n",        \
                         __func__, addr);                       \
                ret = -QUADD_URC_EACCESS;                       \
        }                                                       \
                                                                \
        ret;                                                    \
})
133
134 static inline long
135 read_mmap_data(struct quadd_mmap_area *mmap, const u32 *addr, u32 *retval)
136 {
137         if (!validate_mmap_addr(mmap, (unsigned long)addr, sizeof(u32))) {
138                 *retval = 0;
139                 return -QUADD_URC_EACCESS;
140         }
141
142         *retval = *addr;
143         return 0;
144 }
145
146 static inline unsigned long
147 ex_addr_to_mmap_addr(unsigned long addr,
148                      struct ex_region_info *ri,
149                      int sec_type)
150 {
151         unsigned long offset;
152         struct extab_info *ti;
153
154         ti = &ri->ex_sec[sec_type];
155         offset = addr - ti->addr;
156
157         return ti->mmap_offset + offset + (unsigned long)ri->mmap->data;
158 }
159
160 static inline unsigned long
161 mmap_addr_to_ex_addr(unsigned long addr,
162                      struct ex_region_info *ri,
163                      int sec_type)
164 {
165         unsigned long offset;
166         struct extab_info *ti;
167
168         ti = &ri->ex_sec[sec_type];
169         offset = addr - ti->mmap_offset - (unsigned long)ri->mmap->data;
170
171         return ti->addr + offset;
172 }
173
174 static inline u32
175 prel31_to_addr(const u32 *ptr)
176 {
177         u32 value;
178         s32 offset;
179
180         if (read_user_data(ptr, value))
181                 return 0;
182
183         /* sign-extend to 32 bits */
184         offset = (((s32)value) << 1) >> 1;
185         return (u32)(unsigned long)ptr + offset;
186 }
187
188 static unsigned long
189 mmap_prel31_to_addr(const u32 *ptr, struct ex_region_info *ri,
190                     int src_type, int dst_type, int to_mmap)
191 {
192         s32 offset;
193         u32 value, addr;
194         unsigned long addr_res;
195
196         value = *ptr;
197         offset = (((s32)value) << 1) >> 1;
198
199         addr = mmap_addr_to_ex_addr((unsigned long)ptr, ri, src_type);
200         addr += offset;
201         addr_res = addr;
202
203         if (to_mmap)
204                 addr_res = ex_addr_to_mmap_addr(addr_res, ri, dst_type);
205
206         return addr_res;
207 }
208
/*
 * Insert @new_entry into @rd->entries, keeping the array sorted by
 * vm_start.  An entry with the same vm_start is treated as a duplicate
 * and ignored.  Returns the number of entries added (0 or 1).
 * The caller must hold ctx.lock and guarantee that the array has room
 * for one more entry (no capacity check is done here).
 */
static int
add_ex_region(struct regions_data *rd,
              struct ex_region_info *new_entry)
{
        unsigned int i_min, i_max, mid;
        struct ex_region_info *array = rd->entries;
        unsigned long size = rd->curr_nr;

        if (!array)
                return 0;

        /* trivial cases: empty array, or single duplicate entry */
        if (size == 0) {
                memcpy(&array[0], new_entry, sizeof(*new_entry));
                return 1;
        } else if (size == 1 && array[0].vm_start == new_entry->vm_start) {
                return 0;
        }

        i_min = 0;
        i_max = size;

        /* fast paths: new entry sorts before/after all current entries */
        if (array[0].vm_start > new_entry->vm_start) {
                memmove(array + 1, array,
                        size * sizeof(*array));
                memcpy(&array[0], new_entry, sizeof(*new_entry));
                return 1;
        } else if (array[size - 1].vm_start < new_entry->vm_start) {
                memcpy(&array[size], new_entry, sizeof(*new_entry));
                return 1;
        }

        /* binary search for the first entry with vm_start >= new entry */
        while (i_min < i_max) {
                mid = i_min + (i_max - i_min) / 2;

                if (new_entry->vm_start <= array[mid].vm_start)
                        i_max = mid;
                else
                        i_min = mid + 1;
        }

        /*
         * The fast paths above guarantee i_max < size here, so the
         * access below stays in bounds.
         */
        if (array[i_max].vm_start == new_entry->vm_start) {
                return 0;
        } else {
                /* shift the tail up by one slot and insert at i_max */
                memmove(array + i_max + 1,
                        array + i_max,
                        (size - i_max) * sizeof(*array));
                memcpy(&array[i_max], new_entry, sizeof(*new_entry));
                return 1;
        }
}
259
260 static int
261 remove_ex_region(struct regions_data *rd,
262                  struct ex_region_info *entry)
263 {
264         unsigned int i_min, i_max, mid;
265         struct ex_region_info *array = rd->entries;
266         unsigned long size = rd->curr_nr;
267
268         if (!array)
269                 return 0;
270
271         if (size == 0)
272                 return 0;
273
274         if (size == 1) {
275                 if (array[0].vm_start == entry->vm_start)
276                         return 1;
277                 else
278                         return 0;
279         }
280
281         if (array[0].vm_start > entry->vm_start)
282                 return 0;
283         else if (array[size - 1].vm_start < entry->vm_start)
284                 return 0;
285
286         i_min = 0;
287         i_max = size;
288
289         while (i_min < i_max) {
290                 mid = i_min + (i_max - i_min) / 2;
291
292                 if (entry->vm_start <= array[mid].vm_start)
293                         i_max = mid;
294                 else
295                         i_min = mid + 1;
296         }
297
298         if (array[i_max].vm_start == entry->vm_start) {
299                 memmove(array + i_max,
300                         array + i_max + 1,
301                         (size - i_max) * sizeof(*array));
302                 return 1;
303         } else {
304                 return 0;
305         }
306 }
307
308 static struct ex_region_info *
309 __search_ex_region(struct ex_region_info *array,
310                    unsigned long size,
311                    unsigned long key)
312 {
313         unsigned int i_min, i_max, mid;
314
315         if (size == 0)
316                 return NULL;
317
318         i_min = 0;
319         i_max = size;
320
321         while (i_min < i_max) {
322                 mid = i_min + (i_max - i_min) / 2;
323
324                 if (key <= array[mid].vm_start)
325                         i_max = mid;
326                 else
327                         i_min = mid + 1;
328         }
329
330         if (array[i_max].vm_start == key)
331                 return &array[i_max];
332
333         return NULL;
334 }
335
336 static long
337 search_ex_region(unsigned long key, struct ex_region_info *ri)
338 {
339         struct regions_data *rd;
340         struct ex_region_info *ri_p = NULL;
341
342         rcu_read_lock();
343
344         rd = rcu_dereference(ctx.rd);
345         if (!rd)
346                 goto out;
347
348         ri_p = __search_ex_region(rd->entries, rd->curr_nr, key);
349         if (ri_p)
350                 memcpy(ri, ri_p, sizeof(*ri));
351
352 out:
353         rcu_read_unlock();
354         return ri_p ? 0 : -ENOENT;
355 }
356
357 static long
358 get_extabs_ehabi(unsigned long key, struct ex_region_info *ri)
359 {
360         long err;
361         struct extab_info *ti_extab, *ti_exidx;
362
363         err = search_ex_region(key, ri);
364         if (err < 0)
365                 return err;
366
367         ti_extab = &ri->ex_sec[QUADD_SEC_TYPE_EXTAB];
368         ti_exidx = &ri->ex_sec[QUADD_SEC_TYPE_EXIDX];
369
370         return (ti_extab->length && ti_exidx->length) ? 0 : -ENOENT;
371 }
372
373 long
374 quadd_get_extabs_ehframe(unsigned long key, struct ex_region_info *ri)
375 {
376         long err;
377         struct extab_info *ti_ehfr, *ti_ehfr_hdr;
378
379         err = search_ex_region(key, ri);
380         if (err < 0)
381                 return err;
382
383         ti_ehfr = &ri->ex_sec[QUADD_SEC_TYPE_EH_FRAME];
384         ti_ehfr_hdr = &ri->ex_sec[QUADD_SEC_TYPE_EH_FRAME_HDR];
385
386         return (ti_ehfr->length && ti_ehfr_hdr->length) ? 0 : -ENOENT;
387 }
388
389 static struct regions_data *rd_alloc(unsigned long size)
390 {
391         struct regions_data *rd;
392
393         rd = kzalloc(sizeof(*rd), GFP_ATOMIC);
394         if (!rd)
395                 return NULL;
396
397         rd->entries = kzalloc(size * sizeof(*rd->entries), GFP_ATOMIC);
398         if (!rd->entries) {
399                 kfree(rd);
400                 return NULL;
401         }
402
403         rd->size = size;
404         rd->curr_nr = 0;
405
406         return rd;
407 }
408
409 static void rd_free(struct regions_data *rd)
410 {
411         if (rd)
412                 kfree(rd->entries);
413
414         kfree(rd);
415 }
416
417 static void rd_free_rcu(struct rcu_head *rh)
418 {
419         struct regions_data *rd = container_of(rh, struct regions_data, rcu);
420         rd_free(rd);
421 }
422
/*
 * Register the exception-table sections described by @extabs for the
 * mmap'ed area @mmap.  The sorted region array is updated copy-on-write:
 * a new array is built under ctx.lock and published with
 * rcu_assign_pointer(); the old one is reclaimed via call_rcu() so
 * lockless readers (search_ex_region()) are never disturbed.
 * Returns 0 on success (including when the region was already
 * registered) or a negative errno.
 */
int quadd_unwind_set_extab(struct quadd_sections *extabs,
                           struct quadd_mmap_area *mmap)
{
        int i, err = 0;
        unsigned long nr_entries, nr_added, new_size;
        struct ex_region_info ri_entry;
        struct extab_info *ti;
        struct regions_data *rd, *rd_new;
        struct ex_region_info *ex_entry;

        if (mmap->type != QUADD_MMAP_TYPE_EXTABS)
                return -EIO;

        spin_lock(&ctx.lock);

        /*
         * NOTE(review): writer-side access under ctx.lock;
         * rcu_dereference_protected() would express that more precisely
         * than plain rcu_dereference() -- confirm against kernel version.
         */
        rd = rcu_dereference(ctx.rd);
        if (!rd) {
                pr_warn("%s: warning: rd\n", __func__);
                new_size = QUADD_EXTABS_SIZE;
                nr_entries = 0;
        } else {
                new_size = rd->size;
                nr_entries = rd->curr_nr;
        }

        /* grow capacity by 50% when the current array is full */
        if (nr_entries >= new_size)
                new_size += new_size >> 1;

        rd_new = rd_alloc(new_size);
        if (IS_ERR_OR_NULL(rd_new)) {
                pr_err("%s: error: rd_alloc\n", __func__);
                err = -ENOMEM;
                goto error_out;
        }

        /* start from a copy of the current entries */
        if (rd && nr_entries)
                memcpy(rd_new->entries, rd->entries,
                       nr_entries * sizeof(*rd->entries));

        rd_new->curr_nr = nr_entries;

        ri_entry.vm_start = extabs->vm_start;
        ri_entry.vm_end = extabs->vm_end;

        ri_entry.mmap = mmap;

        /* tail-function info is filled in later via set_tail_info() */
        ri_entry.tf_start = 0;
        ri_entry.tf_end = 0;

        /* copy per-section info; absent sections are zeroed */
        for (i = 0; i < QUADD_SEC_TYPE_MAX; i++) {
                struct quadd_sec_info *si = &extabs->sec[i];

                ti = &ri_entry.ex_sec[i];

                if (!si->addr) {
                        ti->addr = 0;
                        ti->length = 0;
                        ti->mmap_offset = 0;

                        continue;
                }

                ti->addr = si->addr;
                ti->length = si->length;
                ti->mmap_offset = si->mmap_offset;
        }

        /*
         * nr_added == 0 means a region with this vm_start already
         * exists; drop the new array and return success (err is 0).
         */
        nr_added = add_ex_region(rd_new, &ri_entry);
        if (nr_added == 0)
                goto error_free;

        rd_new->curr_nr += nr_added;

        /* track the entry on the mmap so delete_mmap() can undo this */
        ex_entry = kzalloc(sizeof(*ex_entry), GFP_ATOMIC);
        if (!ex_entry) {
                err = -ENOMEM;
                goto error_free;
        }
        memcpy(ex_entry, &ri_entry, sizeof(*ex_entry));

        INIT_LIST_HEAD(&ex_entry->list);
        list_add_tail(&ex_entry->list, &mmap->ex_entries);

        /* publish the new array; free the old one after a grace period */
        rcu_assign_pointer(ctx.rd, rd_new);

        if (rd)
                call_rcu(&rd->rcu, rd_free_rcu);

        spin_unlock(&ctx.lock);

        return 0;

error_free:
        rd_free(rd_new);
error_out:
        spin_unlock(&ctx.lock);
        return err;
}
521
/*
 * Record the tail-function address range [tf_start, tf_end) for the
 * region starting at @vm_start.  Like set_extab(), the update is
 * copy-on-write: a new array is built under ctx.lock, the matching
 * entry is patched, and the array is published via RCU.  Silently does
 * nothing when the region list is empty, allocation fails, or the
 * region is not found.
 */
void
quadd_unwind_set_tail_info(unsigned long vm_start,
                           unsigned long tf_start,
                           unsigned long tf_end)
{
        struct ex_region_info *ri;
        unsigned long nr_entries, size;
        struct regions_data *rd, *rd_new;

        spin_lock(&ctx.lock);

        /* writer-side read; protected by ctx.lock */
        rd = rcu_dereference(ctx.rd);

        if (!rd || rd->curr_nr == 0)
                goto error_out;

        size = rd->size;
        nr_entries = rd->curr_nr;

        rd_new = rd_alloc(size);
        if (IS_ERR_OR_NULL(rd_new)) {
                pr_err_once("%s: error: rd_alloc\n", __func__);
                goto error_out;
        }

        memcpy(rd_new->entries, rd->entries,
               nr_entries * sizeof(*rd->entries));

        rd_new->curr_nr = nr_entries;

        /* patch the copy, never the live (RCU-visible) array */
        ri = __search_ex_region(rd_new->entries, nr_entries, vm_start);
        if (!ri)
                goto error_free;

        ri->tf_start = tf_start;
        ri->tf_end = tf_end;

        rcu_assign_pointer(ctx.rd, rd_new);

        call_rcu(&rd->rcu, rd_free_rcu);
        spin_unlock(&ctx.lock);

        return;

error_free:
        rd_free(rd_new);

error_out:
        spin_unlock(&ctx.lock);
}
572
573 static int
574 clean_mmap(struct regions_data *rd, struct quadd_mmap_area *mmap, int rm_ext)
575 {
576         int nr_removed = 0;
577         struct ex_region_info *entry, *next;
578
579         if (!rd || !mmap)
580                 return 0;
581
582         list_for_each_entry_safe(entry, next, &mmap->ex_entries, list) {
583                 if (rm_ext)
584                         nr_removed += remove_ex_region(rd, entry);
585
586                 list_del(&entry->list);
587                 kfree(entry);
588         }
589
590         return nr_removed;
591 }
592
/*
 * Drop every region registered through @mmap.  Builds a copy of the
 * region array under ctx.lock, strips the mmap's entries from the
 * copy, publishes it via RCU, and defers freeing the old array to a
 * grace period.  Does nothing on allocation failure or when there is
 * nothing to remove.
 */
void quadd_unwind_delete_mmap(struct quadd_mmap_area *mmap)
{
        unsigned long nr_entries, nr_removed, new_size;
        struct regions_data *rd, *rd_new;

        if (!mmap)
                return;

        spin_lock(&ctx.lock);

        /* writer-side read; protected by ctx.lock */
        rd = rcu_dereference(ctx.rd);
        if (!rd || !rd->curr_nr)
                goto error_out;

        nr_entries = rd->curr_nr;
        /* shrink capacity to the current population if it is smaller */
        new_size = min_t(unsigned long, rd->size, nr_entries);

        rd_new = rd_alloc(new_size);
        if (IS_ERR_OR_NULL(rd_new)) {
                pr_err("%s: error: rd_alloc\n", __func__);
                goto error_out;
        }
        /* NOTE(review): redundant -- rd_alloc() already set size */
        rd_new->size = new_size;
        rd_new->curr_nr = nr_entries;

        memcpy(rd_new->entries, rd->entries,
                nr_entries * sizeof(*rd->entries));

        /* remove this mmap's entries from the copy, then publish it */
        nr_removed = clean_mmap(rd_new, mmap, 1);
        rd_new->curr_nr -= nr_removed;

        rcu_assign_pointer(ctx.rd, rd_new);
        call_rcu(&rd->rcu, rd_free_rcu);

error_out:
        spin_unlock(&ctx.lock);
}
630
/*
 * Binary-search the region's .ARM.exidx table (in our mmap'ed copy)
 * for the index entry covering @addr.  Each entry's addr_offset is a
 * prel31 function address, so every probe is decoded via
 * mmap_prel31_to_addr().  Returns the matching entry, or NULL when
 * @addr falls outside the table's covered range or the table is empty.
 */
static const struct unwind_idx *
unwind_find_idx(struct ex_region_info *ri, u32 addr)
{
        u32 value;
        unsigned long length;
        struct extab_info *ti;
        struct unwind_idx *start;
        struct unwind_idx *stop;
        struct unwind_idx *mid = NULL;

        ti = &ri->ex_sec[QUADD_SEC_TYPE_EXIDX];

        /* number of 8-byte index entries in the section */
        length = ti->length / sizeof(*start);

        if (unlikely(!length))
                return NULL;

        start = (struct unwind_idx *)((char *)ri->mmap->data + ti->mmap_offset);
        stop = start + length - 1;

        /* addr before the first function covered by the table */
        value = (u32)mmap_prel31_to_addr(&start->addr_offset, ri,
                                         QUADD_SEC_TYPE_EXIDX,
                                         QUADD_SEC_TYPE_EXTAB, 0);
        if (addr < value)
                return NULL;

        /* addr at/after the last entry's function start */
        value = (u32)mmap_prel31_to_addr(&stop->addr_offset, ri,
                                         QUADD_SEC_TYPE_EXIDX,
                                         QUADD_SEC_TYPE_EXTAB, 0);
        if (addr >= value)
                return NULL;

        /* narrow [start, stop] to the entry whose range contains addr */
        while (start < stop - 1) {
                mid = start + ((stop - start) >> 1);

                value = (u32)mmap_prel31_to_addr(&mid->addr_offset, ri,
                                                 QUADD_SEC_TYPE_EXIDX,
                                                 QUADD_SEC_TYPE_EXTAB, 0);

                if (addr < value)
                        stop = mid;
                else
                        start = mid;
        }

        return start;
}
678
679 static unsigned long
680 unwind_get_byte(struct quadd_mmap_area *mmap,
681                 struct unwind_ctrl_block *ctrl, long *err)
682 {
683         unsigned long ret;
684         u32 insn_word;
685
686         *err = 0;
687
688         if (ctrl->entries <= 0) {
689                 pr_err_once("%s: error: corrupt unwind table\n", __func__);
690                 *err = -QUADD_URC_TBL_IS_CORRUPT;
691                 return 0;
692         }
693
694         *err = read_mmap_data(mmap, ctrl->insn, &insn_word);
695         if (*err < 0)
696                 return 0;
697
698         ret = (insn_word >> (ctrl->byte * 8)) & 0xff;
699
700         if (ctrl->byte == 0) {
701                 ctrl->insn++;
702                 ctrl->entries--;
703                 ctrl->byte = 3;
704         } else
705                 ctrl->byte--;
706
707         return ret;
708 }
709
710 static long
711 read_uleb128(struct quadd_mmap_area *mmap,
712              struct unwind_ctrl_block *ctrl,
713              unsigned long *ret)
714 {
715         long err = 0;
716         unsigned long result;
717         unsigned char byte;
718         int shift, count;
719
720         result = 0;
721         shift = 0;
722         count = 0;
723
724         while (1) {
725                 byte = unwind_get_byte(mmap, ctrl, &err);
726                 if (err < 0)
727                         return err;
728
729                 count++;
730
731                 result |= (byte & 0x7f) << shift;
732                 shift += 7;
733
734                 if (!(byte & 0x80))
735                         break;
736         }
737
738         *ret = result;
739
740         return count;
741 }
742
/*
 * Execute the current unwind instruction.
 *
 * Decodes one ARM EHABI unwind instruction from the stream in @ctrl
 * and applies it to the virtual register set ctrl->vrs.  Returns 0 on
 * success or a negative QUADD_URC_* error code.  Register values are
 * popped from the user-space stack with read_user_data().
 */
static long
unwind_exec_insn(struct quadd_mmap_area *mmap,
                 struct unwind_ctrl_block *ctrl)
{
        long err;
        unsigned int i;
        unsigned long insn = unwind_get_byte(mmap, ctrl, &err);

        if (err < 0)
                return err;

        pr_debug("%s: insn = %08lx\n", __func__, insn);

        if ((insn & 0xc0) == 0x00) {
                /* 00xxxxxx: vsp += (xxxxxx << 2) + 4 */
                ctrl->vrs[SP] += ((insn & 0x3f) << 2) + 4;

                pr_debug("CMD_DATA_POP: vsp = vsp + %lu (new: %#x)\n",
                        ((insn & 0x3f) << 2) + 4, ctrl->vrs[SP]);
        } else if ((insn & 0xc0) == 0x40) {
                /* 01xxxxxx: vsp -= (xxxxxx << 2) + 4 */
                ctrl->vrs[SP] -= ((insn & 0x3f) << 2) + 4;

                pr_debug("CMD_DATA_PUSH: vsp = vsp – %lu (new: %#x)\n",
                        ((insn & 0x3f) << 2) + 4, ctrl->vrs[SP]);
        } else if ((insn & 0xf0) == 0x80) {
                /* 1000iiii iiiiiiii: pop r4-r15 under a 12-bit mask */
                unsigned long mask;
                u32 __user *vsp = (u32 __user *)(unsigned long)ctrl->vrs[SP];
                int load_sp, reg = 4;

                insn = (insn << 8) | unwind_get_byte(mmap, ctrl, &err);
                if (err < 0)
                        return err;

                mask = insn & 0x0fff;
                if (mask == 0) {
                        /* 80 00: refuse to unwind */
                        pr_debug("CMD_REFUSED: unwind: 'Refuse to unwind' instruction %04lx\n",
                                   insn);
                        return -QUADD_URC_REFUSE_TO_UNWIND;
                }

                /* pop R4-R15 according to mask */
                load_sp = mask & (1 << (13 - 4));
                while (mask) {
                        if (mask & 1) {
                                err = read_user_data(vsp++, ctrl->vrs[reg]);
                                if (err < 0)
                                        return err;

                                pr_debug("CMD_REG_POP: pop {r%d}\n", reg);
                        }
                        mask >>= 1;
                        reg++;
                }
                /* only derive SP from vsp when SP itself wasn't popped */
                if (!load_sp)
                        ctrl->vrs[SP] = (unsigned long)vsp;

                pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
        } else if ((insn & 0xf0) == 0x90 &&
                   (insn & 0x0d) != 0x0d) {
                /* 1001nnnn: vsp = r[nnnn] (nnnn != 13, 15 reserved) */
                ctrl->vrs[SP] = ctrl->vrs[insn & 0x0f];
                pr_debug("CMD_REG_TO_SP: vsp = {r%lu}\n", insn & 0x0f);
        } else if ((insn & 0xf0) == 0xa0) {
                u32 __user *vsp = (u32 __user *)(unsigned long)ctrl->vrs[SP];
                unsigned int reg;

                /* pop R4-R[4+bbb] */
                for (reg = 4; reg <= 4 + (insn & 7); reg++) {
                        err = read_user_data(vsp++, ctrl->vrs[reg]);
                        if (err < 0)
                                return err;

                        pr_debug("CMD_REG_POP: pop {r%u}\n", reg);
                }

                /* 1010 1bbb additionally pops r14 (lr) */
                if (insn & 0x08) {
                        err = read_user_data(vsp++, ctrl->vrs[14]);
                        if (err < 0)
                                return err;

                        pr_debug("CMD_REG_POP: pop {r14}\n");
                }

                ctrl->vrs[SP] = (u32)(unsigned long)vsp;
                pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
        } else if (insn == 0xb0) {
                /* finish: PC defaults to LR if no instruction set it */
                if (ctrl->vrs[PC] == 0)
                        ctrl->vrs[PC] = ctrl->vrs[LR];
                /* no further processing */
                ctrl->entries = 0;

                pr_debug("CMD_FINISH\n");
        } else if (insn == 0xb1) {
                /* b1 iiiiiiii: pop r0-r3 under a 4-bit mask */
                unsigned long mask = unwind_get_byte(mmap, ctrl, &err);
                u32 __user *vsp = (u32 __user *)(unsigned long)ctrl->vrs[SP];
                int reg = 0;

                if (err < 0)
                        return err;

                if (mask == 0 || mask & 0xf0) {
                        pr_debug("unwind: Spare encoding %04lx\n",
                               (insn << 8) | mask);
                        return -QUADD_URC_SPARE_ENCODING;
                }

                /* pop R0-R3 according to mask */
                while (mask) {
                        if (mask & 1) {
                                err = read_user_data(vsp++, ctrl->vrs[reg]);
                                if (err < 0)
                                        return err;

                                pr_debug("CMD_REG_POP: pop {r%d}\n", reg);
                        }
                        mask >>= 1;
                        reg++;
                }

                ctrl->vrs[SP] = (u32)(unsigned long)vsp;
                pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
        } else if (insn == 0xb2) {
                /* b2 uleb128: vsp += 0x204 + (uleb128 << 2) */
                long count;
                unsigned long uleb128 = 0;

                count = read_uleb128(mmap, ctrl, &uleb128);
                if (count < 0)
                        return count;

                if (count == 0)
                        return -QUADD_URC_TBL_IS_CORRUPT;

                ctrl->vrs[SP] += 0x204 + (uleb128 << 2);

                pr_debug("CMD_DATA_POP: vsp = vsp + %lu (%#lx), new vsp: %#x\n",
                         0x204 + (uleb128 << 2), 0x204 + (uleb128 << 2),
                         ctrl->vrs[SP]);
        } else if (insn == 0xb3 || insn == 0xc8 || insn == 0xc9) {
                /*
                 * VFP pop with a start/count operand byte; the D
                 * registers themselves are skipped (2 words each),
                 * only vsp is adjusted.
                 */
                unsigned long data, reg_from, reg_to;
                u32 __user *vsp = (u32 __user *)(unsigned long)ctrl->vrs[SP];

                data = unwind_get_byte(mmap, ctrl, &err);
                if (err < 0)
                        return err;

                reg_from = (data & 0xf0) >> 4;
                reg_to = reg_from + (data & 0x0f);

                if (insn == 0xc8) {
                        reg_from += 16;
                        reg_to += 16;
                }

                /* each double-precision register occupies two words */
                for (i = reg_from; i <= reg_to; i++)
                        vsp += 2;

                /* FSTMFDX (0xb3) saves an extra format word */
                if (insn == 0xb3)
                        vsp++;

                /* NOTE(review): the next two lines are duplicates --
                 * the second (u32-truncating) assignment wins. */
                ctrl->vrs[SP] = (unsigned long)vsp;
                ctrl->vrs[SP] = (u32)(unsigned long)vsp;

                pr_debug("CMD_VFP_POP (%#lx %#lx): pop {D%lu-D%lu}\n",
                         insn, data, reg_from, reg_to);
                pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
        } else if ((insn & 0xf8) == 0xb8 || (insn & 0xf8) == 0xd0) {
                /* pop D8-D[8+nnn]; again only vsp is adjusted */
                unsigned long reg_to;
                unsigned long data = insn & 0x07;
                u32 __user *vsp = (u32 __user *)(unsigned long)ctrl->vrs[SP];

                reg_to = 8 + data;

                for (i = 8; i <= reg_to; i++)
                        vsp += 2;

                /* FSTMFDX variant saves an extra format word */
                if ((insn & 0xf8) == 0xb8)
                        vsp++;

                ctrl->vrs[SP] = (u32)(unsigned long)vsp;

                pr_debug("CMD_VFP_POP (%#lx): pop {D8-D%lu}\n",
                         insn, reg_to);
                pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
        } else {
                pr_debug("error: unhandled instruction %02lx\n", insn);
                return -QUADD_URC_UNHANDLED_INSTRUCTION;
        }

        pr_debug("%s: fp_arm: %#x, fp_thumb: %#x, sp: %#x, lr = %#x, pc: %#x\n",
                 __func__,
                 ctrl->vrs[FP_ARM], ctrl->vrs[FP_THUMB], ctrl->vrs[SP],
                 ctrl->vrs[LR], ctrl->vrs[PC]);

        return 0;
}
939
940 /*
941  * Unwind a single frame starting with *sp for the symbol at *pc. It
942  * updates the *pc and *sp with the new values.
943  */
static long
unwind_frame(struct ex_region_info *ri,
	     struct stackframe *frame,
	     struct vm_area_struct *vma_sp,
	     unsigned int *unw_type)
{
	unsigned long high, low;
	const struct unwind_idx *idx;
	struct unwind_ctrl_block ctrl;
	long err = 0;
	u32 val;

	if (!validate_stack_addr(frame->sp, vma_sp, sizeof(u32)))
		return -QUADD_URC_SP_INCORRECT;

	/* only go to a higher address on the stack */
	low = frame->sp;
	high = vma_sp->vm_end;

	pr_debug("pc: %#lx, lr: %#lx, sp:%#lx, low/high: %#lx/%#lx\n",
		frame->pc, frame->lr, frame->sp, low, high);

	/* find the exception index table entry covering frame->pc */
	idx = unwind_find_idx(ri, frame->pc);
	if (IS_ERR_OR_NULL(idx))
		return -QUADD_URC_IDX_NOT_FOUND;

	pr_debug("index was found by pc (%#lx): %p\n", frame->pc, idx);

	/* seed the virtual register set from the current frame */
	ctrl.vrs[FP_THUMB] = frame->fp_thumb;
	ctrl.vrs[FP_ARM] = frame->fp_arm;

	ctrl.vrs[SP] = frame->sp;
	ctrl.vrs[LR] = frame->lr;
	/* PC stays 0 unless an unwind insn restores it explicitly */
	ctrl.vrs[PC] = 0;

	err = read_mmap_data(ri->mmap, &idx->insn, &val);
	if (err < 0)
		return err;

	if (val == 1) {
		/* can't unwind (EXIDX_CANTUNWIND) */
		return -QUADD_URC_CANTUNWIND;
	} else if ((val & 0x80000000) == 0) {
		/* prel31 to the unwind table */
		ctrl.insn = (u32 *)(unsigned long)
				mmap_prel31_to_addr(&idx->insn, ri,
						    QUADD_SEC_TYPE_EXIDX,
						    QUADD_SEC_TYPE_EXTAB, 1);
		if (!ctrl.insn)
			return -QUADD_URC_EACCESS;
	} else if ((val & 0xff000000) == 0x80000000) {
		/* only personality routine 0 supported in the index */
		ctrl.insn = &idx->insn;
	} else {
		pr_debug("unsupported personality routine %#x in the index at %p\n",
			 val, idx);
		return -QUADD_URC_UNSUPPORTED_PR;
	}

	err = read_mmap_data(ri->mmap, ctrl.insn, &val);
	if (err < 0)
		return err;

	/* check the personality routine */
	if ((val & 0xff000000) == 0x80000000) {
		/* __aeabi_unwind_cpp_pr0: short format, up to 3 insn bytes */
		ctrl.byte = 2;
		ctrl.entries = 1;
	} else if ((val & 0xff000000) == 0x81000000) {
		/* __aeabi_unwind_cpp_pr1: long format, extra words follow */
		ctrl.byte = 1;
		ctrl.entries = 1 + ((val & 0x00ff0000) >> 16);
	} else {
		pr_debug("unsupported personality routine %#x at %p\n",
			 val, ctrl.insn);
		return -QUADD_URC_UNSUPPORTED_PR;
	}

	/* execute unwind instructions (ctrl.entries is decremented inside) */
	while (ctrl.entries > 0) {
		err = unwind_exec_insn(ri->mmap, &ctrl);
		if (err < 0)
			return err;

		/* SP must stay word-aligned and move upward within the vma */
		if (ctrl.vrs[SP] & 0x03 ||
		    ctrl.vrs[SP] < low || ctrl.vrs[SP] >= high)
			return -QUADD_URC_SP_INCORRECT;
	}

	if (ctrl.vrs[PC] == 0) {
		/* no insn restored PC: the caller's pc is in LR */
		ctrl.vrs[PC] = ctrl.vrs[LR];
		*unw_type = QUADD_UNW_TYPE_LR_UT;
	} else {
		*unw_type = QUADD_UNW_TYPE_UT;
	}

	if (!validate_pc_addr(ctrl.vrs[PC], sizeof(u32)))
		return -QUADD_URC_PC_INCORRECT;

	/* commit the unwound register state back to the caller's frame */
	frame->fp_thumb = ctrl.vrs[FP_THUMB];
	frame->fp_arm = ctrl.vrs[FP_ARM];

	frame->sp = ctrl.vrs[SP];
	frame->lr = ctrl.vrs[LR];
	frame->pc = ctrl.vrs[PC];

	return 0;
}
1049
1050 static void
1051 unwind_backtrace(struct quadd_callchain *cc,
1052                  struct ex_region_info *ri,
1053                  struct stackframe *frame,
1054                  struct vm_area_struct *vma_sp,
1055                  struct task_struct *task)
1056 {
1057         unsigned int unw_type;
1058         struct ex_region_info ri_new;
1059
1060         cc->unw_rc = QUADD_URC_FAILURE;
1061
1062         pr_debug("fp_arm: %#lx, fp_thumb: %#lx, sp: %#lx, lr: %#lx, pc: %#lx\n",
1063                  frame->fp_arm, frame->fp_thumb,
1064                  frame->sp, frame->lr, frame->pc);
1065         pr_debug("vma_sp: %#lx - %#lx, length: %#lx\n",
1066                  vma_sp->vm_start, vma_sp->vm_end,
1067                  vma_sp->vm_end - vma_sp->vm_start);
1068
1069         while (1) {
1070                 long err;
1071                 int nr_added;
1072                 struct extab_info *ti;
1073                 unsigned long where = frame->pc;
1074                 struct vm_area_struct *vma_pc;
1075                 struct mm_struct *mm = task->mm;
1076
1077                 if (!mm)
1078                         break;
1079
1080                 if (!validate_stack_addr(frame->sp, vma_sp, sizeof(u32))) {
1081                         cc->unw_rc = -QUADD_URC_SP_INCORRECT;
1082                         break;
1083                 }
1084
1085                 vma_pc = find_vma(mm, frame->pc);
1086                 if (!vma_pc)
1087                         break;
1088
1089                 ti = &ri->ex_sec[QUADD_SEC_TYPE_EXIDX];
1090
1091                 if (!is_vma_addr(ti->addr, vma_pc, sizeof(u32))) {
1092                         err = get_extabs_ehabi(vma_pc->vm_start, &ri_new);
1093                         if (err) {
1094                                 cc->unw_rc = QUADD_URC_TBL_NOT_EXIST;
1095                                 break;
1096                         }
1097
1098                         ri = &ri_new;
1099                 }
1100
1101                 err = unwind_frame(ri, frame, vma_sp, &unw_type);
1102                 if (err < 0) {
1103                         pr_debug("end unwind, urc: %ld\n", err);
1104                         cc->unw_rc = -err;
1105                         break;
1106                 }
1107
1108                 pr_debug("function at [<%08lx>] from [<%08lx>]\n",
1109                          where, frame->pc);
1110
1111                 cc->curr_sp = frame->sp;
1112                 cc->curr_fp = frame->fp_arm;
1113                 cc->curr_pc = frame->pc;
1114
1115                 nr_added = quadd_callchain_store(cc, frame->pc, unw_type);
1116                 if (nr_added == 0)
1117                         break;
1118         }
1119 }
1120
1121 unsigned int
1122 quadd_get_user_cc_arm32_ehabi(struct pt_regs *regs,
1123                               struct quadd_callchain *cc,
1124                               struct task_struct *task)
1125 {
1126         long err;
1127         int nr_prev = cc->nr;
1128         unsigned long ip, sp, lr;
1129         struct vm_area_struct *vma, *vma_sp;
1130         struct mm_struct *mm = task->mm;
1131         struct ex_region_info ri;
1132         struct stackframe frame;
1133
1134         if (!regs || !mm)
1135                 return 0;
1136
1137 #ifdef CONFIG_ARM64
1138         if (!compat_user_mode(regs))
1139                 return 0;
1140 #endif
1141
1142         if (cc->unw_rc == QUADD_URC_LEVEL_TOO_DEEP)
1143                 return nr_prev;
1144
1145         cc->unw_rc = QUADD_URC_FAILURE;
1146
1147         if (nr_prev > 0) {
1148                 ip = cc->curr_pc;
1149                 sp = cc->curr_sp;
1150                 lr = 0;
1151
1152                 frame.fp_thumb = 0;
1153                 frame.fp_arm = cc->curr_fp;
1154         } else {
1155                 ip = instruction_pointer(regs);
1156                 sp = quadd_user_stack_pointer(regs);
1157                 lr = quadd_user_link_register(regs);
1158
1159 #ifdef CONFIG_ARM64
1160                 frame.fp_thumb = regs->compat_usr(7);
1161                 frame.fp_arm = regs->compat_usr(11);
1162 #else
1163                 frame.fp_thumb = regs->ARM_r7;
1164                 frame.fp_arm = regs->ARM_fp;
1165 #endif
1166         }
1167
1168         frame.pc = ip;
1169         frame.sp = sp;
1170         frame.lr = lr;
1171
1172         vma = find_vma(mm, ip);
1173         if (!vma)
1174                 return 0;
1175
1176         vma_sp = find_vma(mm, sp);
1177         if (!vma_sp)
1178                 return 0;
1179
1180         err = get_extabs_ehabi(vma->vm_start, &ri);
1181         if (err) {
1182                 cc->unw_rc = QUADD_URC_TBL_NOT_EXIST;
1183                 return 0;
1184         }
1185
1186         unwind_backtrace(cc, &ri, &frame, vma_sp, task);
1187
1188         pr_debug("%s: exit, cc->nr: %d --> %d\n",
1189                  __func__, nr_prev, cc->nr);
1190
1191         return cc->nr;
1192 }
1193
1194 int
1195 quadd_is_ex_entry_exist_arm32_ehabi(struct pt_regs *regs,
1196                                     unsigned long addr,
1197                                     struct task_struct *task)
1198 {
1199         long err;
1200         u32 value;
1201         const struct unwind_idx *idx;
1202         struct ex_region_info ri;
1203         struct vm_area_struct *vma;
1204         struct mm_struct *mm = task->mm;
1205
1206         if (!regs || !mm)
1207                 return 0;
1208
1209         vma = find_vma(mm, addr);
1210         if (!vma)
1211                 return 0;
1212
1213         err = get_extabs_ehabi(vma->vm_start, &ri);
1214         if (err)
1215                 return 0;
1216
1217         idx = unwind_find_idx(&ri, addr);
1218         if (IS_ERR_OR_NULL(idx))
1219                 return 0;
1220
1221         err = read_mmap_data(ri.mmap, &idx->insn, &value);
1222         if (err < 0)
1223                 return 0;
1224
1225         /* EXIDX_CANTUNWIND */
1226         if (value == 1)
1227                 return 0;
1228
1229         return 1;
1230 }
1231
1232 int quadd_unwind_start(struct task_struct *task)
1233 {
1234         int err;
1235         struct regions_data *rd, *rd_old;
1236
1237         rd = rd_alloc(QUADD_EXTABS_SIZE);
1238         if (IS_ERR_OR_NULL(rd)) {
1239                 pr_err("%s: error: rd_alloc\n", __func__);
1240                 return -ENOMEM;
1241         }
1242
1243         err = quadd_dwarf_unwind_start();
1244         if (err) {
1245                 rd_free(rd);
1246                 return err;
1247         }
1248
1249         spin_lock(&ctx.lock);
1250
1251         rd_old = rcu_dereference(ctx.rd);
1252         if (rd_old)
1253                 pr_warn("%s: warning: rd_old\n", __func__);
1254
1255         rcu_assign_pointer(ctx.rd, rd);
1256
1257         if (rd_old)
1258                 call_rcu(&rd_old->rcu, rd_free_rcu);
1259
1260         ctx.pid = task->tgid;
1261
1262         ctx.ex_tables_size = 0;
1263
1264         spin_unlock(&ctx.lock);
1265
1266         return 0;
1267 }
1268
1269 void quadd_unwind_stop(void)
1270 {
1271         int i;
1272         unsigned long nr_entries, size;
1273         struct regions_data *rd;
1274         struct ex_region_info *ri;
1275
1276         quadd_dwarf_unwind_stop();
1277
1278         spin_lock(&ctx.lock);
1279
1280         ctx.pid = 0;
1281
1282         rd = rcu_dereference(ctx.rd);
1283         if (!rd)
1284                 goto out;
1285
1286         nr_entries = rd->curr_nr;
1287         size = rd->size;
1288
1289         for (i = 0; i < nr_entries; i++) {
1290                 ri = &rd->entries[i];
1291                 clean_mmap(rd, ri->mmap, 0);
1292         }
1293
1294         rcu_assign_pointer(ctx.rd, NULL);
1295         call_rcu(&rd->rcu, rd_free_rcu);
1296
1297 out:
1298         spin_unlock(&ctx.lock);
1299         pr_info("exception tables size: %lu bytes\n", ctx.ex_tables_size);
1300 }
1301
1302 int quadd_unwind_init(void)
1303 {
1304         int err;
1305
1306         err = quadd_dwarf_unwind_init();
1307         if (err)
1308                 return err;
1309
1310         spin_lock_init(&ctx.lock);
1311         rcu_assign_pointer(ctx.rd, NULL);
1312         ctx.pid = 0;
1313
1314         return 0;
1315 }
1316
/* Module teardown: stop unwinding and wait for pending RCU callbacks. */
void quadd_unwind_deinit(void)
{
	quadd_unwind_stop();
	/* wait for all rd_free_rcu callbacks before the module goes away */
	rcu_barrier();
}