misc: tegra-profiler: fix Coverity issue of NULL dereference
[linux-3.10.git] / drivers / misc / tegra-profiler / eh_unwind.c
1 /*
 * drivers/misc/tegra-profiler/eh_unwind.c
3  *
4  * Copyright (c) 2015, NVIDIA CORPORATION.  All rights reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  */
16
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
19 #include <linux/mm.h>
20 #include <linux/sched.h>
21 #include <linux/slab.h>
22 #include <linux/uaccess.h>
23 #include <linux/err.h>
24 #include <linux/rcupdate.h>
25
26 #include <linux/tegra_profiler.h>
27
28 #include "eh_unwind.h"
29 #include "backtrace.h"
30 #include "comm.h"
31
/* initial capacity (entries) of the regions array */
#define QUADD_EXTABS_SIZE       0x100

/* number of pages spanned by the byte range [a, a + l) */
#define GET_NR_PAGES(a, l) \
        ((PAGE_ALIGN((a) + (l)) - ((a) & PAGE_MASK)) / PAGE_SIZE)

/* ARM core register numbers, used as indices into the virtual register set */
enum regs {
        FP_THUMB = 7,   /* frame pointer register in Thumb code (r7) */
        FP_ARM = 11,    /* frame pointer register in ARM code (r11) */

        SP = 13,
        LR = 14,
        PC = 15
};

/* location of one exception table, both in the target and in the mmap copy */
struct extab_info {
        unsigned long addr;          /* table address in the target mapping */
        unsigned long length;        /* table length in bytes */

        unsigned long mmap_offset;   /* offset of the copy inside mmap->data */
};

/* the pair of ARM unwind tables for one executable region */
struct extables {
        struct extab_info extab;     /* unwind descriptors (.ARM.extab) */
        struct extab_info exidx;     /* index entries (.ARM.exidx) */
};

/* an executable VMA together with its exception tables */
struct ex_region_info {
        unsigned long vm_start;
        unsigned long vm_end;

        struct extables tabs;
        struct quadd_extabs_mmap *mmap;  /* mmap holding the table copies */

        struct list_head list;       /* linked on mmap->ex_entries */
};

/* array of regions sorted by vm_start; replaced as a whole under RCU */
struct regions_data {
        struct ex_region_info *entries;

        unsigned long curr_nr;       /* number of used entries */
        unsigned long size;          /* allocated capacity */

        struct rcu_head rcu;         /* deferred release of the old array */
};

struct quadd_unwind_ctx {
        struct regions_data *rd;     /* RCU-protected regions array */

        pid_t pid;
        unsigned long ex_tables_size;  /* total bytes of registered tables */
        spinlock_t lock;             /* serializes writers of rd */
};

/* one entry of the binary .ARM.exidx index table */
struct unwind_idx {
        u32 addr_offset;             /* prel31 offset to the function start */
        u32 insn;                    /* unwind data or prel31 offset to it */
};

/* register snapshot of the frame currently being unwound */
struct stackframe {
        unsigned long fp_thumb;
        unsigned long fp_arm;

        unsigned long sp;
        unsigned long lr;
        unsigned long pc;
};

struct unwind_ctrl_block {
        u32 vrs[16];            /* virtual register set */
        const u32 *insn;        /* pointer to the current instr word */
        int entries;            /* number of entries left */
        int byte;               /* current byte in the instr word */
};

struct pin_pages_work {
        struct work_struct work;
        unsigned long vm_start;
};

/* global unwinder state (single instance) */
struct quadd_unwind_ctx ctx;
112
/*
 * Check that a stack address is word-aligned and that @nbytes starting
 * at it lie within @vma. Returns non-zero when the address is usable.
 */
static inline int
validate_stack_addr(unsigned long addr,
                    struct vm_area_struct *vma,
                    unsigned long nbytes)
{
        /* reject unaligned addresses up front, then do the range check */
        return (addr & 0x03) ? 0 : is_vma_addr(addr, vma, nbytes);
}
123
124 static inline int
125 validate_mmap_addr(struct quadd_extabs_mmap *mmap,
126                    unsigned long addr, unsigned long nbytes)
127 {
128         struct vm_area_struct *vma = mmap->mmap_vma;
129         unsigned long size = vma->vm_end - vma->vm_start;
130         unsigned long data = (unsigned long)mmap->data;
131
132         if (addr & 0x03) {
133                 pr_err_once("%s: error: unaligned address: %#lx, data: %#lx-%#lx, vma: %#lx-%#lx\n",
134                             __func__, addr, data, data + size,
135                        vma->vm_start, vma->vm_end);
136                 return 0;
137         }
138
139         if (addr < data || addr >= data + (size - nbytes)) {
140                 pr_err_once("%s: error: addr: %#lx, data: %#lx-%#lx, vma: %#lx-%#lx\n",
141                             __func__, addr, data, data + size,
142                        vma->vm_start, vma->vm_end);
143                 return 0;
144         }
145
146         return 1;
147 }
148
/*
 * Read one word from user memory with page faults disabled, so a missing
 * page fails fast instead of being faulted in. Evaluates to 0 on success
 * (with @retval set) or -QUADD_URC_EACCESS on fault.
 *
 * TBD: why probe_kernel_address() can lead to random crashes
 * on 64-bit kernel, and replacing it to __get_user() fixed the issue.
 */
#define read_user_data(addr, retval)                            \
({                                                              \
        int ret;                                                \
                                                                \
        pagefault_disable();                                    \
        ret = __get_user(retval, addr);                         \
        pagefault_enable();                                     \
                                                                \
        if (ret) {                                              \
                pr_debug("%s: failed for address: %p\n",        \
                         __func__, addr);                       \
                ret = -QUADD_URC_EACCESS;                       \
        }                                                       \
                                                                \
        ret;                                                    \
})
169
170 static inline long
171 read_mmap_data(struct quadd_extabs_mmap *mmap, const u32 *addr, u32 *retval)
172 {
173         if (!validate_mmap_addr(mmap, (unsigned long)addr, sizeof(u32)))
174                 return -QUADD_URC_EACCESS;
175
176         *retval = *addr;
177         return 0;
178 }
179
180 static inline unsigned long
181 ex_addr_to_mmap_addr(unsigned long addr,
182                      struct ex_region_info *ri,
183                      int exidx)
184 {
185         unsigned long offset;
186         struct extab_info *ei;
187
188         ei = exidx ? &ri->tabs.exidx : &ri->tabs.extab;
189         offset = addr - ei->addr;
190
191         return ei->mmap_offset + offset + (unsigned long)ri->mmap->data;
192 }
193
194 static inline unsigned long
195 mmap_addr_to_ex_addr(unsigned long addr,
196                      struct ex_region_info *ri,
197                      int exidx)
198 {
199         unsigned long offset;
200         struct extab_info *ei;
201
202         ei = exidx ? &ri->tabs.exidx : &ri->tabs.extab;
203         offset = addr - ei->mmap_offset - (unsigned long)ri->mmap->data;
204
205         return ei->addr + offset;
206 }
207
208 static inline u32
209 prel31_to_addr(const u32 *ptr)
210 {
211         u32 value;
212         s32 offset;
213
214         if (read_user_data(ptr, value))
215                 return 0;
216
217         /* sign-extend to 32 bits */
218         offset = (((s32)value) << 1) >> 1;
219         return (u32)(unsigned long)ptr + offset;
220 }
221
222 static unsigned long
223 mmap_prel31_to_addr(const u32 *ptr, struct ex_region_info *ri,
224                     int is_src_exidx, int is_dst_exidx, int to_mmap)
225 {
226         u32 value, addr;
227         unsigned long addr_res;
228         s32 offset;
229         struct extab_info *ei_src, *ei_dst;
230
231         ei_src = is_src_exidx ? &ri->tabs.exidx : &ri->tabs.extab;
232         ei_dst = is_dst_exidx ? &ri->tabs.exidx : &ri->tabs.extab;
233
234         value = *ptr;
235         offset = (((s32)value) << 1) >> 1;
236
237         addr = mmap_addr_to_ex_addr((unsigned long)ptr, ri, is_src_exidx);
238         addr += offset;
239         addr_res = addr;
240
241         if (to_mmap)
242                 addr_res = ex_addr_to_mmap_addr(addr_res, ri, is_dst_exidx);
243
244         return addr_res;
245 }
246
/*
 * Insert @new_entry into the array sorted by vm_start.
 *
 * Returns the number of entries added (0 or 1); the caller must bump
 * rd->curr_nr by the returned value. Entries with a duplicate vm_start
 * are rejected. The caller guarantees the array has room for one more
 * element.
 */
static int
add_ex_region(struct regions_data *rd,
              struct ex_region_info *new_entry)
{
        unsigned int i_min, i_max, mid;
        struct ex_region_info *array = rd->entries;
        unsigned long size = rd->curr_nr;

        if (!array)
                return 0;

        if (size == 0) {
                memcpy(&array[0], new_entry, sizeof(*new_entry));
                return 1;
        } else if (size == 1 && array[0].vm_start == new_entry->vm_start) {
                return 0;
        }

        i_min = 0;
        i_max = size;

        /* fast paths: insert at the front or append at the back */
        if (array[0].vm_start > new_entry->vm_start) {
                memmove(array + 1, array,
                        size * sizeof(*array));
                memcpy(&array[0], new_entry, sizeof(*new_entry));
                return 1;
        } else if (array[size - 1].vm_start < new_entry->vm_start) {
                memcpy(&array[size], new_entry, sizeof(*new_entry));
                return 1;
        }

        /* binary search for the leftmost entry with vm_start >= key */
        while (i_min < i_max) {
                mid = i_min + (i_max - i_min) / 2;

                if (new_entry->vm_start <= array[mid].vm_start)
                        i_max = mid;
                else
                        i_min = mid + 1;
        }

        if (array[i_max].vm_start == new_entry->vm_start) {
                return 0;
        } else {
                /* shift the tail right by one slot, insert at i_max */
                memmove(array + i_max + 1,
                        array + i_max,
                        (size - i_max) * sizeof(*array));
                memcpy(&array[i_max], new_entry, sizeof(*new_entry));
                return 1;
        }
}
297
298 static int
299 remove_ex_region(struct regions_data *rd,
300                  struct ex_region_info *entry)
301 {
302         unsigned int i_min, i_max, mid;
303         struct ex_region_info *array = rd->entries;
304         unsigned long size = rd->curr_nr;
305
306         if (!array)
307                 return 0;
308
309         if (size == 0)
310                 return 0;
311
312         if (size == 1) {
313                 if (array[0].vm_start == entry->vm_start)
314                         return 1;
315                 else
316                         return 0;
317         }
318
319         if (array[0].vm_start > entry->vm_start)
320                 return 0;
321         else if (array[size - 1].vm_start < entry->vm_start)
322                 return 0;
323
324         i_min = 0;
325         i_max = size;
326
327         while (i_min < i_max) {
328                 mid = i_min + (i_max - i_min) / 2;
329
330                 if (entry->vm_start <= array[mid].vm_start)
331                         i_max = mid;
332                 else
333                         i_min = mid + 1;
334         }
335
336         if (array[i_max].vm_start == entry->vm_start) {
337                 memmove(array + i_max,
338                         array + i_max + 1,
339                         (size - i_max) * sizeof(*array));
340                 return 1;
341         } else {
342                 return 0;
343         }
344 }
345
346 static struct ex_region_info *
347 search_ex_region(struct ex_region_info *array,
348                  unsigned long size,
349                  unsigned long key,
350                  struct ex_region_info *ri)
351 {
352         unsigned int i_min, i_max, mid;
353
354         if (size == 0)
355                 return NULL;
356
357         i_min = 0;
358         i_max = size;
359
360         while (i_min < i_max) {
361                 mid = i_min + (i_max - i_min) / 2;
362
363                 if (key <= array[mid].vm_start)
364                         i_max = mid;
365                 else
366                         i_min = mid + 1;
367         }
368
369         if (array[i_max].vm_start == key) {
370                 memcpy(ri, &array[i_max], sizeof(*ri));
371                 return &array[i_max];
372         }
373
374         return NULL;
375 }
376
377 static long
378 __search_ex_region(unsigned long key, struct ex_region_info *ri)
379 {
380         struct regions_data *rd;
381         struct ex_region_info *ri_p = NULL;
382
383         rcu_read_lock();
384
385         rd = rcu_dereference(ctx.rd);
386         if (!rd)
387                 goto out;
388
389         ri_p = search_ex_region(rd->entries, rd->curr_nr, key, ri);
390
391 out:
392         rcu_read_unlock();
393         return ri_p ? 0 : -ENOENT;
394 }
395
396 static struct regions_data *rd_alloc(unsigned long size)
397 {
398         struct regions_data *rd;
399
400         rd = kzalloc(sizeof(*rd), GFP_KERNEL);
401         if (!rd)
402                 return NULL;
403
404         rd->entries = kzalloc(size * sizeof(*rd->entries), GFP_KERNEL);
405         if (!rd->entries) {
406                 kfree(rd);
407                 return NULL;
408         }
409
410         rd->size = size;
411         rd->curr_nr = 0;
412
413         return rd;
414 }
415
416 static void rd_free(struct regions_data *rd)
417 {
418         if (rd)
419                 kfree(rd->entries);
420
421         kfree(rd);
422 }
423
424 static void rd_free_rcu(struct rcu_head *rh)
425 {
426         struct regions_data *rd = container_of(rh, struct regions_data, rcu);
427         rd_free(rd);
428 }
429
430 int quadd_unwind_set_extab(struct quadd_extables *extabs,
431                            struct quadd_extabs_mmap *mmap)
432 {
433         int err = 0;
434         unsigned long nr_entries, nr_added, new_size;
435         struct ex_region_info ri_entry;
436         struct extab_info *ti;
437         struct regions_data *rd, *rd_new;
438         struct ex_region_info *ex_entry;
439
440         spin_lock(&ctx.lock);
441
442         rd = rcu_dereference(ctx.rd);
443         if (!rd) {
444                 pr_warn("%s: warning: rd\n", __func__);
445                 new_size = QUADD_EXTABS_SIZE;
446                 nr_entries = 0;
447         } else {
448                 new_size = rd->size;
449                 nr_entries = rd->curr_nr;
450         }
451
452         if (nr_entries >= new_size)
453                 new_size += new_size >> 1;
454
455         rd_new = rd_alloc(new_size);
456         if (IS_ERR_OR_NULL(rd_new)) {
457                 pr_err("%s: error: rd_alloc\n", __func__);
458                 err = -ENOMEM;
459                 goto error_out;
460         }
461
462         if (rd && nr_entries)
463                 memcpy(rd_new->entries, rd->entries,
464                        nr_entries * sizeof(*rd->entries));
465
466         rd_new->curr_nr = nr_entries;
467
468         ri_entry.vm_start = extabs->vm_start;
469         ri_entry.vm_end = extabs->vm_end;
470
471         ri_entry.mmap = mmap;
472
473         ti = &ri_entry.tabs.exidx;
474         ti->addr = extabs->exidx.addr;
475         ti->length = extabs->exidx.length;
476         ti->mmap_offset = extabs->reserved[QUADD_EXT_IDX_EXIDX_OFFSET];
477         ctx.ex_tables_size += ti->length;
478
479         ti = &ri_entry.tabs.extab;
480         ti->addr = extabs->extab.addr;
481         ti->length = extabs->extab.length;
482         ti->mmap_offset = extabs->reserved[QUADD_EXT_IDX_EXTAB_OFFSET];
483         ctx.ex_tables_size += ti->length;
484
485         nr_added = add_ex_region(rd_new, &ri_entry);
486         if (nr_added == 0)
487                 goto error_free;
488         rd_new->curr_nr += nr_added;
489
490         ex_entry = kzalloc(sizeof(*ex_entry), GFP_KERNEL);
491         if (!ex_entry) {
492                 err = -ENOMEM;
493                 goto error_free;
494         }
495         memcpy(ex_entry, &ri_entry, sizeof(*ex_entry));
496
497         INIT_LIST_HEAD(&ex_entry->list);
498         list_add_tail(&ex_entry->list, &mmap->ex_entries);
499
500         rcu_assign_pointer(ctx.rd, rd_new);
501
502         if (rd)
503                 call_rcu(&rd->rcu, rd_free_rcu);
504
505         spin_unlock(&ctx.lock);
506
507         return 0;
508
509 error_free:
510         rd_free(rd_new);
511 error_out:
512         spin_unlock(&ctx.lock);
513         return err;
514 }
515
516 static int
517 clean_mmap(struct regions_data *rd, struct quadd_extabs_mmap *mmap, int rm_ext)
518 {
519         int nr_removed = 0;
520         struct ex_region_info *entry, *next;
521
522         if (!rd || !mmap)
523                 return 0;
524
525         list_for_each_entry_safe(entry, next, &mmap->ex_entries, list) {
526                 if (rm_ext)
527                         nr_removed += remove_ex_region(rd, entry);
528
529                 list_del(&entry->list);
530                 kfree(entry);
531         }
532
533         return nr_removed;
534 }
535
536 void quadd_unwind_delete_mmap(struct quadd_extabs_mmap *mmap)
537 {
538         unsigned long nr_entries, nr_removed, new_size;
539         struct regions_data *rd, *rd_new;
540
541         if (!mmap)
542                 return;
543
544         spin_lock(&ctx.lock);
545
546         rd = rcu_dereference(ctx.rd);
547         if (!rd || !rd->curr_nr)
548                 goto error_out;
549
550         nr_entries = rd->curr_nr;
551         new_size = min_t(unsigned long, rd->size, nr_entries);
552
553         rd_new = rd_alloc(new_size);
554         if (IS_ERR_OR_NULL(rd_new)) {
555                 pr_err("%s: error: rd_alloc\n", __func__);
556                 goto error_out;
557         }
558         rd_new->size = new_size;
559         rd_new->curr_nr = nr_entries;
560
561         memcpy(rd_new->entries, rd->entries,
562                 nr_entries * sizeof(*rd->entries));
563
564         nr_removed = clean_mmap(rd_new, mmap, 1);
565         rd_new->curr_nr -= nr_removed;
566
567         rcu_assign_pointer(ctx.rd, rd_new);
568         call_rcu(&rd->rcu, rd_free_rcu);
569
570 error_out:
571         spin_unlock(&ctx.lock);
572 }
573
/*
 * Find the unwind index (exidx) entry covering @addr.
 *
 * The search runs over the profiler's mmap copy of the exidx table;
 * prel31 entries are translated to target addresses for comparison.
 * Returns NULL when @addr falls outside the table's covered range.
 */
static const struct unwind_idx *
unwind_find_idx(struct ex_region_info *ri, u32 addr)
{
        unsigned long length;
        u32 value;
        struct unwind_idx *start;
        struct unwind_idx *stop;
        struct unwind_idx *mid = NULL;
        length = ri->tabs.exidx.length / sizeof(*start);

        if (unlikely(!length))
                return NULL;

        start = (struct unwind_idx *)((char *)ri->mmap->data +
                ri->tabs.exidx.mmap_offset);
        stop = start + length - 1;

        /* addr precedes the first function covered by the table */
        value = (u32)mmap_prel31_to_addr(&start->addr_offset, ri, 1, 0, 0);
        if (addr < value)
                return NULL;

        /* addr is at or beyond the last entry's function start */
        value = (u32)mmap_prel31_to_addr(&stop->addr_offset, ri, 1, 0, 0);
        if (addr >= value)
                return NULL;

        /* binary search: narrow to the last entry with start <= addr */
        while (start < stop - 1) {
                mid = start + ((stop - start) >> 1);

                value = (u32)mmap_prel31_to_addr(&mid->addr_offset,
                                                 ri, 1, 0, 0);

                if (addr < value)
                        stop = mid;
                else
                        start = mid;
        }

        return start;
}
613
614 static unsigned long
615 unwind_get_byte(struct quadd_extabs_mmap *mmap,
616                 struct unwind_ctrl_block *ctrl, long *err)
617 {
618         unsigned long ret;
619         u32 insn_word;
620
621         *err = 0;
622
623         if (ctrl->entries <= 0) {
624                 pr_err_once("%s: error: corrupt unwind table\n", __func__);
625                 *err = -QUADD_URC_TBL_IS_CORRUPT;
626                 return 0;
627         }
628
629         *err = read_mmap_data(mmap, ctrl->insn, &insn_word);
630         if (*err < 0)
631                 return 0;
632
633         ret = (insn_word >> (ctrl->byte * 8)) & 0xff;
634
635         if (ctrl->byte == 0) {
636                 ctrl->insn++;
637                 ctrl->entries--;
638                 ctrl->byte = 3;
639         } else
640                 ctrl->byte--;
641
642         return ret;
643 }
644
645 /*
646  * Execute the current unwind instruction.
647  */
648 static long
649 unwind_exec_insn(struct quadd_extabs_mmap *mmap,
650                  struct unwind_ctrl_block *ctrl)
651 {
652         long err;
653         unsigned int i;
654         unsigned long insn = unwind_get_byte(mmap, ctrl, &err);
655
656         if (err < 0)
657                 return err;
658
659         pr_debug("%s: insn = %08lx\n", __func__, insn);
660
661         if ((insn & 0xc0) == 0x00) {
662                 ctrl->vrs[SP] += ((insn & 0x3f) << 2) + 4;
663
664                 pr_debug("CMD_DATA_POP: vsp = vsp + %lu (new: %#x)\n",
665                         ((insn & 0x3f) << 2) + 4, ctrl->vrs[SP]);
666         } else if ((insn & 0xc0) == 0x40) {
667                 ctrl->vrs[SP] -= ((insn & 0x3f) << 2) + 4;
668
669                 pr_debug("CMD_DATA_PUSH: vsp = vsp – %lu (new: %#x)\n",
670                         ((insn & 0x3f) << 2) + 4, ctrl->vrs[SP]);
671         } else if ((insn & 0xf0) == 0x80) {
672                 unsigned long mask;
673                 u32 *vsp = (u32 *)(unsigned long)ctrl->vrs[SP];
674                 int load_sp, reg = 4;
675
676                 insn = (insn << 8) | unwind_get_byte(mmap, ctrl, &err);
677                 if (err < 0)
678                         return err;
679
680                 mask = insn & 0x0fff;
681                 if (mask == 0) {
682                         pr_debug("CMD_REFUSED: unwind: 'Refuse to unwind' instruction %04lx\n",
683                                    insn);
684                         return -QUADD_URC_REFUSE_TO_UNWIND;
685                 }
686
687                 /* pop R4-R15 according to mask */
688                 load_sp = mask & (1 << (13 - 4));
689                 while (mask) {
690                         if (mask & 1) {
691                                 err = read_user_data(vsp++, ctrl->vrs[reg]);
692                                 if (err < 0)
693                                         return err;
694
695                                 pr_debug("CMD_REG_POP: pop {r%d}\n", reg);
696                         }
697                         mask >>= 1;
698                         reg++;
699                 }
700                 if (!load_sp)
701                         ctrl->vrs[SP] = (unsigned long)vsp;
702
703                 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
704         } else if ((insn & 0xf0) == 0x90 &&
705                    (insn & 0x0d) != 0x0d) {
706                 ctrl->vrs[SP] = ctrl->vrs[insn & 0x0f];
707                 pr_debug("CMD_REG_TO_SP: vsp = {r%lu}\n", insn & 0x0f);
708         } else if ((insn & 0xf0) == 0xa0) {
709                 u32 *vsp = (u32 *)(unsigned long)ctrl->vrs[SP];
710                 unsigned int reg;
711
712                 /* pop R4-R[4+bbb] */
713                 for (reg = 4; reg <= 4 + (insn & 7); reg++) {
714                         err = read_user_data(vsp++, ctrl->vrs[reg]);
715                         if (err < 0)
716                                 return err;
717
718                         pr_debug("CMD_REG_POP: pop {r%u}\n", reg);
719                 }
720
721                 if (insn & 0x08) {
722                         err = read_user_data(vsp++, ctrl->vrs[14]);
723                         if (err < 0)
724                                 return err;
725
726                         pr_debug("CMD_REG_POP: pop {r14}\n");
727                 }
728
729                 ctrl->vrs[SP] = (u32)(unsigned long)vsp;
730                 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
731         } else if (insn == 0xb0) {
732                 if (ctrl->vrs[PC] == 0)
733                         ctrl->vrs[PC] = ctrl->vrs[LR];
734                 /* no further processing */
735                 ctrl->entries = 0;
736
737                 pr_debug("CMD_FINISH\n");
738         } else if (insn == 0xb1) {
739                 unsigned long mask = unwind_get_byte(mmap, ctrl, &err);
740                 u32 *vsp = (u32 *)(unsigned long)ctrl->vrs[SP];
741                 int reg = 0;
742
743                 if (err < 0)
744                         return err;
745
746                 if (mask == 0 || mask & 0xf0) {
747                         pr_debug("unwind: Spare encoding %04lx\n",
748                                (insn << 8) | mask);
749                         return -QUADD_URC_SPARE_ENCODING;
750                 }
751
752                 /* pop R0-R3 according to mask */
753                 while (mask) {
754                         if (mask & 1) {
755                                 err = read_user_data(vsp++, ctrl->vrs[reg]);
756                                 if (err < 0)
757                                         return err;
758
759                                 pr_debug("CMD_REG_POP: pop {r%d}\n", reg);
760                         }
761                         mask >>= 1;
762                         reg++;
763                 }
764
765                 ctrl->vrs[SP] = (u32)(unsigned long)vsp;
766                 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
767         } else if (insn == 0xb2) {
768                 unsigned long uleb128 = unwind_get_byte(mmap, ctrl, &err);
769                 if (err < 0)
770                         return err;
771
772                 ctrl->vrs[SP] += 0x204 + (uleb128 << 2);
773
774                 pr_debug("CMD_DATA_POP: vsp = vsp + %lu, new vsp: %#x\n",
775                          0x204 + (uleb128 << 2), ctrl->vrs[SP]);
776         } else if (insn == 0xb3 || insn == 0xc8 || insn == 0xc9) {
777                 unsigned long data, reg_from, reg_to;
778                 u32 *vsp = (u32 *)(unsigned long)ctrl->vrs[SP];
779
780                 data = unwind_get_byte(mmap, ctrl, &err);
781                 if (err < 0)
782                         return err;
783
784                 reg_from = (data & 0xf0) >> 4;
785                 reg_to = reg_from + (data & 0x0f);
786
787                 if (insn == 0xc8) {
788                         reg_from += 16;
789                         reg_to += 16;
790                 }
791
792                 for (i = reg_from; i <= reg_to; i++)
793                         vsp += 2;
794
795                 if (insn == 0xb3)
796                         vsp++;
797
798                 ctrl->vrs[SP] = (unsigned long)vsp;
799                 ctrl->vrs[SP] = (u32)(unsigned long)vsp;
800
801                 pr_debug("CMD_VFP_POP (%#lx %#lx): pop {D%lu-D%lu}\n",
802                          insn, data, reg_from, reg_to);
803                 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
804         } else if ((insn & 0xf8) == 0xb8 || (insn & 0xf8) == 0xd0) {
805                 unsigned long reg_to;
806                 unsigned long data = insn & 0x07;
807                 u32 *vsp = (u32 *)(unsigned long)ctrl->vrs[SP];
808
809                 reg_to = 8 + data;
810
811                 for (i = 8; i <= reg_to; i++)
812                         vsp += 2;
813
814                 if ((insn & 0xf8) == 0xb8)
815                         vsp++;
816
817                 ctrl->vrs[SP] = (u32)(unsigned long)vsp;
818
819                 pr_debug("CMD_VFP_POP (%#lx): pop {D8-D%lu}\n",
820                          insn, reg_to);
821                 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
822         } else {
823                 pr_debug("error: unhandled instruction %02lx\n", insn);
824                 return -QUADD_URC_UNHANDLED_INSTRUCTION;
825         }
826
827         pr_debug("%s: fp_arm: %#x, fp_thumb: %#x, sp: %#x, lr = %#x, pc: %#x\n",
828                  __func__,
829                  ctrl->vrs[FP_ARM], ctrl->vrs[FP_THUMB], ctrl->vrs[SP],
830                  ctrl->vrs[LR], ctrl->vrs[PC]);
831
832         return 0;
833 }
834
/*
 * Unwind a single frame starting with *sp for the symbol at *pc. It
 * updates the *pc and *sp with the new values.
 *
 * Returns 0 on success or a negative QUADD_URC_* code.
 */
static long
unwind_frame(struct ex_region_info *ri,
             struct stackframe *frame,
             struct vm_area_struct *vma_sp,
             unsigned int *unw_type)
{
        unsigned long high, low;
        const struct unwind_idx *idx;
        struct unwind_ctrl_block ctrl;
        long err = 0;
        u32 val;

        if (!validate_stack_addr(frame->sp, vma_sp, sizeof(u32)))
                return -QUADD_URC_SP_INCORRECT;

        /* only go to a higher address on the stack */
        low = frame->sp;
        high = vma_sp->vm_end;

        pr_debug("pc: %#lx, lr: %#lx, sp:%#lx, low/high: %#lx/%#lx\n",
                frame->pc, frame->lr, frame->sp, low, high);

        idx = unwind_find_idx(ri, frame->pc);
        if (IS_ERR_OR_NULL(idx))
                return -QUADD_URC_IDX_NOT_FOUND;

        pr_debug("index was found by pc (%#lx): %p\n", frame->pc, idx);

        /* seed the virtual register set from the current frame */
        ctrl.vrs[FP_THUMB] = frame->fp_thumb;
        ctrl.vrs[FP_ARM] = frame->fp_arm;

        ctrl.vrs[SP] = frame->sp;
        ctrl.vrs[LR] = frame->lr;
        ctrl.vrs[PC] = 0;  /* 0 means "not restored yet" */

        err = read_mmap_data(ri->mmap, &idx->insn, &val);
        if (err < 0)
                return err;

        if (val == 1) {
                /* can't unwind */
                return -QUADD_URC_CANTUNWIND;
        } else if ((val & 0x80000000) == 0) {
                /* prel31 to the unwind table */
                ctrl.insn = (u32 *)(unsigned long)
                                mmap_prel31_to_addr(&idx->insn, ri, 1, 0, 1);
                if (!ctrl.insn)
                        return -QUADD_URC_EACCESS;
        } else if ((val & 0xff000000) == 0x80000000) {
                /* only personality routine 0 supported in the index */
                ctrl.insn = &idx->insn;
        } else {
                pr_debug("unsupported personality routine %#x in the index at %p\n",
                         val, idx);
                return -QUADD_URC_UNSUPPORTED_PR;
        }

        err = read_mmap_data(ri->mmap, ctrl.insn, &val);
        if (err < 0)
                return err;

        /* check the personality routine */
        if ((val & 0xff000000) == 0x80000000) {
                /* short format: opcodes start at byte 2 of this word */
                ctrl.byte = 2;
                ctrl.entries = 1;
        } else if ((val & 0xff000000) == 0x81000000) {
                /* long format: byte 2 holds the extra word count */
                ctrl.byte = 1;
                ctrl.entries = 1 + ((val & 0x00ff0000) >> 16);
        } else {
                pr_debug("unsupported personality routine %#x at %p\n",
                         val, ctrl.insn);
                return -QUADD_URC_UNSUPPORTED_PR;
        }

        while (ctrl.entries > 0) {
                err = unwind_exec_insn(ri->mmap, &ctrl);
                if (err < 0)
                        return err;

                /* SP must stay word-aligned and within [low, high) */
                if (ctrl.vrs[SP] & 0x03 ||
                    ctrl.vrs[SP] < low || ctrl.vrs[SP] >= high)
                        return -QUADD_URC_SP_INCORRECT;
        }

        if (ctrl.vrs[PC] == 0) {
                /* PC was never restored explicitly: return address is LR */
                ctrl.vrs[PC] = ctrl.vrs[LR];
                *unw_type = QUADD_UNW_TYPE_LR_UT;
        } else {
                *unw_type = QUADD_UNW_TYPE_UT;
        }

        if (!validate_pc_addr(ctrl.vrs[PC], sizeof(u32)))
                return -QUADD_URC_PC_INCORRECT;

        /* commit the unwound register state back to the caller's frame */
        frame->fp_thumb = ctrl.vrs[FP_THUMB];
        frame->fp_arm = ctrl.vrs[FP_ARM];

        frame->sp = ctrl.vrs[SP];
        frame->lr = ctrl.vrs[LR];
        frame->pc = ctrl.vrs[PC];

        return 0;
}
942
943 static void
944 unwind_backtrace(struct quadd_callchain *cc,
945                  struct ex_region_info *ri,
946                  struct stackframe *frame,
947                  struct vm_area_struct *vma_sp,
948                  struct task_struct *task)
949 {
950         unsigned int unw_type;
951         struct ex_region_info ri_new;
952
953         cc->unw_rc = QUADD_URC_FAILURE;
954
955         pr_debug("fp_arm: %#lx, fp_thumb: %#lx, sp: %#lx, lr: %#lx, pc: %#lx\n",
956                  frame->fp_arm, frame->fp_thumb,
957                  frame->sp, frame->lr, frame->pc);
958         pr_debug("vma_sp: %#lx - %#lx, length: %#lx\n",
959                  vma_sp->vm_start, vma_sp->vm_end,
960                  vma_sp->vm_end - vma_sp->vm_start);
961
962         while (1) {
963                 long err;
964                 int nr_added;
965                 unsigned long where = frame->pc;
966                 struct vm_area_struct *vma_pc;
967                 struct mm_struct *mm = task->mm;
968
969                 if (!mm)
970                         break;
971
972                 if (!validate_stack_addr(frame->sp, vma_sp, sizeof(u32))) {
973                         cc->unw_rc = -QUADD_URC_SP_INCORRECT;
974                         break;
975                 }
976
977                 vma_pc = find_vma(mm, frame->pc);
978                 if (!vma_pc)
979                         break;
980
981                 if (!is_vma_addr(ri->tabs.exidx.addr, vma_pc, sizeof(u32))) {
982                         err = __search_ex_region(vma_pc->vm_start, &ri_new);
983                         if (err) {
984                                 cc->unw_rc = QUADD_URC_TBL_NOT_EXIST;
985                                 break;
986                         }
987
988                         ri = &ri_new;
989                 }
990
991                 err = unwind_frame(ri, frame, vma_sp, &unw_type);
992                 if (err < 0) {
993                         pr_debug("end unwind, urc: %ld\n", err);
994                         cc->unw_rc = -err;
995                         break;
996                 }
997
998                 pr_debug("function at [<%08lx>] from [<%08lx>]\n",
999                          where, frame->pc);
1000
1001                 cc->curr_sp = frame->sp;
1002                 cc->curr_fp = frame->fp_arm;
1003                 cc->curr_pc = frame->pc;
1004
1005                 nr_added = quadd_callchain_store(cc, frame->pc, unw_type);
1006                 if (nr_added == 0)
1007                         break;
1008         }
1009 }
1010
/*
 * Build (or resume building) a user-space callchain for @task using the
 * exception-table unwinder.
 *
 * Returns the number of entries now in @cc (cc->nr), or 0 when the
 * unwind cannot be started (no regs/mm, 64-bit user mode, VMA or
 * exception-table lookup failure).
 *
 * If @cc already holds entries (nr_prev > 0), unwinding resumes from the
 * checkpoint (curr_pc/curr_sp/curr_fp) saved by unwind_backtrace();
 * otherwise it starts from the live register state in @regs.
 */
unsigned int
quadd_get_user_callchain_ut(struct pt_regs *regs,
			    struct quadd_callchain *cc,
			    struct task_struct *task)
{
	long err;
	int nr_prev = cc->nr;
	unsigned long ip, sp, lr;
	struct vm_area_struct *vma, *vma_sp;
	struct mm_struct *mm = task->mm;
	struct ex_region_info ri;
	struct stackframe frame;

	if (!regs || !mm)
		return 0;

#ifdef CONFIG_ARM64
	/* only 32-bit (compat) user tasks carry ARM EHABI tables */
	if (!compat_user_mode(regs)) {
		pr_warn_once("user_mode 64: unsupported\n");
		return 0;
	}
#endif

	/* previous pass already hit the depth limit: nothing more to add */
	if (cc->unw_rc == QUADD_URC_LEVEL_TOO_DEEP)
		return nr_prev;

	cc->unw_rc = QUADD_URC_FAILURE;

	if (nr_prev > 0) {
		/* resume from the checkpoint left by unwind_backtrace() */
		ip = cc->curr_pc;
		sp = cc->curr_sp;
		lr = 0;

		frame.fp_thumb = 0;
		frame.fp_arm = cc->curr_fp;
	} else {
		/* fresh unwind: seed the frame from the live registers */
		ip = instruction_pointer(regs);
		sp = quadd_user_stack_pointer(regs);
		lr = quadd_user_link_register(regs);

#ifdef CONFIG_ARM64
		/* compat r7/r11: Thumb and ARM frame pointers (cf. enum regs) */
		frame.fp_thumb = regs->compat_usr(7);
		frame.fp_arm = regs->compat_usr(11);
#else
		frame.fp_thumb = regs->ARM_r7;
		frame.fp_arm = regs->ARM_fp;
#endif
	}

	frame.pc = ip;
	frame.sp = sp;
	frame.lr = lr;

	/*
	 * NOTE(review): find_vma() normally requires mmap_sem to be held;
	 * presumably the profiler's calling context makes this safe — confirm.
	 */
	vma = find_vma(mm, ip);
	if (!vma)
		return 0;

	vma_sp = find_vma(mm, sp);
	if (!vma_sp)
		return 0;

	err = __search_ex_region(vma->vm_start, &ri);
	if (err) {
		cc->unw_rc = QUADD_URC_TBL_NOT_EXIST;
		return 0;
	}

	unwind_backtrace(cc, &ri, &frame, vma_sp, task);

	return cc->nr;
}
1082
1083 int
1084 quadd_is_ex_entry_exist(struct pt_regs *regs,
1085                         unsigned long addr,
1086                         struct task_struct *task)
1087 {
1088         long err;
1089         u32 value;
1090         const struct unwind_idx *idx;
1091         struct ex_region_info ri;
1092         struct vm_area_struct *vma;
1093         struct mm_struct *mm = task->mm;
1094
1095         if (!regs || !mm)
1096                 return 0;
1097
1098 #ifdef CONFIG_ARM64
1099         if (!compat_user_mode(regs))
1100                 return 0;
1101 #endif
1102
1103         vma = find_vma(mm, addr);
1104         if (!vma)
1105                 return 0;
1106
1107         err = __search_ex_region(vma->vm_start, &ri);
1108         if (err)
1109                 return 0;
1110
1111         idx = unwind_find_idx(&ri, addr);
1112         if (IS_ERR_OR_NULL(idx))
1113                 return 0;
1114
1115         err = read_mmap_data(ri.mmap, &idx->insn, &value);
1116         if (err < 0)
1117                 return 0;
1118
1119         if (value == 1)
1120                 return 0;
1121
1122         return 1;
1123 }
1124
1125 int quadd_unwind_start(struct task_struct *task)
1126 {
1127         struct regions_data *rd, *rd_old;
1128         rd = rd_alloc(QUADD_EXTABS_SIZE);
1129
1130         spin_lock(&ctx.lock);
1131
1132         rd_old = rcu_dereference(ctx.rd);
1133         if (rd_old)
1134                 pr_warn("%s: warning: rd_old\n", __func__);
1135
1136         if (IS_ERR_OR_NULL(rd)) {
1137                 pr_err("%s: error: rd_alloc\n", __func__);
1138                 spin_unlock(&ctx.lock);
1139                 return -ENOMEM;
1140         }
1141
1142         rcu_assign_pointer(ctx.rd, rd);
1143
1144         if (rd_old)
1145                 call_rcu(&rd_old->rcu, rd_free_rcu);
1146
1147         ctx.pid = task->tgid;
1148
1149         ctx.ex_tables_size = 0;
1150
1151         spin_unlock(&ctx.lock);
1152
1153         return 0;
1154 }
1155
1156 void quadd_unwind_stop(void)
1157 {
1158         int i;
1159         unsigned long nr_entries, size;
1160         struct regions_data *rd;
1161         struct ex_region_info *ri;
1162
1163         spin_lock(&ctx.lock);
1164
1165         ctx.pid = 0;
1166
1167         rd = rcu_dereference(ctx.rd);
1168         if (!rd)
1169                 goto out;
1170
1171         nr_entries = rd->curr_nr;
1172         size = rd->size;
1173
1174         for (i = 0; i < nr_entries; i++) {
1175                 ri = &rd->entries[i];
1176                 clean_mmap(rd, ri->mmap, 0);
1177         }
1178
1179         rcu_assign_pointer(ctx.rd, NULL);
1180         call_rcu(&rd->rcu, rd_free_rcu);
1181
1182 out:
1183         spin_unlock(&ctx.lock);
1184         pr_info("exception tables size: %lu bytes\n", ctx.ex_tables_size);
1185 }
1186
1187 int quadd_unwind_init(void)
1188 {
1189         spin_lock_init(&ctx.lock);
1190         rcu_assign_pointer(ctx.rd, NULL);
1191         ctx.pid = 0;
1192
1193         return 0;
1194 }
1195
/*
 * Module teardown: release all region data via quadd_unwind_stop(), then
 * wait for the call_rcu() callbacks it queued (rd_free_rcu) to complete
 * so no callback can run after the module is gone. Order matters:
 * rcu_barrier() must follow the call that queues the callbacks.
 */
void quadd_unwind_deinit(void)
{
	quadd_unwind_stop();
	rcu_barrier();
}