/*
 * Copyright (C) 2014-2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sort.h>

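/* Return true if @loc falls inside the module's init region. */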
static bool in_init(const struct module *mod, void *loc)
{
        return (u64)loc - (u64)mod->init_layout.base < mod->init_layout.size;
}

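/*
 * Emit a PLT entry so that the branch at @loc can reach the symbol described
 * by @rela/@sym even when the target lies outside direct-branch range. The
 * entry goes into the core or init PLT section depending on where @loc lives,
 * and the address of the (possibly pre-existing, deduplicated) entry is
 * returned so the branch can be redirected to it.
 */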
u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela,
                          Elf64_Sym *sym)
{
        struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
                                                          &mod->arch.init;
        struct plt_entry *plt = (struct plt_entry *)pltsec->plt->sh_addr;
        int i = pltsec->plt_num_entries;
        u64 val = sym->st_value + rela->r_addend;

        plt[i] = get_plt_entry(val);

        /*
         * Check if the entry we just created is a duplicate. Given that the
         * relocations are sorted, this will be the last entry we allocated
         * (if one exists).
         */
        if (i > 0 && plt_entries_equal(plt + i, plt + i - 1))
                return (u64)&plt[i - 1];

        pltsec->plt_num_entries++;
        if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries))
                return 0;

        return (u64)&plt[i];
}

#ifdef CONFIG_ARM64_ERRATUM_843419
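/*
 * Emit a veneer for an ADRP instruction that would otherwise sit at an offset
 * affected by Cortex-A53 erratum 843419: materialize the target page address
 * in the ADRP's destination register with a movn/movk sequence, then branch
 * back to the instruction following the original ADRP.
 */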
u64 module_emit_adrp_veneer(struct module *mod, void *loc, u64 val)
{
        struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
                                                          &mod->arch.init;
        struct plt_entry *plt = (struct plt_entry *)pltsec->plt->sh_addr;
        int i = pltsec->plt_num_entries++;
        u32 mov0, mov1, mov2, br;
        int rd;

        if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries))
                return 0;

        /* get the destination register of the ADRP instruction */
        rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD,
                                          le32_to_cpup((__le32 *)loc));

        /* generate the veneer instructions */
        mov0 = aarch64_insn_gen_movewide(rd, (u16)~val, 0,
                                         AARCH64_INSN_VARIANT_64BIT,
                                         AARCH64_INSN_MOVEWIDE_INVERSE);
        mov1 = aarch64_insn_gen_movewide(rd, (u16)(val >> 16), 16,
                                         AARCH64_INSN_VARIANT_64BIT,
                                         AARCH64_INSN_MOVEWIDE_KEEP);
        mov2 = aarch64_insn_gen_movewide(rd, (u16)(val >> 32), 32,
                                         AARCH64_INSN_VARIANT_64BIT,
                                         AARCH64_INSN_MOVEWIDE_KEEP);
        br = aarch64_insn_gen_branch_imm((u64)&plt[i].br, (u64)loc + 4,
                                         AARCH64_INSN_BRANCH_NOLINK);

        plt[i] = (struct plt_entry){
                        cpu_to_le32(mov0),
                        cpu_to_le32(mov1),
                        cpu_to_le32(mov2),
                        cpu_to_le32(br)
                };

        return (u64)&plt[i];
}
#endif

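/* Three-way compare: -1 if a < b, 1 if a > b, 0 if they are equal. */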
#define cmp_3way(a,b) ((a) < (b) ? -1 : (a) > (b))

static int cmp_rela(const void *a, const void *b)
{
        const Elf64_Rela *x = a, *y = b;
        int i;

        /* sort by type, symbol index and addend */
        i = cmp_3way(ELF64_R_TYPE(x->r_info), ELF64_R_TYPE(y->r_info));
        if (i == 0)
                i = cmp_3way(ELF64_R_SYM(x->r_info), ELF64_R_SYM(y->r_info));
        if (i == 0)
                i = cmp_3way(x->r_addend, y->r_addend);
        return i;
}

static bool duplicate_rel(const Elf64_Rela *rela, int num)
{
        /*
         * Entries are sorted by type, symbol index and addend. That means
         * that, if a duplicate entry exists, it must be in the preceding
         * slot.
         */
        return num > 0 && cmp_rela(rela + num, rela + num - 1) == 0;
}

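/*
 * Count how many PLT/veneer slots the relocations in one RELA section may
 * require. @dstidx/@dstsec identify the section those relocations apply to.
 * With CONFIG_ARM64_ERRATUM_843419, each ADRP relocation either gets a veneer
 * slot reserved or has @dstsec's alignment raised so that the instruction
 * cannot end up at a vulnerable offset.
 */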
static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num,
                               Elf64_Word dstidx, Elf_Shdr *dstsec)
{
        unsigned int ret = 0;
        Elf64_Sym *s;
        int i;

        for (i = 0; i < num; i++) {
                u64 min_align;

                switch (ELF64_R_TYPE(rela[i].r_info)) {
                case R_AARCH64_JUMP26:
                case R_AARCH64_CALL26:
                        if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
                                break;

                        /*
                         * We only have to consider branch targets that resolve
                         * to symbols that are defined in a different section.
                         * This is not simply a heuristic, it is a fundamental
                         * limitation, since there is no guaranteed way to emit
                         * PLT entries sufficiently close to the branch if the
                         * section size exceeds the range of a branch
                         * instruction. So ignore relocations against defined
                         * symbols if they live in the same section as the
                         * branch instruction itself.
                         */
                        s = syms + ELF64_R_SYM(rela[i].r_info);
                        if (s->st_shndx == dstidx)
                                break;

                        /*
                         * Jump relocations with non-zero addends against
                         * undefined symbols are supported by the ELF spec, but
                         * do not occur in practice (e.g., 'jump n bytes past
                         * the entry point of undefined function symbol f').
                         * So we need to support them, but there is no need to
                         * take them into consideration when trying to optimize
                         * this code. So let's only check for duplicates when
                         * the addend is zero: this allows us to record the PLT
                         * entry address in the symbol table itself, rather than
                         * having to search the list for duplicates each time we
                         * emit one.
                         */
                        if (rela[i].r_addend != 0 || !duplicate_rel(rela, i))
                                ret++;
                        break;
                case R_AARCH64_ADR_PREL_PG_HI21_NC:
                case R_AARCH64_ADR_PREL_PG_HI21:
                        if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_843419))
                                break;

                        /*
                         * Determine the minimal safe alignment for this ADRP
                         * instruction: the section alignment at which it is
                         * guaranteed not to appear at a vulnerable offset.
                         *
                         * This comes down to finding the least significant zero
                         * bit in bits [11:3] of the section offset, and
                         * increasing the section's alignment so that the
                         * resulting address of this instruction is guaranteed
                         * to equal the offset in that particular bit (as well
                         * as all less significant bits). This ensures that the
                         * address modulo 4 KB != 0xff8 or 0xffc (which would
                         * have all ones in bits [11:3]).
                         */
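                        /*
                         * E.g., an offset of 0xff8 (bits [11:3] all ones)
                         * yields ffz(0xfff) == 12 and min_align == 8 KB, so a
                         * veneer slot is reserved below; an offset of 0x10
                         * yields ffz(0x17) == 3 and min_align == 16, so
                         * bumping the section alignment to 16 bytes suffices.
                         */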
                        min_align = 2ULL << ffz(rela[i].r_offset | 0x7);

                        /*
                         * Allocate veneer space for each ADRP that may appear
                         * at a vulnerable offset nonetheless. At relocation
                         * time, some of these will remain unused since some
                         * ADRP instructions can be patched to ADR instructions
                         * instead.
                         */
                        if (min_align > SZ_4K)
                                ret++;
                        else
                                dstsec->sh_addralign = max(dstsec->sh_addralign,
                                                           min_align);
                        break;
                }
        }
        return ret;
}

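/*
 * Called by the generic module loader before the module's sections are laid
 * out and relocated: size the (initially empty) .plt and .init.plt sections
 * for the worst case, and turn the optional ftrace trampoline section into a
 * single allocatable PLT slot.
 */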
int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
                              char *secstrings, struct module *mod)
{
        unsigned long core_plts = 0;
        unsigned long init_plts = 0;
        Elf64_Sym *syms = NULL;
        Elf_Shdr *tramp = NULL;
        int i;

        /*
         * Find the empty .plt sections so we can expand them to store the
         * PLT entries. Record the symtab address as well.
         */
        for (i = 0; i < ehdr->e_shnum; i++) {
                if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt"))
                        mod->arch.core.plt = sechdrs + i;
                else if (!strcmp(secstrings + sechdrs[i].sh_name, ".init.plt"))
                        mod->arch.init.plt = sechdrs + i;
                else if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
                         !strcmp(secstrings + sechdrs[i].sh_name,
                                 ".text.ftrace_trampoline"))
                        tramp = sechdrs + i;
                else if (sechdrs[i].sh_type == SHT_SYMTAB)
                        syms = (Elf64_Sym *)sechdrs[i].sh_addr;
        }

        if (!mod->arch.core.plt || !mod->arch.init.plt) {
                pr_err("%s: module PLT section(s) missing\n", mod->name);
                return -ENOEXEC;
        }
        if (!syms) {
                pr_err("%s: module symtab section missing\n", mod->name);
                return -ENOEXEC;
        }

        for (i = 0; i < ehdr->e_shnum; i++) {
                Elf64_Rela *rels = (void *)ehdr + sechdrs[i].sh_offset;
                int numrels = sechdrs[i].sh_size / sizeof(Elf64_Rela);
                Elf64_Shdr *dstsec = sechdrs + sechdrs[i].sh_info;

                if (sechdrs[i].sh_type != SHT_RELA)
                        continue;

                /* ignore relocations that operate on non-exec sections */
                if (!(dstsec->sh_flags & SHF_EXECINSTR))
                        continue;

                /* sort by type, symbol index and addend */
                sort(rels, numrels, sizeof(Elf64_Rela), cmp_rela, NULL);

                if (strncmp(secstrings + dstsec->sh_name, ".init", 5) != 0)
                        core_plts += count_plts(syms, rels, numrels,
                                                sechdrs[i].sh_info, dstsec);
                else
                        init_plts += count_plts(syms, rels, numrels,
                                                sechdrs[i].sh_info, dstsec);
        }

        mod->arch.core.plt->sh_type = SHT_NOBITS;
        mod->arch.core.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
        mod->arch.core.plt->sh_addralign = L1_CACHE_BYTES;
        mod->arch.core.plt->sh_size = (core_plts + 1) * sizeof(struct plt_entry);
        mod->arch.core.plt_num_entries = 0;
        mod->arch.core.plt_max_entries = core_plts;

        mod->arch.init.plt->sh_type = SHT_NOBITS;
        mod->arch.init.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
        mod->arch.init.plt->sh_addralign = L1_CACHE_BYTES;
        mod->arch.init.plt->sh_size = (init_plts + 1) * sizeof(struct plt_entry);
        mod->arch.init.plt_num_entries = 0;
        mod->arch.init.plt_max_entries = init_plts;

        if (tramp) {
                tramp->sh_type = SHT_NOBITS;
                tramp->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
                tramp->sh_addralign = __alignof__(struct plt_entry);
                tramp->sh_size = sizeof(struct plt_entry);
        }

        return 0;
}