/*
 * AArch64 loadable module support.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/sections.h>
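
/*
 * Allocate module text/data. The dedicated module region is tried
 * first; when CONFIG_ARM64_MODULE_PLTS can bridge larger branch
 * displacements, exhaustion of that region falls back to the rest of
 * the vmalloc space (see the KASAN caveat below).
 */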
void *module_alloc(unsigned long size)
{
	void *p;

	p = __vmalloc_node_range(size, MODULE_ALIGN, MODULES_VADDR, MODULES_END,
				GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
				NUMA_NO_NODE, __builtin_return_address(0));

	if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
	    !IS_ENABLED(CONFIG_KASAN))
		/*
		 * KASAN can only deal with module allocations being served
		 * from the reserved module region, since the remainder of
		 * the vmalloc region is already backed by zero shadow pages,
		 * and punching holes into it is non-trivial. Since the module
		 * region is not randomized when KASAN is enabled, it is even
		 * less likely that the module region gets exhausted, so we
		 * can simply omit this fallback in that case.
		 */
		p = __vmalloc_node_range(size, MODULE_ALIGN, VMALLOC_START,
				VMALLOC_END, GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
				NUMA_NO_NODE, __builtin_return_address(0));

	if (p && (kasan_module_alloc(p, size) < 0)) {
		vfree(p);
		return NULL;
	}

	return p;
}
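
/*
 * Every relocation type handled below reduces to one of these basic
 * operations on (S + A): taken as-is (absolute), relative to the place
 * being patched (PC-relative), or relative at 4KiB-page granularity
 * (the form consumed by ADRP).
 */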
enum aarch64_reloc_op {
	RELOC_OP_NONE,
	RELOC_OP_ABS,
	RELOC_OP_PREL,
	RELOC_OP_PAGE,
};

static u64 do_reloc(enum aarch64_reloc_op reloc_op, void *place, u64 val)
{
	switch (reloc_op) {
	case RELOC_OP_ABS:
		return val;
	case RELOC_OP_PREL:
		return val - (u64)place;
	case RELOC_OP_PAGE:
		return (val & ~0xfff) - ((u64)place & ~0xfff);
	case RELOC_OP_NONE:
		return 0;
	}

	pr_err("do_reloc: unknown relocation operation %d\n", reloc_op);
	return 0;
}
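
/*
 * reloc_data() patches a raw data word in the module image, writing
 * the low bits of the value and reporting -ERANGE if it does not fit
 * the field; the caller decides whether that matters.
 */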
static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
{
	s64 sval = do_reloc(op, place, val);

	switch (len) {
	case 16:
		*(s16 *)place = sval;
		if (sval < S16_MIN || sval > U16_MAX)
			return -ERANGE;
		break;
	case 32:
		*(s32 *)place = sval;
		if (sval < S32_MIN || sval > U32_MAX)
			return -ERANGE;
		break;
	case 64:
		*(s64 *)place = sval;
		break;
	default:
		pr_err("Invalid length (%d) for data relocation\n", len);
		return 0;
	}

	return 0;
}
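
/*
 * Note the asymmetric bounds above: a field of width N accepts any
 * value in [S<N>_MIN, U<N>_MAX], i.e. anything representable as either
 * a signed or an unsigned N-bit quantity, since the ELF producer may
 * treat the addressed datum as either.
 */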

enum aarch64_insn_movw_imm_type {
	AARCH64_INSN_IMM_MOVNZ,
	AARCH64_INSN_IMM_MOVKZ,
};
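
/*
 * MOVNZ relocations may rewrite the instruction to MOVN or MOVZ
 * depending on the sign of the resolved value; MOVKZ relocations
 * leave the opcode alone and only patch the 16-bit immediate.
 */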
static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
			   int lsb, enum aarch64_insn_movw_imm_type imm_type)
{
	u64 imm;
	s64 sval;
	u32 insn = le32_to_cpu(*(u32 *)place);

	sval = do_reloc(op, place, val);
	imm = sval >> lsb;

	if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
		/*
		 * For signed MOVW relocations, we have to manipulate the
		 * instruction encoding depending on whether or not the
		 * immediate is less than zero.
		 */
		insn &= ~(3 << 29);
		if (sval >= 0) {
			/* >=0: Set the instruction to MOVZ (opcode 10b). */
			insn |= 2 << 29;
		} else {
			/*
			 * <0: Set the instruction to MOVN (opcode 00b).
			 *     Since we've masked the opcode already, we
			 *     don't need to do anything other than
			 *     inverting the new immediate field.
			 */
			imm = ~imm;
		}
	}

	/* Update the instruction with the new encoding. */
	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
	*(u32 *)place = cpu_to_le32(insn);

	if (imm > U16_MAX)
		return -ERANGE;

	return 0;
}
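
/*
 * For illustration: a compiler materializes a 64-bit absolute address
 * as a MOVZ/MOVK sequence, one 16-bit group per instruction, which is
 * what the R_AARCH64_MOVW_UABS_G* cases in apply_relocate_add() patch:
 *
 *	movz	x0, #:abs_g3:sym	// bits [63:48], R_AARCH64_MOVW_UABS_G3
 *	movk	x0, #:abs_g2_nc:sym	// bits [47:32], R_AARCH64_MOVW_UABS_G2_NC
 *	movk	x0, #:abs_g1_nc:sym	// bits [31:16], R_AARCH64_MOVW_UABS_G1_NC
 *	movk	x0, #:abs_g0_nc:sym	// bits [15:0],  R_AARCH64_MOVW_UABS_G0_NC
 */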

static int reloc_insn_imm(enum aarch64_reloc_op op, void *place, u64 val,
			  int lsb, int len, enum aarch64_insn_imm_type imm_type)
{
	u64 imm, imm_mask;
	s64 sval;
	u32 insn = le32_to_cpu(*(u32 *)place);

	/* Calculate the relocation value. */
	sval = do_reloc(op, place, val);
	sval >>= lsb;

	/* Extract the value bits and shift them to bit 0. */
	imm_mask = (BIT(lsb + len) - 1) >> lsb;
	imm = sval & imm_mask;

	/* Update the instruction's immediate field. */
	insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
	*(u32 *)place = cpu_to_le32(insn);

	/*
	 * Extract the upper value bits (including the sign bit) and
	 * shift them to bit 0.
	 */
	sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);

	/*
	 * Overflow has occurred if the upper bits are not all equal to
	 * the sign bit of the value.
	 */
	if ((u64)(sval + 1) >= 2)
		return -ERANGE;

	return 0;
}
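
/*
 * The check above relies on sval having been arithmetic-shifted so
 * that an in-range value leaves either 0 (upper bits all zero) or -1
 * (upper bits all ones, matching the sign bit); for those two values,
 * and only those, (u64)(sval + 1) < 2.
 */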

int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	int ovf;
	bool overflow_check;
	Elf64_Sym *sym;
	void *loc;
	u64 val;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* loc corresponds to P in the AArch64 ELF document. */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* sym is the ELF symbol we're referring to. */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		/* val corresponds to (S + A) in the AArch64 ELF document. */
		val = sym->st_value + rel[i].r_addend;

		/* Check for overflow by default. */
		overflow_check = true;

		/* Perform the static relocation. */
		switch (ELF64_R_TYPE(rel[i].r_info)) {
		/* Null relocations. */
		case R_ARM_NONE:
		case R_AARCH64_NONE:
			ovf = 0;
			break;

		/* Data relocations. */
		case R_AARCH64_ABS64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
			break;
		case R_AARCH64_ABS32:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
			break;
		case R_AARCH64_ABS16:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
			break;
		case R_AARCH64_PREL64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
			break;
		case R_AARCH64_PREL32:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
			break;
		case R_AARCH64_PREL16:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
			break;
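
		/*
		 * No range check is needed for the 64-bit data relocations
		 * above: the field is as wide as the computed value, so
		 * overflow_check is cleared.
		 */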

		/* MOVW instruction relocations. */
		case R_AARCH64_MOVW_UABS_G0_NC:
			overflow_check = false;
		case R_AARCH64_MOVW_UABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G1_NC:
			overflow_check = false;
		case R_AARCH64_MOVW_UABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G2_NC:
			overflow_check = false;
		case R_AARCH64_MOVW_UABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_SABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G0_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G0:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G1_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G1:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G2_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G2:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
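
		/*
		 * Note the deliberate fallthroughs above: each _NC ("no
		 * overflow check") absolute variant only clears
		 * overflow_check and then shares the encoding path of its
		 * checked counterpart.
		 */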

		/* Immediate instruction relocations. */
		case R_AARCH64_LD_PREL_LO19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_ADR_PREL_LO21:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
					     AARCH64_INSN_IMM_ADR);
			break;
#ifndef CONFIG_ARM64_ERRATUM_843419
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
			overflow_check = false;
		case R_AARCH64_ADR_PREL_PG_HI21:
			ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21,
					     AARCH64_INSN_IMM_ADR);
			break;
#endif
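		/*
		 * With CONFIG_ARM64_ERRATUM_843419 the ADRP cases above are
		 * compiled out: modules are expected to be built without
		 * ADRP (e.g. via -mcmodel=large), so any ADR_PREL_PG_HI21*
		 * relocation that still appears is rejected through the
		 * default case below.
		 */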
		case R_AARCH64_ADD_ABS_LO12_NC:
		case R_AARCH64_LDST8_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST16_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST32_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST64_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST128_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
					     AARCH64_INSN_IMM_12);
			break;
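
		/*
		 * The LDST{16,32,64,128} variants above patch scaled 12-bit
		 * unsigned offsets: an access of width 2^lsb bytes is
		 * naturally aligned, so the low lsb bits of the offset are
		 * implicitly zero and correspondingly fewer bits are encoded.
		 */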

		case R_AARCH64_TSTBR14:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
					     AARCH64_INSN_IMM_14);
			break;
		case R_AARCH64_CONDBR19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
					     AARCH64_INSN_IMM_26);

			if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
			    ovf == -ERANGE) {
				val = module_emit_plt_entry(me, &rel[i], sym);
				ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
						     26, AARCH64_INSN_IMM_26);
			}
			break;
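
		/*
		 * JUMP26/CALL26 branches reach +/-128MiB (26 bits scaled by
		 * 4). If the target is further away and module PLTs are
		 * configured, a veneer is emitted and the branch is
		 * redirected to it instead.
		 */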

		default:
			pr_err("module %s: unsupported RELA relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}

		if (overflow_check && ovf == -ERANGE)
			goto overflow;
	}

	return 0;

overflow:
	pr_err("module %s: overflow in relocation type %d val %Lx\n",
	       me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
	return -ENOEXEC;
}
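
/*
 * Once relocations are done, patch in any alternative instruction
 * sequences the module carries in its .altinstructions section.
 */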
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s, *se;
	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
		if (strcmp(".altinstructions", secstrs + s->sh_name) == 0) {
			apply_alternatives((void *)s->sh_addr, s->sh_size);
			return 0;
		}
	}

	return 0;
}