module.c

/*
 * AArch64 loadable module support.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
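
/*
 * Module text and data live in the dedicated MODULES_VADDR..MODULES_END
 * window rather than the generic vmalloc area. On arm64 this window is
 * placed near the kernel image, which keeps module code within reach of
 * the PC-relative relocations handled below (e.g. R_AARCH64_CALL26 spans
 * +/-128MB).
 */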
void *module_alloc(unsigned long size)
{
        return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
                                    GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
                                    __builtin_return_address(0));
}

enum aarch64_reloc_op {
        RELOC_OP_NONE,
        RELOC_OP_ABS,
        RELOC_OP_PREL,
        RELOC_OP_PAGE,
};
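
/*
 * Resolve the relocation expression for a given operation: RELOC_OP_ABS
 * yields S + A, RELOC_OP_PREL yields S + A - P and RELOC_OP_PAGE yields
 * Page(S + A) - Page(P), where S + A is passed in as 'val' and P is the
 * place being patched.
 */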
static u64 do_reloc(enum aarch64_reloc_op reloc_op, void *place, u64 val)
{
        switch (reloc_op) {
        case RELOC_OP_ABS:
                return val;
        case RELOC_OP_PREL:
                return val - (u64)place;
        case RELOC_OP_PAGE:
                return (val & ~0xfff) - ((u64)place & ~0xfff);
        case RELOC_OP_NONE:
                return 0;
        }

        pr_err("do_reloc: unknown relocation operation %d\n", reloc_op);
        return 0;
}
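
/*
 * reloc_data() handles the data relocations: it stores the low 'len' bits
 * of the resolved value at 'place' and reports -ERANGE when the value
 * does not fit in a 'len'-bit field.
 */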
static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
{
        u64 imm_mask = (1 << len) - 1;
        s64 sval = do_reloc(op, place, val);

        switch (len) {
        case 16:
                *(s16 *)place = sval;
                break;
        case 32:
                *(s32 *)place = sval;
                break;
        case 64:
                *(s64 *)place = sval;
                break;
        default:
                pr_err("Invalid length (%d) for data relocation\n", len);
                return 0;
        }

        /*
         * Extract the upper value bits (including the sign bit) and
         * shift them to bit 0.
         */
        sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);

        /*
         * Overflow has occurred if the value is not representable in
         * len bits (i.e the bottom len bits are not sign-extended and
         * the top bits are not all zero).
         */
        if ((u64)(sval + 1) > 2)
                return -ERANGE;

        return 0;
}
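
/*
 * Each aarch64_imm_type below identifies where an instruction's immediate
 * field sits in the 32-bit encoding and how wide it is, so that
 * encode_insn_immediate() can mask out the old field and splice in a new
 * value.
 */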
enum aarch64_imm_type {
        INSN_IMM_MOVNZ,
        INSN_IMM_MOVK,
        INSN_IMM_ADR,
        INSN_IMM_26,
        INSN_IMM_19,
        INSN_IMM_16,
        INSN_IMM_14,
        INSN_IMM_12,
        INSN_IMM_9,
};

static u32 encode_insn_immediate(enum aarch64_imm_type type, u32 insn, u64 imm)
{
        u32 immlo, immhi, lomask, himask, mask;
        int shift;

        /* The instruction stream is always little endian. */
        insn = le32_to_cpu(insn);

        switch (type) {
        case INSN_IMM_MOVNZ:
                /*
                 * For signed MOVW relocations, we have to manipulate the
                 * instruction encoding depending on whether or not the
                 * immediate is less than zero.
                 */
                insn &= ~(3 << 29);
                if ((s64)imm >= 0) {
                        /* >=0: Set the instruction to MOVZ (opcode 10b). */
                        insn |= 2 << 29;
                } else {
                        /*
                         * <0: Set the instruction to MOVN (opcode 00b).
                         * Since we've masked the opcode already, we
                         * don't need to do anything other than
                         * inverting the new immediate field.
                         */
                        imm = ~imm;
                }
                /*
                 * Deliberate fall-through: MOVN/MOVZ/MOVK all carry their
                 * 16-bit immediate in the same field, so reuse the MOVK
                 * mask and shift below.
                 */
        case INSN_IMM_MOVK:
                mask = BIT(16) - 1;
                shift = 5;
                break;
        case INSN_IMM_ADR:
                lomask = 0x3;
                himask = 0x7ffff;
                immlo = imm & lomask;
                imm >>= 2;
                immhi = imm & himask;
                imm = (immlo << 24) | (immhi);
                mask = (lomask << 24) | (himask);
                shift = 5;
                break;
        case INSN_IMM_26:
                mask = BIT(26) - 1;
                shift = 0;
                break;
        case INSN_IMM_19:
                mask = BIT(19) - 1;
                shift = 5;
                break;
        case INSN_IMM_16:
                mask = BIT(16) - 1;
                shift = 5;
                break;
        case INSN_IMM_14:
                mask = BIT(14) - 1;
                shift = 5;
                break;
        case INSN_IMM_12:
                mask = BIT(12) - 1;
                shift = 10;
                break;
        case INSN_IMM_9:
                mask = BIT(9) - 1;
                shift = 12;
                break;
        default:
                pr_err("encode_insn_immediate: unknown immediate encoding %d\n",
                       type);
                return 0;
        }

        /* Update the immediate field. */
        insn &= ~(mask << shift);
        insn |= (imm & mask) << shift;

        return cpu_to_le32(insn);
}
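
/*
 * MOVW relocations patch a single 16-bit group of the value into a
 * MOVZ/MOVN/MOVK instruction; 'lsb' selects the group (0, 16, 32 or 48
 * for G0..G3) and the bits above the patched group feed the overflow
 * check.
 */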
static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
                           int lsb, enum aarch64_imm_type imm_type)
{
        u64 imm, limit = 0;
        s64 sval;
        u32 insn = *(u32 *)place;

        sval = do_reloc(op, place, val);
        sval >>= lsb;
        imm = sval & 0xffff;

        /* Update the instruction with the new encoding. */
        *(u32 *)place = encode_insn_immediate(imm_type, insn, imm);

        /* Shift out the immediate field. */
        sval >>= 16;

        /*
         * For unsigned immediates, the overflow check is straightforward.
         * For signed immediates, the sign bit is actually the bit past the
         * most significant bit of the field.
         * The INSN_IMM_16 immediate type is unsigned.
         */
        if (imm_type != INSN_IMM_16) {
                sval++;
                limit++;
        }

        /* Check the upper bits depending on the sign of the immediate. */
        if ((u64)sval > limit)
                return -ERANGE;

        return 0;
}
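
/*
 * Generic immediate relocation: extract bits [lsb, lsb + len) of the
 * resolved value, splice them into the instruction's immediate field and
 * return -ERANGE if the remaining upper bits are not simply a sign
 * extension of the field.
 */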
static int reloc_insn_imm(enum aarch64_reloc_op op, void *place, u64 val,
                          int lsb, int len, enum aarch64_imm_type imm_type)
{
        u64 imm, imm_mask;
        s64 sval;
        u32 insn = *(u32 *)place;

        /* Calculate the relocation value. */
        sval = do_reloc(op, place, val);
        sval >>= lsb;

        /* Extract the value bits and shift them to bit 0. */
        imm_mask = (BIT(lsb + len) - 1) >> lsb;
        imm = sval & imm_mask;

        /* Update the instruction's immediate field. */
        *(u32 *)place = encode_insn_immediate(imm_type, insn, imm);

        /*
         * Extract the upper value bits (including the sign bit) and
         * shift them to bit 0.
         */
        sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);

        /*
         * Overflow has occurred if the upper bits are not all equal to
         * the sign bit of the value.
         */
        if ((u64)(sval + 1) >= 2)
                return -ERANGE;

        return 0;
}
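
/*
 * Apply one RELA section to a module being loaded. For every entry, 'loc'
 * is the place P and 'val' is S + A in AArch64 ELF terms. Several of the
 * *_NC ("no check") relocation types deliberately fall through to their
 * checked counterparts after clearing overflow_check.
 */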
int apply_relocate_add(Elf64_Shdr *sechdrs,
                       const char *strtab,
                       unsigned int symindex,
                       unsigned int relsec,
                       struct module *me)
{
        unsigned int i;
        int ovf;
        bool overflow_check;
        Elf64_Sym *sym;
        void *loc;
        u64 val;
        Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;

        for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
                /* loc corresponds to P in the AArch64 ELF document. */
                loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
                        + rel[i].r_offset;

                /* sym is the ELF symbol we're referring to. */
                sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
                        + ELF64_R_SYM(rel[i].r_info);

                /* val corresponds to (S + A) in the AArch64 ELF document. */
                val = sym->st_value + rel[i].r_addend;

                /* Check for overflow by default. */
                overflow_check = true;

                /* Perform the static relocation. */
                switch (ELF64_R_TYPE(rel[i].r_info)) {
                /* Null relocations. */
                case R_ARM_NONE:
                case R_AARCH64_NONE:
                        ovf = 0;
                        break;

                /* Data relocations. */
                case R_AARCH64_ABS64:
                        overflow_check = false;
                        ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
                        break;
                case R_AARCH64_ABS32:
                        ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
                        break;
                case R_AARCH64_ABS16:
                        ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
                        break;
                case R_AARCH64_PREL64:
                        overflow_check = false;
                        ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
                        break;
                case R_AARCH64_PREL32:
                        ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
                        break;
                case R_AARCH64_PREL16:
                        ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
                        break;

                /* MOVW instruction relocations. */
                case R_AARCH64_MOVW_UABS_G0_NC:
                        overflow_check = false;
                case R_AARCH64_MOVW_UABS_G0:
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
                                              INSN_IMM_16);
                        break;
                case R_AARCH64_MOVW_UABS_G1_NC:
                        overflow_check = false;
                case R_AARCH64_MOVW_UABS_G1:
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
                                              INSN_IMM_16);
                        break;
                case R_AARCH64_MOVW_UABS_G2_NC:
                        overflow_check = false;
                case R_AARCH64_MOVW_UABS_G2:
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
                                              INSN_IMM_16);
                        break;
                case R_AARCH64_MOVW_UABS_G3:
                        /* We're using the top bits so we can't overflow. */
                        overflow_check = false;
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
                                              INSN_IMM_16);
                        break;
                case R_AARCH64_MOVW_SABS_G0:
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
                                              INSN_IMM_MOVNZ);
                        break;
                case R_AARCH64_MOVW_SABS_G1:
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
                                              INSN_IMM_MOVNZ);
                        break;
                case R_AARCH64_MOVW_SABS_G2:
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
                                              INSN_IMM_MOVNZ);
                        break;
                case R_AARCH64_MOVW_PREL_G0_NC:
                        overflow_check = false;
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
                                              INSN_IMM_MOVK);
                        break;
                case R_AARCH64_MOVW_PREL_G0:
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
                                              INSN_IMM_MOVNZ);
                        break;
                case R_AARCH64_MOVW_PREL_G1_NC:
                        overflow_check = false;
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
                                              INSN_IMM_MOVK);
                        break;
                case R_AARCH64_MOVW_PREL_G1:
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
                                              INSN_IMM_MOVNZ);
                        break;
                case R_AARCH64_MOVW_PREL_G2_NC:
                        overflow_check = false;
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
                                              INSN_IMM_MOVK);
                        break;
                case R_AARCH64_MOVW_PREL_G2:
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
                                              INSN_IMM_MOVNZ);
                        break;
                case R_AARCH64_MOVW_PREL_G3:
                        /* We're using the top bits so we can't overflow. */
                        overflow_check = false;
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
                                              INSN_IMM_MOVNZ);
                        break;

                /* Immediate instruction relocations. */
                case R_AARCH64_LD_PREL_LO19:
                        ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
                                             INSN_IMM_19);
                        break;
                case R_AARCH64_ADR_PREL_LO21:
                        ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
                                             INSN_IMM_ADR);
                        break;
                case R_AARCH64_ADR_PREL_PG_HI21_NC:
                        overflow_check = false;
                case R_AARCH64_ADR_PREL_PG_HI21:
                        ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21,
                                             INSN_IMM_ADR);
                        break;
                case R_AARCH64_ADD_ABS_LO12_NC:
                case R_AARCH64_LDST8_ABS_LO12_NC:
                        overflow_check = false;
                        ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
                                             INSN_IMM_12);
                        break;
                case R_AARCH64_LDST16_ABS_LO12_NC:
                        overflow_check = false;
                        ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
                                             INSN_IMM_12);
                        break;
                case R_AARCH64_LDST32_ABS_LO12_NC:
                        overflow_check = false;
                        ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
                                             INSN_IMM_12);
                        break;
                case R_AARCH64_LDST64_ABS_LO12_NC:
                        overflow_check = false;
                        ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
                                             INSN_IMM_12);
                        break;
                case R_AARCH64_LDST128_ABS_LO12_NC:
                        overflow_check = false;
                        ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
                                             INSN_IMM_12);
                        break;
                case R_AARCH64_TSTBR14:
                        ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
                                             INSN_IMM_14);
                        break;
                case R_AARCH64_CONDBR19:
                        ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
                                             INSN_IMM_19);
                        break;
                case R_AARCH64_JUMP26:
                case R_AARCH64_CALL26:
                        ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
                                             INSN_IMM_26);
                        break;
                default:
                        pr_err("module %s: unsupported RELA relocation: %llu\n",
                               me->name, ELF64_R_TYPE(rel[i].r_info));
                        return -ENOEXEC;
                }

                if (overflow_check && ovf == -ERANGE)
                        goto overflow;
        }

        return 0;

overflow:
        pr_err("module %s: overflow in relocation type %d val %Lx\n",
               me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
        return -ENOEXEC;
}