/*
 * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
 *
 * Modifications for ppc64:
 * Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
 *
 * Copyright 2008 Michael Ellerman, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>

#include <asm/cputable.h>
#include <asm/code-patching.h>

struct fixup_entry {
	unsigned long	mask;
	unsigned long	value;
	long		start_off;
	long		end_off;
	long		alt_start_off;
	long		alt_end_off;
};

static unsigned int *calc_addr(struct fixup_entry *fcur, long offset)
{
	/*
	 * We store the offset to the code as a negative offset from
	 * the start of the alt_entry, to support the VDSO. This
	 * routine converts that back into an actual address.
	 */
	return (unsigned int *)((unsigned long)fcur + offset);
}

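/*
 * Copy a single instruction from the alternative section to its
 * destination in the feature section. A relative branch whose target
 * lies outside the alternative block is retargeted so that it still
 * reaches the same address after the copy.
 */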
static int patch_alt_instruction(unsigned int *src, unsigned int *dest,
				 unsigned int *alt_start, unsigned int *alt_end)
{
	unsigned int instr;

	instr = *src;

	if (instr_is_relative_branch(*src)) {
		unsigned int *target = (unsigned int *)branch_target(src);

		/* Branch within the section doesn't need translating */
		if (target < alt_start || target >= alt_end) {
			instr = translate_branch(dest, src);
			if (!instr)
				return 1;
		}
	}

	patch_instruction(dest, instr);

	return 0;
}

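/*
 * Apply a single fixup entry. If the feature value matches the entry's
 * mask/value the section is left alone; otherwise the feature section
 * is overwritten with the alternative code and any remainder is filled
 * with nops. Returns non-zero if the alternative is larger than the
 * section it replaces or a branch cannot be translated.
 */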
static int patch_feature_section(unsigned long value, struct fixup_entry *fcur)
{
	unsigned int *start, *end, *alt_start, *alt_end, *src, *dest;

	start = calc_addr(fcur, fcur->start_off);
	end = calc_addr(fcur, fcur->end_off);
	alt_start = calc_addr(fcur, fcur->alt_start_off);
	alt_end = calc_addr(fcur, fcur->alt_end_off);

	if ((alt_end - alt_start) > (end - start))
		return 1;

	if ((value & fcur->mask) == fcur->value)
		return 0;

	src = alt_start;
	dest = start;

	for (; src < alt_end; src++, dest++) {
		if (patch_alt_instruction(src, dest, alt_start, alt_end))
			return 1;
	}

	for (; dest < end; dest++)
		patch_instruction(dest, PPC_INST_NOP);

	return 0;
}

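/*
 * Walk the fixup table between fixup_start and fixup_end and patch
 * each feature section against the given feature value.
 */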
void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
{
	struct fixup_entry *fcur, *fend;

	fcur = fixup_start;
	fend = fixup_end;

	for (; fcur < fend; fcur++) {
		if (patch_feature_section(value, fcur)) {
			WARN_ON(1);
			printk("Unable to patch feature section at %p - %p"
				" with %p - %p\n",
				calc_addr(fcur, fcur->start_off),
				calc_addr(fcur, fcur->end_off),
				calc_addr(fcur, fcur->alt_start_off),
				calc_addr(fcur, fcur->alt_end_off));
		}
	}
}

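/*
 * If the CPU has CPU_FTR_LWSYNC, patch each recorded fixup site with an
 * lwsync instruction; otherwise leave the sites untouched.
 */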
void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
{
	long *start, *end;
	unsigned int *dest;

	if (!(value & CPU_FTR_LWSYNC))
		return;

	start = fixup_start;
	end = fixup_end;

	for (; start < end; start++) {
		dest = (void *)start + *start;
		patch_instruction(dest, PPC_INST_LWSYNC);
	}
}

#ifdef CONFIG_FTR_FIXUP_SELFTEST

#define check(x)	\
	if (!(x)) printk("feature-fixups: test failed at line %d\n", __LINE__);

/* This must be after the text it fixes up, vmlinux.lds.S enforces that atm */
static struct fixup_entry fixup;

static long calc_offset(struct fixup_entry *entry, unsigned int *p)
{
	return (unsigned long)p - (unsigned long)entry;
}

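/*
 * Patch a one-instruction section that has no alternative code (it
 * should be nopped out), and check that patching only happens when the
 * value or mask don't match.
 */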
static void test_basic_patching(void)
{
	extern unsigned int ftr_fixup_test1;
	extern unsigned int end_ftr_fixup_test1;
	extern unsigned int ftr_fixup_test1_orig;
	extern unsigned int ftr_fixup_test1_expected;
	int size = &end_ftr_fixup_test1 - &ftr_fixup_test1;

	fixup.value = fixup.mask = 8;
	fixup.start_off = calc_offset(&fixup, &ftr_fixup_test1 + 1);
	fixup.end_off = calc_offset(&fixup, &ftr_fixup_test1 + 2);
	fixup.alt_start_off = fixup.alt_end_off = 0;

	/* Sanity check */
	check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0);

	/* Check we don't patch if the value matches */
	patch_feature_section(8, &fixup);
	check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0);

	/* Check we do patch if the value doesn't match */
	patch_feature_section(0, &fixup);
	check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_expected, size) == 0);

	/* Check we do patch if the mask doesn't match */
	memcpy(&ftr_fixup_test1, &ftr_fixup_test1_orig, size);
	check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0);
	patch_feature_section(~8, &fixup);
	check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_expected, size) == 0);
}

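/*
 * As above, but with a one-instruction alternative section supplying
 * the replacement code.
 */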
static void test_alternative_patching(void)
{
	extern unsigned int ftr_fixup_test2;
	extern unsigned int end_ftr_fixup_test2;
	extern unsigned int ftr_fixup_test2_orig;
	extern unsigned int ftr_fixup_test2_alt;
	extern unsigned int ftr_fixup_test2_expected;
	int size = &end_ftr_fixup_test2 - &ftr_fixup_test2;

	fixup.value = fixup.mask = 0xF;
	fixup.start_off = calc_offset(&fixup, &ftr_fixup_test2 + 1);
	fixup.end_off = calc_offset(&fixup, &ftr_fixup_test2 + 2);
	fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test2_alt);
	fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test2_alt + 1);

	/* Sanity check */
	check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0);

	/* Check we don't patch if the value matches */
	patch_feature_section(0xF, &fixup);
	check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0);

	/* Check we do patch if the value doesn't match */
	patch_feature_section(0, &fixup);
	check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_expected, size) == 0);

	/* Check we do patch if the mask doesn't match */
	memcpy(&ftr_fixup_test2, &ftr_fixup_test2_orig, size);
	check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0);
	patch_feature_section(~0xF, &fixup);
	check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_expected, size) == 0);
}

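/*
 * The alternative (two instructions) is larger than the one-instruction
 * section it would replace, so patch_feature_section() must fail and
 * leave the original code intact.
 */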
static void test_alternative_case_too_big(void)
{
	extern unsigned int ftr_fixup_test3;
	extern unsigned int end_ftr_fixup_test3;
	extern unsigned int ftr_fixup_test3_orig;
	extern unsigned int ftr_fixup_test3_alt;
	int size = &end_ftr_fixup_test3 - &ftr_fixup_test3;

	fixup.value = fixup.mask = 0xC;
	fixup.start_off = calc_offset(&fixup, &ftr_fixup_test3 + 1);
	fixup.end_off = calc_offset(&fixup, &ftr_fixup_test3 + 2);
	fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test3_alt);
	fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test3_alt + 2);

	/* Sanity check */
	check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);

	/* Expect nothing to be patched, and the error returned to us */
	check(patch_feature_section(0xF, &fixup) == 1);
	check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);
	check(patch_feature_section(0, &fixup) == 1);
	check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);
	check(patch_feature_section(~0xF, &fixup) == 1);
	check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);
}

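/*
 * The alternative is smaller than the section it replaces, so the
 * remainder should be nop-filled. Also exercises a feature flag in a
 * high bit of the mask.
 */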
static void test_alternative_case_too_small(void)
{
	extern unsigned int ftr_fixup_test4;
	extern unsigned int end_ftr_fixup_test4;
	extern unsigned int ftr_fixup_test4_orig;
	extern unsigned int ftr_fixup_test4_alt;
	extern unsigned int ftr_fixup_test4_expected;
	int size = &end_ftr_fixup_test4 - &ftr_fixup_test4;
	unsigned long flag;

	/* Check a high-bit flag */
	flag = 1UL << ((sizeof(unsigned long) - 1) * 8);
	fixup.value = fixup.mask = flag;
	fixup.start_off = calc_offset(&fixup, &ftr_fixup_test4 + 1);
	fixup.end_off = calc_offset(&fixup, &ftr_fixup_test4 + 5);
	fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test4_alt);
	fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test4_alt + 2);

	/* Sanity check */
	check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0);

	/* Check we don't patch if the value matches */
	patch_feature_section(flag, &fixup);
	check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0);

	/* Check we do patch if the value doesn't match */
	patch_feature_section(0, &fixup);
	check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_expected, size) == 0);

	/* Check we do patch if the mask doesn't match */
	memcpy(&ftr_fixup_test4, &ftr_fixup_test4_orig, size);
	check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0);
	patch_feature_section(~flag, &fixup);
	check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_expected, size) == 0);
}

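/*
 * These sections contain relative branches and are patched during boot
 * via the regular fixup tables; the tests only compare the patched text
 * against the expected output.
 */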
static void test_alternative_case_with_branch(void)
{
	extern unsigned int ftr_fixup_test5;
	extern unsigned int end_ftr_fixup_test5;
	extern unsigned int ftr_fixup_test5_expected;
	int size = &end_ftr_fixup_test5 - &ftr_fixup_test5;

	check(memcmp(&ftr_fixup_test5, &ftr_fixup_test5_expected, size) == 0);
}

static void test_alternative_case_with_external_branch(void)
{
	extern unsigned int ftr_fixup_test6;
	extern unsigned int end_ftr_fixup_test6;
	extern unsigned int ftr_fixup_test6_expected;
	int size = &end_ftr_fixup_test6 - &ftr_fixup_test6;

	check(memcmp(&ftr_fixup_test6, &ftr_fixup_test6_expected, size) == 0);
}

static void test_cpu_macros(void)
{
	extern void ftr_fixup_test_FTR_macros;
	extern void ftr_fixup_test_FTR_macros_expected;
	unsigned long size = &ftr_fixup_test_FTR_macros_expected -
			     &ftr_fixup_test_FTR_macros;

	/* The fixups have already been done for us during boot */
	check(memcmp(&ftr_fixup_test_FTR_macros,
		     &ftr_fixup_test_FTR_macros_expected, size) == 0);
}

static void test_fw_macros(void)
{
#ifdef CONFIG_PPC64
	extern void ftr_fixup_test_FW_FTR_macros;
	extern void ftr_fixup_test_FW_FTR_macros_expected;
	unsigned long size = &ftr_fixup_test_FW_FTR_macros_expected -
			     &ftr_fixup_test_FW_FTR_macros;

	/* The fixups have already been done for us during boot */
	check(memcmp(&ftr_fixup_test_FW_FTR_macros,
		     &ftr_fixup_test_FW_FTR_macros_expected, size) == 0);
#endif
}

static void test_lwsync_macros(void)
{
	extern void lwsync_fixup_test;
	extern void end_lwsync_fixup_test;
	extern void lwsync_fixup_test_expected_LWSYNC;
	extern void lwsync_fixup_test_expected_SYNC;
	unsigned long size = &end_lwsync_fixup_test -
			     &lwsync_fixup_test;

	/* The fixups have already been done for us during boot */
	if (cur_cpu_spec->cpu_features & CPU_FTR_LWSYNC) {
		check(memcmp(&lwsync_fixup_test,
			     &lwsync_fixup_test_expected_LWSYNC, size) == 0);
	} else {
		check(memcmp(&lwsync_fixup_test,
			     &lwsync_fixup_test_expected_SYNC, size) == 0);
	}
}

static int __init test_feature_fixups(void)
{
	printk(KERN_DEBUG "Running feature fixup self-tests ...\n");

	test_basic_patching();
	test_alternative_patching();
	test_alternative_case_too_big();
	test_alternative_case_too_small();
	test_alternative_case_with_branch();
	test_alternative_case_with_external_branch();
	test_cpu_macros();
	test_fw_macros();
	test_lwsync_macros();

	return 0;
}
late_initcall(test_feature_fixups);

#endif /* CONFIG_FTR_FIXUP_SELFTEST */