/*
 * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
 *
 * Modifications for ppc64:
 *     Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
 *
 * Copyright 2008 Michael Ellerman, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <asm/cputable.h>
#include <asm/code-patching.h>

struct fixup_entry {
        unsigned long   mask;
        unsigned long   value;
        long            start_off;
        long            end_off;
        long            alt_start_off;
        long            alt_end_off;
};

static unsigned int *calc_addr(struct fixup_entry *fcur, long offset)
{
        /*
         * We store the offset to the code as a negative offset from
         * the start of the alt_entry, to support the VDSO. This
         * routine converts that back into an actual address.
         */
        return (unsigned int *)((unsigned long)fcur + offset);
}
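
/*
 * Copy one instruction from the alternative section (src) into the default
 * section (dest). Relative branches that target code outside the alternative
 * block are re-encoded for their new location; branches within the block are
 * copied unchanged. Returns non-zero if a branch cannot be translated.
 */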
static int patch_alt_instruction(unsigned int *src, unsigned int *dest,
                                 unsigned int *alt_start, unsigned int *alt_end)
{
        unsigned int instr;

        instr = *src;

        if (instr_is_relative_branch(*src)) {
                unsigned int *target = (unsigned int *)branch_target(src);

                /* Branch within the section doesn't need translating */
                if (target < alt_start || target >= alt_end) {
                        instr = translate_branch(dest, src);
                        if (!instr)
                                return 1;
                }
        }

        patch_instruction(dest, instr);

        return 0;
}
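
/*
 * Apply a single fixup entry. If the feature word matches the entry's
 * mask/value, the default code is left alone. Otherwise the alternative
 * sequence is copied over the default one and any remaining space is
 * filled with NOPs. Returns non-zero if the alternative does not fit.
 */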
static int patch_feature_section(unsigned long value, struct fixup_entry *fcur)
{
        unsigned int *start, *end, *alt_start, *alt_end, *src, *dest;

        start = calc_addr(fcur, fcur->start_off);
        end = calc_addr(fcur, fcur->end_off);
        alt_start = calc_addr(fcur, fcur->alt_start_off);
        alt_end = calc_addr(fcur, fcur->alt_end_off);

        if ((alt_end - alt_start) > (end - start))
                return 1;

        if ((value & fcur->mask) == fcur->value)
                return 0;

        src = alt_start;
        dest = start;

        for (; src < alt_end; src++, dest++) {
                if (patch_alt_instruction(src, dest, alt_start, alt_end))
                        return 1;
        }

        for (; dest < end; dest++)
                patch_instruction(dest, PPC_INST_NOP);

        return 0;
}
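
/*
 * Walk the table of fixup entries between fixup_start and fixup_end and
 * apply each one against the given feature word (CPU or firmware feature
 * bits), warning if any section cannot be patched.
 */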
void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
{
        struct fixup_entry *fcur, *fend;

        fcur = fixup_start;
        fend = fixup_end;

        for (; fcur < fend; fcur++) {
                if (patch_feature_section(value, fcur)) {
                        WARN_ON(1);
                        printk("Unable to patch feature section at %p - %p" \
                                " with %p - %p\n",
                                calc_addr(fcur, fcur->start_off),
                                calc_addr(fcur, fcur->end_off),
                                calc_addr(fcur, fcur->alt_start_off),
                                calc_addr(fcur, fcur->alt_end_off));
                }
        }
}
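
/*
 * The lwsync fixup section is a table of self-relative offsets, each
 * pointing at a barrier instruction. On CPUs that support lwsync
 * (CPU_FTR_LWSYNC) the heavier sync at each site is replaced with lwsync.
 */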
void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
{
        long *start, *end;
        unsigned int *dest;

        if (!(value & CPU_FTR_LWSYNC))
                return;

        start = fixup_start;
        end = fixup_end;

        for (; start < end; start++) {
                dest = (void *)start + *start;
                patch_instruction(dest, PPC_INST_LWSYNC);
        }
}
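
/*
 * Self-tests for the fixup code, enabled by CONFIG_FTR_FIXUP_SELFTEST.
 * The reference code sequences (ftr_fixup_test*, lwsync_fixup_test*) are
 * defined in assembly and compared byte-for-byte against the expected
 * results after patching.
 */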
#ifdef CONFIG_FTR_FIXUP_SELFTEST

#define check(x)        \
        if (!(x)) printk("feature-fixups: test failed at line %d\n", __LINE__);

/* This must be after the text it fixes up, vmlinux.lds.S enforces that atm */
static struct fixup_entry fixup;
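
/* Convert a pointer back into the offset form stored in a fixup_entry;
 * the inverse of calc_addr() above. */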
static long calc_offset(struct fixup_entry *entry, unsigned int *p)
{
        return (unsigned long)p - (unsigned long)entry;
}

static void test_basic_patching(void)
{
        extern unsigned int ftr_fixup_test1;
        extern unsigned int end_ftr_fixup_test1;
        extern unsigned int ftr_fixup_test1_orig;
        extern unsigned int ftr_fixup_test1_expected;
        int size = &end_ftr_fixup_test1 - &ftr_fixup_test1;

        fixup.value = fixup.mask = 8;
        fixup.start_off = calc_offset(&fixup, &ftr_fixup_test1 + 1);
        fixup.end_off = calc_offset(&fixup, &ftr_fixup_test1 + 2);
        fixup.alt_start_off = fixup.alt_end_off = 0;

        /* Sanity check */
        check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0);

        /* Check we don't patch if the value matches */
        patch_feature_section(8, &fixup);
        check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0);

        /* Check we do patch if the value doesn't match */
        patch_feature_section(0, &fixup);
        check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_expected, size) == 0);

        /* Check we do patch if the mask doesn't match */
        memcpy(&ftr_fixup_test1, &ftr_fixup_test1_orig, size);
        check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0);
        patch_feature_section(~8, &fixup);
        check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_expected, size) == 0);
}

static void test_alternative_patching(void)
{
        extern unsigned int ftr_fixup_test2;
        extern unsigned int end_ftr_fixup_test2;
        extern unsigned int ftr_fixup_test2_orig;
        extern unsigned int ftr_fixup_test2_alt;
        extern unsigned int ftr_fixup_test2_expected;
        int size = &end_ftr_fixup_test2 - &ftr_fixup_test2;

        fixup.value = fixup.mask = 0xF;
        fixup.start_off = calc_offset(&fixup, &ftr_fixup_test2 + 1);
        fixup.end_off = calc_offset(&fixup, &ftr_fixup_test2 + 2);
        fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test2_alt);
        fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test2_alt + 1);

        /* Sanity check */
        check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0);

        /* Check we don't patch if the value matches */
        patch_feature_section(0xF, &fixup);
        check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0);

        /* Check we do patch if the value doesn't match */
        patch_feature_section(0, &fixup);
        check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_expected, size) == 0);

        /* Check we do patch if the mask doesn't match */
        memcpy(&ftr_fixup_test2, &ftr_fixup_test2_orig, size);
        check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0);
        patch_feature_section(~0xF, &fixup);
        check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_expected, size) == 0);
}

static void test_alternative_case_too_big(void)
{
        extern unsigned int ftr_fixup_test3;
        extern unsigned int end_ftr_fixup_test3;
        extern unsigned int ftr_fixup_test3_orig;
        extern unsigned int ftr_fixup_test3_alt;
        int size = &end_ftr_fixup_test3 - &ftr_fixup_test3;

        fixup.value = fixup.mask = 0xC;
        fixup.start_off = calc_offset(&fixup, &ftr_fixup_test3 + 1);
        fixup.end_off = calc_offset(&fixup, &ftr_fixup_test3 + 2);
        fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test3_alt);
        fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test3_alt + 2);

        /* Sanity check */
        check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);

        /* Expect nothing to be patched, and the error returned to us */
        check(patch_feature_section(0xF, &fixup) == 1);
        check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);
        check(patch_feature_section(0, &fixup) == 1);
        check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);
        check(patch_feature_section(~0xF, &fixup) == 1);
        check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);
}

static void test_alternative_case_too_small(void)
{
        extern unsigned int ftr_fixup_test4;
        extern unsigned int end_ftr_fixup_test4;
        extern unsigned int ftr_fixup_test4_orig;
        extern unsigned int ftr_fixup_test4_alt;
        extern unsigned int ftr_fixup_test4_expected;
        int size = &end_ftr_fixup_test4 - &ftr_fixup_test4;
        unsigned long flag;

        /* Check a high-bit flag */
        flag = 1UL << ((sizeof(unsigned long) - 1) * 8);
        fixup.value = fixup.mask = flag;
        fixup.start_off = calc_offset(&fixup, &ftr_fixup_test4 + 1);
        fixup.end_off = calc_offset(&fixup, &ftr_fixup_test4 + 5);
        fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test4_alt);
        fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test4_alt + 2);

        /* Sanity check */
        check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0);

        /* Check we don't patch if the value matches */
        patch_feature_section(flag, &fixup);
        check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0);

        /* Check we do patch if the value doesn't match */
        patch_feature_section(0, &fixup);
        check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_expected, size) == 0);

        /* Check we do patch if the mask doesn't match */
        memcpy(&ftr_fixup_test4, &ftr_fixup_test4_orig, size);
        check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0);
        patch_feature_section(~flag, &fixup);
        check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_expected, size) == 0);
}
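
/*
 * Tests 5 and 6 are patched during boot by the normal fixup pass; here we
 * only check that the result matches the expected sequence, including any
 * branches that had to be translated.
 */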
static void test_alternative_case_with_branch(void)
{
        extern unsigned int ftr_fixup_test5;
        extern unsigned int end_ftr_fixup_test5;
        extern unsigned int ftr_fixup_test5_expected;
        int size = &end_ftr_fixup_test5 - &ftr_fixup_test5;

        check(memcmp(&ftr_fixup_test5, &ftr_fixup_test5_expected, size) == 0);
}

static void test_alternative_case_with_external_branch(void)
{
        extern unsigned int ftr_fixup_test6;
        extern unsigned int end_ftr_fixup_test6;
        extern unsigned int ftr_fixup_test6_expected;
        int size = &end_ftr_fixup_test6 - &ftr_fixup_test6;

        check(memcmp(&ftr_fixup_test6, &ftr_fixup_test6_expected, size) == 0);
}

static void test_cpu_macros(void)
{
        extern u8 ftr_fixup_test_FTR_macros;
        extern u8 ftr_fixup_test_FTR_macros_expected;
        unsigned long size = &ftr_fixup_test_FTR_macros_expected -
                             &ftr_fixup_test_FTR_macros;

        /* The fixups have already been done for us during boot */
        check(memcmp(&ftr_fixup_test_FTR_macros,
                     &ftr_fixup_test_FTR_macros_expected, size) == 0);
}

static void test_fw_macros(void)
{
#ifdef CONFIG_PPC64
        extern u8 ftr_fixup_test_FW_FTR_macros;
        extern u8 ftr_fixup_test_FW_FTR_macros_expected;
        unsigned long size = &ftr_fixup_test_FW_FTR_macros_expected -
                             &ftr_fixup_test_FW_FTR_macros;

        /* The fixups have already been done for us during boot */
        check(memcmp(&ftr_fixup_test_FW_FTR_macros,
                     &ftr_fixup_test_FW_FTR_macros_expected, size) == 0);
#endif
}

static void test_lwsync_macros(void)
{
        extern u8 lwsync_fixup_test;
        extern u8 end_lwsync_fixup_test;
        extern u8 lwsync_fixup_test_expected_LWSYNC;
        extern u8 lwsync_fixup_test_expected_SYNC;
        unsigned long size = &end_lwsync_fixup_test -
                             &lwsync_fixup_test;

        /* The fixups have already been done for us during boot */
        if (cur_cpu_spec->cpu_features & CPU_FTR_LWSYNC) {
                check(memcmp(&lwsync_fixup_test,
                             &lwsync_fixup_test_expected_LWSYNC, size) == 0);
        } else {
                check(memcmp(&lwsync_fixup_test,
                             &lwsync_fixup_test_expected_SYNC, size) == 0);
        }
}

static int __init test_feature_fixups(void)
{
        printk(KERN_DEBUG "Running feature fixup self-tests ...\n");

        test_basic_patching();
        test_alternative_patching();
        test_alternative_case_too_big();
        test_alternative_case_too_small();
        test_alternative_case_with_branch();
        test_alternative_case_with_external_branch();
        test_cpu_macros();
        test_fw_macros();
        test_lwsync_macros();

        return 0;
}
late_initcall(test_feature_fixups);

#endif /* CONFIG_FTR_FIXUP_SELFTEST */