/*
 * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
 *
 * Modifications for ppc64:
 *     Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
 *
 * Copyright 2008 Michael Ellerman, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <asm/cputable.h>
#include <asm/code-patching.h>
#include <asm/page.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/security_features.h>
#include <asm/firmware.h>

struct fixup_entry {
        unsigned long   mask;
        unsigned long   value;
        long            start_off;
        long            end_off;
        long            alt_start_off;
        long            alt_end_off;
};

static unsigned int *calc_addr(struct fixup_entry *fcur, long offset)
{
        /*
         * We store the offset to the code as a negative offset from
         * the start of the alt_entry, to support the VDSO. This
         * routine converts that back into an actual address.
         */
        return (unsigned int *)((unsigned long)fcur + offset);
}
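
/*
 * Copy a single instruction from the alternative section (src) to its final
 * location (dest). A relative branch that targets code outside the
 * alternative section must be re-encoded so it still reaches the same
 * address once the instruction has moved; branches within the section keep
 * their original displacement. Returns non-zero if the branch cannot be
 * translated.
 */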
static int patch_alt_instruction(unsigned int *src, unsigned int *dest,
                                 unsigned int *alt_start, unsigned int *alt_end)
{
        unsigned int instr;

        instr = *src;

        if (instr_is_relative_branch(*src)) {
                unsigned int *target = (unsigned int *)branch_target(src);

                /* Branch within the section doesn't need translating */
                if (target < alt_start || target > alt_end) {
                        instr = translate_branch(dest, src);
                        if (!instr)
                                return 1;
                }
        }

        patch_instruction(dest, instr);

        return 0;
}
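
/*
 * Apply one fixup entry. If the feature value matches the entry, the code
 * already in place is correct and nothing is patched. Otherwise the primary
 * range [start, end) is overwritten with the instructions from the
 * alternative range and any remaining space is filled with nops. Returns
 * non-zero if the alternative is too large or a branch cannot be translated.
 */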
static int patch_feature_section(unsigned long value, struct fixup_entry *fcur)
{
        unsigned int *start, *end, *alt_start, *alt_end, *src, *dest;

        start = calc_addr(fcur, fcur->start_off);
        end = calc_addr(fcur, fcur->end_off);
        alt_start = calc_addr(fcur, fcur->alt_start_off);
        alt_end = calc_addr(fcur, fcur->alt_end_off);

        if ((alt_end - alt_start) > (end - start))
                return 1;

        if ((value & fcur->mask) == fcur->value)
                return 0;

        src = alt_start;
        dest = start;

        for (; src < alt_end; src++, dest++) {
                if (patch_alt_instruction(src, dest, alt_start, alt_end))
                        return 1;
        }

        for (; dest < end; dest++)
                patch_instruction(dest, PPC_INST_NOP);

        return 0;
}
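
/*
 * Walk the array of fixup entries between fixup_start and fixup_end and
 * apply each one against the given feature word, warning (but continuing)
 * if an individual section cannot be patched.
 */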
void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
{
        struct fixup_entry *fcur, *fend;

        fcur = fixup_start;
        fend = fixup_end;

        for (; fcur < fend; fcur++) {
                if (patch_feature_section(value, fcur)) {
                        WARN_ON(1);
                        printk("Unable to patch feature section at %p - %p" \
                                " with %p - %p\n",
                                calc_addr(fcur, fcur->start_off),
                                calc_addr(fcur, fcur->end_off),
                                calc_addr(fcur, fcur->alt_start_off),
                                calc_addr(fcur, fcur->alt_end_off));
                }
        }
}

#ifdef CONFIG_PPC_BOOK3S_64
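
/*
 * The __stf_entry_barrier_fixup section is an array of self-relative
 * offsets, each locating a three-nop patch site in the kernel entry path.
 * Depending on the requested store-forwarding barrier type, the nops are
 * replaced with a fallback mflr/branch/mtlr sequence, a single eieio, or a
 * hwsync/ld/ori speculation barrier.
 */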
void do_stf_entry_barrier_fixups(enum stf_barrier_type types)
{
        unsigned int instrs[3], *dest;
        long *start, *end;
        int i;

        start = PTRRELOC(&__start___stf_entry_barrier_fixup),
        end = PTRRELOC(&__stop___stf_entry_barrier_fixup);

        instrs[0] = 0x60000000; /* nop */
        instrs[1] = 0x60000000; /* nop */
        instrs[2] = 0x60000000; /* nop */

        i = 0;
        if (types & STF_BARRIER_FALLBACK) {
                instrs[i++] = 0x7d4802a6; /* mflr r10 */
                instrs[i++] = 0x60000000; /* branch patched below */
                instrs[i++] = 0x7d4803a6; /* mtlr r10 */
        } else if (types & STF_BARRIER_EIEIO) {
                instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
        } else if (types & STF_BARRIER_SYNC_ORI) {
                instrs[i++] = 0x7c0004ac; /* hwsync */
                instrs[i++] = 0xe94d0000; /* ld r10,0(r13) */
                instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
        }

        for (i = 0; start < end; start++, i++) {
                dest = (void *)start + *start;

                pr_devel("patching dest %lx\n", (unsigned long)dest);

                patch_instruction(dest, instrs[0]);

                if (types & STF_BARRIER_FALLBACK)
                        patch_branch(dest + 1, (unsigned long)&stf_barrier_fallback,
                                     BRANCH_SET_LINK);
                else
                        patch_instruction(dest + 1, instrs[1]);

                patch_instruction(dest + 2, instrs[2]);
        }

        printk(KERN_DEBUG "stf-barrier: patched %d entry locations (%s barrier)\n", i,
                (types == STF_BARRIER_NONE)     ? "no"       :
                (types == STF_BARRIER_FALLBACK) ? "fallback" :
                (types == STF_BARRIER_EIEIO)    ? "eieio"    :
                (types == STF_BARRIER_SYNC_ORI) ? "hwsync"   : "unknown");
}

void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
{
        unsigned int instrs[6], *dest;
        long *start, *end;
        int i;

        start = PTRRELOC(&__start___stf_exit_barrier_fixup),
        end = PTRRELOC(&__stop___stf_exit_barrier_fixup);

        instrs[0] = 0x60000000; /* nop */
        instrs[1] = 0x60000000; /* nop */
        instrs[2] = 0x60000000; /* nop */
        instrs[3] = 0x60000000; /* nop */
        instrs[4] = 0x60000000; /* nop */
        instrs[5] = 0x60000000; /* nop */

        i = 0;
        if (types & STF_BARRIER_FALLBACK || types & STF_BARRIER_SYNC_ORI) {
                if (cpu_has_feature(CPU_FTR_HVMODE)) {
                        instrs[i++] = 0x7db14ba6; /* mtspr 0x131, r13 (HSPRG1) */
                        instrs[i++] = 0x7db04aa6; /* mfspr r13, 0x130 (HSPRG0) */
                } else {
                        instrs[i++] = 0x7db243a6; /* mtsprg 2,r13 */
                        instrs[i++] = 0x7db142a6; /* mfsprg r13,1 */
                }
                instrs[i++] = 0x7c0004ac; /* hwsync */
                instrs[i++] = 0xe9ad0000; /* ld r13,0(r13) */
                instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
                if (cpu_has_feature(CPU_FTR_HVMODE)) {
                        instrs[i++] = 0x7db14aa6; /* mfspr r13, 0x131 (HSPRG1) */
                } else {
                        instrs[i++] = 0x7db242a6; /* mfsprg r13,2 */
                }
        } else if (types & STF_BARRIER_EIEIO) {
                instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
        }

        for (i = 0; start < end; start++, i++) {
                dest = (void *)start + *start;

                pr_devel("patching dest %lx\n", (unsigned long)dest);

                patch_instruction(dest, instrs[0]);
                patch_instruction(dest + 1, instrs[1]);
                patch_instruction(dest + 2, instrs[2]);
                patch_instruction(dest + 3, instrs[3]);
                patch_instruction(dest + 4, instrs[4]);
                patch_instruction(dest + 5, instrs[5]);
        }

        printk(KERN_DEBUG "stf-barrier: patched %d exit locations (%s barrier)\n", i,
                (types == STF_BARRIER_NONE)     ? "no"       :
                (types == STF_BARRIER_FALLBACK) ? "fallback" :
                (types == STF_BARRIER_EIEIO)    ? "eieio"    :
                (types == STF_BARRIER_SYNC_ORI) ? "hwsync"   : "unknown");
}

void do_stf_barrier_fixups(enum stf_barrier_type types)
{
        do_stf_entry_barrier_fixups(types);
        do_stf_exit_barrier_fixups(types);
}
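
/*
 * Patch the exception exit sites recorded in __rfi_flush_fixup with the
 * requested L1D cache flush sequence. Each site is three nops wide: the
 * fallback type branches to an out-of-line software flush, while the ori
 * and mttrig types use the flush instructions noted in the comments below.
 */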
void do_rfi_flush_fixups(enum l1d_flush_type types)
{
        unsigned int instrs[3], *dest;
        long *start, *end;
        int i;

        start = PTRRELOC(&__start___rfi_flush_fixup),
        end = PTRRELOC(&__stop___rfi_flush_fixup);

        instrs[0] = 0x60000000; /* nop */
        instrs[1] = 0x60000000; /* nop */
        instrs[2] = 0x60000000; /* nop */

        if (types & L1D_FLUSH_FALLBACK)
                /* b .+16 to fallback flush */
                instrs[0] = 0x48000010;

        i = 0;
        if (types & L1D_FLUSH_ORI) {
                instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
                instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush */
        }

        if (types & L1D_FLUSH_MTTRIG)
                instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */

        for (i = 0; start < end; start++, i++) {
                dest = (void *)start + *start;

                pr_devel("patching dest %lx\n", (unsigned long)dest);

                patch_instruction(dest, instrs[0]);
                patch_instruction(dest + 1, instrs[1]);
                patch_instruction(dest + 2, instrs[2]);
        }

        printk(KERN_DEBUG "rfi-flush: patched %d locations (%s flush)\n", i,
                (types == L1D_FLUSH_NONE)     ? "no" :
                (types == L1D_FLUSH_FALLBACK) ? "fallback displacement" :
                (types & L1D_FLUSH_ORI)       ? (types & L1D_FLUSH_MTTRIG)
                                                        ? "ori+mttrig type"
                                                        : "ori type" :
                (types & L1D_FLUSH_MTTRIG)    ? "mttrig type"
                                              : "unknown");
}

#endif /* CONFIG_PPC_BOOK3S_64 */
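
/*
 * If this CPU supports lwsync (CPU_FTR_LWSYNC), rewrite every barrier
 * recorded in the lwsync fixup table from the heavyweight sync it was
 * assembled with to the cheaper lwsync.
 */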
void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
{
        long *start, *end;
        unsigned int *dest;

        if (!(value & CPU_FTR_LWSYNC))
                return;

        start = fixup_start;
        end = fixup_end;

        for (; start < end; start++) {
                dest = (void *)start + *start;
                patch_instruction(dest, PPC_INST_LWSYNC);
        }
}
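
/*
 * For a relocatable 64-bit kernel running above physical address zero, copy
 * the now-patched exception vectors (everything from _stext up to
 * __end_interrupts) from the kernel's running location down to its link
 * address, since the hardware still takes exceptions at the fixed low real
 * addresses.
 */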
static void do_final_fixups(void)
{
#if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
        int *src, *dest;
        unsigned long length;

        if (PHYSICAL_START == 0)
                return;

        src = (int *)(KERNELBASE + PHYSICAL_START);
        dest = (int *)KERNELBASE;
        length = (__end_interrupts - _stext) / sizeof(int);

        while (length--) {
                patch_instruction(dest, *src);
                src++;
                dest++;
        }
#endif
}
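
/*
 * Snapshot of the feature words taken when the fixups were applied, so that
 * check_features() can verify nothing has changed them since.
 */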
static unsigned long __initdata saved_cpu_features;
static unsigned int __initdata saved_mmu_features;
#ifdef CONFIG_PPC64
static unsigned long __initdata saved_firmware_features;
#endif

void __init apply_feature_fixups(void)
{
        struct cpu_spec *spec = PTRRELOC(*PTRRELOC(&cur_cpu_spec));

        *PTRRELOC(&saved_cpu_features) = spec->cpu_features;
        *PTRRELOC(&saved_mmu_features) = spec->mmu_features;

        /*
         * Apply the CPU-specific and firmware specific fixups to kernel text
         * (nop out sections not relevant to this CPU or this firmware).
         */
        do_feature_fixups(spec->cpu_features,
                          PTRRELOC(&__start___ftr_fixup),
                          PTRRELOC(&__stop___ftr_fixup));

        do_feature_fixups(spec->mmu_features,
                          PTRRELOC(&__start___mmu_ftr_fixup),
                          PTRRELOC(&__stop___mmu_ftr_fixup));

        do_lwsync_fixups(spec->cpu_features,
                         PTRRELOC(&__start___lwsync_fixup),
                         PTRRELOC(&__stop___lwsync_fixup));

#ifdef CONFIG_PPC64
        saved_firmware_features = powerpc_firmware_features;
        do_feature_fixups(powerpc_firmware_features,
                          &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
#endif

        do_final_fixups();
}

void __init setup_feature_keys(void)
{
        /*
         * Initialise jump label. This causes all the cpu/mmu_has_feature()
         * checks to take on their correct polarity based on the current set
         * of CPU/MMU features.
         */
        jump_label_init();
        cpu_feature_keys_init();
        mmu_feature_keys_init();
}

static int __init check_features(void)
{
        WARN(saved_cpu_features != cur_cpu_spec->cpu_features,
             "CPU features changed after feature patching!\n");
        WARN(saved_mmu_features != cur_cpu_spec->mmu_features,
             "MMU features changed after feature patching!\n");
#ifdef CONFIG_PPC64
        WARN(saved_firmware_features != powerpc_firmware_features,
             "Firmware features changed after feature patching!\n");
#endif

        return 0;
}
late_initcall(check_features);
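
/*
 * The rest of this file is a boot-time self-test, enabled with
 * CONFIG_FTR_FIXUP_SELFTEST. Each test builds a fixup_entry by hand that
 * points at small instruction stubs assembled elsewhere, runs the patching
 * code on them, and compares the result against a pre-built expected image.
 */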
#ifdef CONFIG_FTR_FIXUP_SELFTEST

#define check(x)        \
        if (!(x)) printk("feature-fixups: test failed at line %d\n", __LINE__);

/* This must be after the text it fixes up, vmlinux.lds.S enforces that atm */
static struct fixup_entry fixup;

static long calc_offset(struct fixup_entry *entry, unsigned int *p)
{
        return (unsigned long)p - (unsigned long)entry;
}

static void test_basic_patching(void)
{
        extern unsigned int ftr_fixup_test1;
        extern unsigned int end_ftr_fixup_test1;
        extern unsigned int ftr_fixup_test1_orig;
        extern unsigned int ftr_fixup_test1_expected;
        int size = &end_ftr_fixup_test1 - &ftr_fixup_test1;

        fixup.value = fixup.mask = 8;
        fixup.start_off = calc_offset(&fixup, &ftr_fixup_test1 + 1);
        fixup.end_off = calc_offset(&fixup, &ftr_fixup_test1 + 2);
        fixup.alt_start_off = fixup.alt_end_off = 0;

        /* Sanity check */
        check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0);

        /* Check we don't patch if the value matches */
        patch_feature_section(8, &fixup);
        check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0);

        /* Check we do patch if the value doesn't match */
        patch_feature_section(0, &fixup);
        check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_expected, size) == 0);

        /* Check we do patch if the mask doesn't match */
        memcpy(&ftr_fixup_test1, &ftr_fixup_test1_orig, size);
        check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0);
        patch_feature_section(~8, &fixup);
        check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_expected, size) == 0);
}

static void test_alternative_patching(void)
{
        extern unsigned int ftr_fixup_test2;
        extern unsigned int end_ftr_fixup_test2;
        extern unsigned int ftr_fixup_test2_orig;
        extern unsigned int ftr_fixup_test2_alt;
        extern unsigned int ftr_fixup_test2_expected;
        int size = &end_ftr_fixup_test2 - &ftr_fixup_test2;

        fixup.value = fixup.mask = 0xF;
        fixup.start_off = calc_offset(&fixup, &ftr_fixup_test2 + 1);
        fixup.end_off = calc_offset(&fixup, &ftr_fixup_test2 + 2);
        fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test2_alt);
        fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test2_alt + 1);

        /* Sanity check */
        check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0);

        /* Check we don't patch if the value matches */
        patch_feature_section(0xF, &fixup);
        check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0);

        /* Check we do patch if the value doesn't match */
        patch_feature_section(0, &fixup);
        check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_expected, size) == 0);

        /* Check we do patch if the mask doesn't match */
        memcpy(&ftr_fixup_test2, &ftr_fixup_test2_orig, size);
        check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0);
        patch_feature_section(~0xF, &fixup);
        check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_expected, size) == 0);
}

static void test_alternative_case_too_big(void)
{
        extern unsigned int ftr_fixup_test3;
        extern unsigned int end_ftr_fixup_test3;
        extern unsigned int ftr_fixup_test3_orig;
        extern unsigned int ftr_fixup_test3_alt;
        int size = &end_ftr_fixup_test3 - &ftr_fixup_test3;

        fixup.value = fixup.mask = 0xC;
        fixup.start_off = calc_offset(&fixup, &ftr_fixup_test3 + 1);
        fixup.end_off = calc_offset(&fixup, &ftr_fixup_test3 + 2);
        fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test3_alt);
        fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test3_alt + 2);

        /* Sanity check */
        check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);

        /* Expect nothing to be patched, and the error returned to us */
        check(patch_feature_section(0xF, &fixup) == 1);
        check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);
        check(patch_feature_section(0, &fixup) == 1);
        check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);
        check(patch_feature_section(~0xF, &fixup) == 1);
        check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);
}

static void test_alternative_case_too_small(void)
{
        extern unsigned int ftr_fixup_test4;
        extern unsigned int end_ftr_fixup_test4;
        extern unsigned int ftr_fixup_test4_orig;
        extern unsigned int ftr_fixup_test4_alt;
        extern unsigned int ftr_fixup_test4_expected;
        int size = &end_ftr_fixup_test4 - &ftr_fixup_test4;
        unsigned long flag;

        /* Check a high-bit flag */
        flag = 1UL << ((sizeof(unsigned long) - 1) * 8);
        fixup.value = fixup.mask = flag;
        fixup.start_off = calc_offset(&fixup, &ftr_fixup_test4 + 1);
        fixup.end_off = calc_offset(&fixup, &ftr_fixup_test4 + 5);
        fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test4_alt);
        fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test4_alt + 2);

        /* Sanity check */
        check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0);

        /* Check we don't patch if the value matches */
        patch_feature_section(flag, &fixup);
        check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0);

        /* Check we do patch if the value doesn't match */
        patch_feature_section(0, &fixup);
        check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_expected, size) == 0);

        /* Check we do patch if the mask doesn't match */
        memcpy(&ftr_fixup_test4, &ftr_fixup_test4_orig, size);
        check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0);
        patch_feature_section(~flag, &fixup);
        check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_expected, size) == 0);
}

static void test_alternative_case_with_branch(void)
{
        extern unsigned int ftr_fixup_test5;
        extern unsigned int end_ftr_fixup_test5;
        extern unsigned int ftr_fixup_test5_expected;
        int size = &end_ftr_fixup_test5 - &ftr_fixup_test5;

        check(memcmp(&ftr_fixup_test5, &ftr_fixup_test5_expected, size) == 0);
}

static void test_alternative_case_with_external_branch(void)
{
        extern unsigned int ftr_fixup_test6;
        extern unsigned int end_ftr_fixup_test6;
        extern unsigned int ftr_fixup_test6_expected;
        int size = &end_ftr_fixup_test6 - &ftr_fixup_test6;

        check(memcmp(&ftr_fixup_test6, &ftr_fixup_test6_expected, size) == 0);
}

static void test_cpu_macros(void)
{
        extern u8 ftr_fixup_test_FTR_macros;
        extern u8 ftr_fixup_test_FTR_macros_expected;
        unsigned long size = &ftr_fixup_test_FTR_macros_expected -
                             &ftr_fixup_test_FTR_macros;

        /* The fixups have already been done for us during boot */
        check(memcmp(&ftr_fixup_test_FTR_macros,
                     &ftr_fixup_test_FTR_macros_expected, size) == 0);
}

static void test_fw_macros(void)
{
#ifdef CONFIG_PPC64
        extern u8 ftr_fixup_test_FW_FTR_macros;
        extern u8 ftr_fixup_test_FW_FTR_macros_expected;
        unsigned long size = &ftr_fixup_test_FW_FTR_macros_expected -
                             &ftr_fixup_test_FW_FTR_macros;

        /* The fixups have already been done for us during boot */
        check(memcmp(&ftr_fixup_test_FW_FTR_macros,
                     &ftr_fixup_test_FW_FTR_macros_expected, size) == 0);
#endif
}

static void test_lwsync_macros(void)
{
        extern u8 lwsync_fixup_test;
        extern u8 end_lwsync_fixup_test;
        extern u8 lwsync_fixup_test_expected_LWSYNC;
        extern u8 lwsync_fixup_test_expected_SYNC;
        unsigned long size = &end_lwsync_fixup_test -
                             &lwsync_fixup_test;

        /* The fixups have already been done for us during boot */
        if (cur_cpu_spec->cpu_features & CPU_FTR_LWSYNC) {
                check(memcmp(&lwsync_fixup_test,
                             &lwsync_fixup_test_expected_LWSYNC, size) == 0);
        } else {
                check(memcmp(&lwsync_fixup_test,
                             &lwsync_fixup_test_expected_SYNC, size) == 0);
        }
}

static int __init test_feature_fixups(void)
{
        printk(KERN_DEBUG "Running feature fixup self-tests ...\n");

        test_basic_patching();
        test_alternative_patching();
        test_alternative_case_too_big();
        test_alternative_case_too_small();
        test_alternative_case_with_branch();
        test_alternative_case_with_external_branch();
        test_cpu_macros();
        test_fw_macros();
        test_lwsync_macros();

        return 0;
}
late_initcall(test_feature_fixups);

#endif /* CONFIG_FTR_FIXUP_SELFTEST */