
/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include <linux/kvm_host.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/kvm_para.h>
#include <linux/slab.h>
#include <linux/of.h>

#include <asm/reg.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#define KVM_MAGIC_PAGE		(-4096L)
#define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x)

#define KVM_INST_LWZ		0x80000000
#define KVM_INST_STW		0x90000000
#define KVM_INST_LD		0xe8000000
#define KVM_INST_STD		0xf8000000
#define KVM_INST_NOP		0x60000000
#define KVM_INST_B		0x48000000
#define KVM_INST_B_MASK		0x03ffffff
#define KVM_INST_B_MAX		0x01ffffff
#define KVM_INST_LI		0x38000000

#define KVM_MASK_RT		0x03e00000
#define KVM_RT_30		0x03c00000
#define KVM_MASK_RB		0x0000f800
#define KVM_INST_MFMSR		0x7c0000a6

#define SPR_FROM		0
#define SPR_TO			0x100

#define KVM_INST_SPR(sprn, moveto) (0x7c0002a6 | \
				    (((sprn) & 0x1f) << 16) | \
				    (((sprn) & 0x3e0) << 6) | \
				    (moveto))

#define KVM_INST_MFSPR(sprn)	KVM_INST_SPR(sprn, SPR_FROM)
#define KVM_INST_MTSPR(sprn)	KVM_INST_SPR(sprn, SPR_TO)

#define KVM_INST_TLBSYNC	0x7c00046c
#define KVM_INST_MTMSRD_L0	0x7c000164
#define KVM_INST_MTMSRD_L1	0x7c010164
#define KVM_INST_MTMSR		0x7c000124
#define KVM_INST_WRTEE		0x7c000106
#define KVM_INST_WRTEEI_0	0x7c000146
#define KVM_INST_WRTEEI_1	0x7c008146
#define KVM_INST_MTSRIN		0x7c0001e4
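
/*
 * Patching state: kvm_tmp is a 1 MB buffer that receives the patched-in
 * emulation code; kvm_patching_worked records whether every patch so far
 * has succeeded.
 */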
static bool kvm_patching_worked = true;
static char kvm_tmp[1024 * 1024];
static int kvm_tmp_index;
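
/* Replace one instruction with another and flush the icache for it */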
static inline void kvm_patch_ins(u32 *inst, u32 new_inst)
{
	*inst = new_inst;
	flush_icache_range((ulong)inst, (ulong)inst + 4);
}
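
/*
 * Rewrite an instruction into a load/store on the magic page. Note that
 * kvm_patch_ins_ld/_std touch 64-bit shared-page fields: on 32-bit
 * kernels they are narrowed to lwz/stw of the least-significant word,
 * which on big-endian PPC lives at addr + 4, while kvm_patch_ins_ll
 * accesses a native-sized word at addr.
 */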
static void kvm_patch_ins_ll(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000fffc));
#endif
}

static void kvm_patch_ins_ld(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | ((addr + 4) & 0x0000fffc));
#endif
}

static void kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt)
{
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000ffff));
}

static void kvm_patch_ins_std(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_STD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_STW | rt | ((addr + 4) & 0x0000fffc));
#endif
}

static void kvm_patch_ins_stw(u32 *inst, long addr, u32 rt)
{
	kvm_patch_ins(inst, KVM_INST_STW | rt | (addr & 0x0000fffc));
}

static void kvm_patch_ins_nop(u32 *inst)
{
	kvm_patch_ins(inst, KVM_INST_NOP);
}
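
/* Rewrite an instruction into an unconditional relative branch */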
static void kvm_patch_ins_b(u32 *inst, int addr)
{
#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S)
	/* On relocatable kernels interrupt handlers and our code
	   can be in different regions, so we don't patch them */
	if ((ulong)inst < (ulong)&__end_interrupts)
		return;
#endif

	kvm_patch_ins(inst, KVM_INST_B | (addr & KVM_INST_B_MASK));
}
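
/* Carve len bytes out of the kvm_tmp patch buffer */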
static u32 *kvm_alloc(int len)
{
	u32 *p;

	if ((kvm_tmp_index + len) > ARRAY_SIZE(kvm_tmp)) {
		printk(KERN_ERR "KVM: No more space (%d + %d)\n",
				kvm_tmp_index, len);
		kvm_patching_worked = false;
		return NULL;
	}

	p = (void*)&kvm_tmp[kvm_tmp_index];
	kvm_tmp_index += len;

	return p;
}
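
/*
 * The kvm_emulate_* arrays and their *_offs/*_len markers are assembly
 * templates (provided by kvm_emul.S). Each kvm_patch_ins_* routine below
 * copies a template into kvm_tmp, fixes up its register and branch-back
 * fields, stores the original instruction into the template, and finally
 * replaces the privileged instruction with a branch into the copy.
 */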
extern u32 kvm_emulate_mtmsrd_branch_offs;
extern u32 kvm_emulate_mtmsrd_reg_offs;
extern u32 kvm_emulate_mtmsrd_orig_ins_offs;
extern u32 kvm_emulate_mtmsrd_len;
extern u32 kvm_emulate_mtmsrd[];

static void kvm_patch_ins_mtmsrd(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsrd_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsrd_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsrd, kvm_emulate_mtmsrd_len * 4);
	p[kvm_emulate_mtmsrd_branch_offs] |= distance_end & KVM_INST_B_MASK;
	switch (get_rt(rt)) {
	case 30:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
				 magic_var(scratch2), KVM_RT_30);
		break;
	case 31:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
				 magic_var(scratch1), KVM_RT_30);
		break;
	default:
		p[kvm_emulate_mtmsrd_reg_offs] |= rt;
		break;
	}

	p[kvm_emulate_mtmsrd_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsrd_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

extern u32 kvm_emulate_mtmsr_branch_offs;
extern u32 kvm_emulate_mtmsr_reg1_offs;
extern u32 kvm_emulate_mtmsr_reg2_offs;
extern u32 kvm_emulate_mtmsr_orig_ins_offs;
extern u32 kvm_emulate_mtmsr_len;
extern u32 kvm_emulate_mtmsr[];

static void kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsr_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsr_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsr, kvm_emulate_mtmsr_len * 4);
	p[kvm_emulate_mtmsr_branch_offs] |= distance_end & KVM_INST_B_MASK;

	/* Make clobbered registers work too */
	switch (get_rt(rt)) {
	case 30:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch2), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch2), KVM_RT_30);
		break;
	case 31:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch1), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch1), KVM_RT_30);
		break;
	default:
		p[kvm_emulate_mtmsr_reg1_offs] |= rt;
		p[kvm_emulate_mtmsr_reg2_offs] |= rt;
		break;
	}

	p[kvm_emulate_mtmsr_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsr_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#ifdef CONFIG_BOOKE

extern u32 kvm_emulate_wrtee_branch_offs;
extern u32 kvm_emulate_wrtee_reg_offs;
extern u32 kvm_emulate_wrtee_orig_ins_offs;
extern u32 kvm_emulate_wrtee_len;
extern u32 kvm_emulate_wrtee[];

static void kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_wrtee_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_wrtee_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_wrtee, kvm_emulate_wrtee_len * 4);
	p[kvm_emulate_wrtee_branch_offs] |= distance_end & KVM_INST_B_MASK;

	if (imm_one) {
		p[kvm_emulate_wrtee_reg_offs] =
			KVM_INST_LI | __PPC_RT(30) | MSR_EE;
	} else {
		/* Make clobbered registers work too */
		switch (get_rt(rt)) {
		case 30:
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch2), KVM_RT_30);
			break;
		case 31:
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch1), KVM_RT_30);
			break;
		default:
			p[kvm_emulate_wrtee_reg_offs] |= rt;
			break;
		}
	}

	p[kvm_emulate_wrtee_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrtee_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

extern u32 kvm_emulate_wrteei_0_branch_offs;
extern u32 kvm_emulate_wrteei_0_len;
extern u32 kvm_emulate_wrteei_0[];

static void kvm_patch_ins_wrteei_0(u32 *inst)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_wrteei_0_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_0_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	memcpy(p, kvm_emulate_wrteei_0, kvm_emulate_wrteei_0_len * 4);
	p[kvm_emulate_wrteei_0_branch_offs] |= distance_end & KVM_INST_B_MASK;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_0_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#endif

#ifdef CONFIG_PPC_BOOK3S_32

extern u32 kvm_emulate_mtsrin_branch_offs;
extern u32 kvm_emulate_mtsrin_reg1_offs;
extern u32 kvm_emulate_mtsrin_reg2_offs;
extern u32 kvm_emulate_mtsrin_orig_ins_offs;
extern u32 kvm_emulate_mtsrin_len;
extern u32 kvm_emulate_mtsrin[];

static void kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtsrin_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtsrin_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtsrin, kvm_emulate_mtsrin_len * 4);
	p[kvm_emulate_mtsrin_branch_offs] |= distance_end & KVM_INST_B_MASK;
	p[kvm_emulate_mtsrin_reg1_offs] |= (rb << 10);
	p[kvm_emulate_mtsrin_reg2_offs] |= rt;
	p[kvm_emulate_mtsrin_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtsrin_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#endif
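
/*
 * Per-CPU hypercall that asks the host to map the magic page at the
 * fixed address KVM_MAGIC_PAGE (-4096) and reports the host's feature
 * bits back through *features.
 */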
static void kvm_map_magic_page(void *data)
{
	u32 *features = data;

	ulong in[8];
	ulong out[8];

	in[0] = KVM_MAGIC_PAGE;
	in[1] = KVM_MAGIC_PAGE;

	kvm_hypercall(in, out, HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE);

	*features = out[0];
}
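
/*
 * Check one kernel instruction and patch it in place if the magic page
 * can emulate it: mfmsr/mfspr become loads from the magic page, mtspr
 * becomes a store, tlbsync becomes a nop, and mtmsr[d]/wrtee are
 * redirected to emulation code in kvm_tmp.
 */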
static void kvm_check_ins(u32 *inst, u32 features)
{
	u32 _inst = *inst;
	u32 inst_no_rt = _inst & ~KVM_MASK_RT;
	u32 inst_rt = _inst & KVM_MASK_RT;

	switch (inst_no_rt) {
	/* Loads */
	case KVM_INST_MFMSR:
		kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG0):
		kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG1):
		kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG2):
		kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG3):
		kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SRR0):
		kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SRR1):
		kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_DEAR):
#else
	case KVM_INST_MFSPR(SPRN_DAR):
#endif
		kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_DSISR):
		kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
		break;

#ifdef CONFIG_PPC_BOOK3E_MMU
	case KVM_INST_MFSPR(SPRN_MAS0):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS1):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas1), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS2):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(mas2), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS3):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas7_3) + 4, inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas4), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas6), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas7_3), inst_rt);
		break;
#endif /* CONFIG_PPC_BOOK3E_MMU */

	case KVM_INST_MFSPR(SPRN_SPRG4):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG4R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg4), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG5):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG5R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg5), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG6):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG6R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg6), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG7):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG7R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg7), inst_rt);
		break;

#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_ESR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(esr), inst_rt);
		break;
#endif

	case KVM_INST_MFSPR(SPRN_PIR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(pir), inst_rt);
		break;

	/* Stores */
	case KVM_INST_MTSPR(SPRN_SPRG0):
		kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG1):
		kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG2):
		kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG3):
		kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SRR0):
		kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SRR1):
		kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MTSPR(SPRN_DEAR):
#else
	case KVM_INST_MTSPR(SPRN_DAR):
#endif
		kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_DSISR):
		kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
		break;

#ifdef CONFIG_PPC_BOOK3E_MMU
	case KVM_INST_MTSPR(SPRN_MAS0):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS1):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas1), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS2):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(mas2), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS3):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas7_3) + 4, inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas4), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas6), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas7_3), inst_rt);
		break;
#endif /* CONFIG_PPC_BOOK3E_MMU */

	case KVM_INST_MTSPR(SPRN_SPRG4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg4), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG5):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg5), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg6), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg7), inst_rt);
		break;

#ifdef CONFIG_BOOKE
	case KVM_INST_MTSPR(SPRN_ESR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(esr), inst_rt);
		break;
#endif

	/* Nops */
	case KVM_INST_TLBSYNC:
		kvm_patch_ins_nop(inst);
		break;

	/* Rewrites */
	case KVM_INST_MTMSRD_L1:
		kvm_patch_ins_mtmsrd(inst, inst_rt);
		break;
	case KVM_INST_MTMSR:
	case KVM_INST_MTMSRD_L0:
		kvm_patch_ins_mtmsr(inst, inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_WRTEE:
		kvm_patch_ins_wrtee(inst, inst_rt, 0);
		break;
#endif
	}

	switch (inst_no_rt & ~KVM_MASK_RB) {
#ifdef CONFIG_PPC_BOOK3S_32
	case KVM_INST_MTSRIN:
		if (features & KVM_MAGIC_FEAT_SR) {
			u32 inst_rb = _inst & KVM_MASK_RB;
			kvm_patch_ins_mtsrin(inst, inst_rt, inst_rb);
		}
		break;
#endif
	}

	switch (_inst) {
#ifdef CONFIG_BOOKE
	case KVM_INST_WRTEEI_0:
		kvm_patch_ins_wrteei_0(inst);
		break;

	case KVM_INST_WRTEEI_1:
		kvm_patch_ins_wrtee(inst, 0, 1);
		break;
#endif
	}
}

extern u32 kvm_template_start[];
extern u32 kvm_template_end[];
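
/*
 * Map the magic page, self-test it, then scan the kernel text from
 * _stext to _etext and patch every instruction we can emulate.
 */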
static void kvm_use_magic_page(void)
{
	u32 *p;
	u32 *start, *end;
	u32 tmp;
	u32 features;

	/* Tell the host to map the magic page to -4096 on all CPUs */
	on_each_cpu(kvm_map_magic_page, &features, 1);

	/* Quick self-test to see if the mapping works */
	if (__get_user(tmp, (u32*)KVM_MAGIC_PAGE)) {
		kvm_patching_worked = false;
		return;
	}

	/* Now loop through all code and find instructions */
	start = (void*)_stext;
	end = (void*)_etext;

	/*
	 * Being interrupted in the middle of patching would
	 * be bad for SPRG4-7, which KVM can't keep in sync
	 * with emulated accesses because reads don't trap.
	 */
	local_irq_disable();

	for (p = start; p < end; p++) {
		/* Avoid patching the template code */
		if (p >= kvm_template_start && p < kvm_template_end) {
			p = kvm_template_end - 1;
			continue;
		}
		kvm_check_ins(p, features);
	}

	local_irq_enable();

	printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
			 kvm_patching_worked ? "worked" : "failed");
}
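
/*
 * Issue a hypercall: in[0..7] go to r3-r10 and the hypercall number to
 * r11; the return code comes back in r3 and r4-r11 are copied to
 * out[0..7].
 */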
unsigned long kvm_hypercall(unsigned long *in,
			    unsigned long *out,
			    unsigned long nr)
{
	unsigned long register r0 asm("r0");
	unsigned long register r3 asm("r3") = in[0];
	unsigned long register r4 asm("r4") = in[1];
	unsigned long register r5 asm("r5") = in[2];
	unsigned long register r6 asm("r6") = in[3];
	unsigned long register r7 asm("r7") = in[4];
	unsigned long register r8 asm("r8") = in[5];
	unsigned long register r9 asm("r9") = in[6];
	unsigned long register r10 asm("r10") = in[7];
	unsigned long register r11 asm("r11") = nr;
	unsigned long register r12 asm("r12");

	asm volatile("bl	kvm_hypercall_start"
		     : "=r"(r0), "=r"(r3), "=r"(r4), "=r"(r5), "=r"(r6),
		       "=r"(r7), "=r"(r8), "=r"(r9), "=r"(r10), "=r"(r11),
		       "=r"(r12)
		     : "r"(r3), "r"(r4), "r"(r5), "r"(r6), "r"(r7), "r"(r8),
		       "r"(r9), "r"(r10), "r"(r11)
		     : "memory", "cc", "xer", "ctr", "lr");

	out[0] = r4;
	out[1] = r5;
	out[2] = r6;
	out[3] = r7;
	out[4] = r8;
	out[5] = r9;
	out[6] = r10;
	out[7] = r11;

	return r3;
}
EXPORT_SYMBOL_GPL(kvm_hypercall);
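
/*
 * Read the hypercall sequence from the "/hypervisor" device tree node's
 * "hcall-instructions" property (at most four instructions) and patch
 * it into kvm_hypercall_start.
 */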
static int kvm_para_setup(void)
{
	extern u32 kvm_hypercall_start;
	struct device_node *hyper_node;
	u32 *insts;
	int len, i;

	hyper_node = of_find_node_by_path("/hypervisor");
	if (!hyper_node)
		return -1;

	insts = (u32*)of_get_property(hyper_node, "hcall-instructions", &len);
	if (len % 4)
		return -1;
	if (len > (4 * 4))
		return -1;

	for (i = 0; i < (len / 4); i++)
		kvm_patch_ins(&(&kvm_hypercall_start)[i], insts[i]);

	return 0;
}
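
/* Give back the pages of kvm_tmp that were never used for patching */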
static __init void kvm_free_tmp(void)
{
	unsigned long start, end;

	start = (ulong)&kvm_tmp[kvm_tmp_index + (PAGE_SIZE - 1)] & PAGE_MASK;
	end = (ulong)&kvm_tmp[ARRAY_SIZE(kvm_tmp)] & PAGE_MASK;

	/* Free the tmp space we don't need */
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		totalram_pages++;
	}
}
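
/* Guest-side setup: detect KVM, install hypercalls, patch the kernel */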
static int __init kvm_guest_init(void)
{
	if (!kvm_para_available())
		goto free_tmp;

	if (kvm_para_setup())
		goto free_tmp;

	if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE))
		kvm_use_magic_page();

#ifdef CONFIG_PPC_BOOK3S_64
	/* Enable napping */
	powersave_nap = 1;
#endif

free_tmp:
	kvm_free_tmp();

	return 0;
}
postcore_initcall(kvm_guest_init);