powerpc.c

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include "timing.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
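
/*
 * Architecture-generic KVM entry points for PowerPC.  The routines here
 * implement the kvm_arch_* and kvmppc_* interfaces expected by common KVM
 * code and dispatch to the flavour-specific kvmppc_core_* backends
 * (book3s, booke, e500).
 */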

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !(v->arch.shared->msr & MSR_WE) ||
	       !!(v->arch.pending_exceptions) ||
	       v->requests;
}
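
/*
 * KVM paravirtual hypercall handler.  By convention the guest passes the
 * hypercall number in r11 and up to four arguments in r3-r6; the primary
 * return value goes back in r3 (our return value here) and a secondary
 * one in r4.
 */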
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE:
	{
		vcpu->arch.magic_page_pa = param1;
		vcpu->arch.magic_page_ea = param2;

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = HC_EV_SUCCESS;
		break;
	}
	case HC_VENDOR_KVM | KVM_HC_FEATURES:
		r = HC_EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500)
		/* XXX Missing magic page on 44x */
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	default:
		r = HC_EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

#ifdef CONFIG_KVM_BOOK3S_64_HV
	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled)
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
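
/*
 * Run the in-kernel instruction emulator and translate its verdict into a
 * resume code.  The _NV variants tell the exit path that non-volatile
 * registers must be reloaded, since emulation may have modified them.
 */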
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
		/* XXX Deliver Program interrupt to guest. */
		printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
		       kvmppc_get_last_inst(vcpu));
		r = RESUME_HOST;
		break;
	default:
		BUG();
	}

	return r;
}

int kvm_arch_hardware_enable(void *garbage)
{
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	if (type)
		return -EINVAL;

	return kvmppc_core_init_vm(kvm);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ONE_REG:
		r = 1;
		break;
#ifndef CONFIG_KVM_BOOK3S_64_HV
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#ifdef CONFIG_KVM_E500
	case KVM_CAP_SW_TLB:
#endif
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HV
	case KVM_CAP_SPAPR_TCE:
		r = 1;
		break;
	case KVM_CAP_PPC_SMT:
		r = threads_per_core;
		break;
	case KVM_CAP_PPC_RMA:
		r = 1;
		/* PPC970 requires an RMA */
		if (cpu_has_feature(CPU_FTR_ARCH_201))
			r = 2;
		break;
	case KVM_CAP_SYNC_MMU:
		r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
#ifdef CONFIG_KVM_BOOK3S_64_HV
		r = num_present_cpus();
#else
		r = num_online_cpus();
#endif
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	default:
		r = 0;
		break;
	}
	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	return 0;
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	return kvmppc_core_prepare_memory_region(kvm, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot old,
				   int user_alloc)
{
	kvmppc_core_commit_memory_region(kvm, mem);
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;

	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu)) {
		vcpu->arch.wqp = &vcpu->wq;
		kvmppc_create_vcpu_debugfs(vcpu, id);
	}
	return vcpu;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);
	tasklet_kill(&vcpu->arch.tasklet);

	kvmppc_remove_vcpu_debugfs(vcpu);
	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	tasklet_schedule(&vcpu->arch.tasklet);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif

	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
	vcpu->cpu = smp_processor_id();
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
	vcpu->cpu = -1;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}

static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
				     struct kvm_run *run)
{
	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
}
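
/*
 * Called after userspace has serviced an MMIO load: byte-swap and
 * sign-extend the data in run->mmio.data as required, then write it into
 * the register recorded in vcpu->arch.io_gpr when the load was first
 * punted out to userspace.
 */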
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
				      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (vcpu->arch.mmio_is_bigendian) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		/* Convert BE data from userland back to LE. */
		switch (run->mmio.len) {
		case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
		case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
	default:
		BUG();
	}
}
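
/*
 * Set up run->mmio so userspace can emulate the access, record where the
 * result should land, and return EMULATE_DO_MMIO so the exit handler
 * bounces out to the host with KVM_EXIT_MMIO.
 */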
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes, int is_bigendian)
{
	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       bytes);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_is_bigendian = is_bigendian;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = 0;

	return EMULATE_DO_MMIO;
}

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes, int is_bigendian)
{
	int r;

	r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);
	vcpu->arch.mmio_sign_extend = 1;

	return r;
}

int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_bigendian)
{
	void *data = run->mmio.data;

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       bytes);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (is_bigendian) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8 *)data = val; break;
		}
	} else {
		/* Store LE value into 'data'. */
		switch (bytes) {
		case 4: st_le32(data, val); break;
		case 2: st_le16(data, val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	return EMULATE_DO_MMIO;
}
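
/*
 * Main vcpu run entry point.  Before re-entering the guest, complete any
 * work a previous exit left pending: finish an MMIO or DCR load, copy back
 * the OSI gprs, or install the return values of a PAPR hypercall.
 */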
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.dcr_needed) {
		if (!vcpu->arch.dcr_is_write)
			kvmppc_complete_dcr_load(vcpu, run);
		vcpu->arch.dcr_needed = 0;
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
	}

	r = kvmppc_vcpu_run(run, vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}
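
/*
 * Wake a vcpu that is blocked on its wait queue, or send a reschedule IPI
 * to the remote CPU it is currently running on.
 */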
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
	int me;
	int cpu = vcpu->cpu;

	me = get_cpu();
	if (waitqueue_active(vcpu->arch.wqp)) {
		wake_up_interruptible(vcpu->arch.wqp);
		vcpu->stat.halt_wakeup++;
	} else if (cpu != me && cpu != -1) {
		smp_send_reschedule(vcpu->cpu);
	}
	put_cpu();
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu, irq);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);
	kvm_vcpu_kick(vcpu);

	return 0;
}
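
/*
 * A minimal userspace sketch (hypothetical vcpu_fd) of turning a
 * capability on for a vcpu via the KVM_ENABLE_CAP ioctl handled below:
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_PPC_PAPR };
 *	if (ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap) < 0)
 *		err(1, "KVM_ENABLE_CAP");
 */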
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
#ifdef CONFIG_KVM_E500
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		goto out;
	}

	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#ifdef CONFIG_KVM_E500
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
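
/*
 * Build the four-instruction hypercall sequence reported to the guest.
 * lis (0x3c000000) and ori (0x60000000) load KVM_SC_MAGIC_R0 into r0 in
 * two 16-bit halves, sc (0x44000002) traps into the hypervisor, and the
 * trailing nop is "ori r0,r0,0", which is why inst_nop reuses the ori
 * encoding.
 */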
static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_nop = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask);
	pvinfo->hcall[1] = inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask);
	pvinfo->hcall[2] = inst_sc;
	pvinfo->hcall[3] = inst_nop;

	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
#ifdef CONFIG_KVM_BOOK3S_64_HV
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;
		struct kvm *kvm = filp->private_data;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
		goto out;
	}

	case KVM_ALLOCATE_RMA: {
		struct kvm *kvm = filp->private_data;
		struct kvm_allocate_rma rma;

		r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
		if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
			r = -EFAULT;
		break;
	}
#endif /* CONFIG_KVM_BOOK3S_64_HV */

	default:
		r = -ENOTTY;
	}

out:
	return r;
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}