/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>

#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include "timing.h"
#include <asm/cacheflush.h>

#include "booke.h"
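
/* Base of the 64KB region that holds KVM's own exception handlers, laid
 * out at the host's IVOR offsets; allocated and filled in by
 * kvmppc_booke_init() below. */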
unsigned long kvmppc_booke_handlers;

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
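
/* Per-vcpu exit counters exported through debugfs by the generic KVM code. */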
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "mmio",        VCPU_STAT(mmio_exits) },
	{ "dcr",         VCPU_STAT(dcr_exits) },
	{ "sig",         VCPU_STAT(signal_exits) },
	{ "itlb_r",      VCPU_STAT(itlb_real_miss_exits) },
	{ "itlb_v",      VCPU_STAT(itlb_virt_miss_exits) },
	{ "dtlb_r",      VCPU_STAT(dtlb_real_miss_exits) },
	{ "dtlb_v",      VCPU_STAT(dtlb_virt_miss_exits) },
	{ "sysc",        VCPU_STAT(syscall_exits) },
	{ "isi",         VCPU_STAT(isi_exits) },
	{ "dsi",         VCPU_STAT(dsi_exits) },
	{ "inst_emu",    VCPU_STAT(emulated_inst_exits) },
	{ "dec",         VCPU_STAT(dec_exits) },
	{ "ext_intr",    VCPU_STAT(ext_intr_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ NULL }
};

/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	printk("pc: %08lx msr: %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr);
	printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
	printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
	       vcpu->arch.shared->srr1);

	printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       kvmppc_get_gpr(vcpu, i),
		       kvmppc_get_gpr(vcpu, i+1),
		       kvmppc_get_gpr(vcpu, i+2),
		       kvmppc_get_gpr(vcpu, i+3));
	}
}

static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
				       unsigned int priority)
{
	set_bit(priority, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
					ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
}

static void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
					   ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
}

static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
					   ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	unsigned int prio = BOOKE_IRQPRIO_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL;

	kvmppc_booke_queue_irqprio(vcpu, prio);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
				  struct kvm_interrupt *irq)
{
	clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
	clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
}

/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
					unsigned int priority)
{
	int allowed = 0;
	ulong uninitialized_var(msr_mask);
	bool update_esr = false, update_dear = false;
	ulong crit_raw = vcpu->arch.shared->critical;
	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
	bool crit;
	bool keep_irq = false;

	/* Truncate crit indicators in 32 bit mode */
	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
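
	/* Level-triggered external interrupts are delivered at the normal
	 * EXTERNAL priority, but keep_irq leaves the pending bit set so the
	 * interrupt stays asserted until userspace dequeues it. */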
	if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) {
		priority = BOOKE_IRQPRIO_EXTERNAL;
		keep_irq = true;
	}

	switch (priority) {
	case BOOKE_IRQPRIO_DTLB_MISS:
	case BOOKE_IRQPRIO_DATA_STORAGE:
		update_dear = true;
		/* fall through */
	case BOOKE_IRQPRIO_INST_STORAGE:
	case BOOKE_IRQPRIO_PROGRAM:
		update_esr = true;
		/* fall through */
	case BOOKE_IRQPRIO_ITLB_MISS:
	case BOOKE_IRQPRIO_SYSCALL:
	case BOOKE_IRQPRIO_FP_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_FP_DATA:
	case BOOKE_IRQPRIO_SPE_FP_ROUND:
	case BOOKE_IRQPRIO_AP_UNAVAIL:
	case BOOKE_IRQPRIO_ALIGNMENT:
		allowed = 1;
		msr_mask = MSR_CE|MSR_ME|MSR_DE;
		break;
	case BOOKE_IRQPRIO_CRITICAL:
	case BOOKE_IRQPRIO_WATCHDOG:
		allowed = vcpu->arch.shared->msr & MSR_CE;
		msr_mask = MSR_ME;
		break;
	case BOOKE_IRQPRIO_MACHINE_CHECK:
		allowed = vcpu->arch.shared->msr & MSR_ME;
		msr_mask = 0;
		break;
	case BOOKE_IRQPRIO_EXTERNAL:
	case BOOKE_IRQPRIO_DECREMENTER:
	case BOOKE_IRQPRIO_FIT:
		allowed = vcpu->arch.shared->msr & MSR_EE;
		allowed = allowed && !crit;
		msr_mask = MSR_CE|MSR_ME|MSR_DE;
		break;
	case BOOKE_IRQPRIO_DEBUG:
		allowed = vcpu->arch.shared->msr & MSR_DE;
		msr_mask = MSR_ME;
		break;
	}

	if (allowed) {
		vcpu->arch.shared->srr0 = vcpu->arch.pc;
		vcpu->arch.shared->srr1 = vcpu->arch.shared->msr;
		vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
		if (update_esr == true)
			vcpu->arch.esr = vcpu->arch.queued_esr;
		if (update_dear == true)
			vcpu->arch.shared->dar = vcpu->arch.queued_dear;
		kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask);

		if (!keep_irq)
			clear_bit(priority, &vcpu->arch.pending_exceptions);
	}

	return allowed;
}

/* Check pending exceptions and deliver one, if possible. */
void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned long old_pending = vcpu->arch.pending_exceptions;
	unsigned int priority;

	priority = __ffs(*pending);
	while (priority <= BOOKE_IRQPRIO_MAX) {
		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
			break;

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	if (*pending)
		vcpu->arch.shared->int_pending = 1;
	else if (old_pending)
		vcpu->arch.shared->int_pending = 0;
}

/**
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int exit_nr)
{
	enum emulation_result er;
	int r = RESUME_HOST;

	/* update before a new last_exit_type is rewritten */
	kvmppc_update_timing_stats(vcpu);

	local_irq_enable();

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
		kvmppc_dump_vcpu(vcpu);
		r = RESUME_HOST;
		break;

	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
		if (need_resched())
			cond_resched();
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DECREMENTER:
		/* Since we switched IVPR back to the host's value, the host
		 * handled this interrupt the moment we enabled interrupts.
		 * Now we just offer it a chance to reschedule the guest. */
		kvmppc_account_exit(vcpu, DEC_EXITS);
		if (need_resched())
			cond_resched();
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_PROGRAM:
		if (vcpu->arch.shared->msr & MSR_PR) {
			/* Program traps generated by user-level software must
			 * be handled by the guest kernel. */
			kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
			r = RESUME_GUEST;
			kvmppc_account_exit(vcpu, USR_PR_INST);
			break;
		}

		er = kvmppc_emulate_instruction(run, vcpu);
		switch (er) {
		case EMULATE_DONE:
			/* don't overwrite subtypes, just account kvm_stats */
			kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
			/* Future optimization: only reload non-volatiles if
			 * they were actually modified by emulation. */
			r = RESUME_GUEST_NV;
			break;
		case EMULATE_DO_DCR:
			run->exit_reason = KVM_EXIT_DCR;
			r = RESUME_HOST;
			break;
		case EMULATE_FAIL:
			/* XXX Deliver Program interrupt to guest. */
			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
			       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
			/* For debugging, encode the failing instruction and
			 * report it to userspace. */
			run->hw.hardware_exit_reason = ~0ULL << 32;
			run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
			r = RESUME_HOST;
			break;
		default:
			BUG();
		}
		break;

	case BOOKE_INTERRUPT_FP_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
		kvmppc_account_exit(vcpu, FP_UNAVAIL);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_UNAVAIL);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_FP_DATA:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DATA_STORAGE:
		kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
					       vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, DSI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_INST_STORAGE:
		kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, ISI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
		}
		kvmppc_account_exit(vcpu, SYSCALL_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DTLB_MISS: {
		unsigned long eaddr = vcpu->arch.fault_dear;
		int gtlb_index;
		gpa_t gpaddr;
		gfn_t gfn;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_core_queue_dtlb_miss(vcpu,
						    vcpu->arch.fault_dear,
						    vcpu->arch.fault_esr);
			kvmppc_mmu_dtlb_miss(vcpu);
			kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
			r = RESUME_GUEST;
			break;
		}

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't, and it is RAM. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;
		} else {
			/* Guest has mapped and accessed a page which is not
			 * actually RAM. */
			vcpu->arch.paddr_accessed = gpaddr;
			r = kvmppc_emulate_mmio(run, vcpu);
			kvmppc_account_exit(vcpu, MMIO_EXITS);
		}

		break;
	}

	case BOOKE_INTERRUPT_ITLB_MISS: {
		unsigned long eaddr = vcpu->arch.pc;
		gpa_t gpaddr;
		gfn_t gfn;
		int gtlb_index;

		r = RESUME_GUEST;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
			kvmppc_mmu_itlb_miss(vcpu);
			kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
			break;
		}

		kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
		} else {
			/* Guest mapped and leaped at non-RAM! */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
		}

		break;
	}
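
	/* Debug interrupts are taken as critical-class exceptions on Book E,
	 * so the interrupted guest PC is found in CSRR0 rather than SRR0. */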
	case BOOKE_INTERRUPT_DEBUG: {
		u32 dbsr;

		vcpu->arch.pc = mfspr(SPRN_CSRR0);

		/* clear IAC events in DBSR register */
		dbsr = mfspr(SPRN_DBSR);
		dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
		mtspr(SPRN_DBSR, dbsr);

		run->exit_reason = KVM_EXIT_DEBUG;
		kvmppc_account_exit(vcpu, DEBUG_EXITS);
		r = RESUME_HOST;
		break;
	}

	default:
		printk(KERN_EMERG "exit_nr %d\n", exit_nr);
		BUG();
	}
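
	/* Exit handling ran with host interrupts enabled; disable them again
	 * before delivering guest interrupts and deciding how to resume. */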
	local_irq_disable();

	kvmppc_core_deliver_interrupts(vcpu);

	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */
		if (signal_pending(current)) {
			run->exit_reason = KVM_EXIT_INTR;
			r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
		}
	}

	return r;
}

/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int i;

	vcpu->arch.pc = 0;
	vcpu->arch.shared->msr = 0;
	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */

	vcpu->arch.shadow_pid = 1;

	/* Eye-catching numbers so we know if the guest takes an interrupt
	 * before it's programmed its own IVPR/IVORs. */
	vcpu->arch.ivpr = 0x55550000;
	for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
		vcpu->arch.ivor[i] = 0x7700 | i * 4;

	kvmppc_init_timing_stats(vcpu);

	return kvmppc_core_vcpu_setup(vcpu);
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = vcpu->arch.pc;
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = vcpu->arch.ctr;
	regs->lr = vcpu->arch.lr;
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = vcpu->arch.shared->msr;
	regs->srr0 = vcpu->arch.shared->srr0;
	regs->srr1 = vcpu->arch.shared->srr1;
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = vcpu->arch.shared->sprg0;
	regs->sprg1 = vcpu->arch.shared->sprg1;
	regs->sprg2 = vcpu->arch.shared->sprg2;
	regs->sprg3 = vcpu->arch.shared->sprg3;
	regs->sprg4 = vcpu->arch.sprg4;
	regs->sprg5 = vcpu->arch.sprg5;
	regs->sprg6 = vcpu->arch.sprg6;
	regs->sprg7 = vcpu->arch.sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu->arch.pc = regs->pc;
	kvmppc_set_cr(vcpu, regs->cr);
	vcpu->arch.ctr = regs->ctr;
	vcpu->arch.lr = regs->lr;
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	vcpu->arch.shared->srr0 = regs->srr0;
	vcpu->arch.shared->srr1 = regs->srr1;
	kvmppc_set_pid(vcpu, regs->pid);
	vcpu->arch.shared->sprg0 = regs->sprg0;
	vcpu->arch.shared->sprg1 = regs->sprg1;
	vcpu->arch.shared->sprg2 = regs->sprg2;
	vcpu->arch.shared->sprg3 = regs->sprg3;
	vcpu->arch.sprg4 = regs->sprg4;
	vcpu->arch.sprg5 = regs->sprg5;
	vcpu->arch.sprg6 = regs->sprg6;
	vcpu->arch.sprg7 = regs->sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}
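
/* KVM_SREGS_E_BASE: register state common to all Book E cores. It is always
 * reported by KVM_GET_SREGS; KVM_SET_SREGS applies it only when the caller
 * sets the corresponding feature bit. */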
static void get_sregs_base(struct kvm_vcpu *vcpu,
			   struct kvm_sregs *sregs)
{
	u64 tb = get_tb();

	sregs->u.e.features |= KVM_SREGS_E_BASE;

	sregs->u.e.csrr0 = vcpu->arch.csrr0;
	sregs->u.e.csrr1 = vcpu->arch.csrr1;
	sregs->u.e.mcsr = vcpu->arch.mcsr;
	sregs->u.e.esr = vcpu->arch.esr;
	sregs->u.e.dear = vcpu->arch.shared->dar;
	sregs->u.e.tsr = vcpu->arch.tsr;
	sregs->u.e.tcr = vcpu->arch.tcr;
	sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
	sregs->u.e.tb = tb;
	sregs->u.e.vrsave = vcpu->arch.vrsave;
}

static int set_sregs_base(struct kvm_vcpu *vcpu,
			  struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_BASE))
		return 0;

	vcpu->arch.csrr0 = sregs->u.e.csrr0;
	vcpu->arch.csrr1 = sregs->u.e.csrr1;
	vcpu->arch.mcsr = sregs->u.e.mcsr;
	vcpu->arch.esr = sregs->u.e.esr;
	vcpu->arch.shared->dar = sregs->u.e.dear;
	vcpu->arch.vrsave = sregs->u.e.vrsave;
	vcpu->arch.tcr = sregs->u.e.tcr;

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) {
		vcpu->arch.dec = sregs->u.e.dec;
		kvmppc_emulate_dec(vcpu);
	}

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR) {
		/*
		 * FIXME: existing KVM timer handling is incomplete.
		 * TSR cannot be read by the guest, and its value in
		 * vcpu->arch is always zero.  For now, just handle
		 * the case where the caller is trying to inject a
		 * decrementer interrupt.
		 */
		if ((sregs->u.e.tsr & TSR_DIS) &&
		    (vcpu->arch.tcr & TCR_DIE))
			kvmppc_core_queue_dec(vcpu);
	}

	return 0;
}

static void get_sregs_arch206(struct kvm_vcpu *vcpu,
			      struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_ARCH206;

	sregs->u.e.pir = 0;
	sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
	sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
	sregs->u.e.decar = vcpu->arch.decar;
	sregs->u.e.ivpr = vcpu->arch.ivpr;
}

static int set_sregs_arch206(struct kvm_vcpu *vcpu,
			     struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206))
		return 0;

	if (sregs->u.e.pir != 0)
		return -EINVAL;

	vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
	vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1;
	vcpu->arch.decar = sregs->u.e.decar;
	vcpu->arch.ivpr = sregs->u.e.ivpr;

	return 0;
}
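
/* The 16 base interrupt vectors are exchanged in architectural order:
 * ivor_low[i] corresponds to IVORi. */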
void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_IVOR;

	sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
	sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
	sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
	sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
	sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
	sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
	sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
	sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
	sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
	sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
	sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
	sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
	sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
	sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
	sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
	sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
}

int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
		return 0;

	vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0];
	vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2];
	vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3];
	vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5];
	vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7];
	vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8];
	vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11];
	vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15];

	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	sregs->pvr = vcpu->arch.pvr;

	get_sregs_base(vcpu, sregs);
	get_sregs_arch206(vcpu, sregs);
	kvmppc_core_get_sregs(vcpu, sregs);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret;

	if (vcpu->arch.pvr != sregs->pvr)
		return -EINVAL;

	ret = set_sregs_base(vcpu, sregs);
	if (ret < 0)
		return ret;

	ret = set_sregs_arch206(vcpu, sregs);
	if (ret < 0)
		return ret;

	return kvmppc_core_set_sregs(vcpu, sregs);
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	int r;

	r = kvmppc_core_vcpu_translate(vcpu, tr);
	return r;
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return -ENOTSUPP;
}

int __init kvmppc_booke_init(void)
{
	unsigned long ivor[16];
	unsigned long max_ivor = 0;
	int i;

	/* We install our own exception handlers by hijacking IVPR. IVPR holds
	 * only the top 16 bits of the vector base, so the handlers must live
	 * in a 64KB-aligned region; hence the 64KB allocation. */
	kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						 VCPU_SIZE_ORDER);
	if (!kvmppc_booke_handlers)
		return -ENOMEM;

	/* XXX make sure our handlers are smaller than Linux's */

	/* Copy our interrupt handlers to match host IVORs. That way we don't
	 * have to swap the IVORs on every guest/host transition. */
	ivor[0] = mfspr(SPRN_IVOR0);
	ivor[1] = mfspr(SPRN_IVOR1);
	ivor[2] = mfspr(SPRN_IVOR2);
	ivor[3] = mfspr(SPRN_IVOR3);
	ivor[4] = mfspr(SPRN_IVOR4);
	ivor[5] = mfspr(SPRN_IVOR5);
	ivor[6] = mfspr(SPRN_IVOR6);
	ivor[7] = mfspr(SPRN_IVOR7);
	ivor[8] = mfspr(SPRN_IVOR8);
	ivor[9] = mfspr(SPRN_IVOR9);
	ivor[10] = mfspr(SPRN_IVOR10);
	ivor[11] = mfspr(SPRN_IVOR11);
	ivor[12] = mfspr(SPRN_IVOR12);
	ivor[13] = mfspr(SPRN_IVOR13);
	ivor[14] = mfspr(SPRN_IVOR14);
	ivor[15] = mfspr(SPRN_IVOR15);

	for (i = 0; i < 16; i++) {
		if (ivor[i] > max_ivor)
			max_ivor = ivor[i];

		memcpy((void *)kvmppc_booke_handlers + ivor[i],
		       kvmppc_handlers_start + i * kvmppc_handler_len,
		       kvmppc_handler_len);
	}
	flush_icache_range(kvmppc_booke_handlers,
			   kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);

	return 0;
}

void __exit kvmppc_booke_exit(void)
{
	free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
	kvm_exit();
}