switch.c

/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/arm-smccc.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
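
/*
 * FPSIMD trap state lives in CPTR_EL2 on non-VHE and in CPACR_EL1 on
 * VHE; hyp_alternate_select() patches in the matching accessor at boot
 * based on ARM64_HAS_VIRT_HOST_EXTN.
 */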
static bool __hyp_text __fpsimd_enabled_nvhe(void)
{
	return !(read_sysreg(cptr_el2) & CPTR_EL2_TFP);
}

static bool __hyp_text __fpsimd_enabled_vhe(void)
{
	return !!(read_sysreg(cpacr_el1) & CPACR_EL1_FPEN);
}

static hyp_alternate_select(__fpsimd_is_enabled,
			    __fpsimd_enabled_nvhe, __fpsimd_enabled_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);

bool __hyp_text __fpsimd_enabled(void)
{
	return __fpsimd_is_enabled()();
}
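
/*
 * Per-mode trap setup: the VHE variant configures traps through
 * CPACR_EL1 and installs the hyp vectors returned by
 * kvm_get_hyp_vector(); the non-VHE variant programs CPTR_EL2.
 */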
static void __hyp_text __activate_traps_vhe(void)
{
	u64 val;

	val = read_sysreg(cpacr_el1);
	val |= CPACR_EL1_TTA;
	val &= ~CPACR_EL1_FPEN;
	write_sysreg(val, cpacr_el1);

	write_sysreg(kvm_get_hyp_vector(), vbar_el1);
}

static void __hyp_text __activate_traps_nvhe(void)
{
	u64 val;

	val = CPTR_EL2_DEFAULT;
	val |= CPTR_EL2_TTA | CPTR_EL2_TFP;
	write_sysreg(val, cptr_el2);
}

static hyp_alternate_select(__activate_traps_arch,
			    __activate_traps_nvhe, __activate_traps_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);

static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
{
	u64 val;

	/*
	 * We are about to set CPTR_EL2.TFP to trap all floating point
	 * register accesses to EL2, however, the ARM ARM clearly states that
	 * traps are only taken to EL2 if the operation would not otherwise
	 * trap to EL1. Therefore, always make sure that for 32-bit guests,
	 * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
	 */
	val = vcpu->arch.hcr_el2;
	if (!(val & HCR_RW)) {
		write_sysreg(1 << 30, fpexc32_el2);
		isb();
	}
	write_sysreg(val, hcr_el2);

	/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
	write_sysreg(1 << 15, hstr_el2);
	/*
	 * Make sure we trap PMU access from EL0 to EL2. Also sanitize
	 * PMSELR_EL0 to make sure it never contains the cycle
	 * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
	 * EL1 instead of being trapped to EL2.
	 */
	write_sysreg(0, pmselr_el0);
	write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
	__activate_traps_arch()();
}
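
/*
 * Counterparts to the trap setup above: restore the host's HCR_EL2 and
 * CPACR_EL1 (VHE) or CPTR_EL2 (non-VHE) configuration on the way back
 * to the host.
 */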
static void __hyp_text __deactivate_traps_vhe(void)
{
	extern char vectors[];	/* kernel exception vectors */

	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
	write_sysreg(CPACR_EL1_FPEN, cpacr_el1);
	write_sysreg(vectors, vbar_el1);
}

static void __hyp_text __deactivate_traps_nvhe(void)
{
	write_sysreg(HCR_RW, hcr_el2);
	write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
}

static hyp_alternate_select(__deactivate_traps_arch,
			    __deactivate_traps_nvhe, __deactivate_traps_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);

static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
{
	/*
	 * If we pended a virtual abort, preserve it until it gets
	 * cleared. See D1.14.3 (Virtual Interrupts) for details, but
	 * the crucial bit is "On taking a vSError interrupt,
	 * HCR_EL2.VSE is cleared to 0."
	 */
	if (vcpu->arch.hcr_el2 & HCR_VSE)
		vcpu->arch.hcr_el2 = read_sysreg(hcr_el2);

	__deactivate_traps_arch()();
	write_sysreg(0, hstr_el2);
	write_sysreg(read_sysreg(mdcr_el2) & MDCR_EL2_HPMN_MASK, mdcr_el2);
	write_sysreg(0, pmuserenr_el0);
}
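
/*
 * Stage-2 MMU context: __activate_vm loads the guest's VMID and stage-2
 * page table base from kvm->arch.vttbr; __deactivate_vm restores a zero
 * VTTBR_EL2 for the host.
 */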
static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	write_sysreg(kvm->arch.vttbr, vttbr_el2);
}

static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
{
	write_sysreg(0, vttbr_el2);
}
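
/*
 * Save/restore the virtual interrupt controller state, dispatching to
 * the GICv2 or GICv3 backend via a static key. The HCR_EL2 interrupt
 * override bits are dropped on save and re-applied (together with the
 * vcpu's pending irq_lines) on restore.
 */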
static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		__vgic_v3_save_state(vcpu);
	else
		__vgic_v2_save_state(vcpu);

	write_sysreg(read_sysreg(hcr_el2) & ~HCR_INT_OVERRIDE, hcr_el2);
}

static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
{
	u64 val;

	val = read_sysreg(hcr_el2);
	val |= HCR_INT_OVERRIDE;
	val |= vcpu->arch.irq_lines;
	write_sysreg(val, hcr_el2);

	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		__vgic_v3_restore_state(vcpu);
	else
		__vgic_v2_restore_state(vcpu);
}
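
/*
 * __check_arm_834220()() resolves to a constant at boot: true when the
 * CPU needs the ARM erratum 834220 workaround (HPFAR_EL2 may be
 * unreliable, see __populate_fault_info() below), false otherwise.
 */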
static bool __hyp_text __true_value(void)
{
	return true;
}

static bool __hyp_text __false_value(void)
{
	return false;
}

static hyp_alternate_select(__check_arm_834220,
			    __false_value, __true_value,
			    ARM64_WORKAROUND_834220);

static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
{
	u64 par, tmp;

	/*
	 * Resolve the IPA the hard way using the guest VA.
	 *
	 * Stage-1 translation already validated the memory access
	 * rights. As such, we can use the EL1 translation regime, and
	 * don't have to distinguish between EL0 and EL1 access.
	 *
	 * We do need to save/restore PAR_EL1 though, as we haven't
	 * saved the guest context yet, and we may return early...
	 */
	par = read_sysreg(par_el1);
	asm volatile("at s1e1r, %0" : : "r" (far));
	isb();

	tmp = read_sysreg(par_el1);
	write_sysreg(par, par_el1);

	if (unlikely(tmp & 1))
		return false; /* Translation failed, back to guest */

	/* Convert PAR to HPFAR format */
	*hpfar = ((tmp >> 12) & ((1UL << 36) - 1)) << 4;
	return true;
}
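
/*
 * Snapshot the fault state (ESR_EL2, FAR_EL2, HPFAR_EL2) into the vcpu
 * before the guest context is saved. Returns false when the fault
 * information could not be resolved and the guest must be re-entered
 * to replay the access.
 */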
static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
{
	u64 esr = read_sysreg_el2(esr);
	u8 ec = ESR_ELx_EC(esr);
	u64 hpfar, far;

	vcpu->arch.fault.esr_el2 = esr;

	if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
		return true;

	far = read_sysreg_el2(far);

	/*
	 * The HPFAR can be invalid if the stage 2 fault did not
	 * happen during a stage 1 page table walk (the ESR_EL2.S1PTW
	 * bit is clear) and one of the two following cases are true:
	 * 1. The fault was due to a permission fault
	 * 2. The processor carries errata 834220
	 *
	 * Therefore, for all non S1PTW faults where we either have a
	 * permission fault or the errata workaround is enabled, we
	 * resolve the IPA using the AT instruction.
	 */
	if (!(esr & ESR_ELx_S1PTW) &&
	    (__check_arm_834220()() || (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
		if (!__translate_far_to_hpfar(far, &hpfar))
			return false;
	} else {
		hpfar = read_sysreg(hpfar_el2);
	}

	vcpu->arch.fault.far_el2 = far;
	vcpu->arch.fault.hpfar_el2 = hpfar;
	return true;
}
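
/*
 * Advance the guest PC past the instruction that just trapped, taking
 * care of the variable instruction length for AArch32 guests.
 */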
static void __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
{
	*vcpu_pc(vcpu) = read_sysreg_el2(elr);

	if (vcpu_mode_is_32bit(vcpu)) {
		vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(spsr);
		kvm_skip_instr32(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
		write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, spsr);
	} else {
		*vcpu_pc(vcpu) += 4;
	}

	write_sysreg_el2(*vcpu_pc(vcpu), elr);
}
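
/*
 * True when this vcpu has the SSBD (ARM_SMCCC_ARCH_WORKAROUND_2)
 * mitigation disabled, i.e. the host's always-on mitigation must be
 * dropped for the duration of the guest run.
 */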
static inline bool __hyp_text __needs_ssbd_off(struct kvm_vcpu *vcpu)
{
	if (!cpus_have_cap(ARM64_SSBD))
		return false;

	return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
}

static void __hyp_text __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ARM64_SSBD
	/*
	 * The host runs with the workaround always present. If the
	 * guest wants it disabled, so be it...
	 */
	if (__needs_ssbd_off(vcpu) &&
	    __hyp_this_cpu_read(arm64_ssbd_callback_required))
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
#endif
}

static void __hyp_text __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ARM64_SSBD
	/*
	 * If the guest has disabled the workaround, bring it back on.
	 */
	if (__needs_ssbd_off(vcpu) &&
	    __hyp_this_cpu_read(arm64_ssbd_callback_required))
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
#endif
}
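
/*
 * The world switch: save host state, install the guest's traps, stage-2
 * MMU, vgic, timer and system register state, run the guest until it
 * exits, then undo everything in the opposite order. Returns the exit
 * code to the host's exit handler.
 */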
int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	bool fp_enabled;
	u64 exit_code;

	vcpu = kern_hyp_va(vcpu);

	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
	host_ctxt->__hyp_running_vcpu = vcpu;
	guest_ctxt = &vcpu->arch.ctxt;

	__sysreg_save_host_state(host_ctxt);
	__debug_cond_save_host_state(vcpu);

	__activate_traps(vcpu);
	__activate_vm(vcpu);

	__vgic_restore_state(vcpu);
	__timer_restore_state(vcpu);

	/*
	 * We must restore the 32-bit state before the sysregs, thanks
	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
	 */
	__sysreg32_restore_state(vcpu);
	__sysreg_restore_guest_state(guest_ctxt);
	__debug_restore_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);

	__set_guest_arch_workaround_state(vcpu);

	/* Jump in the fire! */
again:
	exit_code = __guest_enter(vcpu, host_ctxt);
	/* And we're baaack! */

	/*
	 * We're using the raw exception code in order to only process
	 * the trap if no SError is pending. We will come back to the
	 * same PC once the SError has been injected, and replay the
	 * trapping instruction.
	 */
	if (exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu))
		goto again;

	if (static_branch_unlikely(&vgic_v2_cpuif_trap) &&
	    exit_code == ARM_EXCEPTION_TRAP) {
		bool valid;

		valid = kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_DABT_LOW &&
			kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
			kvm_vcpu_dabt_isvalid(vcpu) &&
			!kvm_vcpu_dabt_isextabt(vcpu) &&
			!kvm_vcpu_dabt_iss1tw(vcpu);

		if (valid) {
			int ret = __vgic_v2_perform_cpuif_access(vcpu);

			if (ret == 1) {
				__skip_instr(vcpu);
				goto again;
			}

			if (ret == -1) {
				/* Promote an illegal access to an SError */
				__skip_instr(vcpu);
				exit_code = ARM_EXCEPTION_EL1_SERROR;
			}

			/* 0 falls through to be handled out of EL2 */
		}
	}

	__set_host_arch_workaround_state(vcpu);

	fp_enabled = __fpsimd_enabled();

	__sysreg_save_guest_state(guest_ctxt);
	__sysreg32_save_state(vcpu);
	__timer_save_state(vcpu);
	__vgic_save_state(vcpu);

	__deactivate_traps(vcpu);
	__deactivate_vm(vcpu);

	__sysreg_restore_host_state(host_ctxt);

	if (fp_enabled) {
		__fpsimd_save_state(&guest_ctxt->gp_regs.fp_regs);
		__fpsimd_restore_state(&host_ctxt->gp_regs.fp_regs);
	}

	__debug_save_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);
	__debug_cond_restore_host_state(vcpu);

	return exit_code;
}
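
/*
 * Panic plumbing: with VHE we can call panic() directly; without VHE we
 * must first get the panic string's kernel address and hand over to
 * __hyp_do_panic.
 */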
static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";

static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par,
					     struct kvm_vcpu *vcpu)
{
	unsigned long str_va;

	/*
	 * Force the panic string to be loaded from the literal pool,
	 * making sure it is a kernel address and not a PC-relative
	 * reference.
	 */
	asm volatile("ldr %0, =__hyp_panic_string" : "=r" (str_va));

	__hyp_do_panic(str_va,
		       spsr, elr,
		       read_sysreg(esr_el2), read_sysreg_el2(far),
		       read_sysreg(hpfar_el2), par, vcpu);
}

static void __hyp_text __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par,
					    struct kvm_vcpu *vcpu)
{
	panic(__hyp_panic_string,
	      spsr, elr,
	      read_sysreg_el2(esr), read_sysreg_el2(far),
	      read_sysreg(hpfar_el2), par, vcpu);
}

static hyp_alternate_select(__hyp_call_panic,
			    __hyp_call_panic_nvhe, __hyp_call_panic_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);
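
/*
 * Entry point for unrecoverable faults taken at EL2. If a guest was
 * running (VTTBR_EL2 is non-zero), restore enough host state for the
 * panic path to be safe before reporting.
 */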
void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
{
	struct kvm_vcpu *vcpu = NULL;

	u64 spsr = read_sysreg_el2(spsr);
	u64 elr = read_sysreg_el2(elr);
	u64 par = read_sysreg(par_el1);

	if (read_sysreg(vttbr_el2)) {
		vcpu = host_ctxt->__hyp_running_vcpu;
		__timer_save_state(vcpu);
		__deactivate_traps(vcpu);
		__deactivate_vm(vcpu);
		__sysreg_restore_host_state(host_ctxt);
	}

	/* Call panic for real */
	__hyp_call_panic()(spsr, elr, par, vcpu);

	unreachable();
}