hyp-entry.S

/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/arm-smccc.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>

	.text
	.pushsection	.hyp.text, "ax"

.macro do_el2_call
	/*
	 * Shuffle the parameters before calling the function
	 * pointed to in x0. Assumes parameters in x[1,2,3].
	 */
	mov	lr, x0
	mov	x0, x1
	mov	x1, x2
	mov	x2, x3
	blr	lr
.endm
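
/*
 * EL2 function call used when VHE is present: the host kernel already runs
 * at EL2, so __kvm_call_hyp can branch here directly instead of trapping
 * via HVC.
 */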
ENTRY(__vhe_hyp_call)
	str	lr, [sp, #-16]!
	do_el2_call
	ldr	lr, [sp], #16
	/*
	 * We used to rely on having an exception return to get
	 * an implicit isb. In the E2H case, we don't have it anymore.
	 * Rather than changing all the leaf functions, just do it here
	 * before returning to the rest of the kernel.
	 */
	isb
	ret
ENDPROC(__vhe_hyp_call)

/*
 * Compute the idmap address of __kvm_hyp_reset based on the idmap
 * start passed as a parameter, and jump there.
 *
 * x0: HYP phys_idmap_start
 */
ENTRY(__kvm_hyp_teardown)
	mov	x4, x0
	adr_l	x3, __kvm_hyp_reset
	/* insert __kvm_hyp_reset()'s offset into phys_idmap_start */
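	/*
	 * __kvm_hyp_reset is assumed to live within the (page-aligned)
	 * idmap page, so merging its in-page offset into phys_idmap_start
	 * yields its idmap address.
	 */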
	bfi	x4, x3, #0, #PAGE_SHIFT
	br	x4
ENDPROC(__kvm_hyp_teardown)

el1_sync:				// Guest trapped into EL2
	stp	x0, x1, [sp, #-16]!
	mrs	x0, esr_el2
	lsr	x0, x0, #ESR_ELx_EC_SHIFT
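	/*
	 * Only HVC from AArch64 or AArch32 is handled here; the ccmp forces
	 * the Z flag when the first compare already matched, so a single
	 * b.ne sends every other exception class to el1_trap.
	 */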
	cmp	x0, #ESR_ELx_EC_HVC64
	ccmp	x0, #ESR_ELx_EC_HVC32, #4, ne
	b.ne	el1_trap

	mrs	x1, vttbr_el2		// If vttbr is valid, the guest
	cbnz	x1, el1_hvc_guest	// called HVC

	/* Here, we're pretty sure the host called HVC. */
	ldp	x0, x1, [sp], #16
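
	/* HVC_GET_VECTORS simply hands back the current EL2 vector base. */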
	cmp	x0, #HVC_GET_VECTORS
	b.ne	1f
	mrs	x0, vbar_el2
	b	2f

1:
	/*
	 * Perform the EL2 call
	 */
	kern_hyp_va	x0
	do_el2_call

2:	eret

el1_hvc_guest:
	/*
	 * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
	 * The workaround has already been applied on the host,
	 * so let's quickly get back to the guest. We don't bother
	 * restoring x1, as it can be clobbered anyway.
	 */
	ldr	x1, [sp]			// Guest's x0
	eor	w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
	cbz	w1, wa_epilogue

	/* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
	eor	w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
			  ARM_SMCCC_ARCH_WORKAROUND_2)
	cbnz	w1, el1_trap

#ifdef CONFIG_ARM64_SSBD
alternative_cb	arm64_enable_wa2_handling
	b	wa2_end
alternative_cb_end
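	/*
	 * The branch above is expected to be patched into a NOP by the
	 * arm64_enable_wa2_handling callback when dynamic SSBD mitigation
	 * is in use, enabling the per-vcpu handling below.
	 */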
	get_vcpu_ptr	x2, x0
	ldr	x0, [x2, #VCPU_WORKAROUND_FLAGS]

	// Sanitize the argument and update the guest flags
	ldr	x1, [sp, #8]			// Guest's x1
	clz	w1, w1				// Murphy's device:
	lsr	w1, w1, #5			// w1 = !!w1 without using
	eor	w1, w1, #1			// the flags...
	bfi	x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1
	str	x0, [x2, #VCPU_WORKAROUND_FLAGS]

	/* Check that we actually need to perform the call */
	hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2
	cbz	x0, wa2_end

	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
	smc	#0

	/* Don't leak data from the SMC call */
	mov	x3, xzr
wa2_end:
	mov	x2, xzr
	mov	x1, xzr

#endif
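
	/* Return 0 in x0, drop the saved x0/x1 and go straight back to the guest. */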
wa_epilogue:
	mov	x0, xzr
	add	sp, sp, #16
	eret

el1_trap:
	get_vcpu_ptr	x1, x0
	mrs	x0, esr_el2
	lsr	x0, x0, #ESR_ELx_EC_SHIFT
	/*
	 * x0: ESR_EC
	 * x1: vcpu pointer
	 */
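	/*
	 * The guest's x0/x1 are still saved on the stack; __guest_exit is
	 * expected to pop them and store them into the vcpu context.
	 */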

	/* Guest accessed VFP/SIMD registers, save host, restore Guest */
	cmp	x0, #ESR_ELx_EC_FP_ASIMD
	b.eq	__fpsimd_guest_restore

	mov	x0, #ARM_EXCEPTION_TRAP
	b	__guest_exit

el1_irq:
	stp	x0, x1, [sp, #-16]!
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_IRQ
	b	__guest_exit

el1_error:
	stp	x0, x1, [sp, #-16]!
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_EL1_SERROR
	b	__guest_exit

el2_error:
	/*
	 * Only two possibilities:
	 * 1) Either we come from the exit path, having just unmasked
	 *    PSTATE.A: change the return code to an EL2 fault, and
	 *    carry on, as we're already in a sane state to handle it.
	 * 2) Or we come from anywhere else, and that's a bug: we panic.
	 *
	 * For (1), x0 contains the original return code and x1 doesn't
	 * contain anything meaningful at that stage. We can reuse them
	 * as temp registers.
	 * For (2), who cares?
	 */
	mrs	x0, elr_el2
	adr	x1, abort_guest_exit_start
	cmp	x0, x1
	adr	x1, abort_guest_exit_end
	ccmp	x0, x1, #4, ne
	b.ne	__hyp_panic
	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
	eret
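
/*
 * Fake an exception return into the kernel's panic() at EL1h, with all
 * interrupts (DAIF) masked.
 */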
ENTRY(__hyp_do_panic)
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =panic
	msr	elr_el2, lr
	eret
ENDPROC(__hyp_do_panic)

ENTRY(__hyp_panic)
	get_host_ctxt	x0, x1
	b	hyp_panic
ENDPROC(__hyp_panic)
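
/* Generate a stub vector entry that simply branches to \target. */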
.macro invalid_vector	label, target = __hyp_panic
	.align	2
\label:
	b	\target
ENDPROC(\label)
.endm

	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid
	invalid_vector	el2t_irq_invalid
	invalid_vector	el2t_fiq_invalid
	invalid_vector	el2t_error_invalid
	invalid_vector	el2h_sync_invalid
	invalid_vector	el2h_irq_invalid
	invalid_vector	el2h_fiq_invalid
	invalid_vector	el1_sync_invalid
	invalid_vector	el1_irq_invalid
	invalid_vector	el1_fiq_invalid

	.ltorg
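
	/* The vector table must be 2kB-aligned: the low 11 bits of VBAR_EL2 are RES0. */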
	.align	11

ENTRY(__kvm_hyp_vector)
	ventry	el2t_sync_invalid	// Synchronous EL2t
	ventry	el2t_irq_invalid	// IRQ EL2t
	ventry	el2t_fiq_invalid	// FIQ EL2t
	ventry	el2t_error_invalid	// Error EL2t

	ventry	el2h_sync_invalid	// Synchronous EL2h
	ventry	el2h_irq_invalid	// IRQ EL2h
	ventry	el2h_fiq_invalid	// FIQ EL2h
	ventry	el2_error		// Error EL2h

	ventry	el1_sync		// Synchronous 64-bit EL1
	ventry	el1_irq			// IRQ 64-bit EL1
	ventry	el1_fiq_invalid		// FIQ 64-bit EL1
	ventry	el1_error		// Error 64-bit EL1

	ventry	el1_sync		// Synchronous 32-bit EL1
	ventry	el1_irq			// IRQ 32-bit EL1
	ventry	el1_fiq_invalid		// FIQ 32-bit EL1
	ventry	el1_error		// Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)