xen-asm_64.S

/*
 * Asm versions of Xen pv-ops, suitable for either direct use or
 * inlining. The inline versions are the same as the direct-use
 * versions, with the pre- and post-amble chopped off.
 *
 * This code is encoded for size rather than absolute efficiency, with
 * a view to being able to inline as much as possible.
 *
 * We only bother with direct forms (ie, vcpu in pda) of the
 * operations here; the indirect forms are better handled in C, since
 * they're generally too large to inline anyway.
 */
#include <asm/errno.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>

#include <xen/interface/xen.h>

#include "xen-asm.h"

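/*
 * Xen pushes two extra words, %rcx and %r11, on top of every exception
 * frame.  Pull them back into their registers and use ret $16 to drop
 * the two stack slots, so the frame the generic entry code sees is laid
 * out like a native one.
 */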
ENTRY(xen_adjust_exception_frame)
	mov 8+0(%rsp), %rcx
	mov 8+8(%rsp), %r11
	ret $16

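/*
 * The shared hypercall page lays out one 32-byte stub per hypercall;
 * hypercall_iret is therefore the address of the HYPERVISOR_iret stub.
 */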
hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
/*
 * Xen64 iret frame:
 *
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip		<-- standard iret frame
 *
 *	flags
 *
 *	rcx		}
 *	r11		}<-- pushed by hypercall page
 * rsp->rax		}
 */
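/*
 * Reading the same frame upward from %rsp, this is (roughly) what the
 * Xen public headers describe as struct iret_context:
 *
 *	struct iret_context {
 *		uint64_t rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
 *	};
 *
 * The code below only has to supply the "flags" word (pushq $0 or
 * pushq $VGCF_in_syscall); the standard frame above it is already on
 * the stack, the rcx/r11/rax words beneath are pushed by the hypercall
 * page stub, and the actual return to guest context is performed by
 * the HYPERVISOR_iret hypercall.
 */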
ENTRY(xen_iret)
	pushq $0
1:	jmp hypercall_iret
ENDPATCH(xen_iret)
RELOC(xen_iret, 1b+1)

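/*
 * ENDPATCH marks the end of the region that may be copied inline by the
 * pv-op patcher; RELOC records the location of the jmp's 32-bit
 * pc-relative operand (1b+1 skips the opcode byte) so the displacement
 * can be fixed up when the code is copied to a different address.
 */
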
/*
 * sysexit is not used for 64-bit processes, so it's only ever used to
 * return to 32-bit compat userspace.
 */
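/*
 * We arrive here where the native path would execute sysexit, i.e. with
 * the user return address in %rdx and the user stack pointer in %rcx
 * (the sysexit register convention), so those two registers fill the
 * rip and rsp slots of the frame built below.
 */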
ENTRY(xen_sysexit)
	pushq $__USER32_DS		/* ss */
	pushq %rcx			/* rsp */
	pushq $X86_EFLAGS_IF		/* rflags (interrupts on) */
	pushq $__USER32_CS		/* cs */
	pushq %rdx			/* rip */
	pushq $0			/* flags */
1:	jmp hypercall_iret
ENDPATCH(xen_sysexit)
RELOC(xen_sysexit, 1b+1)

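/*
 * Entered where the native 64-bit path would execute sysretq: %rcx
 * holds the user rip, %r11 the user rflags, and %rsp is already the
 * user stack pointer.  Hop back onto the kernel stack so there is a
 * safe place to build the Xen iret frame.  VGCF_in_syscall tells the
 * hypervisor this is a return from a syscall, so it may take the
 * faster sysret-style exit and need not restore %rcx/%r11 (the
 * syscall ABI clobbers them anyway).
 */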
ENTRY(xen_sysret64)
	/*
	 * We're already on the usermode stack at this point, but
	 * still with the kernel gs, so we can easily switch back.
	 */
	movq %rsp, PER_CPU_VAR(old_rsp)
	movq PER_CPU_VAR(kernel_stack), %rsp

	pushq $__USER_DS		/* ss */
	pushq PER_CPU_VAR(old_rsp)	/* rsp */
	pushq %r11			/* rflags */
	pushq $__USER_CS		/* cs */
	pushq %rcx			/* rip */

	pushq $VGCF_in_syscall		/* flags */
1:	jmp hypercall_iret
ENDPATCH(xen_sysret64)
RELOC(xen_sysret64, 1b+1)

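/*
 * Same dance for the 32-bit compat sysret path, using the compat
 * segment selectors and a plain zero for the flags word.
 */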
ENTRY(xen_sysret32)
	/*
	 * We're already on the usermode stack at this point, but
	 * still with the kernel gs, so we can easily switch back.
	 */
	movq %rsp, PER_CPU_VAR(old_rsp)
	movq PER_CPU_VAR(kernel_stack), %rsp

	pushq $__USER32_DS		/* ss */
	pushq PER_CPU_VAR(old_rsp)	/* rsp */
	pushq %r11			/* rflags */
	pushq $__USER32_CS		/* cs */
	pushq %rcx			/* rip */

	pushq $0			/* flags */
1:	jmp hypercall_iret
ENDPATCH(xen_sysret32)
RELOC(xen_sysret32, 1b+1)

/*
 * Xen handles syscall callbacks much like ordinary exceptions, which
 * means we have:
 * - kernel gs
 * - kernel rsp
 * - an iret-like stack frame on the stack (including rcx and r11):
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip
 *	r11
 * rsp->rcx
 *
 * In all the entrypoints, we undo all that to make it look like a
 * CPU-generated syscall/sysenter and jump to the normal entrypoint.
 */
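/*
 * Counting up from %rsp, the frame slots are therefore:
 * 0 = rcx, 1 = r11, 2 = rip, 3 = cs, 4 = rflags, 5 = rsp, 6 = ss.
 * undo_xen_syscall reloads %rcx and %r11 from the frame and puts the
 * saved user %rsp back in %rsp, which is the state the native
 * syscall/sysenter entrypoints expect to start from.
 */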
.macro undo_xen_syscall
	mov 0*8(%rsp), %rcx
	mov 1*8(%rsp), %r11
	mov 5*8(%rsp), %rsp
.endm

/* Normal 64-bit system call target */
ENTRY(xen_syscall_target)
	undo_xen_syscall
	jmp system_call_after_swapgs	/* kernel gs is already in place */
ENDPROC(xen_syscall_target)

#ifdef CONFIG_IA32_EMULATION

/* 32-bit compat syscall target */
ENTRY(xen_syscall32_target)
	undo_xen_syscall
	jmp ia32_cstar_target
ENDPROC(xen_syscall32_target)

/* 32-bit compat sysenter target */
ENTRY(xen_sysenter_target)
	undo_xen_syscall
	jmp ia32_sysenter_target
ENDPROC(xen_sysenter_target)

#else /* !CONFIG_IA32_EMULATION */

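/*
 * Without compat support there is nothing to call: drop the extra
 * %rcx/%r11 words, set -ENOSYS as the return value, supply a zero
 * flags word and go straight back to userspace via the iret hypercall.
 */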
ENTRY(xen_syscall32_target)
ENTRY(xen_sysenter_target)
	lea 16(%rsp), %rsp	/* strip %rcx, %r11 */
	mov $-ENOSYS, %rax
	pushq $0
	jmp hypercall_iret

ENDPROC(xen_syscall32_target)
ENDPROC(xen_sysenter_target)

#endif /* CONFIG_IA32_EMULATION */