/* arch/x86/xen/xen-asm_64.S */
/*
 * Asm versions of Xen pv-ops, suitable for either direct use or
 * inlining. The inline versions are the same as the direct-use
 * versions, with the pre- and post-amble chopped off.
 *
 * This code is encoded for size rather than absolute efficiency, with
 * a view to being able to inline as much as possible.
 *
 * We only bother with direct forms (ie, vcpu in pda) of the
 * operations here; the indirect forms are better handled in C, since
 * they're generally too large to inline anyway.
 */
#include <asm/errno.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>

#include <xen/interface/xen.h>

#include "xen-asm.h"
/*
 * Pop the two extra slots Xen pushes on top of the hardware exception
 * frame: reload %rcx and %r11 from the stack (skipping our own return
 * address at 0(%rsp)), then return while discarding those 16 bytes so
 * a standard iret frame is left behind.
 */
ENTRY(xen_adjust_exception_frame)
	mov 8+0(%rsp), %rcx		/* restore %rcx saved by Xen */
	mov 8+8(%rsp), %r11		/* restore %r11 saved by Xen */
	ret $16				/* return, dropping the two 8-byte slots */
ENDPROC(xen_adjust_exception_frame)
/* Address of the iret entry in the hypercall page (32 bytes per hypercall). */
hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32

/*
 * Xen64 iret frame:
 *
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip		<-- standard iret frame
 *
 *	flags
 *
 *	rcx		}
 *	r11		}<-- pushed by hypercall page
 * rsp->rax		}
 */
ENTRY(xen_iret)
	pushq $0			/* flags word for HYPERVISOR_iret: no VGCF bits set */
1:	jmp hypercall_iret		/* 1b+1 below points at this jump's operand */
ENDPATCH(xen_iret)
RELOC(xen_iret, 1b+1)
/*
 * sysret64 replacement: return to 64-bit userspace by building the
 * iret frame HYPERVISOR_iret expects and jumping to the hypercall page.
 */
ENTRY(xen_sysret64)
	/*
	 * We're already on the usermode stack at this point, but
	 * still with the kernel gs, so we can easily switch back.
	 */
	movq %rsp, PER_CPU_VAR(rsp_scratch)	/* stash user %rsp per-cpu */
	movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp	/* back onto kernel stack */

	/* Build the ss/rsp/rflags/cs/rip frame for the hypercall. */
	pushq $__USER_DS			/* ss */
	pushq PER_CPU_VAR(rsp_scratch)		/* user rsp saved above */
	pushq %r11				/* rflags: syscall saved them in %r11 */
	pushq $__USER_CS			/* cs */
	pushq %rcx				/* rip: syscall saved it in %rcx */

	pushq $VGCF_in_syscall			/* flags word: syscall-style return */
1:	jmp hypercall_iret			/* 1b+1 below points at this jump's operand */
ENDPATCH(xen_sysret64)
RELOC(xen_sysret64, 1b+1)
/*
 * Xen handles syscall callbacks much like ordinary exceptions, which
 * means we have:
 * - kernel gs
 * - kernel rsp
 * - an iret-like stack frame on the stack (including rcx and r11):
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip
 *	r11
 * rsp->rcx
 *
 * In all the entrypoints, we undo all that to make it look like a
 * CPU-generated syscall/sysenter and jump to the normal entrypoint.
 */

/*
 * Strip the Xen-pushed frame: put rip/rflags back into the registers
 * a native syscall would leave them in, and switch to the saved user
 * stack pointer (slot 5 of the frame pictured above).
 */
.macro undo_xen_syscall
	mov 0*8(%rsp), %rcx		/* user rip -> %rcx (as syscall does) */
	mov 1*8(%rsp), %r11		/* user rflags -> %r11 (as syscall does) */
	mov 5*8(%rsp), %rsp		/* restore user stack pointer */
.endm
/* Normal 64-bit system call target */
ENTRY(xen_syscall_target)
	undo_xen_syscall		/* make it look like a native syscall */
	jmp entry_SYSCALL_64_after_swapgs	/* gs is already the kernel's */
ENDPROC(xen_syscall_target)
#ifdef CONFIG_IA32_EMULATION

/* 32-bit compat syscall target */
ENTRY(xen_syscall32_target)
	undo_xen_syscall
	jmp entry_SYSCALL_compat
ENDPROC(xen_syscall32_target)

/* 32-bit compat sysenter target */
ENTRY(xen_sysenter_target)
	undo_xen_syscall
	jmp entry_SYSENTER_compat
ENDPROC(xen_sysenter_target)

#else /* !CONFIG_IA32_EMULATION */

/*
 * Compat entry points without IA32 emulation: fail the call with
 * -ENOSYS and return straight to userspace via the iret hypercall.
 * Both symbols share one body.
 */
ENTRY(xen_syscall32_target)
ENTRY(xen_sysenter_target)
	lea 16(%rsp), %rsp	/* strip %rcx, %r11 */
	mov $-ENOSYS, %rax	/* error return for the unsupported syscall */
	pushq $0		/* flags word for HYPERVISOR_iret: none set */
	jmp hypercall_iret
ENDPROC(xen_syscall32_target)
ENDPROC(xen_sysenter_target)

#endif	/* CONFIG_IA32_EMULATION */