kvm_para.h

#ifndef _ASM_X86_KVM_PARA_H
#define _ASM_X86_KVM_PARA_H

#include <linux/types.h>
#include <asm/hyperv.h>

/* This CPUID returns the signature 'KVMKVMKVM' in ebx, ecx, and edx. It
 * should be used to determine that a VM is running under KVM.
 */
#define KVM_CPUID_SIGNATURE 0x40000000

/* This CPUID returns a feature bitmap in eax. Before enabling a particular
 * paravirtualization, the appropriate feature bit should be checked.
 */
#define KVM_CPUID_FEATURES 0x40000001
#define KVM_FEATURE_CLOCKSOURCE 0
#define KVM_FEATURE_NOP_IO_DELAY 1
#define KVM_FEATURE_MMU_OP 2
/* This indicates that the new set of kvmclock MSRs is available.
 * The use of 0x11 and 0x12 is deprecated.
 */
#define KVM_FEATURE_CLOCKSOURCE2 3
#define KVM_FEATURE_ASYNC_PF 4
#define KVM_FEATURE_STEAL_TIME 5

/* The last 8 bits are used to indicate how to interpret the flags field
 * in the pvclock structure. If no bits are set, all flags are ignored.
 */
#define KVM_FEATURE_CLOCKSOURCE_STABLE_BIT 24
#define MSR_KVM_WALL_CLOCK 0x11
#define MSR_KVM_SYSTEM_TIME 0x12

#define KVM_MSR_ENABLED 1
/* Custom MSRs fall in the range 0x4b564d00-0x4b564dff */
#define MSR_KVM_WALL_CLOCK_NEW 0x4b564d00
#define MSR_KVM_SYSTEM_TIME_NEW 0x4b564d01
#define MSR_KVM_ASYNC_PF_EN 0x4b564d02
#define MSR_KVM_STEAL_TIME 0x4b564d03
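
/* Illustrative sketch (not part of this header): registering a kvmclock time
 * page through the new MSR. The example_ names are made up; kernel context is
 * assumed (wrmsrl() from <asm/msr.h>, __pa() from <asm/page.h>, and
 * struct pvclock_vcpu_time_info from <asm/pvclock-abi.h>). Bit 0 of the value
 * written is treated as the enable bit, and KVM_FEATURE_CLOCKSOURCE2 should be
 * confirmed via KVM_CPUID_FEATURES before preferring the new MSR pair over
 * 0x11/0x12.
 */
#include <asm/msr.h>
#include <asm/page.h>
#include <asm/pvclock-abi.h>

static struct pvclock_vcpu_time_info example_hv_clock;

static void example_register_kvmclock(void)
{
	/* Hand the guest-physical address of the time page to the host,
	 * with the low bit set so the host starts updating it.
	 */
	wrmsrl(MSR_KVM_SYSTEM_TIME_NEW, __pa(&example_hv_clock) | 1);
}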
struct kvm_steal_time {
	__u64 steal;
	__u32 version;
	__u32 flags;
	__u32 pad[12];
};

#define KVM_STEAL_ALIGNMENT_BITS 5
#define KVM_STEAL_VALID_BITS ((-1ULL << (KVM_STEAL_ALIGNMENT_BITS + 1)))
#define KVM_STEAL_RESERVED_MASK (((1 << KVM_STEAL_ALIGNMENT_BITS) - 1) << 1)
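
/* Illustrative sketch (not part of this header): enabling steal-time
 * accounting for the current vCPU. The example_ names are made up; kernel
 * context is assumed (wrmsrl() from <asm/msr.h>, __pa() from <asm/page.h>).
 * The 64-byte alignment follows from KVM_STEAL_ALIGNMENT_BITS above.
 */
#include <asm/msr.h>
#include <asm/page.h>

static struct kvm_steal_time example_steal_time
	__attribute__((aligned(1 << (KVM_STEAL_ALIGNMENT_BITS + 1))));

static void example_enable_steal_time(void)
{
	/* Publish the guest-physical address of the record and set the
	 * enable bit; the host then accumulates stolen time into 'steal'.
	 */
	wrmsrl(MSR_KVM_STEAL_TIME, __pa(&example_steal_time) | KVM_MSR_ENABLED);
}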
#define KVM_MAX_MMU_OP_BATCH 32

#define KVM_ASYNC_PF_ENABLED (1 << 0)
#define KVM_ASYNC_PF_SEND_ALWAYS (1 << 1)

/* Operations for KVM_HC_MMU_OP */
#define KVM_MMU_OP_WRITE_PTE 1
#define KVM_MMU_OP_FLUSH_TLB 2
#define KVM_MMU_OP_RELEASE_PT 3

/* Payload for KVM_HC_MMU_OP */
struct kvm_mmu_op_header {
	__u32 op;
	__u32 pad;
};

struct kvm_mmu_op_write_pte {
	struct kvm_mmu_op_header header;
	__u64 pte_phys;
	__u64 pte_val;
};

struct kvm_mmu_op_flush_tlb {
	struct kvm_mmu_op_header header;
};

struct kvm_mmu_op_release_pt {
	struct kvm_mmu_op_header header;
	__u64 pt_phys;
};
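
/* Illustrative sketch (not part of this header): filling in a write-PTE
 * payload for the (deprecated) KVM_HC_MMU_OP hypercall. Only the layout of
 * the structures above is used here; how the buffer is actually handed to
 * the host (hypercall argument order, batching up to KVM_MAX_MMU_OP_BATCH)
 * is not shown and would need to be taken from the hypercall documentation.
 */
static inline void example_fill_write_pte(struct kvm_mmu_op_write_pte *wpte,
		__u64 pte_phys, __u64 pte_val)
{
	wpte->header.op = KVM_MMU_OP_WRITE_PTE;	/* requested operation */
	wpte->header.pad = 0;
	wpte->pte_phys = pte_phys;		/* physical address of the PTE */
	wpte->pte_val = pte_val;		/* value to be written */
}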
#define KVM_PV_REASON_PAGE_NOT_PRESENT 1
#define KVM_PV_REASON_PAGE_READY 2

struct kvm_vcpu_pv_apf_data {
	__u32 reason;
	__u8 pad[60];
	__u32 enabled;
};
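
/* Illustrative sketch (not part of this header): enabling asynchronous page
 * faults and consuming the per-vCPU reason word. The example_ names are made
 * up; kernel context is assumed (wrmsrl() from <asm/msr.h>, __pa() from
 * <asm/page.h>).
 */
#include <asm/msr.h>
#include <asm/page.h>

static struct kvm_vcpu_pv_apf_data example_apf_data;

static void example_enable_async_pf(void)
{
	/* Tell the host where to post notifications and turn the mechanism
	 * on; KVM_ASYNC_PF_SEND_ALWAYS would additionally allow delivery
	 * while the vCPU runs in kernel mode.
	 */
	wrmsrl(MSR_KVM_ASYNC_PF_EN,
			__pa(&example_apf_data) | KVM_ASYNC_PF_ENABLED);
}

static __u32 example_read_apf_reason(void)
{
	/* In the fault path the guest reads and clears 'reason' to tell a
	 * KVM_PV_REASON_PAGE_NOT_PRESENT / KVM_PV_REASON_PAGE_READY event
	 * apart from an ordinary page fault.
	 */
	__u32 reason = example_apf_data.reason;

	example_apf_data.reason = 0;
	return reason;
}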
#ifdef __KERNEL__
#include <asm/processor.h>

extern void kvmclock_init(void);
extern int kvm_register_clock(char *txt);

/* This instruction is vmcall. On non-VT architectures, it will generate a
 * trap that we will then rewrite to the appropriate instruction.
 */
#define KVM_HYPERCALL ".byte 0x0f,0x01,0xc1"

/* For KVM hypercalls, a three-byte sequence of either the vmcall or the
 * vmmcall instruction. The hypervisor may replace it with something else,
 * but only these instructions are guaranteed to be supported.
 *
 * Up to four arguments may be passed in rbx, rcx, rdx, and rsi respectively.
 * The hypercall number should be placed in rax and the return value will be
 * placed in rax. No other registers will be clobbered unless explicitly
 * noted by the particular hypercall.
 */
static inline long kvm_hypercall0(unsigned int nr)
{
	long ret;
	asm volatile(KVM_HYPERCALL
		: "=a"(ret)
		: "a"(nr)
		: "memory");
	return ret;
}

static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
{
	long ret;
	asm volatile(KVM_HYPERCALL
		: "=a"(ret)
		: "a"(nr), "b"(p1)
		: "memory");
	return ret;
}

static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
		unsigned long p2)
{
	long ret;
	asm volatile(KVM_HYPERCALL
		: "=a"(ret)
		: "a"(nr), "b"(p1), "c"(p2)
		: "memory");
	return ret;
}

static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
		unsigned long p2, unsigned long p3)
{
	long ret;
	asm volatile(KVM_HYPERCALL
		: "=a"(ret)
		: "a"(nr), "b"(p1), "c"(p2), "d"(p3)
		: "memory");
	return ret;
}

static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
		unsigned long p2, unsigned long p3,
		unsigned long p4)
{
	long ret;
	asm volatile(KVM_HYPERCALL
		: "=a"(ret)
		: "a"(nr), "b"(p1), "c"(p2), "d"(p3), "S"(p4)
		: "memory");
	return ret;
}
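
/* Illustrative sketch (not part of this header): using the wrappers above.
 * KVM_HC_VAPIC_POLL_IRQ is assumed to be available from
 * include/linux/kvm_para.h; the example_ name is made up. Per the calling
 * convention documented above, the hypercall number goes in rax and the
 * host's return value comes back in rax.
 */
static inline long example_vapic_poll_irq(void)
{
	return kvm_hypercall0(KVM_HC_VAPIC_POLL_IRQ);
}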

static inline int kvm_para_available(void)
{
	unsigned int eax, ebx, ecx, edx;
	char signature[13];

	if (boot_cpu_data.cpuid_level < 0)
		return 0;	/* So we don't blow up on old processors */

	cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx);
	memcpy(signature + 0, &ebx, 4);
	memcpy(signature + 4, &ecx, 4);
	memcpy(signature + 8, &edx, 4);
	signature[12] = 0;

	if (strcmp(signature, "KVMKVMKVM") == 0)
		return 1;

	return 0;
}

static inline unsigned int kvm_arch_para_features(void)
{
	return cpuid_eax(KVM_CPUID_FEATURES);
}
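
/* Illustrative sketch (not part of this header): how these two helpers are
 * typically combined to test for a single feature bit, along the lines of
 * kvm_para_has_feature() in include/linux/kvm_para.h. The example_ name is
 * made up for illustration.
 */
static inline int example_kvm_para_has_feature(unsigned int feature)
{
	if (!kvm_para_available())
		return 0;
	return !!(kvm_arch_para_features() & (1UL << feature));
}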

#ifdef CONFIG_KVM_GUEST
void __init kvm_guest_init(void);
void kvm_async_pf_task_wait(u32 token);
void kvm_async_pf_task_wake(u32 token);
u32 kvm_read_and_reset_pf_reason(void);
extern void kvm_disable_steal_time(void);
#else
#define kvm_guest_init() do { } while (0)
#define kvm_async_pf_task_wait(T) do {} while (0)
#define kvm_async_pf_task_wake(T) do {} while (0)

static inline u32 kvm_read_and_reset_pf_reason(void)
{
	return 0;
}

static inline void kvm_disable_steal_time(void)
{
	return;
}
#endif

#endif /* __KERNEL__ */
#endif /* _ASM_X86_KVM_PARA_H */