cpuid.h

#ifndef ARCH_X86_KVM_CPUID_H
#define ARCH_X86_KVM_CPUID_H

#include "x86.h"
#include <asm/cpu.h>
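
/*
 * Helpers for inspecting the guest's CPUID tables, as configured by
 * userspace through the KVM_SET_CPUID/KVM_SET_CPUID2 ioctls.  The
 * guest_cpuid_has_*() predicates below report what the guest has been
 * told it can use, which is not necessarily what the host CPU supports.
 */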

int kvm_update_cpuid(struct kvm_vcpu *vcpu);
bool kvm_mpx_supported(void);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index);
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type);
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries);
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);
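
/*
 * vcpu->arch.maxphyaddr caches the guest's physical address width.  It is
 * recomputed by kvm_update_cpuid() via cpuid_query_maxphyaddr(), which
 * derives it from CPUID leaf 0x80000008 (falling back to a default when
 * that leaf is absent), so the accessor below is just a field read.
 */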

static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.maxphyaddr;
}
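
/*
 * The guest_cpuid_has_*() helpers below all follow the same pattern: look
 * up the relevant CPUID leaf with kvm_find_cpuid_entry() and test the
 * feature's bit in the appropriate output register.  A missing leaf means
 * the feature is not exposed.  Typical caller (a sketch of the kind of
 * check done in x86.c when validating a CR4 write, not code from this
 * file):
 *
 *	if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE))
 *		return 1;
 */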

static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	/* XSAVE can't be emulated; don't report it unless the host has it. */
	if (!static_cpu_has(X86_FEATURE_XSAVE))
		return false;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->ecx & bit(X86_FEATURE_XSAVE));
}

static inline bool guest_cpuid_has_mtrr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->edx & bit(X86_FEATURE_MTRR));
}
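
/*
 * The next several features live in the structured extended feature leaf,
 * CPUID.(EAX=7,ECX=0): TSC_ADJUST, SMEP, SMAP and FSGSBASE are EBX bits,
 * while PKU is an ECX bit.
 */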

static inline bool guest_cpuid_has_tsc_adjust(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_TSC_ADJUST));
}

static inline bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_SMEP));
}

static inline bool guest_cpuid_has_smap(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_SMAP));
}

static inline bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_FSGSBASE));
}

static inline bool guest_cpuid_has_pku(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ecx & bit(X86_FEATURE_PKU));
}
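
/*
 * Long mode (LM), OSVW, GBPAGES and RDTSCP come from the extended leaf
 * 0x80000001; PCID and X2APIC, interleaved below, are basic leaf 1 ECX
 * bits.
 */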

static inline bool guest_cpuid_has_longmode(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
	return best && (best->edx & bit(X86_FEATURE_LM));
}

static inline bool guest_cpuid_has_osvw(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
	return best && (best->ecx & bit(X86_FEATURE_OSVW));
}

static inline bool guest_cpuid_has_pcid(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->ecx & bit(X86_FEATURE_PCID));
}

static inline bool guest_cpuid_has_x2apic(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->ecx & bit(X86_FEATURE_X2APIC));
}
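
/*
 * CPUID leaf 0 returns the vendor string in EBX/EDX/ECX; EBX alone holds
 * the first four bytes ("Auth" of "AuthenticAMD"), which is enough to
 * identify an AMD guest.
 */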

static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0, 0);
	return best && best->ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx;
}

static inline bool guest_cpuid_has_gbpages(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
	return best && (best->edx & bit(X86_FEATURE_GBPAGES));
}

static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_RTM));
}

static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_MPX));
}

static inline bool guest_cpuid_has_rdtscp(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
	return best && (best->edx & bit(X86_FEATURE_RDTSCP));
}
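
/*
 * The speculation-control features can be enumerated two ways: AMD
 * defines bits in CPUID 0x80000008.EBX, while Intel uses
 * CPUID.(EAX=7,ECX=0).EDX.  The helpers below check both enumerations
 * and treat either as sufficient.
 */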

static inline bool guest_cpuid_has_ibpb(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
	if (best && (best->ebx & bit(X86_FEATURE_AMD_IBPB)))
		return true;
	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->edx & bit(X86_FEATURE_SPEC_CTRL));
}

static inline bool guest_cpuid_has_spec_ctrl(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
	if (best && (best->ebx & bit(X86_FEATURE_AMD_IBRS)))
		return true;
	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->edx & (bit(X86_FEATURE_SPEC_CTRL) |
				     bit(X86_FEATURE_SPEC_CTRL_SSBD)));
}

static inline bool guest_cpuid_has_arch_capabilities(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->edx & bit(X86_FEATURE_ARCH_CAPABILITIES));
}

static inline bool guest_cpuid_has_virt_ssbd(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
	return best && (best->ebx & bit(X86_FEATURE_VIRT_SSBD));
}
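
/*
 * These predicates typically gate guest MSR access.  A sketch of the kind
 * of check the vendor code (vmx.c/svm.c) performs when the guest reads
 * MSR_IA32_SPEC_CTRL (simplified, hypothetical caller):
 *
 *	case MSR_IA32_SPEC_CTRL:
 *		if (!msr_info->host_initiated &&
 *		    !guest_cpuid_has_spec_ctrl(vcpu))
 *			return 1;
 *		msr_info->data = to_vmx(vcpu)->spec_ctrl;
 *		break;
 */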

/*
 * NRIPS is provided through CPUID function 0x8000000a, EDX bit 3.
 */
#define BIT_NRIPS	3

static inline bool guest_cpuid_has_nrips(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x8000000a, 0);

	/*
	 * NRIPS is a scattered cpuid feature, so we can't use
	 * X86_FEATURE_NRIPS here (X86_FEATURE_NRIPS would be bit
	 * position 8, not 3).
	 */
	return best && (best->edx & bit(BIT_NRIPS));
}
#undef BIT_NRIPS
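
/*
 * Family/model/stepping are decoded from the guest's leaf-1 EAX signature
 * by the x86_family()/x86_model()/x86_stepping() helpers from <asm/cpu.h>
 * (hence the include at the top of this header); -1 means the guest has
 * no leaf-1 entry at all.
 */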

static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_family(best->eax);
}

static inline int guest_cpuid_model(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_model(best->eax);
}

static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_stepping(best->eax);
}

#endif /* ARCH_X86_KVM_CPUID_H */