kvm_cache_regs.h

#ifndef ASM_KVM_CACHE_REGS_H
#define ASM_KVM_CACHE_REGS_H
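
/*
 * Lazy register cache for x86 KVM vCPUs.  Guest register values live in
 * vcpu->arch.regs and are fetched from hardware (e.g. the VMCS on VMX)
 * only on demand.  Two bitmaps track the cache state: regs_avail marks
 * registers whose cached value is current, and regs_dirty marks
 * registers that must be written back to hardware before the next
 * guest entry.  The macros below list the CR0/CR4 bits that KVM may
 * let the guest own, i.e. modify without triggering a VM exit.  This
 * header expects to be included from x86 KVM code, after struct
 * kvm_vcpu and kvm_x86_ops have been declared.
 */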
#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
#define KVM_POSSIBLE_CR4_GUEST_BITS				  \
	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
	 | X86_CR4_OSXMMEXCPT | X86_CR4_PGE)
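
/*
 * Read a general-purpose register through the cache.  If the value is
 * not yet available (its regs_avail bit is clear), ask the vendor
 * module (vmx/svm) to fetch it from hardware first.
 */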
static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
					      enum kvm_reg reg)
{
	if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))
		kvm_x86_ops->cache_reg(vcpu, reg);

	return vcpu->arch.regs[reg];
}
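
/*
 * Write a general-purpose register through the cache.  The value only
 * lands in vcpu->arch.regs here; setting the dirty (and avail) bits
 * tells the vendor module to flush it to hardware before the guest
 * runs again.
 */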
static inline void kvm_register_write(struct kvm_vcpu *vcpu,
				      enum kvm_reg reg,
				      unsigned long val)
{
	vcpu->arch.regs[reg] = val;
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}
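
/* Convenience wrappers for reading and writing the guest RIP. */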
static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write(vcpu, VCPU_REGS_RIP, val);
}
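
/*
 * Example (hypothetical caller, not part of this header): an emulator
 * that knows the length of the current instruction could advance the
 * guest past it like this, with the write-back to hardware deferred
 * until the next guest entry:
 *
 *	kvm_rip_write(vcpu, kvm_rip_read(vcpu) + insn_len);
 */

/*
 * Read one of the four PDPTRs (page-directory-pointer-table entries)
 * used by PAE paging.  Filling the cache may involve reading guest
 * memory, which can sleep, hence the might_sleep() note for svm.
 */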
static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
	might_sleep();  /* on svm */

	if (!test_bit(VCPU_EXREG_PDPTR,
		      (unsigned long *)&vcpu->arch.regs_avail))
		kvm_x86_ops->cache_reg(vcpu, VCPU_EXREG_PDPTR);

	return vcpu->arch.walk_mmu->pdptrs[index];
}
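
/*
 * Variant that bypasses the register cache: reload the PDPTRs from the
 * CR3 of the given MMU context and return the requested entry.
 */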
static inline u64 kvm_pdptr_read_mmu(struct kvm_vcpu *vcpu,
				     struct kvm_mmu *mmu, int index)
{
	load_pdptrs(vcpu, mmu, mmu->get_cr3(vcpu));
	return mmu->pdptrs[index];
}
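
/*
 * Control register bits that the guest owns can change without a VM
 * exit, so the cached value may be stale.  When the caller asks for
 * such a bit, have the vendor module re-read (decache) it from
 * hardware before answering.
 */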
static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
	if (tmask & vcpu->arch.cr0_guest_owned_bits)
		kvm_x86_ops->decache_cr0_guest_bits(vcpu);
	return vcpu->arch.cr0 & mask;
}

static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, ~0UL);
}

static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
	if (tmask & vcpu->arch.cr4_guest_owned_bits)
		kvm_x86_ops->decache_cr4_guest_bits(vcpu);
	return vcpu->arch.cr4 & mask;
}
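
/*
 * CR3 is cached like a register: if the cached value may be stale (its
 * avail bit is clear), have the vendor module pull the current value
 * from hardware first.
 */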
static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
	if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
		kvm_x86_ops->decache_cr3(vcpu);
	return vcpu->arch.cr3;
}

static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, ~0UL);
}
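
/*
 * Assemble the 64-bit EDX:EAX pair used by instructions such as RDTSC
 * and RDMSR; "& -1u" masks each register down to its low 32 bits.
 */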
static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
{
	return (kvm_register_read(vcpu, VCPU_REGS_RAX) & -1u)
		| ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32);
}
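
/*
 * "Guest mode" here refers to nested virtualization: the vCPU is
 * running an L2 guest on behalf of a guest hypervisor, tracked by the
 * HF_GUEST_MASK flag in hflags.
 */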
static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags |= HF_GUEST_MASK;
}

static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags &= ~HF_GUEST_MASK;
}

static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_GUEST_MASK;
}

#endif