/*
 * mmu_audit.c:
 *
 * Audit code for KVM MMU
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay       <yaniv@qumranet.com>
 *   Avi Kivity        <avi@qumranet.com>
 *   Marcelo Tosatti   <mtosatti@redhat.com>
 *   Xiao Guangrong    <xiaoguangrong@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/ratelimit.h>
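
/*
 * Every audit message is prefixed with the name of the audit point
 * currently recorded in kvm->arch.audit_point, so the log shows which
 * MMU event (e.g. AUDIT_POST_SYNC) triggered the complaint.
 */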
#define audit_printk(kvm, fmt, args...)         \
        printk(KERN_ERR "audit: (%s) error: "   \
               fmt, audit_point_name[kvm->arch.audit_point], ##args)

typedef void (*inspect_spte_fn) (struct kvm_vcpu *vcpu, u64 *sptep, int level);
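
/*
 * Recursively visit every spte under @sp: call @fn on each entry, then
 * descend into the child shadow page of any entry that is present but
 * not a last-level mapping.
 */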
static void __mmu_spte_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                            inspect_spte_fn fn, int level)
{
        int i;

        for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
                u64 *ent = sp->spt;

                fn(vcpu, ent + i, level);

                if (is_shadow_present_pte(ent[i]) &&
                    !is_last_spte(ent[i], level)) {
                        struct kvm_mmu_page *child;

                        child = page_header(ent[i] & PT64_BASE_ADDR_MASK);
                        __mmu_spte_walk(vcpu, child, fn, level - 1);
                }
        }
}
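
/*
 * Walk every spte reachable from @vcpu's paging structures: either the
 * single 4-level root, or, with PAE paging, each of the four
 * page-directory roots (walked from level 2 down).
 */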
static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
{
        int i;
        struct kvm_mmu_page *sp;

        if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
                return;

        if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
                hpa_t root = vcpu->arch.mmu.root_hpa;

                sp = page_header(root);
                __mmu_spte_walk(vcpu, sp, fn, PT64_ROOT_LEVEL);
                return;
        }

        for (i = 0; i < 4; ++i) {
                hpa_t root = vcpu->arch.mmu.pae_root[i];

                if (root && VALID_PAGE(root)) {
                        root &= PT64_BASE_ADDR_MASK;
                        sp = page_header(root);
                        __mmu_spte_walk(vcpu, sp, fn, 2);
                }
        }
}
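
/* Apply @fn to every shadow page on the VM's active_mmu_pages list. */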
typedef void (*sp_handler) (struct kvm *kvm, struct kvm_mmu_page *sp);

static void walk_all_active_sps(struct kvm *kvm, sp_handler fn)
{
        struct kvm_mmu_page *sp;

        list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link)
                fn(kvm, sp);
}
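
/*
 * Check that a present last-level spte maps the host physical address
 * that its gfn currently resolves to. Along the way, flag sptes the
 * code considers illegal: a non-last-level spte inside an unsync page,
 * and a "notrap" nonpresent spte inside an unsync or direct page.
 */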
static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
        struct kvm_mmu_page *sp;
        gfn_t gfn;
        pfn_t pfn;
        hpa_t hpa;

        sp = page_header(__pa(sptep));

        if (sp->unsync) {
                if (level != PT_PAGE_TABLE_LEVEL) {
                        audit_printk(vcpu->kvm, "unsync sp: %p "
                                     "level = %d\n", sp, level);
                        return;
                }

                if (*sptep == shadow_notrap_nonpresent_pte) {
                        audit_printk(vcpu->kvm, "notrap spte in unsync "
                                     "sp: %p\n", sp);
                        return;
                }
        }

        if (sp->role.direct && *sptep == shadow_notrap_nonpresent_pte) {
                audit_printk(vcpu->kvm, "notrap spte in direct sp: %p\n",
                             sp);
                return;
        }

        if (!is_shadow_present_pte(*sptep) || !is_last_spte(*sptep, level))
                return;

        gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
        pfn = gfn_to_pfn_atomic(vcpu->kvm, gfn);

        if (is_error_pfn(pfn)) {
                kvm_release_pfn_clean(pfn);
                return;
        }

        hpa = pfn << PAGE_SHIFT;
        if ((*sptep & PT64_BASE_ADDR_MASK) != hpa)
                audit_printk(vcpu->kvm, "levels %d pfn %llx hpa %llx "
                             "ent %llx\n", vcpu->arch.mmu.root_level, pfn,
                             hpa, *sptep);
}
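
/*
 * Verify that the spte is reachable through the rmap of the gfn it
 * maps; complain (rate-limited, with a stack dump) if the gfn has no
 * memslot or the rmap chain is empty.
 */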
static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
{
        unsigned long *rmapp;
        struct kvm_mmu_page *rev_sp;
        gfn_t gfn;

        rev_sp = page_header(__pa(sptep));
        gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);

        if (!gfn_to_memslot(kvm, gfn)) {
                if (!printk_ratelimit())
                        return;
                audit_printk(kvm, "no memslot for gfn %llx\n", gfn);
                audit_printk(kvm, "index %ld of sp (gfn=%llx)\n",
                             (long int)(sptep - rev_sp->spt), rev_sp->gfn);
                dump_stack();
                return;
        }

        rmapp = gfn_to_rmap(kvm, gfn, rev_sp->role.level);
        if (!*rmapp) {
                if (!printk_ratelimit())
                        return;
                audit_printk(kvm, "no rmap for writable spte %llx\n",
                             *sptep);
                dump_stack();
        }
}
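
/* Only present last-level sptes are expected to have rmap entries. */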
static void audit_sptes_have_rmaps(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
        if (is_shadow_present_pte(*sptep) && is_last_spte(*sptep, level))
                inspect_spte_has_rmap(vcpu->kvm, sptep);
}
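
/*
 * After a root sync (AUDIT_POST_SYNC), no shadow page reachable from
 * the root should still be marked unsync.
 */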
static void audit_spte_after_sync(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
        struct kvm_mmu_page *sp = page_header(__pa(sptep));

        if (vcpu->kvm->arch.audit_point == AUDIT_POST_SYNC && sp->unsync)
                audit_printk(vcpu->kvm, "meet unsync sp(%p) after sync "
                             "root.\n", sp);
}
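
/* Run the rmap check on every rmap-tracked spte of a last-level page. */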
static void check_mappings_rmap(struct kvm *kvm, struct kvm_mmu_page *sp)
{
        int i;

        if (sp->role.level != PT_PAGE_TABLE_LEVEL)
                return;

        for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
                if (!is_rmap_spte(sp->spt[i]))
                        continue;

                inspect_spte_has_rmap(kvm, sp->spt + i);
        }
}
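
/*
 * A shadowed guest page table must be write-protected: walk the rmap
 * of sp->gfn and report any spte that still maps it writable. Direct,
 * unsync and invalid pages are exempt from this rule.
 */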
static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
{
        struct kvm_memory_slot *slot;
        unsigned long *rmapp;
        u64 *spte;

        if (sp->role.direct || sp->unsync || sp->role.invalid)
                return;

        slot = gfn_to_memslot(kvm, sp->gfn);
        rmapp = &slot->rmap[sp->gfn - slot->base_gfn];

        spte = rmap_next(kvm, rmapp, NULL);
        while (spte) {
                if (is_writable_pte(*spte))
                        audit_printk(kvm, "shadow page has writable "
                                     "mappings: gfn %llx role %x\n",
                                     sp->gfn, sp->role.word);
                spte = rmap_next(kvm, rmapp, spte);
        }
}
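
/* Per shadow-page checks, applied to every page on the active list. */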
static void audit_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
        check_mappings_rmap(kvm, sp);
        audit_write_protection(kvm, sp);
}

static void audit_all_active_sps(struct kvm *kvm)
{
        walk_all_active_sps(kvm, audit_sp);
}
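
/* Per-spte checks, applied to every spte reachable from the vcpu's roots. */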
static void audit_spte(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
        audit_sptes_have_rmaps(vcpu, sptep, level);
        audit_mappings(vcpu, sptep, level);
        audit_spte_after_sync(vcpu, sptep, level);
}

static void audit_vcpu_spte(struct kvm_vcpu *vcpu)
{
        mmu_spte_walk(vcpu, audit_spte);
}
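
/*
 * Probe attached to the kvm_mmu_audit tracepoint. A full audit walks
 * every active shadow page and every reachable spte, which is
 * expensive, so it is rate-limited to at most 10 runs per 5 seconds.
 */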
static void kvm_mmu_audit(void *ignore, struct kvm_vcpu *vcpu, int point)
{
        static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

        if (!__ratelimit(&ratelimit_state))
                return;

        vcpu->kvm->arch.audit_point = point;
        audit_all_active_sps(vcpu->kvm);
        audit_vcpu_spte(vcpu);
}
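
/*
 * Auditing is switched on and off by (un)registering the tracepoint
 * probe; mmu_audit tracks the current state so enable/disable are
 * idempotent. Disabling waits for in-flight probes to finish before
 * returning.
 */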
static bool mmu_audit;

static void mmu_audit_enable(void)
{
        int ret;

        if (mmu_audit)
                return;

        ret = register_trace_kvm_mmu_audit(kvm_mmu_audit, NULL);
        WARN_ON(ret);

        mmu_audit = true;
}

static void mmu_audit_disable(void)
{
        if (!mmu_audit)
                return;

        unregister_trace_kvm_mmu_audit(kvm_mmu_audit, NULL);
        tracepoint_synchronize_unregister();
        mmu_audit = false;
}
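
/* Module-parameter setter: "1" enables auditing, "0" disables it. */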
static int mmu_audit_set(const char *val, const struct kernel_param *kp)
{
        int ret;
        unsigned long enable;

        ret = strict_strtoul(val, 10, &enable);
        if (ret < 0)
                return -EINVAL;

        switch (enable) {
        case 0:
                mmu_audit_disable();
                break;
        case 1:
                mmu_audit_enable();
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static struct kernel_param_ops audit_param_ops = {
        .set = mmu_audit_set,
        .get = param_get_bool,
};

module_param_cb(mmu_audit, &audit_param_ops, &mmu_audit, 0644);
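
/*
 * The parameter is writable at runtime, e.g. (path assuming this file
 * is built into kvm.ko):
 *
 *      echo 1 > /sys/module/kvm/parameters/mmu_audit
 */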