page_track.c

/*
 * Support KVM guest page tracking
 *
 * This feature allows us to track page access in the guest. Currently, only
 * write access is tracked.
 *
 * Copyright(C) 2015 Intel Corporation.
 *
 * Author:
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
#include <linux/kvm_host.h>
#include <asm/kvm_host.h>
#include <asm/kvm_page_track.h>

#include "mmu.h"

void kvm_page_track_free_memslot(struct kvm_memory_slot *free,
				 struct kvm_memory_slot *dont)
{
	int i;

	for (i = 0; i < KVM_PAGE_TRACK_MAX; i++)
		if (!dont || free->arch.gfn_track[i] !=
		      dont->arch.gfn_track[i]) {
			kvfree(free->arch.gfn_track[i]);
			free->arch.gfn_track[i] = NULL;
		}
}

int kvm_page_track_create_memslot(struct kvm_memory_slot *slot,
				  unsigned long npages)
{
	int i;

	/* Allocate one tracking counter per guest page for each track mode. */
	for (i = 0; i < KVM_PAGE_TRACK_MAX; i++) {
		slot->arch.gfn_track[i] = kvm_kvzalloc(npages *
					    sizeof(*slot->arch.gfn_track[i]));
		if (!slot->arch.gfn_track[i])
			goto track_free;
	}

	return 0;

track_free:
	kvm_page_track_free_memslot(slot, NULL);
	return -ENOMEM;
}

static inline bool page_track_mode_is_valid(enum kvm_page_track_mode mode)
{
	if (mode < 0 || mode >= KVM_PAGE_TRACK_MAX)
		return false;

	return true;
}

/*
 * Adjust the tracking counter of @gfn for @mode by @count (which may be
 * negative), warning if the result would underflow or overflow the counter.
 */
static void update_gfn_track(struct kvm_memory_slot *slot, gfn_t gfn,
			     enum kvm_page_track_mode mode, short count)
{
	int index, val;

	index = gfn_to_index(gfn, slot->base_gfn, PT_PAGE_TABLE_LEVEL);

	val = slot->arch.gfn_track[mode][index];

	if (WARN_ON(val + count < 0 || val + count > USHRT_MAX))
		return;

	slot->arch.gfn_track[mode][index] += count;
}
/*
 * Add the guest page to the tracking pool so that corresponding access on
 * that page will be intercepted.
 *
 * It should be called under the protection of both the mmu-lock and
 * kvm->srcu or kvm->slots_lock.
 *
 * @kvm: the guest instance we are interested in.
 * @slot: the memslot that @gfn belongs to.
 * @gfn: the guest page.
 * @mode: tracking mode, currently only write track is supported.
 */
void kvm_slot_page_track_add_page(struct kvm *kvm,
				  struct kvm_memory_slot *slot, gfn_t gfn,
				  enum kvm_page_track_mode mode)
{
	if (WARN_ON(!page_track_mode_is_valid(mode)))
		return;

	update_gfn_track(slot, gfn, mode, 1);

	/*
	 * new track stops large page mapping for the
	 * tracked page.
	 */
	kvm_mmu_gfn_disallow_lpage(slot, gfn);

	if (mode == KVM_PAGE_TRACK_WRITE)
		if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn))
			kvm_flush_remote_tlbs(kvm);
}
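/*
 * Illustrative sketch (not part of the original file): one way a caller
 * could write-track a single guest page.  The helper name and the memslot
 * lookup below are assumptions for illustration; the requirement stated
 * above is only that the mmu-lock plus kvm->srcu (or kvm->slots_lock) be
 * held around the call.  Guarded out so it does not affect the build.
 */
#if 0
static void example_track_write_access(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;
	int idx;

	idx = srcu_read_lock(&kvm->srcu);

	slot = gfn_to_memslot(kvm, gfn);
	if (slot) {
		spin_lock(&kvm->mmu_lock);
		kvm_slot_page_track_add_page(kvm, slot, gfn,
					     KVM_PAGE_TRACK_WRITE);
		spin_unlock(&kvm->mmu_lock);
	}

	srcu_read_unlock(&kvm->srcu, idx);
}
#endif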
/*
 * Remove the guest page from the tracking pool, which stops the interception
 * of corresponding access on that page. It is the opposite operation of
 * kvm_slot_page_track_add_page().
 *
 * It should be called under the protection of both the mmu-lock and
 * kvm->srcu or kvm->slots_lock.
 *
 * @kvm: the guest instance we are interested in.
 * @slot: the memslot that @gfn belongs to.
 * @gfn: the guest page.
 * @mode: tracking mode, currently only write track is supported.
 */
void kvm_slot_page_track_remove_page(struct kvm *kvm,
				     struct kvm_memory_slot *slot, gfn_t gfn,
				     enum kvm_page_track_mode mode)
{
	if (WARN_ON(!page_track_mode_is_valid(mode)))
		return;

	update_gfn_track(slot, gfn, mode, -1);

	/*
	 * allow large page mapping for the tracked page
	 * after the tracker is gone.
	 */
	kvm_mmu_gfn_allow_lpage(slot, gfn);
}
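/*
 * Illustrative sketch (not part of the original file): the matching untrack
 * path for the example above, again under srcu and the mmu-lock.  Because
 * the track mode keeps a counter per gfn, add/remove calls must be balanced.
 * Guarded out so it does not affect the build.
 */
#if 0
static void example_untrack_write_access(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;
	int idx;

	idx = srcu_read_lock(&kvm->srcu);

	slot = gfn_to_memslot(kvm, gfn);
	if (slot) {
		spin_lock(&kvm->mmu_lock);
		kvm_slot_page_track_remove_page(kvm, slot, gfn,
						KVM_PAGE_TRACK_WRITE);
		spin_unlock(&kvm->mmu_lock);
	}

	srcu_read_unlock(&kvm->srcu, idx);
}
#endif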
/*
 * Check if the corresponding access on the specified guest page is tracked.
 */
bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn,
			      enum kvm_page_track_mode mode)
{
	struct kvm_memory_slot *slot;
	int index;

	if (WARN_ON(!page_track_mode_is_valid(mode)))
		return false;

	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
	if (!slot)
		return false;

	index = gfn_to_index(gfn, slot->base_gfn, PT_PAGE_TABLE_LEVEL);
	return !!ACCESS_ONCE(slot->arch.gfn_track[mode][index]);
}
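/*
 * Illustrative sketch (not part of the original file): how a page-fault
 * handler could use the check above.  A write fault that hits a
 * write-tracked gfn cannot simply be fixed up by mapping the page writable;
 * the caller would emulate the access instead so that the trackers observe
 * the new contents.  The helper name and signature are assumptions.
 * Guarded out so it does not affect the build.
 */
#if 0
static bool example_fault_needs_emulation(struct kvm_vcpu *vcpu, gfn_t gfn,
					  bool write_fault)
{
	return write_fault &&
	       kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE);
}
#endif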
void kvm_page_track_cleanup(struct kvm *kvm)
{
	struct kvm_page_track_notifier_head *head;

	head = &kvm->arch.track_notifier_head;
	cleanup_srcu_struct(&head->track_srcu);
}

void kvm_page_track_init(struct kvm *kvm)
{
	struct kvm_page_track_notifier_head *head;

	head = &kvm->arch.track_notifier_head;
	init_srcu_struct(&head->track_srcu);
	INIT_HLIST_HEAD(&head->track_notifier_list);
}

/*
 * Register the notifier so that events for the tracked guest pages can be
 * received.
 */
void
kvm_page_track_register_notifier(struct kvm *kvm,
				 struct kvm_page_track_notifier_node *n)
{
	struct kvm_page_track_notifier_head *head;

	head = &kvm->arch.track_notifier_head;

	spin_lock(&kvm->mmu_lock);
	hlist_add_head_rcu(&n->node, &head->track_notifier_list);
	spin_unlock(&kvm->mmu_lock);
}

/*
 * Stop receiving event notifications. It is the opposite operation of
 * kvm_page_track_register_notifier().
 */
void
kvm_page_track_unregister_notifier(struct kvm *kvm,
				   struct kvm_page_track_notifier_node *n)
{
	struct kvm_page_track_notifier_head *head;

	head = &kvm->arch.track_notifier_head;

	spin_lock(&kvm->mmu_lock);
	hlist_del_rcu(&n->node);
	spin_unlock(&kvm->mmu_lock);
	synchronize_srcu(&head->track_srcu);
}
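/*
 * Illustrative sketch (not part of the original file): a minimal notifier
 * user.  The node carries a callback that is invoked from
 * kvm_page_track_write() below; the names here are assumptions.  Because
 * unregistration ends with synchronize_srcu(), it returns only once any
 * in-flight callbacks have completed.  Guarded out so it does not affect
 * the build.
 */
#if 0
static void example_track_write_cb(struct kvm_vcpu *vcpu, gpa_t gpa,
				   const u8 *new, int bytes)
{
	/* React to an emulated write that hit a tracked page. */
}

static struct kvm_page_track_notifier_node example_node = {
	.track_write	= example_track_write_cb,
};

static void example_start_listening(struct kvm *kvm)
{
	kvm_page_track_register_notifier(kvm, &example_node);
}

static void example_stop_listening(struct kvm *kvm)
{
	kvm_page_track_unregister_notifier(kvm, &example_node);
}
#endif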
/*
 * Notify the nodes that write access has been intercepted and write
 * emulation has finished at this point.
 *
 * Each node has to figure out by itself whether the written page is one it
 * is interested in.
 */
void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
			  int bytes)
{
	struct kvm_page_track_notifier_head *head;
	struct kvm_page_track_notifier_node *n;
	int idx;

	head = &vcpu->kvm->arch.track_notifier_head;

	if (hlist_empty(&head->track_notifier_list))
		return;

	idx = srcu_read_lock(&head->track_srcu);
	hlist_for_each_entry_rcu(n, &head->track_notifier_list, node)
		if (n->track_write)
			n->track_write(vcpu, gpa, new, bytes);
	srcu_read_unlock(&head->track_srcu, idx);
}
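/*
 * Illustrative sketch (not part of the original file): roughly how an
 * emulated write path is expected to drive the notification above: commit
 * the data to guest memory first, then report what was written and where.
 * This is a simplified approximation of the x86 emulator's write path, not
 * a copy of it.  Guarded out so it does not affect the build.
 */
#if 0
static void example_emulated_write(struct kvm_vcpu *vcpu, gpa_t gpa,
				   const void *data, int bytes)
{
	if (kvm_vcpu_write_guest(vcpu, gpa, data, bytes))
		return;

	kvm_page_track_write(vcpu, gpa, data, bytes);
}
#endif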