paging_tmpl.h

/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */
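/*
 * For orientation only: the usual way this template is instantiated (the
 * actual include site is arch/x86/kvm/mmu.c) looks roughly like:
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 */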
#if PTTYPE == 64
#define pt_element_t u64
#define guest_walker guest_walker64
#define FNAME(name) paging##64_##name
#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
#define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
#define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
#define PT_LEVEL_BITS PT64_LEVEL_BITS
#ifdef CONFIG_X86_64
#define PT_MAX_FULL_LEVELS 4
#define CMPXCHG cmpxchg
#else
#define CMPXCHG cmpxchg64
#define PT_MAX_FULL_LEVELS 2
#endif
#elif PTTYPE == 32
#define pt_element_t u32
#define guest_walker guest_walker32
#define FNAME(name) paging##32_##name
#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
#define PT_LVL_ADDR_MASK(lvl) PT32_LVL_ADDR_MASK(lvl)
#define PT_LVL_OFFSET_MASK(lvl) PT32_LVL_OFFSET_MASK(lvl)
#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
#define PT_LEVEL_BITS PT32_LEVEL_BITS
#define PT_MAX_FULL_LEVELS 2
#define CMPXCHG cmpxchg
#else
#error Invalid PTTYPE value
#endif

#define gpte_to_gfn_lvl FNAME(gpte_to_gfn_lvl)
#define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PT_PAGE_TABLE_LEVEL)
/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
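/*
 * For each level that was walked, table_gfn[] holds the gfn of the guest
 * page table, ptes[] the gpte read from it and pte_gpa[] its guest physical
 * address.  pt_access/pte_access accumulate the permissions seen along the
 * walk, gfn is the final translation, fault is filled in when the walk
 * fails, and prefetch_ptes[] caches neighbouring gptes for pte prefetch.
 */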
struct guest_walker {
	int level;
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];
	pt_element_t ptes[PT_MAX_FULL_LEVELS];
	pt_element_t prefetch_ptes[PTE_PREFETCH_NUM];
	gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
	unsigned pt_access;
	unsigned pte_access;
	gfn_t gfn;
	struct x86_exception fault;
};

static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
{
	return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
}
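/*
 * Atomically update a guest pte that lives in user memory; used below to
 * set the accessed and dirty bits.  Returns a negative value if the page
 * could not be pinned, 0 if the cmpxchg succeeded, and a positive value if
 * the pte changed under us (the caller then restarts the walk).
 */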
static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			       pt_element_t __user *ptep_user, unsigned index,
			       pt_element_t orig_pte, pt_element_t new_pte)
{
	int npages;
	pt_element_t ret;
	pt_element_t *table;
	struct page *page;

	npages = get_user_pages_fast((unsigned long)ptep_user, 1, 1, &page);
	/* Check if the user is doing something meaningless. */
	if (unlikely(npages != 1))
		return -EFAULT;

	table = kmap_atomic(page);
	ret = CMPXCHG(&table[index], orig_pte, new_pte);
	kunmap_atomic(table);

	kvm_release_page_dirty(page);

	return (ret != orig_pte);
}
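/*
 * Compute the ACC_* permissions granted by a guest pte.  A leaf ("last")
 * pte that is not yet dirty is reported without write permission, so the
 * shadow pte stays read-only until the dirty bit has been set.  For 64-bit
 * ptes, a set NX bit removes execute permission when the mmu has nx
 * enabled.
 */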
static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte,
				   bool last)
{
	unsigned access;

	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
	if (last && !is_dirty_gpte(gpte))
		access &= ~ACC_WRITE_MASK;

#if PTTYPE == 64
	if (vcpu->arch.mmu.nx)
		access &= ~(gpte >> PT64_NX_SHIFT);
#endif

	return access;
}
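/*
 * A gpte is "last" when it maps a page rather than another page table:
 * always at the 4k level; at the directory level when the PS bit is set
 * (and, for 32-bit paging, CR4.PSE is enabled); and at the PDPE level when
 * the PS bit is set under 4-level paging.
 */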
static bool FNAME(is_last_gpte)(struct guest_walker *walker,
				struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				pt_element_t gpte)
{
	if (walker->level == PT_PAGE_TABLE_LEVEL)
		return true;

	if ((walker->level == PT_DIRECTORY_LEVEL) && is_large_pte(gpte) &&
	    (PTTYPE == 64 || is_pse(vcpu)))
		return true;

	if ((walker->level == PT_PDPE_LEVEL) && is_large_pte(gpte) &&
	    (mmu->root_level == PT64_ROOT_LEVEL))
		return true;

	return false;
}
/*
 * Fetch a guest pte for a guest virtual address
 */
static int FNAME(walk_addr_generic)(struct guest_walker *walker,
				    struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				    gva_t addr, u32 access)
{
	pt_element_t pte;
	pt_element_t __user *uninitialized_var(ptep_user);
	gfn_t table_gfn;
	unsigned index, pt_access, uninitialized_var(pte_access);
	gpa_t pte_gpa;
	bool eperm, last_gpte;
	int offset;
	const int write_fault = access & PFERR_WRITE_MASK;
	const int user_fault = access & PFERR_USER_MASK;
	const int fetch_fault = access & PFERR_FETCH_MASK;
	u16 errcode = 0;

	trace_kvm_mmu_pagetable_walk(addr, write_fault, user_fault,
				     fetch_fault);
retry_walk:
	eperm = false;
	walker->level = mmu->root_level;
	pte = mmu->get_cr3(vcpu);

#if PTTYPE == 64
	if (walker->level == PT32E_ROOT_LEVEL) {
		pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3);
		trace_kvm_mmu_paging_element(pte, walker->level);
		if (!is_present_gpte(pte))
			goto error;
		--walker->level;
	}
#endif
	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
	       (mmu->get_cr3(vcpu) & CR3_NONPAE_RESERVED_BITS) == 0);

	pt_access = ACC_ALL;
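	/*
	 * Walk from the root towards the leaf, one guest page table per
	 * iteration.  Every gpte is read through its host virtual address;
	 * a failed read, a non-present gpte or reserved bits abort the walk,
	 * while losing a race when setting the accessed bit restarts it.
	 */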
	for (;;) {
		gfn_t real_gfn;
		unsigned long host_addr;

		index = PT_INDEX(addr, walker->level);

		table_gfn = gpte_to_gfn(pte);
		offset = index * sizeof(pt_element_t);
		pte_gpa = gfn_to_gpa(table_gfn) + offset;
		walker->table_gfn[walker->level - 1] = table_gfn;
		walker->pte_gpa[walker->level - 1] = pte_gpa;

		real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
					      PFERR_USER_MASK|PFERR_WRITE_MASK);
		if (unlikely(real_gfn == UNMAPPED_GVA))
			goto error;
		real_gfn = gpa_to_gfn(real_gfn);

		host_addr = gfn_to_hva(vcpu->kvm, real_gfn);
		if (unlikely(kvm_is_error_hva(host_addr)))
			goto error;

		ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
		if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
			goto error;

		trace_kvm_mmu_paging_element(pte, walker->level);

		if (unlikely(!is_present_gpte(pte)))
			goto error;

		if (unlikely(is_rsvd_bits_set(&vcpu->arch.mmu, pte,
					      walker->level))) {
			errcode |= PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
			goto error;
		}

		if (!check_write_user_access(vcpu, write_fault, user_fault,
					     pte))
			eperm = true;

#if PTTYPE == 64
		if (unlikely(fetch_fault && (pte & PT64_NX_MASK)))
			eperm = true;
#endif

		last_gpte = FNAME(is_last_gpte)(walker, vcpu, mmu, pte);
		if (last_gpte) {
			pte_access = pt_access &
				     FNAME(gpte_access)(vcpu, pte, true);
			/* check if the kernel is fetching from user page */
			if (unlikely(pte_access & PT_USER_MASK) &&
			    kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
				if (fetch_fault && !user_fault)
					eperm = true;
		}

		if (!eperm && unlikely(!(pte & PT_ACCESSED_MASK))) {
			int ret;
			trace_kvm_mmu_set_accessed_bit(table_gfn, index,
						       sizeof(pte));
			ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index,
						  pte, pte|PT_ACCESSED_MASK);
			if (unlikely(ret < 0))
				goto error;
			else if (ret)
				goto retry_walk;

			mark_page_dirty(vcpu->kvm, table_gfn);
			pte |= PT_ACCESSED_MASK;
		}

		walker->ptes[walker->level - 1] = pte;

		if (last_gpte) {
			int lvl = walker->level;
			gpa_t real_gpa;
			gfn_t gfn;
			u32 ac;

			gfn = gpte_to_gfn_lvl(pte, lvl);
			gfn += (addr & PT_LVL_OFFSET_MASK(lvl)) >> PAGE_SHIFT;

			if (PTTYPE == 32 &&
			    walker->level == PT_DIRECTORY_LEVEL &&
			    is_cpuid_PSE36())
				gfn += pse36_gfn_delta(pte);

			ac = write_fault | fetch_fault | user_fault;

			real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn),
						      ac);
			if (real_gpa == UNMAPPED_GVA)
				return 0;

			walker->gfn = real_gpa >> PAGE_SHIFT;

			break;
		}

		pt_access &= FNAME(gpte_access)(vcpu, pte, false);
		--walker->level;
	}

	if (unlikely(eperm)) {
		errcode |= PFERR_PRESENT_MASK;
		goto error;
	}

	if (write_fault && unlikely(!is_dirty_gpte(pte))) {
		int ret;

		trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
		ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index,
					  pte, pte|PT_DIRTY_MASK);
		if (unlikely(ret < 0))
			goto error;
		else if (ret)
			goto retry_walk;

		mark_page_dirty(vcpu->kvm, table_gfn);
		pte |= PT_DIRTY_MASK;
		walker->ptes[walker->level - 1] = pte;
	}

	walker->pt_access = pt_access;
	walker->pte_access = pte_access;
	pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
		 __func__, (u64)pte, pte_access, pt_access);
	return 1;

error:
	errcode |= write_fault | user_fault;
	if (fetch_fault && (mmu->nx ||
			    kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)))
		errcode |= PFERR_FETCH_MASK;

	walker->fault.vector = PF_VECTOR;
	walker->fault.error_code_valid = true;
	walker->fault.error_code = errcode;
	walker->fault.address = addr;
	walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;

	trace_kvm_mmu_walker_error(walker->fault.error_code);
	return 0;
}
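/*
 * walk_addr() walks the page tables described by vcpu->arch.mmu;
 * walk_addr_nested() uses vcpu->arch.nested_mmu instead, for translations
 * done on behalf of a nested guest.
 */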
static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gva_t addr, u32 access)
{
	return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.mmu, addr,
					access);
}

static int FNAME(walk_addr_nested)(struct guest_walker *walker,
				   struct kvm_vcpu *vcpu, gva_t addr,
				   u32 access)
{
	return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu,
					addr, access);
}
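/*
 * Returns true (and drops the spte) if a gpte should not be prefetched:
 * reserved bits set, not present, or not yet accessed by the guest.
 */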
static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
					 struct kvm_mmu_page *sp, u64 *spte,
					 pt_element_t gpte)
{
	if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
		goto no_present;

	if (!is_present_gpte(gpte))
		goto no_present;

	if (!(gpte & PT_ACCESSED_MASK))
		goto no_present;

	return false;

no_present:
	drop_spte(vcpu->kvm, spte);
	return true;
}
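/*
 * Reflect a single guest pte into the corresponding shadow pte: drop the
 * spte if the gpte is unusable, otherwise map the gfn it points to with
 * the access rights it grants.
 */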
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			      u64 *spte, const void *pte)
{
	pt_element_t gpte;
	unsigned pte_access;
	pfn_t pfn;

	gpte = *(const pt_element_t *)pte;
	if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
		return;

	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
	pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte, true);
	pfn = gfn_to_pfn_atomic(vcpu->kvm, gpte_to_gfn(gpte));
	if (mmu_invalid_pfn(pfn)) {
		kvm_release_pfn_clean(pfn);
		return;
	}

	/*
	 * we call mmu_set_spte() with host_writable = true because that
	 * vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1).
	 */
	mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
		     NULL, PT_PAGE_TABLE_LEVEL,
		     gpte_to_gfn(gpte), pfn, true, true);
}
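/*
 * Re-read the guest pte backing one level of the walk and compare it with
 * the value recorded in the walker.  Returns true if the read failed or the
 * gpte has changed.  At the lowest level a whole aligned group of
 * PTE_PREFETCH_NUM gptes is read, so pte_prefetch() below can reuse them.
 */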
static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
				struct guest_walker *gw, int level)
{
	pt_element_t curr_pte;
	gpa_t base_gpa, pte_gpa = gw->pte_gpa[level - 1];
	u64 mask;
	int r, index;

	if (level == PT_PAGE_TABLE_LEVEL) {
		mask = PTE_PREFETCH_NUM * sizeof(pt_element_t) - 1;
		base_gpa = pte_gpa & ~mask;
		index = (pte_gpa - base_gpa) / sizeof(pt_element_t);

		r = kvm_read_guest_atomic(vcpu->kvm, base_gpa,
				gw->prefetch_ptes, sizeof(gw->prefetch_ptes));
		curr_pte = gw->prefetch_ptes[index];
	} else
		r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa,
				&curr_pte, sizeof(curr_pte));

	return r || curr_pte != gw->ptes[level - 1];
}
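/*
 * Opportunistically map the gptes cached in gw->prefetch_ptes around the
 * faulting entry into the same last-level shadow page, so nearby accesses
 * need not fault again; direct shadow pages go through
 * __direct_pte_prefetch() instead.
 */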
static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
				u64 *sptep)
{
	struct kvm_mmu_page *sp;
	pt_element_t *gptep = gw->prefetch_ptes;
	u64 *spte;
	int i;

	sp = page_header(__pa(sptep));

	if (sp->role.level > PT_PAGE_TABLE_LEVEL)
		return;

	if (sp->role.direct)
		return __direct_pte_prefetch(vcpu, sp, sptep);

	i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
	spte = sp->spt + i;

	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
		pt_element_t gpte;
		unsigned pte_access;
		gfn_t gfn;
		pfn_t pfn;

		if (spte == sptep)
			continue;

		if (is_shadow_present_pte(*spte))
			continue;

		gpte = gptep[i];

		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
			continue;

		pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte,
								  true);
		gfn = gpte_to_gfn(gpte);
		pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
					      pte_access & ACC_WRITE_MASK);
		if (mmu_invalid_pfn(pfn)) {
			kvm_release_pfn_clean(pfn);
			break;
		}

		mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
			     NULL, PT_PAGE_TABLE_LEVEL, gfn,
			     pfn, true, true);
	}
}
/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 */
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			 struct guest_walker *gw,
			 int user_fault, int write_fault, int hlevel,
			 int *emulate, pfn_t pfn, bool map_writable,
			 bool prefault)
{
	unsigned access = gw->pt_access;
	struct kvm_mmu_page *sp = NULL;
	int top_level;
	unsigned direct_access;
	struct kvm_shadow_walk_iterator it;

	if (!is_present_gpte(gw->ptes[gw->level - 1]))
		return NULL;

	direct_access = gw->pte_access;

	top_level = vcpu->arch.mmu.root_level;
	if (top_level == PT32E_ROOT_LEVEL)
		top_level = PT32_ROOT_LEVEL;
	/*
	 * Verify that the top-level gpte is still there.  Since the page
	 * is a root page, it is either write protected (and cannot be
	 * changed from now on) or it is invalid (in which case, we don't
	 * really care if it changes underneath us after this point).
	 */
	if (FNAME(gpte_changed)(vcpu, gw, top_level))
		goto out_gpte_changed;

	for (shadow_walk_init(&it, vcpu, addr);
	     shadow_walk_okay(&it) && it.level > gw->level;
	     shadow_walk_next(&it)) {
		gfn_t table_gfn;

		clear_sp_write_flooding_count(it.sptep);
		drop_large_spte(vcpu, it.sptep);

		sp = NULL;
		if (!is_shadow_present_pte(*it.sptep)) {
			table_gfn = gw->table_gfn[it.level - 2];
			sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
					      false, access, it.sptep);
		}

		/*
		 * Verify that the gpte in the page we've just write
		 * protected is still there.
		 */
		if (FNAME(gpte_changed)(vcpu, gw, it.level - 1))
			goto out_gpte_changed;

		if (sp)
			link_shadow_page(it.sptep, sp);
	}

	for (;
	     shadow_walk_okay(&it) && it.level > hlevel;
	     shadow_walk_next(&it)) {
		gfn_t direct_gfn;

		clear_sp_write_flooding_count(it.sptep);
		validate_direct_spte(vcpu, it.sptep, direct_access);

		drop_large_spte(vcpu, it.sptep);

		if (is_shadow_present_pte(*it.sptep))
			continue;

		direct_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);

		sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, it.level-1,
				      true, direct_access, it.sptep);
		link_shadow_page(it.sptep, sp);
	}

	clear_sp_write_flooding_count(it.sptep);
	mmu_set_spte(vcpu, it.sptep, access, gw->pte_access,
		     user_fault, write_fault, emulate, it.level,
		     gw->gfn, pfn, prefault, map_writable);
	FNAME(pte_prefetch)(vcpu, gw, it.sptep);

	return it.sptep;

out_gpte_changed:
	if (sp)
		kvm_mmu_put_page(sp, it.sptep);
	kvm_release_pfn_clean(pfn);
	return NULL;
}
/*
 * Page fault handler.  There are several causes for a page fault:
 *   - there is no shadow pte for the guest pte
 *   - write access through a shadow pte marked read only so that we can set
 *     the dirty bit
 *   - write access to a shadow pte marked read only so we can update the page
 *     dirty bitmap, when userspace requests it
 *   - mmio access; in this case we will never install a present shadow pte
 *   - normal guest page fault due to the guest pte marked not present, not
 *     writable, or not executable
 *
 * Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *          a negative value on error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
			     bool prefault)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	struct guest_walker walker;
	u64 *sptep;
	int emulate = 0;
	int r;
	pfn_t pfn;
	int level = PT_PAGE_TABLE_LEVEL;
	int force_pt_level;
	unsigned long mmu_seq;
	bool map_writable;

	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);

	if (unlikely(error_code & PFERR_RSVD_MASK))
		return handle_mmio_page_fault(vcpu, addr, error_code,
					      mmu_is_nested(vcpu));

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	/*
	 * Look up the guest pte for the faulting address.
	 */
	r = FNAME(walk_addr)(&walker, vcpu, addr, error_code);

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!r) {
		pgprintk("%s: guest page fault\n", __func__);
		if (!prefault)
			inject_page_fault(vcpu, &walker.fault);

		return 0;
	}

	if (walker.level >= PT_DIRECTORY_LEVEL)
		force_pt_level = mapping_level_dirty_bitmap(vcpu, walker.gfn);
	else
		force_pt_level = 1;
	if (!force_pt_level) {
		level = min(walker.level, mapping_level(vcpu, walker.gfn));
		walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
	}

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();

	if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault,
			 &map_writable))
		return 0;

	if (handle_abnormal_pfn(vcpu, mmu_is_nested(vcpu) ? 0 : addr,
				walker.gfn, pfn, walker.pte_access, &r))
		return r;

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq))
		goto out_unlock;

	kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
	kvm_mmu_free_some_pages(vcpu);
	if (!force_pt_level)
		transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
	sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
			     level, &emulate, pfn, map_writable, prefault);
	(void)sptep;
	pgprintk("%s: shadow pte %p %llx emulate %d\n", __func__,
		 sptep, *sptep, emulate);

	++vcpu->stat.pf_fixed;
	kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
	spin_unlock(&vcpu->kvm->mmu_lock);

	return emulate;

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}
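/*
 * Guest physical address of the first gpte covered by a level-1 shadow
 * page.  With 32-bit guest ptes a shadow page maps only part of the guest
 * page table, and sp->role.quadrant selects which part.
 */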
static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
{
	int offset = 0;

	WARN_ON(sp->role.level != 1);

	if (PTTYPE == 32)
		offset = sp->role.quadrant << PT64_LEVEL_BITS;

	return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
}
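/*
 * Handle a guest invlpg: if the last-level spte for gva belongs to an
 * unsync shadow page, zap it and immediately refill it from the current
 * guest pte rather than waiting for the next sync.
 */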
static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
{
	struct kvm_shadow_walk_iterator iterator;
	struct kvm_mmu_page *sp;
	int level;
	u64 *sptep;

	vcpu_clear_mmio_info(vcpu, gva);

	/*
	 * No need to check return value here, rmap_can_add() can
	 * help us to skip pte prefetch later.
	 */
	mmu_topup_memory_caches(vcpu);

	spin_lock(&vcpu->kvm->mmu_lock);
	for_each_shadow_entry(vcpu, gva, iterator) {
		level = iterator.level;
		sptep = iterator.sptep;

		sp = page_header(__pa(sptep));
		if (is_last_spte(*sptep, level)) {
			pt_element_t gpte;
			gpa_t pte_gpa;

			if (!sp->unsync)
				break;

			pte_gpa = FNAME(get_level1_sp_gpa)(sp);
			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);

			if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
				kvm_flush_remote_tlbs(vcpu->kvm);

			if (!rmap_can_add(vcpu))
				break;

			if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
						  sizeof(pt_element_t)))
				break;

			FNAME(update_pte)(vcpu, sp, sptep, &gpte);
		}

		if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
			break;
	}
	spin_unlock(&vcpu->kvm->mmu_lock);
}
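/*
 * Translate a guest virtual address to a guest physical address by walking
 * the guest page tables.  On failure UNMAPPED_GVA is returned and, if the
 * caller supplied an exception structure, the page fault details are
 * copied into it.  The _nested variant walks vcpu->arch.nested_mmu.
 */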
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
			       struct x86_exception *exception)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr)(&walker, vcpu, vaddr, access);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	} else if (exception)
		*exception = walker.fault;

	return gpa;
}

static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
				      u32 access,
				      struct x86_exception *exception)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr, access);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	} else if (exception)
		*exception = walker.fault;

	return gpa;
}
/*
 * Using the cached information from sp->gfns is safe because:
 * - The spte has a reference to the struct page, so the pfn for a given gfn
 *   can't change unless all sptes pointing to it are nuked first.
 *
 * Note:
 *   Even though the guest is responsible for it, we should flush all TLBs
 *   whenever a spte is dropped here.  If we don't, the mmu notifiers
 *   (kvm_mmu_notifier_invalidate_page and
 *   kvm_mmu_notifier_invalidate_range_start) may see that the page is no
 *   longer mapped and skip the flush, allowing the guest to keep accessing
 *   freed pages.  We increment kvm->tlbs_dirty to delay the TLB flush in
 *   this case.
 */
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	int i, nr_present = 0;
	bool host_writable;
	gpa_t first_pte_gpa;

	/* direct kvm_mmu_page can not be unsync. */
	BUG_ON(sp->role.direct);

	first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);

	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
		unsigned pte_access;
		pt_element_t gpte;
		gpa_t pte_gpa;
		gfn_t gfn;

		if (!sp->spt[i])
			continue;

		pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);

		if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
					  sizeof(pt_element_t)))
			return -EINVAL;

		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
			vcpu->kvm->tlbs_dirty++;
			continue;
		}

		gfn = gpte_to_gfn(gpte);
		pte_access = sp->role.access;
		pte_access &= FNAME(gpte_access)(vcpu, gpte, true);

		if (sync_mmio_spte(&sp->spt[i], gfn, pte_access, &nr_present))
			continue;

		if (gfn != sp->gfns[i]) {
			drop_spte(vcpu->kvm, &sp->spt[i]);
			vcpu->kvm->tlbs_dirty++;
			continue;
		}

		nr_present++;

		host_writable = sp->spt[i] & SPTE_HOST_WRITEABLE;

		set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
			 PT_PAGE_TABLE_LEVEL, gfn,
			 spte_to_pfn(sp->spt[i]), true, false,
			 host_writable);
	}

	return !nr_present;
}
#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef PT_LVL_ADDR_MASK
#undef PT_LVL_OFFSET_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn
#undef gpte_to_gfn_lvl
#undef CMPXCHG