page_tables.c

/*P:700
 * The pagetable code, on the other hand, still shows the scars of
 * previous encounters.  It's functional, and as neat as it can be in the
 * circumstances, but be wary, for these things are subtle and break easily.
 * The Guest provides a virtual to physical mapping, but we can neither trust
 * it nor use it: we verify and convert it here then point the CPU to the
 * converted Guest pages when running the Guest.
:*/

/* Copyright (C) Rusty Russell IBM Corporation 2006.
 * GPL v2 and any later version */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/percpu.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include "lg.h"

/*M:008
 * We hold a reference to pages, which prevents them from being swapped.
 * It'd be nice to have a callback in the "struct mm_struct" when Linux wants
 * to swap out.  If we had this, and a shrinker callback to trim PTE pages, we
 * could probably consider launching Guests as non-root.
:*/

/*H:300
 * The Page Table Code
 *
 * We use two-level page tables for the Guest, or three-level with PAE.  If
 * you're not entirely comfortable with virtual addresses, physical addresses
 * and page tables then I recommend you review arch/x86/lguest/boot.c's "Page
 * Table Handling" (with diagrams!).
 *
 * The Guest keeps page tables, but we maintain the actual ones here: these are
 * called "shadow" page tables.  Which is a very Guest-centric name: these are
 * the real page tables the CPU uses, although we keep them up to date to
 * reflect the Guest's.  (See what I mean about weird naming?  Since when do
 * shadows reflect anything?)
 *
 * Anyway, this is the most complicated part of the Host code.  There are seven
 * parts to this:
 *  (i) Looking up a page table entry when the Guest faults,
 *  (ii) Making sure the Guest stack is mapped,
 *  (iii) Setting up a page table entry when the Guest tells us one has changed,
 *  (iv) Switching page tables,
 *  (v) Flushing (throwing away) page tables,
 *  (vi) Mapping the Switcher when the Guest is about to run,
 *  (vii) Setting up the page tables initially.
:*/
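
/*
 * A quick worked example may help here: without PAE, a 32-bit virtual
 * address splits 10/10/12, so for vaddr 0xC0101234 the PGD index is
 * 0xC0101234 >> 22 = 768, the PTE index is (0xC0101234 >> 12) & 0x3FF = 257
 * and the page offset is 0x234.  With PAE the split is 2/9/9/12: PGD index
 * 3, PMD index 0, PTE index 257, same offset.
 */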

/*
 * The Switcher uses the complete top PTE page.  That's 1024 PTE entries (4MB)
 * or 512 PTE entries with PAE (2MB).
 */
#define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1)

/*
 * For PAE we need the PMD index as well.  We use the last 2MB, so we
 * will need the last pmd entry of the last pmd page.
 */
#ifdef CONFIG_X86_PAE
#define SWITCHER_PMD_INDEX      (PTRS_PER_PMD - 1)
#define RESERVE_MEM     2U
#define CHECK_GPGD_MASK _PAGE_PRESENT
#else
#define RESERVE_MEM     4U
#define CHECK_GPGD_MASK _PAGE_TABLE
#endif
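
/*
 * Note that RESERVE_MEM is in megabytes, matching the Switcher region
 * described above: 4MB (a whole PTE page's reach) without PAE, 2MB (one
 * PMD entry's worth of PTEs) with PAE.  page_table_guest_data_init()
 * below hands RESERVE_MEM * 1024 * 1024 to the Guest as reserve_mem.
 */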

/*
 * We actually need a separate PTE page for each CPU.  Remember that after the
 * Switcher code itself comes two pages for each CPU, and we don't want this
 * CPU's guest to see the pages of any other CPU.
 */
static DEFINE_PER_CPU(pte_t *, switcher_pte_pages);
#define switcher_pte_page(cpu)  per_cpu(switcher_pte_pages, cpu)

/*H:320
 * The page table code is curly enough to need helper functions to keep it
 * clear and clean.  The kernel itself provides many of them; that's one
 * advantage of insisting that the Guest and Host use the same CONFIG_PAE
 * setting.
 *
 * There are two functions which return pointers to the shadow (aka "real")
 * page tables.
 *
 * spgd_addr() takes the virtual address and returns a pointer to the top-level
 * page directory entry (PGD) for that address.  Since we keep track of several
 * page tables, the "i" argument tells us which one we're interested in (it's
 * usually the current one).
 */
static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr)
{
        unsigned int index = pgd_index(vaddr);

#ifndef CONFIG_X86_PAE
        /* We kill any Guest trying to touch the Switcher addresses. */
        if (index >= SWITCHER_PGD_INDEX) {
                kill_guest(cpu, "attempt to access switcher pages");
                index = 0;
        }
#endif
        /* Return a pointer to the index'th pgd entry for the i'th page table. */
        return &cpu->lg->pgdirs[i].pgdir[index];
}

#ifdef CONFIG_X86_PAE
/*
 * This routine then takes the PGD entry given above, which contains the
 * address of the PMD page.  It then returns a pointer to the PMD entry for the
 * given address.
 */
static pmd_t *spmd_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
{
        unsigned int index = pmd_index(vaddr);
        pmd_t *page;

        /* We kill any Guest trying to touch the Switcher addresses. */
        if (pgd_index(vaddr) == SWITCHER_PGD_INDEX &&
            index >= SWITCHER_PMD_INDEX) {
                kill_guest(cpu, "attempt to access switcher pages");
                index = 0;
        }

        /* You should never call this if the PGD entry wasn't valid. */
        BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
        page = __va(pgd_pfn(spgd) << PAGE_SHIFT);

        return &page[index];
}
#endif

/*
 * This routine then takes the page directory entry returned above, which
 * contains the address of the page table entry (PTE) page.  It then returns a
 * pointer to the PTE entry for the given address.
 */
static pte_t *spte_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
{
#ifdef CONFIG_X86_PAE
        pmd_t *pmd = spmd_addr(cpu, spgd, vaddr);
        pte_t *page = __va(pmd_pfn(*pmd) << PAGE_SHIFT);

        /* You should never call this if the PMD entry wasn't valid. */
        BUG_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT));
#else
        pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT);

        /* You should never call this if the PGD entry wasn't valid. */
        BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
#endif

        return &page[pte_index(vaddr)];
}

/*
 * These functions are just like the above, except they access the Guest
 * page tables.  Hence they return a Guest address.
 */
static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr)
{
        unsigned int index = vaddr >> (PGDIR_SHIFT);
        return cpu->lg->pgdirs[cpu->cpu_pgd].gpgdir + index * sizeof(pgd_t);
}

#ifdef CONFIG_X86_PAE
/* Follow the PGD to the PMD. */
static unsigned long gpmd_addr(pgd_t gpgd, unsigned long vaddr)
{
        unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
        BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
        return gpage + pmd_index(vaddr) * sizeof(pmd_t);
}

/* Follow the PMD to the PTE. */
static unsigned long gpte_addr(struct lg_cpu *cpu,
                               pmd_t gpmd, unsigned long vaddr)
{
        unsigned long gpage = pmd_pfn(gpmd) << PAGE_SHIFT;

        BUG_ON(!(pmd_flags(gpmd) & _PAGE_PRESENT));
        return gpage + pte_index(vaddr) * sizeof(pte_t);
}
#else
/* Follow the PGD to the PTE (no mid-level for !PAE). */
static unsigned long gpte_addr(struct lg_cpu *cpu,
                               pgd_t gpgd, unsigned long vaddr)
{
        unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;

        BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
        return gpage + pte_index(vaddr) * sizeof(pte_t);
}
#endif
/*:*/

/*M:007
 * get_pfn is slow: we could probably try to grab batches of pages here as
 * an optimization (ie. pre-faulting).
:*/

/*H:350
 * This routine takes a page number given by the Guest and converts it to
 * an actual, physical page number.  It can fail for several reasons: the
 * virtual address might not be mapped by the Launcher, the write flag is set
 * and the page is read-only, or the write flag was set and the page was
 * shared so had to be copied, but we ran out of memory.
 *
 * This holds a reference to the page, so release_pte() is careful to put that
 * back.
 */
static unsigned long get_pfn(unsigned long virtpfn, int write)
{
        struct page *page;

        /* gup me one page at this address please! */
        if (get_user_pages_fast(virtpfn << PAGE_SHIFT, 1, write, &page) == 1)
                return page_to_pfn(page);

        /* This value indicates failure. */
        return -1UL;
}

/*H:340
 * Converting a Guest page table entry to a shadow (ie. real) page table
 * entry can be a little tricky.  The flags are (almost) the same, but the
 * Guest PTE contains a virtual page number: the CPU needs the real page
 * number.
 */
static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write)
{
        unsigned long pfn, base, flags;

        /*
         * The Guest sets the global flag, because it thinks that it is using
         * PGE.  We only told it to use PGE so it would tell us whether it was
         * flushing a kernel mapping or a userspace mapping.  We don't actually
         * use the global bit, so throw it away.
         */
        flags = (pte_flags(gpte) & ~_PAGE_GLOBAL);

        /* The Guest's pages are offset inside the Launcher. */
        base = (unsigned long)cpu->lg->mem_base / PAGE_SIZE;

        /*
         * We need a temporary "unsigned long" variable to hold the answer from
         * get_pfn(), because it returns 0xFFFFFFFF on failure, which wouldn't
         * fit in spte.pfn.  get_pfn() finds the real physical number of the
         * page, given the virtual number.
         */
        pfn = get_pfn(base + pte_pfn(gpte), write);
        if (pfn == -1UL) {
                kill_guest(cpu, "failed to get page %lu", pte_pfn(gpte));
                /*
                 * When we destroy the Guest, we'll go through the shadow page
                 * tables and release_pte() them.  Make sure we don't think
                 * this one is valid!
                 */
                flags = 0;
        }
        /* Now we assemble our shadow PTE from the page number and flags. */
        return pfn_pte(pfn, __pgprot(flags));
}
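
/*
 * To make the offsetting concrete: if, say, the Launcher mapped the Guest's
 * memory at 0x10000000, then base is 0x10000000 / 4096 = 0x10000, and Guest
 * page number 5 becomes Launcher-virtual page number 0x10005 before we hand
 * it to get_pfn().
 */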

/*H:460 And to complete the chain, release_pte() looks like this: */
static void release_pte(pte_t pte)
{
        /*
         * Remember that get_user_pages_fast() took a reference to the page, in
         * get_pfn()?  We have to put it back now.
         */
        if (pte_flags(pte) & _PAGE_PRESENT)
                put_page(pte_page(pte));
}
/*:*/

static void check_gpte(struct lg_cpu *cpu, pte_t gpte)
{
        if ((pte_flags(gpte) & _PAGE_PSE) ||
            pte_pfn(gpte) >= cpu->lg->pfn_limit)
                kill_guest(cpu, "bad page table entry");
}

static void check_gpgd(struct lg_cpu *cpu, pgd_t gpgd)
{
        if ((pgd_flags(gpgd) & ~CHECK_GPGD_MASK) ||
            (pgd_pfn(gpgd) >= cpu->lg->pfn_limit))
                kill_guest(cpu, "bad page directory entry");
}

#ifdef CONFIG_X86_PAE
static void check_gpmd(struct lg_cpu *cpu, pmd_t gpmd)
{
        if ((pmd_flags(gpmd) & ~_PAGE_TABLE) ||
            (pmd_pfn(gpmd) >= cpu->lg->pfn_limit))
                kill_guest(cpu, "bad page middle directory entry");
}
#endif

/*H:330
 * (i) Looking up a page table entry when the Guest faults.
 *
 * We saw this call in run_guest(): when we see a page fault in the Guest, we
 * come here.  That's because we only set up the shadow page tables lazily as
 * they're needed, so we get page faults all the time and quietly fix them up
 * and return to the Guest without it knowing.
 *
 * If we fixed up the fault (ie. we mapped the address), this routine returns
 * true.  Otherwise, it was a real fault and we need to tell the Guest.
 */
bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
{
        pgd_t gpgd;
        pgd_t *spgd;
        unsigned long gpte_ptr;
        pte_t gpte;
        pte_t *spte;

        /* Mid level for PAE. */
#ifdef CONFIG_X86_PAE
        pmd_t *spmd;
        pmd_t gpmd;
#endif

        /* First step: get the top-level Guest page table entry. */
        if (unlikely(cpu->linear_pages)) {
                /* Faking up a linear mapping. */
                gpgd = __pgd(CHECK_GPGD_MASK);
        } else {
                gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
                /* Toplevel not present?  We can't map it in. */
                if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
                        return false;
        }

        /* Now look at the matching shadow entry. */
        spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
        if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) {
                /* No shadow entry: allocate a new shadow PTE page. */
                unsigned long ptepage = get_zeroed_page(GFP_KERNEL);
                /*
                 * This is not really the Guest's fault, but killing it is
                 * simple for this corner case.
                 */
                if (!ptepage) {
                        kill_guest(cpu, "out of memory allocating pte page");
                        return false;
                }
                /* We check that the Guest pgd is OK. */
                check_gpgd(cpu, gpgd);
                /*
                 * And we copy the flags to the shadow PGD entry.  The page
                 * number in the shadow PGD is the page we just allocated.
                 */
                set_pgd(spgd, __pgd(__pa(ptepage) | pgd_flags(gpgd)));
        }

#ifdef CONFIG_X86_PAE
        if (unlikely(cpu->linear_pages)) {
                /* Faking up a linear mapping. */
                gpmd = __pmd(_PAGE_TABLE);
        } else {
                gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
                /* Middle level not present?  We can't map it in. */
                if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
                        return false;
        }

        /* Now look at the matching shadow entry. */
        spmd = spmd_addr(cpu, *spgd, vaddr);
        if (!(pmd_flags(*spmd) & _PAGE_PRESENT)) {
                /* No shadow entry: allocate a new shadow PTE page. */
                unsigned long ptepage = get_zeroed_page(GFP_KERNEL);

                /*
                 * This is not really the Guest's fault, but killing it is
                 * simple for this corner case.
                 */
                if (!ptepage) {
                        kill_guest(cpu, "out of memory allocating pte page");
                        return false;
                }

                /* We check that the Guest pmd is OK. */
                check_gpmd(cpu, gpmd);

                /*
                 * And we copy the flags to the shadow PMD entry.  The page
                 * number in the shadow PMD is the page we just allocated.
                 */
                set_pmd(spmd, __pmd(__pa(ptepage) | pmd_flags(gpmd)));
        }

        /*
         * OK, now we look at the lower level in the Guest page table: keep its
         * address, because we might update it later.
         */
        gpte_ptr = gpte_addr(cpu, gpmd, vaddr);
#else
        /*
         * OK, now we look at the lower level in the Guest page table: keep its
         * address, because we might update it later.
         */
        gpte_ptr = gpte_addr(cpu, gpgd, vaddr);
#endif

        if (unlikely(cpu->linear_pages)) {
                /* Linear?  Make up a PTE which points to same page. */
                gpte = __pte((vaddr & PAGE_MASK) | _PAGE_RW | _PAGE_PRESENT);
        } else {
                /* Read the actual PTE value. */
                gpte = lgread(cpu, gpte_ptr, pte_t);
        }

        /* If this page isn't in the Guest page tables, we can't page it in. */
        if (!(pte_flags(gpte) & _PAGE_PRESENT))
                return false;
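
        /*
         * The errcode bits are the x86 page fault error code: bit 0 (value 1)
         * means the page was present, bit 1 (value 2) means it was a write,
         * and bit 2 (value 4) means the access came from userspace.
         */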

        /*
         * Check they're not trying to write to a page the Guest wants
         * read-only (errcode bit 1, value 2, is set on write faults).
         */
        if ((errcode & 2) && !(pte_flags(gpte) & _PAGE_RW))
                return false;

        /* User access to a kernel-only page? (errcode bit 2, value 4) */
        if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER))
                return false;

        /*
         * Check that the Guest PTE flags are OK, and the page number is below
         * the pfn_limit (ie. not mapping the Launcher binary).
         */
        check_gpte(cpu, gpte);

        /* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */
        gpte = pte_mkyoung(gpte);
        if (errcode & 2)
                gpte = pte_mkdirty(gpte);

        /* Get the pointer to the shadow PTE entry we're going to set. */
        spte = spte_addr(cpu, *spgd, vaddr);

        /*
         * If there was a valid shadow PTE entry here before, we release it.
         * This can happen with a write to a previously read-only entry.
         */
        release_pte(*spte);

        /*
         * If this is a write, we insist that the Guest page is writable (the
         * final arg to gpte_to_spte()).
         */
        if (pte_dirty(gpte))
                *spte = gpte_to_spte(cpu, gpte, 1);
        else
                /*
                 * If this is a read, don't set the "writable" bit in the page
                 * table entry, even if the Guest says it's writable.  That way
                 * we will come back here when a write does actually occur, so
                 * we can update the Guest's _PAGE_DIRTY flag.
                 */
                set_pte(spte, gpte_to_spte(cpu, pte_wrprotect(gpte), 0));

        /*
         * Finally, we write the Guest PTE entry back: we've set the
         * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags.
         */
        if (likely(!cpu->linear_pages))
                lgwrite(cpu, gpte_ptr, pte_t, gpte);

        /*
         * The fault is fixed, the page table is populated, the mapping
         * manipulated, the result returned and the code complete.  A small
         * delay and a trace of alliteration are the only indications the Guest
         * has that a page fault occurred at all.
         */
        return true;
}

/*H:360
 * (ii) Making sure the Guest stack is mapped.
 *
 * Remember that direct traps into the Guest need a mapped Guest kernel stack.
 * pin_stack_pages() calls us here: we could simply call demand_page(), but as
 * we've seen that logic is quite long, and usually the stack pages are already
 * mapped, so it's overkill.
 *
 * This is a quick version which answers the question: is this virtual address
 * mapped by the shadow page tables, and is it writable?
 */
static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr)
{
        pgd_t *spgd;
        unsigned long flags;
#ifdef CONFIG_X86_PAE
        pmd_t *spmd;
#endif

        /* Look at the current top level entry: is it present? */
        spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
        if (!(pgd_flags(*spgd) & _PAGE_PRESENT))
                return false;

#ifdef CONFIG_X86_PAE
        spmd = spmd_addr(cpu, *spgd, vaddr);
        if (!(pmd_flags(*spmd) & _PAGE_PRESENT))
                return false;
#endif

        /*
         * Check the flags on the pte entry itself: it must be present and
         * writable.
         */
        flags = pte_flags(*(spte_addr(cpu, *spgd, vaddr)));

        return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW);
}

/*
 * So, when pin_stack_pages() asks us to pin a page, we check if it's already
 * in the page tables, and if not, we call demand_page() with error code 2
 * (meaning "write").
 */
void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
{
        if (!page_writable(cpu, vaddr) && !demand_page(cpu, vaddr, 2))
                kill_guest(cpu, "bad stack page %#lx", vaddr);
}
/*:*/

#ifdef CONFIG_X86_PAE
static void release_pmd(pmd_t *spmd)
{
        /* If the entry's not present, there's nothing to release. */
        if (pmd_flags(*spmd) & _PAGE_PRESENT) {
                unsigned int i;
                pte_t *ptepage = __va(pmd_pfn(*spmd) << PAGE_SHIFT);

                /* For each entry in the page, we might need to release it. */
                for (i = 0; i < PTRS_PER_PTE; i++)
                        release_pte(ptepage[i]);
                /* Now we can free the page of PTEs. */
                free_page((long)ptepage);
                /* And zero out the PMD entry so we never release it twice. */
                set_pmd(spmd, __pmd(0));
        }
}

static void release_pgd(pgd_t *spgd)
{
        /* If the entry's not present, there's nothing to release. */
        if (pgd_flags(*spgd) & _PAGE_PRESENT) {
                unsigned int i;
                pmd_t *pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);

                for (i = 0; i < PTRS_PER_PMD; i++)
                        release_pmd(&pmdpage[i]);
                /* Now we can free the page of PMDs. */
                free_page((long)pmdpage);
                /* And zero out the PGD entry so we never release it twice. */
                set_pgd(spgd, __pgd(0));
        }
}
#else /* !CONFIG_X86_PAE */
/*H:450
 * If we chase down the release_pgd() code, the non-PAE version looks like
 * this.  The PAE version is almost identical, but instead of calling
 * release_pte it calls release_pmd(), which looks much like this.
 */
static void release_pgd(pgd_t *spgd)
{
        /* If the entry's not present, there's nothing to release. */
        if (pgd_flags(*spgd) & _PAGE_PRESENT) {
                unsigned int i;
                /*
                 * Converting the pfn to find the actual PTE page is easy: turn
                 * the page number into a physical address, then convert to a
                 * virtual address (easy for kernel pages like this one).
                 */
                pte_t *ptepage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);

                /* For each entry in the page, we might need to release it. */
                for (i = 0; i < PTRS_PER_PTE; i++)
                        release_pte(ptepage[i]);
                /* Now we can free the page of PTEs. */
                free_page((long)ptepage);
                /* And zero out the PGD entry so we never release it twice. */
                *spgd = __pgd(0);
        }
}
#endif

/*H:445
 * We saw flush_user_mappings() twice: once from the flush_user_mappings()
 * hypercall and once in new_pgdir() when we re-used a top-level pgdir page.
 * It simply releases every PTE page from 0 up to the Guest's kernel address.
 */
static void flush_user_mappings(struct lguest *lg, int idx)
{
        unsigned int i;

        /* Release every pgd entry up to the kernel's address. */
        for (i = 0; i < pgd_index(lg->kernel_address); i++)
                release_pgd(lg->pgdirs[idx].pgdir + i);
}

/*H:440
 * (v) Flushing (throwing away) page tables,
 *
 * The Guest has a hypercall to throw away the page tables: it's used when a
 * large number of mappings have been changed.
 */
void guest_pagetable_flush_user(struct lg_cpu *cpu)
{
        /* Drop the userspace part of the current page table. */
        flush_user_mappings(cpu->lg, cpu->cpu_pgd);
}
/*:*/

/* We walk down the guest page tables to get a guest-physical address. */
unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
{
        pgd_t gpgd;
        pte_t gpte;
#ifdef CONFIG_X86_PAE
        pmd_t gpmd;
#endif

        /* Still not set up?  Just map 1:1. */
        if (unlikely(cpu->linear_pages))
                return vaddr;

        /* First step: get the top-level Guest page table entry. */
        gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
        /* Toplevel not present?  We can't map it in. */
        if (!(pgd_flags(gpgd) & _PAGE_PRESENT)) {
                kill_guest(cpu, "Bad address %#lx", vaddr);
                return -1UL;
        }

#ifdef CONFIG_X86_PAE
        gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
        if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
                kill_guest(cpu, "Bad address %#lx", vaddr);
        gpte = lgread(cpu, gpte_addr(cpu, gpmd, vaddr), pte_t);
#else
        gpte = lgread(cpu, gpte_addr(cpu, gpgd, vaddr), pte_t);
#endif
        if (!(pte_flags(gpte) & _PAGE_PRESENT))
                kill_guest(cpu, "Bad address %#lx", vaddr);

        return pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK);
}
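
/*
 * The last line is plain arithmetic: if the Guest PTE holds page number
 * 0x1A2 and vaddr ends in 0x345, the Guest-physical answer is
 * 0x1A2 * 4096 | 0x345 = 0x1A2345.
 */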

/*
 * We keep several page tables.  This is a simple routine to find the page
 * table (if any) corresponding to this top-level address the Guest has given
 * us.
 */
static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
                if (lg->pgdirs[i].pgdir && lg->pgdirs[i].gpgdir == pgtable)
                        break;
        return i;
}

/*H:435
 * And this is us, creating the new page directory.  If we really do
 * allocate a new one (and so the kernel parts are not there), we set
 * blank_pgdir.
 */
static unsigned int new_pgdir(struct lg_cpu *cpu,
                              unsigned long gpgdir,
                              int *blank_pgdir)
{
        unsigned int next;
#ifdef CONFIG_X86_PAE
        pmd_t *pmd_table;
#endif

        /*
         * We pick one entry at random to throw out.  Choosing the Least
         * Recently Used might be better, but this is easy.
         */
        next = random32() % ARRAY_SIZE(cpu->lg->pgdirs);
        /* If it's never been allocated at all before, try now. */
        if (!cpu->lg->pgdirs[next].pgdir) {
                cpu->lg->pgdirs[next].pgdir =
                                        (pgd_t *)get_zeroed_page(GFP_KERNEL);
                /* If the allocation fails, just keep using the one we have. */
                if (!cpu->lg->pgdirs[next].pgdir)
                        next = cpu->cpu_pgd;
                else {
#ifdef CONFIG_X86_PAE
                        /*
                         * In PAE mode, allocate a pmd page and populate the
                         * last pgd entry.
                         */
                        pmd_table = (pmd_t *)get_zeroed_page(GFP_KERNEL);
                        if (!pmd_table) {
                                /*
                                 * Free the pgd page again and forget about it,
                                 * so we don't leave a dangling pointer to a
                                 * freed page in pgdirs[].
                                 */
                                free_page((long)cpu->lg->pgdirs[next].pgdir);
                                cpu->lg->pgdirs[next].pgdir = NULL;
                                next = cpu->cpu_pgd;
                        } else {
                                set_pgd(cpu->lg->pgdirs[next].pgdir +
                                        SWITCHER_PGD_INDEX,
                                        __pgd(__pa(pmd_table) | _PAGE_PRESENT));
                                /*
                                 * This is a blank page, so there are no kernel
                                 * mappings: caller must map the stack!
                                 */
                                *blank_pgdir = 1;
                        }
#else
                        *blank_pgdir = 1;
#endif
                }
        }
        /* Record which Guest toplevel this shadows. */
        cpu->lg->pgdirs[next].gpgdir = gpgdir;
        /* Release all the non-kernel mappings. */
        flush_user_mappings(cpu->lg, next);

        return next;
}

/*H:470
 * Finally, a routine which throws away everything: all PGD entries in all
 * the shadow page tables, including the Guest's kernel mappings.  This is used
 * when we destroy the Guest.
 */
static void release_all_pagetables(struct lguest *lg)
{
        unsigned int i, j;

        /* Every shadow pagetable this Guest has */
        for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
                if (lg->pgdirs[i].pgdir) {
#ifdef CONFIG_X86_PAE
                        pgd_t *spgd;
                        pmd_t *pmdpage;
                        unsigned int k;

                        /* Get the last pmd page. */
                        spgd = lg->pgdirs[i].pgdir + SWITCHER_PGD_INDEX;
                        pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);

                        /*
                         * And release the pmd entries of that pmd page,
                         * except for the switcher pmd.
                         */
                        for (k = 0; k < SWITCHER_PMD_INDEX; k++)
                                release_pmd(&pmdpage[k]);
#endif
                        /* Every PGD entry except the Switcher at the top */
                        for (j = 0; j < SWITCHER_PGD_INDEX; j++)
                                release_pgd(lg->pgdirs[i].pgdir + j);
                }
}

/*
 * We also throw away everything when a Guest tells us it's changed a kernel
 * mapping.  Since kernel mappings are in every page table, it's easiest to
 * throw them all away.  This traps the Guest in amber for a while as
 * everything faults back in, but it's rare.
 */
void guest_pagetable_clear_all(struct lg_cpu *cpu)
{
        release_all_pagetables(cpu->lg);
        /* We need the Guest kernel stack mapped again. */
        pin_stack_pages(cpu);
}

/*H:430
 * (iv) Switching page tables
 *
 * Now we've seen all the page table setting and manipulation, let's see
 * what happens when the Guest changes page tables (ie. changes the top-level
 * pgdir).  This occurs on almost every context switch.
 */
void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable)
{
        int newpgdir, repin = 0;

        /*
         * The very first time they call this, we're actually running without
         * any page tables; we've been making it up.  Throw them away now.
         */
        if (unlikely(cpu->linear_pages)) {
                release_all_pagetables(cpu->lg);
                cpu->linear_pages = false;
                /* Force allocation of a new pgdir. */
                newpgdir = ARRAY_SIZE(cpu->lg->pgdirs);
        } else {
                /* Look to see if we have this one already. */
                newpgdir = find_pgdir(cpu->lg, pgtable);
        }

        /*
         * If not, we allocate or mug an existing one: if it's a fresh one,
         * repin gets set to 1.
         */
        if (newpgdir == ARRAY_SIZE(cpu->lg->pgdirs))
                newpgdir = new_pgdir(cpu, pgtable, &repin);
        /* Change the current pgd index to the new one. */
        cpu->cpu_pgd = newpgdir;
        /* If it was completely blank, we map in the Guest kernel stack. */
        if (repin)
                pin_stack_pages(cpu);
}
/*:*/

/*M:009
 * Since we throw away all mappings when a kernel mapping changes, our
 * performance sucks for guests using highmem.  In fact, a guest with
 * PAGE_OFFSET 0xc0000000 (the default) and more than about 700MB of RAM is
 * usually slower than a Guest with less memory.
 *
 * This, of course, cannot be fixed.  It would take some kind of... well, I
 * don't know, but the term "puissant code-fu" comes to mind.
:*/

/*H:420
 * This is the routine which actually sets the page table entry for the
 * "idx"'th shadow page table.
 *
 * Normally, we can just throw out the old entry and replace it with 0: if they
 * use it demand_page() will put the new entry in.  We need to do this anyway:
 * the Guest expects _PAGE_ACCESSED to be set on its PTE the first time a page
 * is read from, and _PAGE_DIRTY when it's written to.
 *
 * But Avi Kivity pointed out that most Operating Systems (Linux included) set
 * these bits on PTEs immediately anyway.  This is done to save the CPU from
 * having to update them, but it helps us the same way: if they set
 * _PAGE_ACCESSED then we can put a read-only PTE entry in immediately, and if
 * they set _PAGE_DIRTY then we can put a writable PTE entry in immediately.
 */
static void do_set_pte(struct lg_cpu *cpu, int idx,
                       unsigned long vaddr, pte_t gpte)
{
        /* Look up the matching shadow page directory entry. */
        pgd_t *spgd = spgd_addr(cpu, idx, vaddr);
#ifdef CONFIG_X86_PAE
        pmd_t *spmd;
#endif

        /* If the top level isn't present, there's no entry to update. */
        if (pgd_flags(*spgd) & _PAGE_PRESENT) {
#ifdef CONFIG_X86_PAE
                spmd = spmd_addr(cpu, *spgd, vaddr);
                if (pmd_flags(*spmd) & _PAGE_PRESENT) {
#endif
                        /* Otherwise, start by releasing the existing entry. */
                        pte_t *spte = spte_addr(cpu, *spgd, vaddr);
                        release_pte(*spte);

                        /*
                         * If they're setting this entry as dirty or accessed,
                         * we might as well put that entry they've given us in
                         * now.  This shaves 10% off a copy-on-write
                         * micro-benchmark.
                         */
                        if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
                                check_gpte(cpu, gpte);
                                set_pte(spte,
                                        gpte_to_spte(cpu, gpte,
                                                pte_flags(gpte) & _PAGE_DIRTY));
                        } else {
                                /*
                                 * Otherwise kill it and we can demand_page()
                                 * it in later.
                                 */
                                set_pte(spte, __pte(0));
                        }
#ifdef CONFIG_X86_PAE
                }
#endif
        }
}

/*H:410
 * Updating a PTE entry is a little trickier.
 *
 * We keep track of several different page tables (the Guest uses one for each
 * process, so it makes sense to cache at least a few).  Each of these has
 * identical kernel parts: ie. every mapping above PAGE_OFFSET is the same for
 * all processes.  So when the page table above that address changes, we update
 * all the page tables, not just the current one.  This is rare.
 *
 * The benefit is that when we have to track a new page table, we can keep all
 * the kernel mappings.  This speeds up context switch immensely.
 */
void guest_set_pte(struct lg_cpu *cpu,
                   unsigned long gpgdir, unsigned long vaddr, pte_t gpte)
{
        /*
         * Kernel mappings must be changed on all top levels.  Slow, but doesn't
         * happen often.
         */
        if (vaddr >= cpu->lg->kernel_address) {
                unsigned int i;
                for (i = 0; i < ARRAY_SIZE(cpu->lg->pgdirs); i++)
                        if (cpu->lg->pgdirs[i].pgdir)
                                do_set_pte(cpu, i, vaddr, gpte);
        } else {
                /* Is this page table one we have a shadow for? */
                int pgdir = find_pgdir(cpu->lg, gpgdir);
                if (pgdir != ARRAY_SIZE(cpu->lg->pgdirs))
                        /* If so, do the update. */
                        do_set_pte(cpu, pgdir, vaddr, gpte);
        }
}

/*H:400
 * (iii) Setting up a page table entry when the Guest tells us one has changed.
 *
 * Just like we did in interrupts_and_traps.c, it makes sense for us to deal
 * with the other side of page tables while we're here: what happens when the
 * Guest asks for a page table to be updated?
 *
 * We already saw that demand_page() will fill in the shadow page tables when
 * needed, so we can simply remove shadow page table entries whenever the Guest
 * tells us they've changed.  When the Guest tries to use the new entry it will
 * fault and demand_page() will fix it up.
 *
 * So with that in mind here's our code to update a (top-level) PGD entry:
 */
void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 idx)
{
        int pgdir;

        if (idx >= SWITCHER_PGD_INDEX)
                return;

        /* If they're talking about a page table we have a shadow for... */
        pgdir = find_pgdir(lg, gpgdir);
        if (pgdir < ARRAY_SIZE(lg->pgdirs))
                /* ... throw it away. */
                release_pgd(lg->pgdirs[pgdir].pgdir + idx);
}

#ifdef CONFIG_X86_PAE
/* For setting a mid-level, we just throw everything away.  It's easy. */
void guest_set_pmd(struct lguest *lg, unsigned long pmdp, u32 idx)
{
        guest_pagetable_clear_all(&lg->cpus[0]);
}
#endif

/*H:500
 * (vii) Setting up the page tables initially.
 *
 * When a Guest is first created, we initialize a shadow page table which
 * we will populate on future faults.  The Guest doesn't have any actual
 * pagetables yet, so we set linear_pages to tell demand_page() to fake it
 * for the moment.
 */
int init_guest_pagetable(struct lguest *lg)
{
        struct lg_cpu *cpu = &lg->cpus[0];
        int allocated = 0;

        /* lg (and lg->cpus[]) starts zeroed: this allocates a new pgdir. */
        cpu->cpu_pgd = new_pgdir(cpu, 0, &allocated);
        if (!allocated)
                return -ENOMEM;

        /* We start with a linear mapping until the Guest sets one up. */
        cpu->linear_pages = true;

        return 0;
}

/*H:508 When the Guest calls LHCALL_LGUEST_INIT we do more setup. */
void page_table_guest_data_init(struct lg_cpu *cpu)
{
        /* We get the kernel address: above this is all kernel memory. */
        if (get_user(cpu->lg->kernel_address,
                     &cpu->lg->lguest_data->kernel_address)
                /*
                 * We tell the Guest that it can't use the top 2 or 4 MB
                 * of virtual addresses used by the Switcher.
                 */
            || put_user(RESERVE_MEM * 1024 * 1024,
                        &cpu->lg->lguest_data->reserve_mem)) {
                kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
                return;
        }

        /*
         * In flush_user_mappings() we loop from 0 to
         * "pgd_index(lg->kernel_address)".  This assumes it won't hit the
         * Switcher mappings, so check that now.
         */
#ifdef CONFIG_X86_PAE
        if (pgd_index(cpu->lg->kernel_address) == SWITCHER_PGD_INDEX &&
            pmd_index(cpu->lg->kernel_address) == SWITCHER_PMD_INDEX)
#else
        if (pgd_index(cpu->lg->kernel_address) >= SWITCHER_PGD_INDEX)
#endif
                kill_guest(cpu, "bad kernel address %#lx",
                           cpu->lg->kernel_address);
}
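
/*
 * To see why a normal Guest passes that check: with the default PAGE_OFFSET
 * of 0xC0000000 and no PAE, pgd_index(kernel_address) is 0xC0000000 >> 22 =
 * 768, comfortably below SWITCHER_PGD_INDEX (1023).
 */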

/* When a Guest dies, our cleanup is fairly simple. */
void free_guest_pagetable(struct lguest *lg)
{
        unsigned int i;

        /* Throw away all page table pages. */
        release_all_pagetables(lg);
        /* Now free the top levels: free_page() can handle 0 just fine. */
        for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
                free_page((long)lg->pgdirs[i].pgdir);
}

/*H:480
 * (vi) Mapping the Switcher when the Guest is about to run.
 *
 * The Switcher and the two pages for this CPU need to be visible in the
 * Guest (and not the pages for other CPUs).  We have the appropriate PTE pages
 * for each CPU already set up, we just need to hook them in now that we know
 * which Guest is about to run on this CPU.
 */
void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
{
        pte_t *switcher_pte_page = __this_cpu_read(switcher_pte_pages);
        pte_t regs_pte;

#ifdef CONFIG_X86_PAE
        pmd_t switcher_pmd;
        pmd_t *pmd_table;

        switcher_pmd = pfn_pmd(__pa(switcher_pte_page) >> PAGE_SHIFT,
                               PAGE_KERNEL_EXEC);

        /*
         * Figure out where the pmd page is, by reading the PGD, and converting
         * it to a virtual address.
         */
        pmd_table = __va(pgd_pfn(cpu->lg->
                        pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX])
                                                                << PAGE_SHIFT);
        /* Now write it into the shadow page table. */
        set_pmd(&pmd_table[SWITCHER_PMD_INDEX], switcher_pmd);
#else
        pgd_t switcher_pgd;

        /*
         * Make the last PGD entry for this Guest point to the Switcher's PTE
         * page for this CPU (with appropriate flags).
         */
        switcher_pgd = __pgd(__pa(switcher_pte_page) | __PAGE_KERNEL_EXEC);

        cpu->lg->pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd;
#endif
        /*
         * We also change the Switcher PTE page.  When we're running the Guest,
         * we want the Guest's "regs" page to appear where the first Switcher
         * page for this CPU is.  This is an optimization: when the Switcher
         * saves the Guest registers, it saves them into the first page of this
         * CPU's "struct lguest_pages": if we make sure the Guest's register
         * page is already mapped there, we don't have to copy them out
         * again.
         */
        regs_pte = pfn_pte(__pa(cpu->regs_page) >> PAGE_SHIFT, PAGE_KERNEL);
        set_pte(&switcher_pte_page[pte_index((unsigned long)pages)], regs_pte);
}
/*:*/

static void free_switcher_pte_pages(void)
{
        unsigned int i;

        for_each_possible_cpu(i)
                free_page((long)switcher_pte_page(i));
}

/*H:520
 * Setting up the Switcher PTE page for given CPU is fairly easy, given
 * the CPU number and the "struct page"s for the Switcher code itself.
 *
 * Currently the Switcher is less than a page long, so "pages" is always 1.
 */
static __init void populate_switcher_pte_page(unsigned int cpu,
                                              struct page *switcher_page[],
                                              unsigned int pages)
{
        unsigned int i;
        pte_t *pte = switcher_pte_page(cpu);

        /* The first entries are easy: they map the Switcher code. */
        for (i = 0; i < pages; i++) {
                set_pte(&pte[i], mk_pte(switcher_page[i],
                                __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)));
        }

        /* The only other thing we map is this CPU's pair of pages. */
        i = pages + cpu*2;

        /* First page (Guest registers) is writable from the Guest. */
        set_pte(&pte[i], pfn_pte(page_to_pfn(switcher_page[i]),
                         __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW)));

        /*
         * The second page contains the "struct lguest_ro_state", and is
         * read-only.
         */
        set_pte(&pte[i+1], pfn_pte(page_to_pfn(switcher_page[i+1]),
                           __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)));
}
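
/*
 * Concretely, with a one-page Switcher ("pages" == 1), CPU 2's PTE page maps
 * the Switcher code at entry 0, that CPU's Guest-register page at entry
 * 1 + 2*2 = 5, and its read-only state page at entry 6.
 */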

/*
 * We've made it through the page table code.  Perhaps our tired brains are
 * still processing the details, or perhaps we're simply glad it's over.
 *
 * If nothing else, note that all this complexity in juggling shadow page tables
 * in sync with the Guest's page tables is for one reason: for most Guests this
 * page table dance determines how bad performance will be.  This is why Xen
 * uses exotic direct Guest pagetable manipulation, and why both Intel and AMD
 * have implemented shadow page table support directly into hardware.
 *
 * There is just one file remaining in the Host.
 */

/*H:510
 * At boot or module load time, init_pagetables() allocates and populates
 * the Switcher PTE page for each CPU.
 */
__init int init_pagetables(struct page **switcher_page, unsigned int pages)
{
        unsigned int i;

        for_each_possible_cpu(i) {
                switcher_pte_page(i) = (pte_t *)get_zeroed_page(GFP_KERNEL);
                if (!switcher_pte_page(i)) {
                        free_switcher_pte_pages();
                        return -ENOMEM;
                }
                populate_switcher_pte_page(i, switcher_page, pages);
        }
        return 0;
}
/*:*/

/* Cleaning up simply involves freeing the PTE page for each CPU. */
void free_pagetables(void)
{
        free_switcher_pte_pages();
}