gmap.c

/*
 * KVM guest address space mapping code
 *
 * Copyright IBM Corp. 2007, 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>

#define GMAP_SHADOW_FAKE_TABLE 1ULL
/**
 * gmap_alloc - allocate and initialize a guest address space
 * @limit: maximum address of the gmap address space
 *
 * Returns a guest address space structure.
 */
static struct gmap *gmap_alloc(unsigned long limit)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;
	unsigned long etype, atype;

	if (limit < (1UL << 31)) {
		limit = (1UL << 31) - 1;
		atype = _ASCE_TYPE_SEGMENT;
		etype = _SEGMENT_ENTRY_EMPTY;
	} else if (limit < (1UL << 42)) {
		limit = (1UL << 42) - 1;
		atype = _ASCE_TYPE_REGION3;
		etype = _REGION3_ENTRY_EMPTY;
	} else if (limit < (1UL << 53)) {
		limit = (1UL << 53) - 1;
		atype = _ASCE_TYPE_REGION2;
		etype = _REGION2_ENTRY_EMPTY;
	} else {
		limit = -1UL;
		atype = _ASCE_TYPE_REGION1;
		etype = _REGION1_ENTRY_EMPTY;
	}
	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	INIT_LIST_HEAD(&gmap->children);
	INIT_LIST_HEAD(&gmap->pt_list);
	INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL);
	INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC);
	INIT_RADIX_TREE(&gmap->host_to_rmap, GFP_ATOMIC);
	spin_lock_init(&gmap->guest_table_lock);
	spin_lock_init(&gmap->shadow_lock);
	atomic_set(&gmap->ref_count, 1);
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		goto out_free;
	page->index = 0;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, etype);
	gmap->table = table;
	gmap->asce = atype | _ASCE_TABLE_LENGTH |
		_ASCE_USER_BITS | __pa(table);
	gmap->asce_end = limit;
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}
/**
 * gmap_create - create a guest address space
 * @mm: pointer to the parent mm_struct
 * @limit: maximum size of the gmap address space
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
{
	struct gmap *gmap;
	unsigned long gmap_asce;

	gmap = gmap_alloc(limit);
	if (!gmap)
		return NULL;
	gmap->mm = mm;
	spin_lock(&mm->context.gmap_lock);
	list_add_rcu(&gmap->list, &mm->context.gmap_list);
	if (list_is_singular(&mm->context.gmap_list))
		gmap_asce = gmap->asce;
	else
		gmap_asce = -1UL;
	WRITE_ONCE(mm->context.gmap_asce, gmap_asce);
	spin_unlock(&mm->context.gmap_lock);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_create);
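
/*
 * Editor's illustration (not part of the original file): a minimal sketch of
 * the gmap life cycle as a hypothetical KVM-like caller might use it. The
 * function name is made up; only gmap_create() and gmap_remove() are real.
 */
static int __maybe_unused gmap_lifecycle_sketch(struct mm_struct *mm)
{
	struct gmap *g;

	/*
	 * Ask for a 4 TB guest space; gmap_alloc() rounds the limit up to
	 * (1UL << 42) - 1 and picks a region-3 table for this size class.
	 */
	g = gmap_create(mm, (1UL << 42) - 1);
	if (!g)
		return -ENOMEM;
	/* ... map segments, resolve faults, run the guest ... */
	gmap_remove(g);	/* unlinks from the mm and drops the initial ref */
	return 0;
}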
static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte(gmap->asce);
	else
		__tlb_flush_global();
}

static void gmap_radix_tree_free(struct radix_tree_root *root)
{
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			radix_tree_delete(root, index);
		}
	} while (nr > 0);
}

static void gmap_rmap_radix_tree_free(struct radix_tree_root *root)
{
	struct gmap_rmap *rmap, *rnext, *head;
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			head = radix_tree_delete(root, index);
			gmap_for_each_rmap_safe(rmap, rnext, head)
				kfree(rmap);
		}
	} while (nr > 0);
}
/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 *
 * No locks required. There are no references to this gmap anymore.
 */
static void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;

	/* Flush tlb of all gmaps (if not already done for shadows) */
	if (!(gmap_is_shadow(gmap) && gmap->removed))
		gmap_flush_tlb(gmap);
	/* Free all segment & region tables. */
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
		__free_pages(page, 2);
	gmap_radix_tree_free(&gmap->guest_to_host);
	gmap_radix_tree_free(&gmap->host_to_guest);

	/* Free additional data for a shadow gmap */
	if (gmap_is_shadow(gmap)) {
		/* Free all page tables. */
		list_for_each_entry_safe(page, next, &gmap->pt_list, lru)
			page_table_free_pgste(page);
		gmap_rmap_radix_tree_free(&gmap->host_to_rmap);
		/* Release reference to the parent */
		gmap_put(gmap->parent);
	}

	kfree(gmap);
}
/**
 * gmap_get - increase reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * Returns the gmap pointer
 */
struct gmap *gmap_get(struct gmap *gmap)
{
	atomic_inc(&gmap->ref_count);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_get);

/**
 * gmap_put - decrease reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * If the reference counter reaches zero the guest address space is freed.
 */
void gmap_put(struct gmap *gmap)
{
	if (atomic_dec_return(&gmap->ref_count) == 0)
		gmap_free(gmap);
}
EXPORT_SYMBOL_GPL(gmap_put);
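
/*
 * Editor's illustration (not part of the original file): the usual pattern
 * for keeping a gmap alive across an operation that may sleep. Hypothetical
 * helper name.
 */
static void __maybe_unused gmap_refcount_sketch(struct gmap *gmap)
{
	struct gmap *g = gmap_get(gmap);	/* ref_count++ */

	/* ... g cannot be freed underneath us here ... */
	gmap_put(g);				/* ref_count--, frees at zero */
}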
/**
 * gmap_remove - remove a guest address space but do not free it yet
 * @gmap: pointer to the guest address space structure
 */
void gmap_remove(struct gmap *gmap)
{
	struct gmap *sg, *next;
	unsigned long gmap_asce;

	/* Remove all shadow gmaps linked to this gmap */
	if (!list_empty(&gmap->children)) {
		spin_lock(&gmap->shadow_lock);
		list_for_each_entry_safe(sg, next, &gmap->children, list) {
			list_del(&sg->list);
			gmap_put(sg);
		}
		spin_unlock(&gmap->shadow_lock);
	}
	/* Remove gmap from the per-mm list */
	spin_lock(&gmap->mm->context.gmap_lock);
	list_del_rcu(&gmap->list);
	if (list_empty(&gmap->mm->context.gmap_list))
		gmap_asce = 0;
	else if (list_is_singular(&gmap->mm->context.gmap_list))
		gmap_asce = list_first_entry(&gmap->mm->context.gmap_list,
					     struct gmap, list)->asce;
	else
		gmap_asce = -1UL;
	WRITE_ONCE(gmap->mm->context.gmap_asce, gmap_asce);
	spin_unlock(&gmap->mm->context.gmap_lock);
	synchronize_rcu();
	/* Put reference */
	gmap_put(gmap);
}
EXPORT_SYMBOL_GPL(gmap_remove);
/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);

/**
 * gmap_get_enabled - get a pointer to the currently enabled gmap
 *
 * Returns a pointer to the currently enabled gmap. NULL if none is enabled.
 */
struct gmap *gmap_get_enabled(void)
{
	return (struct gmap *) S390_lowcore.gmap;
}
EXPORT_SYMBOL_GPL(gmap_get_enabled);
/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
			    unsigned long init, unsigned long gaddr)
{
	struct page *page;
	unsigned long *new;

	/* since we don't free the gmap table until gmap_free we can unlock */
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	spin_lock(&gmap->guest_table_lock);
	if (*table & _REGION_ENTRY_INVALID) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
		page->index = gaddr;
		page = NULL;
	}
	spin_unlock(&gmap->guest_table_lock);
	if (page)
		__free_pages(page, 2);
	return 0;
}
/**
 * __gmap_segment_gaddr - find virtual address from segment pointer
 * @entry: pointer to a segment table entry in the guest address space
 *
 * Returns the virtual address in the guest address space for the segment
 */
static unsigned long __gmap_segment_gaddr(unsigned long *entry)
{
	struct page *page;
	unsigned long offset, mask;

	offset = (unsigned long) entry / sizeof(unsigned long);
	offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
	mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
	page = virt_to_page((void *)((unsigned long) entry & mask));
	return page->index + offset;
}
/**
 * __gmap_unlink_by_vmaddr - unlink a single segment via a host address
 * @gmap: pointer to the guest address space structure
 * @vmaddr: address in the host process address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
{
	unsigned long *entry;
	int flush = 0;

	BUG_ON(gmap_is_shadow(gmap));
	spin_lock(&gmap->guest_table_lock);
	entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
	if (entry) {
		flush = (*entry != _SEGMENT_ENTRY_INVALID);
		*entry = _SEGMENT_ENTRY_INVALID;
	}
	spin_unlock(&gmap->guest_table_lock);
	return flush;
}

/**
 * __gmap_unmap_by_gaddr - unmap a single segment via a guest address
 * @gmap: pointer to the guest address space structure
 * @gaddr: address in the guest address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
}
/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE)
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);
/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len < from || to + len < to ||
	    from + len - 1 > TASK_MAX_SIZE || to + len - 1 > gmap->asce_end)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Remove old translation */
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
		/* Store new translation */
		if (radix_tree_insert(&gmap->guest_to_host,
				      (to + off) >> PMD_SHIFT,
				      (void *) from + off))
			break;
	}
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	if (off >= len)
		return 0;
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
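
/*
 * Editor's illustration (not part of the original file): mapping guest
 * memory. Both addresses and the length must be 1 MB segment aligned
 * (PMD_SIZE), otherwise gmap_map_segment() returns -EINVAL. The addresses
 * below are hypothetical.
 */
static int __maybe_unused gmap_map_sketch(struct gmap *g)
{
	/* back guest physical 0..256MB-1 with host vm addresses at 1GB */
	return gmap_map_segment(g, 1UL << 30, 0, 256UL << 20);
}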
/**
 * __gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 *
 * Note: Can also be called for shadow gmaps.
 */
unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long)
		radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
	/* Note: guest_to_host is empty for a shadow gmap */
	return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);

/**
 * gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 */
unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_translate(gmap, gaddr);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);
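
/*
 * Editor's illustration (not part of the original file): errors come back
 * in-band as negative values packed into the unsigned long, so callers test
 * with IS_ERR_VALUE() rather than comparing against 0. Hypothetical helper.
 */
static int __maybe_unused gmap_translate_sketch(struct gmap *g,
						unsigned long gaddr)
{
	unsigned long vmaddr = gmap_translate(g, gaddr);

	if (IS_ERR_VALUE(vmaddr))
		return (int) vmaddr;	/* -EFAULT: no segment mapped */
	/* ... vmaddr is the host address backing gaddr ... */
	return 0;
}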
/**
 * gmap_unlink - disconnect a page table from the gmap shadow tables
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the host page table
 * @vmaddr: vm address associated with the host page table
 */
void gmap_unlink(struct mm_struct *mm, unsigned long *table,
		 unsigned long vmaddr)
{
	struct gmap *gmap;
	int flush;

	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
		if (flush)
			gmap_flush_tlb(gmap);
	}
	rcu_read_unlock();
}
/**
 * __gmap_link - set up shadow page tables to connect a host to a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @vmaddr: vm address
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
{
	struct mm_struct *mm;
	unsigned long *table;
	spinlock_t *ptl;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	int rc;

	BUG_ON(gmap_is_shadow(gmap));
	/* Create higher level tables in the gmap page table */
	table = gmap->table;
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
		table += (gaddr >> 53) & 0x7ff;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
				     gaddr & 0xffe0000000000000UL))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
		table += (gaddr >> 42) & 0x7ff;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
				     gaddr & 0xfffffc0000000000UL))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
		table += (gaddr >> 31) & 0x7ff;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
				     gaddr & 0xffffffff80000000UL))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	table += (gaddr >> 20) & 0x7ff;
	/* Walk the parent mm page table */
	mm = gmap->mm;
	pgd = pgd_offset(mm, vmaddr);
	VM_BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, vmaddr);
	VM_BUG_ON(pud_none(*pud));
	/* large puds cannot yet be handled */
	if (pud_large(*pud))
		return -EFAULT;
	pmd = pmd_offset(pud, vmaddr);
	VM_BUG_ON(pmd_none(*pmd));
	/* large pmds cannot yet be handled */
	if (pmd_large(*pmd))
		return -EFAULT;
	/* Link gmap segment table entry location to page table. */
	rc = radix_tree_preload(GFP_KERNEL);
	if (rc)
		return rc;
	ptl = pmd_lock(mm, pmd);
	spin_lock(&gmap->guest_table_lock);
	if (*table == _SEGMENT_ENTRY_INVALID) {
		rc = radix_tree_insert(&gmap->host_to_guest,
				       vmaddr >> PMD_SHIFT, table);
		if (!rc)
			*table = pmd_val(*pmd);
	} else
		rc = 0;
	spin_unlock(&gmap->guest_table_lock);
	spin_unlock(ptl);
	radix_tree_preload_end();
	return rc;
}
/**
 * gmap_fault - resolve a fault on a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 */
int gmap_fault(struct gmap *gmap, unsigned long gaddr,
	       unsigned int fault_flags)
{
	unsigned long vmaddr;
	int rc;
	bool unlocked;

	down_read(&gmap->mm->mmap_sem);

retry:
	unlocked = false;
	vmaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(vmaddr)) {
		rc = vmaddr;
		goto out_up;
	}
	if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags,
			     &unlocked)) {
		rc = -EFAULT;
		goto out_up;
	}
	/*
	 * If fixup_user_fault() dropped the mmap_sem during the fault-in,
	 * redo __gmap_translate() to avoid racing with map/unmap_segment.
	 */
	if (unlocked)
		goto retry;

	rc = __gmap_link(gmap, gaddr, vmaddr);
out_up:
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);
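
/*
 * Editor's illustration (not part of the original file): a guest write
 * fault would typically be resolved like this; FAULT_FLAG_WRITE is the
 * generic mm flag passed through to handle_mm_fault(). Hypothetical helper.
 */
static int __maybe_unused gmap_fault_sketch(struct gmap *g, unsigned long gaddr)
{
	return gmap_fault(g, gaddr, FAULT_FLAG_WRITE);
}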
/*
 * this function is assumed to be called with mmap_sem held
 */
void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;

	/* Find the vm address for the guest address */
	vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	if (vmaddr) {
		vmaddr |= gaddr & ~PMD_MASK;
		/* Get pointer to the page table entry */
		ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
		if (likely(ptep)) {
			ptep_zap_unused(gmap->mm, vmaddr, ptep, 0);
			/* only unlock if get_locked_pte took the lock */
			pte_unmap_unlock(ptep, ptl);
		}
	}
}
EXPORT_SYMBOL_GPL(__gmap_zap);
void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
{
	unsigned long gaddr, vmaddr, size;
	struct vm_area_struct *vma;

	down_read(&gmap->mm->mmap_sem);
	for (gaddr = from; gaddr < to;
	     gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
		/* Find the vm address for the guest address */
		vmaddr = (unsigned long)
			radix_tree_lookup(&gmap->guest_to_host,
					  gaddr >> PMD_SHIFT);
		if (!vmaddr)
			continue;
		vmaddr |= gaddr & ~PMD_MASK;
		/* Find vma in the parent mm */
		vma = find_vma(gmap->mm, vmaddr);
		if (!vma)
			continue;
		size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
		zap_page_range(vma, vmaddr, size, NULL);
	}
	up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);
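
/*
 * Editor's illustration (not part of the original file): discarding the
 * host pages behind a guest range, e.g. for the CMMA/ESSA use case. The
 * range values are hypothetical.
 */
static void __maybe_unused gmap_discard_sketch(struct gmap *g)
{
	/* drop the host backing for guest physical 16MB..32MB */
	gmap_discard(g, 16UL << 20, 32UL << 20);
}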
static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);

/**
 * gmap_register_pte_notifier - register a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_register_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_add_rcu(&nb->list, &gmap_notifier_list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_register_pte_notifier);

/**
 * gmap_unregister_pte_notifier - remove a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_unregister_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_del_rcu(&nb->list);
	spin_unlock(&gmap_notifier_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(gmap_unregister_pte_notifier);
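
/*
 * Editor's illustration (not part of the original file): how a consumer
 * such as KVM might hook pte invalidations. The callback fires for every
 * invalidated guest range once the range was armed, e.g. via
 * gmap_mprotect_notify() below. All names here are hypothetical.
 */
static void gmap_sketch_notifier_call(struct gmap *gmap, unsigned long start,
				      unsigned long end)
{
	/* e.g. kick VCPUs or drop cached state for [start, end] */
}

static struct gmap_notifier gmap_sketch_notifier = {
	.notifier_call = gmap_sketch_notifier_call,
};

static void __maybe_unused gmap_notifier_sketch(void)
{
	gmap_register_pte_notifier(&gmap_sketch_notifier);
	/* ... */
	gmap_unregister_pte_notifier(&gmap_sketch_notifier);
}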
/**
 * gmap_call_notifier - call all registered invalidation callbacks
 * @gmap: pointer to guest mapping meta data structure
 * @start: start virtual address in the guest address space
 * @end: end virtual address in the guest address space
 */
static void gmap_call_notifier(struct gmap *gmap, unsigned long start,
			       unsigned long end)
{
	struct gmap_notifier *nb;

	list_for_each_entry(nb, &gmap_notifier_list, list)
		nb->notifier_call(gmap, start, end);
}
/**
 * gmap_table_walk - walk the gmap page tables
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @level: page table level to stop at
 *
 * Returns a table entry pointer for the given guest address and @level
 * @level=0 : returns a pointer to a page table entry (or NULL)
 * @level=1 : returns a pointer to a segment table entry (or NULL)
 * @level=2 : returns a pointer to a region-3 table entry (or NULL)
 * @level=3 : returns a pointer to a region-2 table entry (or NULL)
 * @level=4 : returns a pointer to a region-1 table entry (or NULL)
 *
 * Returns NULL if the gmap page tables could not be walked to the
 * requested level.
 *
 * Note: Can also be called for shadow gmaps.
 */
static inline unsigned long *gmap_table_walk(struct gmap *gmap,
					     unsigned long gaddr, int level)
{
	unsigned long *table;

	if ((gmap->asce & _ASCE_TYPE_MASK) + 4 < (level * 4))
		return NULL;
	if (gmap_is_shadow(gmap) && gmap->removed)
		return NULL;
	if (gaddr & (-1UL << (31 + ((gmap->asce & _ASCE_TYPE_MASK) >> 2) * 11)))
		return NULL;
	table = gmap->table;
	switch (gmap->asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (gaddr >> 53) & 0x7ff;
		if (level == 4)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* Fallthrough */
	case _ASCE_TYPE_REGION2:
		table += (gaddr >> 42) & 0x7ff;
		if (level == 3)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* Fallthrough */
	case _ASCE_TYPE_REGION3:
		table += (gaddr >> 31) & 0x7ff;
		if (level == 2)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* Fallthrough */
	case _ASCE_TYPE_SEGMENT:
		table += (gaddr >> 20) & 0x7ff;
		if (level == 1)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
		table += (gaddr >> 12) & 0xff;
	}
	return table;
}
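
/*
 * Editor's illustration (not part of the original file): the @level
 * parameter selects where the walk stops; level 1 yields the segment table
 * entry that gmap_pte_op_walk() below reinterprets as a pmd. Hypothetical
 * helper name.
 */
static void __maybe_unused gmap_table_walk_sketch(struct gmap *g,
						  unsigned long gaddr)
{
	unsigned long *ste = gmap_table_walk(g, gaddr, 1);

	if (ste && !(*ste & _SEGMENT_ENTRY_INVALID)) {
		/* *ste is a valid segment table entry for gaddr */
	}
}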
/**
 * gmap_pte_op_walk - walk the gmap page table, get the page table lock
 *		      and return the pte pointer
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @ptl: pointer to the spinlock pointer
 *
 * Returns a pointer to the locked pte for a guest address, or NULL
 *
 * Note: Can also be called for shadow gmaps.
 */
static pte_t *gmap_pte_op_walk(struct gmap *gmap, unsigned long gaddr,
			       spinlock_t **ptl)
{
	unsigned long *table;

	if (gmap_is_shadow(gmap))
		spin_lock(&gmap->guest_table_lock);
	/* Walk the gmap page table, lock and get pte pointer */
	table = gmap_table_walk(gmap, gaddr, 1); /* get segment pointer */
	if (!table || *table & _SEGMENT_ENTRY_INVALID) {
		if (gmap_is_shadow(gmap))
			spin_unlock(&gmap->guest_table_lock);
		return NULL;
	}
	if (gmap_is_shadow(gmap)) {
		*ptl = &gmap->guest_table_lock;
		return pte_offset_map((pmd_t *) table, gaddr);
	}
	return pte_alloc_map_lock(gmap->mm, (pmd_t *) table, gaddr, ptl);
}
/**
 * gmap_pte_op_fixup - force a page in and connect the gmap page table
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @vmaddr: address in the host process address space
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if the caller can retry __gmap_translate (might fail again),
 * -ENOMEM if out of memory and -EFAULT if anything goes wrong while fixing
 * up or connecting the gmap page table.
 */
static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
			     unsigned long vmaddr, int prot)
{
	struct mm_struct *mm = gmap->mm;
	unsigned int fault_flags;
	bool unlocked = false;

	BUG_ON(gmap_is_shadow(gmap));
	fault_flags = (prot == PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
	if (fixup_user_fault(current, mm, vmaddr, fault_flags, &unlocked))
		return -EFAULT;
	if (unlocked)
		/* lost mmap_sem, caller has to retry __gmap_translate */
		return 0;
	/* Connect the page tables */
	return __gmap_link(gmap, gaddr, vmaddr);
}

/**
 * gmap_pte_op_end - release the page table lock
 * @ptl: pointer to the spinlock pointer
 */
static void gmap_pte_op_end(spinlock_t *ptl)
{
	spin_unlock(ptl);
}
/*
 * gmap_protect_range - remove access rights to memory and set pgste bits
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bits: pgste notification bits to set
 *
 * Returns 0 if successfully protected, -ENOMEM if out of memory and
 * -EFAULT if gaddr is invalid (or mapping for shadows is missing).
 *
 * Called with sg->mm->mmap_sem in read.
 *
 * Note: Can also be called for shadow gmaps.
 */
static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
			      unsigned long len, int prot, unsigned long bits)
{
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;
	int rc;

	while (len) {
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
		if (ptep) {
			rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, bits);
			gmap_pte_op_end(ptl);
		}
		if (rc) {
			vmaddr = __gmap_translate(gmap, gaddr);
			if (IS_ERR_VALUE(vmaddr))
				return vmaddr;
			rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, prot);
			if (rc)
				return rc;
			continue;
		}
		gaddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	return 0;
}
/**
 * gmap_mprotect_notify - change access rights for a range of ptes and
 *			  call the notifier if any pte changes again
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if for each page in the given range a gmap mapping exists,
 * the new access rights could be set and the notifier could be armed.
 * If the gmap mapping is missing for one or more pages -EFAULT is
 * returned. If no memory could be allocated -ENOMEM is returned.
 * This function establishes missing page table entries.
 */
int gmap_mprotect_notify(struct gmap *gmap, unsigned long gaddr,
			 unsigned long len, int prot)
{
	int rc;

	if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK) || gmap_is_shadow(gmap))
		return -EINVAL;
	if (!MACHINE_HAS_ESOP && prot == PROT_READ)
		return -EINVAL;
	down_read(&gmap->mm->mmap_sem);
	rc = gmap_protect_range(gmap, gaddr, len, prot, PGSTE_IN_BIT);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_mprotect_notify);
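
/*
 * Editor's illustration (not part of the original file): write-protecting a
 * single guest page and arming the PGSTE_IN notification bit; any later
 * change to the pte then fires the notifiers registered above. Hypothetical
 * helper name.
 */
static int __maybe_unused gmap_mprotect_sketch(struct gmap *g,
					       unsigned long gaddr)
{
	return gmap_mprotect_notify(g, gaddr & PAGE_MASK, PAGE_SIZE, PROT_READ);
}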
/**
 * gmap_read_table - get an unsigned long value from a guest page table using
 *		     absolute addressing, without marking the page referenced.
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @val: pointer to the unsigned long value to return
 *
 * Returns 0 if the value was read, -ENOMEM if out of memory and -EFAULT
 * if reading using the virtual address failed.
 *
 * Called with gmap->mm->mmap_sem in read.
 */
int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
{
	unsigned long address, vmaddr;
	spinlock_t *ptl;
	pte_t *ptep, pte;
	int rc;

	while (1) {
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
		if (ptep) {
			pte = *ptep;
			if (pte_present(pte) && (pte_val(pte) & _PAGE_READ)) {
				address = pte_val(pte) & PAGE_MASK;
				address += gaddr & ~PAGE_MASK;
				*val = *(unsigned long *) address;
				pte_val(*ptep) |= _PAGE_YOUNG;
				/* Do *NOT* clear the _PAGE_INVALID bit! */
				rc = 0;
			}
			gmap_pte_op_end(ptl);
		}
		if (!rc)
			break;
		vmaddr = __gmap_translate(gmap, gaddr);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = vmaddr;
			break;
		}
		rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, PROT_READ);
		if (rc)
			break;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_read_table);
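
/*
 * Editor's illustration (not part of the original file): fetching an 8-byte
 * guest table entry, e.g. while shadowing guest page tables. The caller
 * holds mmap_sem in read. Hypothetical helper name.
 */
static int __maybe_unused gmap_read_table_sketch(struct gmap *g,
						 unsigned long gaddr)
{
	unsigned long entry;
	int rc;

	rc = gmap_read_table(g, gaddr & ~7UL, &entry);
	if (rc)
		return rc;	/* -EFAULT or -ENOMEM */
	/* ... interpret the entry ... */
	return 0;
}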
/**
 * gmap_insert_rmap - add a rmap to the host_to_rmap radix tree
 * @sg: pointer to the shadow guest address space structure
 * @vmaddr: vm address associated with the rmap
 * @rmap: pointer to the rmap structure
 *
 * Called with the sg->guest_table_lock
 */
static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
				    struct gmap_rmap *rmap)
{
	void **slot;

	BUG_ON(!gmap_is_shadow(sg));
	slot = radix_tree_lookup_slot(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
	if (slot) {
		rmap->next = radix_tree_deref_slot_protected(slot,
							     &sg->guest_table_lock);
		radix_tree_replace_slot(slot, rmap);
	} else {
		rmap->next = NULL;
		radix_tree_insert(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT,
				  rmap);
	}
}
/**
 * gmap_protect_rmap - modify access rights to memory and create an rmap
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow gmap
 * @paddr: address in the parent guest address space
 * @len: length of the memory area to protect
 * @prot: indicates access rights: none, read-only or read-write
 *
 * Returns 0 if successfully protected and the rmap was created, -ENOMEM
 * if out of memory and -EFAULT if paddr is invalid.
 */
static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
			     unsigned long paddr, unsigned long len, int prot)
{
	struct gmap *parent;
	struct gmap_rmap *rmap;
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	parent = sg->parent;
	while (len) {
		vmaddr = __gmap_translate(parent, paddr);
		if (IS_ERR_VALUE(vmaddr))
			return vmaddr;
		rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
		if (!rmap)
			return -ENOMEM;
		rmap->raddr = raddr;
		rc = radix_tree_preload(GFP_KERNEL);
		if (rc) {
			kfree(rmap);
			return rc;
		}
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(parent, paddr, &ptl);
		if (ptep) {
			spin_lock(&sg->guest_table_lock);
			rc = ptep_force_prot(parent->mm, paddr, ptep, prot,
					     PGSTE_VSIE_BIT);
			if (!rc)
				gmap_insert_rmap(sg, vmaddr, rmap);
			spin_unlock(&sg->guest_table_lock);
			gmap_pte_op_end(ptl);
		}
		radix_tree_preload_end();
		if (rc) {
			kfree(rmap);
			rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
			if (rc)
				return rc;
			continue;
		}
		paddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	return 0;
}
#define _SHADOW_RMAP_MASK	0x7
#define _SHADOW_RMAP_REGION1	0x5
#define _SHADOW_RMAP_REGION2	0x4
#define _SHADOW_RMAP_REGION3	0x3
#define _SHADOW_RMAP_SEGMENT	0x2
#define _SHADOW_RMAP_PGTABLE	0x1

/**
 * gmap_idte_one - invalidate a single region or segment table entry
 * @asce: region or segment table *origin* + table-type bits
 * @vaddr: virtual address to identify the table entry to flush
 *
 * The invalid bit of a single region or segment table entry is set
 * and the associated TLB entries depending on the entry are flushed.
 * The table-type of the @asce identifies the portion of the @vaddr
 * that is used as the invalidation index.
 */
static inline void gmap_idte_one(unsigned long asce, unsigned long vaddr)
{
	asm volatile(
		"	.insn	rrf,0xb98e0000,%0,%1,0,0"
		: : "a" (asce), "a" (vaddr) : "cc", "memory");
}
/**
 * gmap_unshadow_page - remove a page from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_page(struct gmap *sg, unsigned long raddr)
{
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	table = gmap_table_walk(sg, raddr, 0); /* get page table pointer */
	if (!table || *table & _PAGE_INVALID)
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 12) - 1);
	ptep_unshadow_pte(sg->mm, raddr, (pte_t *) table);
}
/**
 * __gmap_unshadow_pgt - remove all entries from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @pgt: pointer to the start of a shadow page table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr,
				unsigned long *pgt)
{
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < 256; i++, raddr += 1UL << 12)
		pgt[i] = _PAGE_INVALID;
}

/**
 * gmap_unshadow_pgt - remove a shadow page table from a segment entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
{
	unsigned long sto, *ste, *pgt;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	ste = gmap_table_walk(sg, raddr, 1); /* get segment pointer */
	if (!ste || !(*ste & _SEGMENT_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 20) - 1);
	sto = (unsigned long) (ste - ((raddr >> 20) & 0x7ff));
	gmap_idte_one(sto | _ASCE_TYPE_SEGMENT, raddr);
	pgt = (unsigned long *)(*ste & _SEGMENT_ENTRY_ORIGIN);
	*ste = _SEGMENT_ENTRY_EMPTY;
	__gmap_unshadow_pgt(sg, raddr, pgt);
	/* Free page table */
	page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
	list_del(&page->lru);
	page_table_free_pgste(page);
}
/**
 * __gmap_unshadow_sgt - remove all entries from a shadow segment table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @sgt: pointer to the start of a shadow segment table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
				unsigned long *sgt)
{
	unsigned long asce, *pgt;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) sgt | _ASCE_TYPE_SEGMENT;
	for (i = 0; i < 2048; i++, raddr += 1UL << 20) {
		if (!(sgt[i] & _SEGMENT_ENTRY_ORIGIN))
			continue;
		pgt = (unsigned long *)(sgt[i] & _REGION_ENTRY_ORIGIN);
		sgt[i] = _SEGMENT_ENTRY_EMPTY;
		__gmap_unshadow_pgt(sg, raddr, pgt);
		/* Free page table */
		page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
		list_del(&page->lru);
		page_table_free_pgste(page);
	}
}

/**
 * gmap_unshadow_sgt - remove a shadow segment table from a region-3 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
{
	unsigned long r3o, *r3e, *sgt;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r3e = gmap_table_walk(sg, raddr, 2); /* get region-3 pointer */
	if (!r3e || !(*r3e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 31) - 1);
	r3o = (unsigned long) (r3e - ((raddr >> 31) & 0x7ff));
	gmap_idte_one(r3o | _ASCE_TYPE_REGION3, raddr);
	sgt = (unsigned long *)(*r3e & _REGION_ENTRY_ORIGIN);
	*r3e = _REGION3_ENTRY_EMPTY;
	__gmap_unshadow_sgt(sg, raddr, sgt);
	/* Free segment table */
	page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, 2);
}
/**
 * __gmap_unshadow_r3t - remove all entries from a shadow region-3 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 * @r3t: pointer to the start of a shadow region-3 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
				unsigned long *r3t)
{
	unsigned long asce, *sgt;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) r3t | _ASCE_TYPE_REGION3;
	for (i = 0; i < 2048; i++, raddr += 1UL << 31) {
		if (!(r3t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		sgt = (unsigned long *)(r3t[i] & _REGION_ENTRY_ORIGIN);
		r3t[i] = _REGION3_ENTRY_EMPTY;
		__gmap_unshadow_sgt(sg, raddr, sgt);
		/* Free segment table */
		page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, 2);
	}
}

/**
 * gmap_unshadow_r3t - remove a shadow region-3 table from a region-2 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
{
	unsigned long r2o, *r2e, *r3t;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r2e = gmap_table_walk(sg, raddr, 3); /* get region-2 pointer */
	if (!r2e || !(*r2e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 42) - 1);
	r2o = (unsigned long) (r2e - ((raddr >> 42) & 0x7ff));
	gmap_idte_one(r2o | _ASCE_TYPE_REGION2, raddr);
	r3t = (unsigned long *)(*r2e & _REGION_ENTRY_ORIGIN);
	*r2e = _REGION2_ENTRY_EMPTY;
	__gmap_unshadow_r3t(sg, raddr, r3t);
	/* Free region 3 table */
	page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, 2);
}
/**
 * __gmap_unshadow_r2t - remove all entries from a shadow region-2 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @r2t: pointer to the start of a shadow region-2 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
				unsigned long *r2t)
{
	unsigned long asce, *r3t;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) r2t | _ASCE_TYPE_REGION2;
	for (i = 0; i < 2048; i++, raddr += 1UL << 42) {
		if (!(r2t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		r3t = (unsigned long *)(r2t[i] & _REGION_ENTRY_ORIGIN);
		r2t[i] = _REGION2_ENTRY_EMPTY;
		__gmap_unshadow_r3t(sg, raddr, r3t);
		/* Free region 3 table */
		page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, 2);
	}
}

/**
 * gmap_unshadow_r2t - remove a shadow region-2 table from a region-1 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr)
{
	unsigned long r1o, *r1e, *r2t;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r1e = gmap_table_walk(sg, raddr, 4); /* get region-1 pointer */
	if (!r1e || !(*r1e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 53) - 1);
	r1o = (unsigned long) (r1e - ((raddr >> 53) & 0x7ff));
	gmap_idte_one(r1o | _ASCE_TYPE_REGION1, raddr);
	r2t = (unsigned long *)(*r1e & _REGION_ENTRY_ORIGIN);
	*r1e = _REGION1_ENTRY_EMPTY;
	__gmap_unshadow_r2t(sg, raddr, r2t);
	/* Free region 2 table */
	page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, 2);
}
/**
 * __gmap_unshadow_r1t - remove all entries from a shadow region-1 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @r1t: pointer to the start of a shadow region-1 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r1t(struct gmap *sg, unsigned long raddr,
				unsigned long *r1t)
{
	unsigned long asce, *r2t;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) r1t | _ASCE_TYPE_REGION1;
	for (i = 0; i < 2048; i++, raddr += 1UL << 53) {
		if (!(r1t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		r2t = (unsigned long *)(r1t[i] & _REGION_ENTRY_ORIGIN);
		__gmap_unshadow_r2t(sg, raddr, r2t);
		/* Clear entry and flush translation r1t -> r2t */
		gmap_idte_one(asce, raddr);
		r1t[i] = _REGION1_ENTRY_EMPTY;
		/* Free region 2 table */
		page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, 2);
	}
}
/**
 * gmap_unshadow - remove a shadow page table completely
 * @sg: pointer to the shadow guest address space structure
 *
 * Called with sg->guest_table_lock
 */
static void gmap_unshadow(struct gmap *sg)
{
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	if (sg->removed)
		return;
	sg->removed = 1;
	gmap_call_notifier(sg, 0, -1UL);
	gmap_flush_tlb(sg);
	table = (unsigned long *)(sg->asce & _ASCE_ORIGIN);
	switch (sg->asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		__gmap_unshadow_r1t(sg, 0, table);
		break;
	case _ASCE_TYPE_REGION2:
		__gmap_unshadow_r2t(sg, 0, table);
		break;
	case _ASCE_TYPE_REGION3:
		__gmap_unshadow_r3t(sg, 0, table);
		break;
	case _ASCE_TYPE_SEGMENT:
		__gmap_unshadow_sgt(sg, 0, table);
		break;
	}
}
/**
 * gmap_find_shadow - find a specific asce in the list of shadow tables
 * @parent: pointer to the parent gmap
 * @asce: ASCE for which the shadow table is created
 * @edat_level: edat level to be used for the shadow translation
 *
 * Returns the pointer to a gmap if a shadow table with the given asce is
 * already available, ERR_PTR(-EAGAIN) if another one is just being created,
 * otherwise NULL
 */
static struct gmap *gmap_find_shadow(struct gmap *parent, unsigned long asce,
				     int edat_level)
{
	struct gmap *sg;

	list_for_each_entry(sg, &parent->children, list) {
		if (sg->orig_asce != asce || sg->edat_level != edat_level ||
		    sg->removed)
			continue;
		if (!sg->initialized)
			return ERR_PTR(-EAGAIN);
		atomic_inc(&sg->ref_count);
		return sg;
	}
	return NULL;
}
/**
 * gmap_shadow_valid - check if a shadow guest address space matches the
 *		       given properties and is still valid
 * @sg: pointer to the shadow guest address space structure
 * @asce: ASCE for which the shadow table is requested
 * @edat_level: edat level to be used for the shadow translation
 *
 * Returns 1 if the gmap shadow is still valid and matches the given
 * properties, the caller can continue using it. Returns 0 otherwise, the
 * caller has to request a new shadow gmap in this case.
 */
int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level)
{
	if (sg->removed)
		return 0;
	return sg->orig_asce == asce && sg->edat_level == edat_level;
}
EXPORT_SYMBOL_GPL(gmap_shadow_valid);

/**
 * gmap_shadow - create/find a shadow guest address space
 * @parent: pointer to the parent gmap
 * @asce: ASCE for which the shadow table is created
 * @edat_level: edat level to be used for the shadow translation
 *
 * The pages of the top level page table referred to by the asce parameter
 * will be set to read-only and marked in the PGSTEs of the kvm process.
 * The shadow table will be removed automatically on any change to the
 * PTE mapping for the source table.
 *
 * Returns a guest address space structure, ERR_PTR(-ENOMEM) if out of memory,
 * ERR_PTR(-EAGAIN) if the caller has to retry and ERR_PTR(-EFAULT) if the
 * parent gmap table could not be protected.
 */
struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
			 int edat_level)
{
	struct gmap *sg, *new;
	unsigned long limit;
	int rc;

	BUG_ON(gmap_is_shadow(parent));
	spin_lock(&parent->shadow_lock);
	sg = gmap_find_shadow(parent, asce, edat_level);
	spin_unlock(&parent->shadow_lock);
	if (sg)
		return sg;
	/* Create a new shadow gmap */
	limit = -1UL >> (33 - (((asce & _ASCE_TYPE_MASK) >> 2) * 11));
	if (asce & _ASCE_REAL_SPACE)
		limit = -1UL;
	new = gmap_alloc(limit);
	if (!new)
		return ERR_PTR(-ENOMEM);
	new->mm = parent->mm;
	new->parent = gmap_get(parent);
	new->orig_asce = asce;
	new->edat_level = edat_level;
	new->initialized = false;
	spin_lock(&parent->shadow_lock);
	/* Recheck if another CPU created the same shadow */
	sg = gmap_find_shadow(parent, asce, edat_level);
	if (sg) {
		spin_unlock(&parent->shadow_lock);
		gmap_free(new);
		return sg;
	}
	if (asce & _ASCE_REAL_SPACE) {
		/* only allow one real-space gmap shadow */
		list_for_each_entry(sg, &parent->children, list) {
			if (sg->orig_asce & _ASCE_REAL_SPACE) {
				spin_lock(&sg->guest_table_lock);
				gmap_unshadow(sg);
				spin_unlock(&sg->guest_table_lock);
				list_del(&sg->list);
				gmap_put(sg);
				break;
			}
		}
	}
	atomic_set(&new->ref_count, 2);
	list_add(&new->list, &parent->children);
	if (asce & _ASCE_REAL_SPACE) {
		/* nothing to protect, return right away */
		new->initialized = true;
		spin_unlock(&parent->shadow_lock);
		return new;
	}
	spin_unlock(&parent->shadow_lock);
	/* protect after insertion, so it will get properly invalidated */
	down_read(&parent->mm->mmap_sem);
	rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
				((asce & _ASCE_TABLE_LENGTH) + 1) * 4096,
				PROT_READ, PGSTE_VSIE_BIT);
	up_read(&parent->mm->mmap_sem);
	spin_lock(&parent->shadow_lock);
	new->initialized = true;
	if (rc) {
		list_del(&new->list);
		gmap_free(new);
		new = ERR_PTR(rc);
	}
	spin_unlock(&parent->shadow_lock);
	return new;
}
EXPORT_SYMBOL_GPL(gmap_shadow);
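
/*
 * Example usage (an illustrative sketch, not part of the gmap API): a nested
 * virtualization caller such as KVM's VSIE code typically caches a shadow
 * gmap, revalidates it with gmap_shadow_valid() and only falls back to
 * gmap_shadow() when the guest ASCE or edat level changed. The helper name
 * example_acquire_shadow and the cached_sg parameter are hypothetical.
 * Note that gmap_shadow() takes parent->mm->mmap_sem itself, so the caller
 * must not hold it here.
 */
static inline struct gmap *example_acquire_shadow(struct gmap *parent,
						  struct gmap *cached_sg,
						  unsigned long asce,
						  int edat_level)
{
	/*
	 * Reuse the cached shadow if it still matches the guest state;
	 * the cache is assumed to hold its own reference on it.
	 */
	if (cached_sg && gmap_shadow_valid(cached_sg, asce, edat_level))
		return cached_sg;
	/* Otherwise create or look up a matching shadow; may return ERR_PTR. */
	return gmap_shadow(parent, asce, edat_level);
}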

/**
 * gmap_shadow_r2t - create an empty shadow region 2 table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @r2t: parent gmap address of the region 2 table to get shadowed
 * @fake: r2t references contiguous guest memory block, not a r2t
 *
 * The r2t parameter specifies the address of the source table. The
 * four pages of the source table are made read-only in the parent gmap
 * address space. A write to the source table area @r2t will automatically
 * remove the shadow r2 table and all of its descendants.
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
		    int fake)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_r2t, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	/* Allocate a shadow region second table */
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		return -ENOMEM;
	page->index = r2t & _REGION_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_r2t = (unsigned long *) page_to_phys(page);
	/* Install shadow region second table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 4); /* get region-1 pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _REGION_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	crst_table_init(s_r2t, _REGION2_ENTRY_EMPTY);
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_r2t | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID;
	if (sg->edat_level >= 1)
		*table |= (r2t & _REGION_ENTRY_PROTECT);
	list_add(&page->lru, &sg->crst_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_REGION_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make r2t read-only in parent gmap page table */
	raddr = (saddr & 0xffe0000000000000UL) | _SHADOW_RMAP_REGION1;
	origin = r2t & _REGION_ENTRY_ORIGIN;
	offset = ((r2t & _REGION_ENTRY_OFFSET) >> 6) * 4096;
	len = ((r2t & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 4);
		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
		    (unsigned long) s_r2t)
			rc = -EAGAIN;	/* Race with unshadow */
		else
			*table &= ~_REGION_ENTRY_INVALID;
	} else {
		gmap_unshadow_r2t(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, 2);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_r2t);

/**
 * gmap_shadow_r3t - create a shadow region 3 table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @r3t: parent gmap address of the region 3 table to get shadowed
 * @fake: r3t references contiguous guest memory block, not a r3t
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
		    int fake)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_r3t, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	/* Allocate a shadow region third table */
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		return -ENOMEM;
	page->index = r3t & _REGION_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_r3t = (unsigned long *) page_to_phys(page);
	/* Install shadow region third table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 3); /* get region-2 pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _REGION_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	crst_table_init(s_r3t, _REGION3_ENTRY_EMPTY);
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_r3t | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID;
	if (sg->edat_level >= 1)
		*table |= (r3t & _REGION_ENTRY_PROTECT);
	list_add(&page->lru, &sg->crst_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_REGION_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make r3t read-only in parent gmap page table */
	raddr = (saddr & 0xfffffc0000000000UL) | _SHADOW_RMAP_REGION2;
	origin = r3t & _REGION_ENTRY_ORIGIN;
	offset = ((r3t & _REGION_ENTRY_OFFSET) >> 6) * 4096;
	len = ((r3t & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 3);
		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
		    (unsigned long) s_r3t)
			rc = -EAGAIN;	/* Race with unshadow */
		else
			*table &= ~_REGION_ENTRY_INVALID;
	} else {
		gmap_unshadow_r3t(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, 2);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_r3t);

/**
 * gmap_shadow_sgt - create a shadow segment table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @sgt: parent gmap address of the segment table to get shadowed
 * @fake: sgt references contiguous guest memory block, not a sgt
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
		    int fake)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_sgt, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg) || (sgt & _REGION3_ENTRY_LARGE));
	/* Allocate a shadow segment table */
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		return -ENOMEM;
	page->index = sgt & _REGION_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_sgt = (unsigned long *) page_to_phys(page);
	/* Install shadow segment table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 2); /* get region-3 pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _REGION_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	crst_table_init(s_sgt, _SEGMENT_ENTRY_EMPTY);
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_sgt | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID;
	if (sg->edat_level >= 1)
		*table |= sgt & _REGION_ENTRY_PROTECT;
	list_add(&page->lru, &sg->crst_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_REGION_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make sgt read-only in parent gmap page table */
	raddr = (saddr & 0xffffffff80000000UL) | _SHADOW_RMAP_REGION3;
	origin = sgt & _REGION_ENTRY_ORIGIN;
	offset = ((sgt & _REGION_ENTRY_OFFSET) >> 6) * 4096;
	len = ((sgt & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 2);
		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
		    (unsigned long) s_sgt)
			rc = -EAGAIN;	/* Race with unshadow */
		else
			*table &= ~_REGION_ENTRY_INVALID;
	} else {
		gmap_unshadow_sgt(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, 2);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_sgt);

/**
 * gmap_shadow_pgt_lookup - find a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: the address in the shadow guest address space
 * @pgt: parent gmap address of the page table to get shadowed
 * @dat_protection: if the pgtable is marked as protected by dat
 * @fake: pgt references contiguous guest memory block, not a pgtable
 *
 * Returns 0 if the shadow page table was found and -EAGAIN if the page
 * table was not found.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
			   unsigned long *pgt, int *dat_protection,
			   int *fake)
{
	unsigned long *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
	if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
		/* Shadow page tables are full pages (pte+pgste) */
		page = pfn_to_page(*table >> PAGE_SHIFT);
		*pgt = page->index & ~GMAP_SHADOW_FAKE_TABLE;
		*dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
		*fake = !!(page->index & GMAP_SHADOW_FAKE_TABLE);
		rc = 0;
	} else {
		rc = -EAGAIN;
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_pgt_lookup);
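
/*
 * Example usage (an illustrative sketch with a hypothetical helper name):
 * a shadow fault handler first asks gmap_shadow_pgt_lookup() whether the
 * page table covering @saddr is already shadowed and only walks the guest
 * tables on -EAGAIN.
 */
static inline int example_resolve_pgt(struct gmap *sg, unsigned long saddr,
				      unsigned long *pgt, int *dat_protection,
				      int *fake)
{
	int rc;

	/* Fast path: the shadow page table may already exist. */
	rc = gmap_shadow_pgt_lookup(sg, saddr, pgt, dat_protection, fake);
	/*
	 * On -EAGAIN the caller would read the guest's own translation
	 * tables and shadow the missing levels via gmap_shadow_r2t(),
	 * gmap_shadow_r3t(), gmap_shadow_sgt() and gmap_shadow_pgt()
	 * before retrying the lookup.
	 */
	return rc;
}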

/**
 * gmap_shadow_pgt - instantiate a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @pgt: parent gmap address of the page table to get shadowed
 * @fake: pgt references contiguous guest memory block, not a pgtable
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
		    int fake)
{
	unsigned long raddr, origin;
	unsigned long *s_pgt, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg) || (pgt & _SEGMENT_ENTRY_LARGE));
	/* Allocate a shadow page table */
	page = page_table_alloc_pgste(sg->mm);
	if (!page)
		return -ENOMEM;
	page->index = pgt & _SEGMENT_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_pgt = (unsigned long *) page_to_phys(page);
	/* Install shadow page table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _SEGMENT_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _SEGMENT_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_pgt | _SEGMENT_ENTRY |
		 (pgt & _SEGMENT_ENTRY_PROTECT) | _SEGMENT_ENTRY_INVALID;
	list_add(&page->lru, &sg->pt_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_SEGMENT_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make pgt read-only in parent gmap page table (not the pgste) */
	raddr = (saddr & 0xfffffffffff00000UL) | _SHADOW_RMAP_SEGMENT;
	origin = pgt & _SEGMENT_ENTRY_ORIGIN & PAGE_MASK;
	rc = gmap_protect_rmap(sg, raddr, origin, PAGE_SIZE, PROT_READ);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 1);
		if (!table || (*table & _SEGMENT_ENTRY_ORIGIN) !=
		    (unsigned long) s_pgt)
			rc = -EAGAIN;	/* Race with unshadow */
		else
			*table &= ~_SEGMENT_ENTRY_INVALID;
	} else {
		gmap_unshadow_pgt(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	page_table_free_pgste(page);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_pgt);
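
/*
 * Example (an illustrative sketch, hypothetical helper): the shadow tables
 * have to be instantiated top-down, mirroring the guest's own translation
 * path, here assuming a 4-level (region-first) parent ASCE and fake == 0.
 * Real callers (e.g. KVM's guest access code) obtain the r2t/r3t/sgt/pgt
 * origins by reading each parent table entry with gmap_read_table(); the
 * origins are taken as given in this sketch.
 */
static inline int example_shadow_all_levels(struct gmap *sg,
					    unsigned long saddr,
					    unsigned long r2t,
					    unsigned long r3t,
					    unsigned long sgt,
					    unsigned long pgt)
{
	int rc;

	rc = gmap_shadow_r2t(sg, saddr, r2t, 0);	/* fills region-1 entry */
	if (rc)
		return rc;
	rc = gmap_shadow_r3t(sg, saddr, r3t, 0);	/* fills region-2 entry */
	if (rc)
		return rc;
	rc = gmap_shadow_sgt(sg, saddr, sgt, 0);	/* fills region-3 entry */
	if (rc)
		return rc;
	return gmap_shadow_pgt(sg, saddr, pgt, 0);	/* fills segment entry */
}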

/**
 * gmap_shadow_page - create a shadow page mapping
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @pte: pte in parent gmap address space to get shadowed
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
{
	struct gmap *parent;
	struct gmap_rmap *rmap;
	unsigned long vmaddr, paddr;
	spinlock_t *ptl;
	pte_t *sptep, *tptep;
	int prot;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	parent = sg->parent;
	prot = (pte_val(pte) & _PAGE_PROTECT) ? PROT_READ : PROT_WRITE;
	rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
	if (!rmap)
		return -ENOMEM;
	rmap->raddr = (saddr & PAGE_MASK) | _SHADOW_RMAP_PGTABLE;
	while (1) {
		paddr = pte_val(pte) & PAGE_MASK;
		vmaddr = __gmap_translate(parent, paddr);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = vmaddr;
			break;
		}
		rc = radix_tree_preload(GFP_KERNEL);
		if (rc)
			break;
		rc = -EAGAIN;
		sptep = gmap_pte_op_walk(parent, paddr, &ptl);
		if (sptep) {
			spin_lock(&sg->guest_table_lock);
			/* Get page table pointer */
			tptep = (pte_t *) gmap_table_walk(sg, saddr, 0);
			if (!tptep) {
				spin_unlock(&sg->guest_table_lock);
				gmap_pte_op_end(ptl);
				radix_tree_preload_end();
				break;
			}
			rc = ptep_shadow_pte(sg->mm, saddr, sptep, tptep, pte);
			if (rc > 0) {
				/* Success and a new mapping */
				gmap_insert_rmap(sg, vmaddr, rmap);
				rmap = NULL;
				rc = 0;
			}
			gmap_pte_op_end(ptl);
			spin_unlock(&sg->guest_table_lock);
		}
		radix_tree_preload_end();
		if (!rc)
			break;
		rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
		if (rc)
			break;
	}
	kfree(rmap);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_page);
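
/*
 * Example (an illustrative sketch, variable names hypothetical, error
 * handling reduced to the bare minimum): the last step of resolving a
 * shadow fault maps a single page. A caller fetches the guest pte from the
 * parent address space, folds the accumulated dat protection of the higher
 * levels into the protection bit and hands the result to gmap_shadow_page().
 */
static inline int example_shadow_final_page(struct gmap *sg,
					    unsigned long saddr,
					    unsigned long pgt,
					    int dat_protection)
{
	unsigned long pteval;
	int rc;

	/* Read the guest pte that translates @saddr: 256 ptes per table. */
	rc = gmap_read_table(sg->parent, pgt + ((saddr >> 12) & 0xff) * 8,
			     &pteval);
	if (rc)
		return rc;
	/* Upper-level dat protection applies to the page as well. */
	if (dat_protection)
		pteval |= _PAGE_PROTECT;
	return gmap_shadow_page(sg, saddr, __pte(pteval));
}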

/**
 * gmap_shadow_notify - handle notifications for shadow gmap
 * @sg: pointer to the shadow guest address space structure
 * @vmaddr: affected host virtual address
 * @offset: offset of the affected page within its 1 MB segment
 * @pte: pointer to the invalidated page table entry
 *
 * Called with the sg->parent->shadow_lock held.
 */
static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr,
			       unsigned long offset, pte_t *pte)
{
	struct gmap_rmap *rmap, *rnext, *head;
	unsigned long gaddr, start, end, bits, raddr;
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	spin_lock(&sg->parent->guest_table_lock);
	table = radix_tree_lookup(&sg->parent->host_to_guest,
				  vmaddr >> PMD_SHIFT);
	gaddr = table ? __gmap_segment_gaddr(table) + offset : 0;
	spin_unlock(&sg->parent->guest_table_lock);
	if (!table)
		return;
	spin_lock(&sg->guest_table_lock);
	if (sg->removed) {
		spin_unlock(&sg->guest_table_lock);
		return;
	}
	/* Check for top level table */
	start = sg->orig_asce & _ASCE_ORIGIN;
	end = start + ((sg->orig_asce & _ASCE_TABLE_LENGTH) + 1) * 4096;
	if (!(sg->orig_asce & _ASCE_REAL_SPACE) && gaddr >= start &&
	    gaddr < end) {
		/* The complete shadow table has to go */
		gmap_unshadow(sg);
		spin_unlock(&sg->guest_table_lock);
		list_del(&sg->list);
		gmap_put(sg);
		return;
	}
	/* Remove the page table tree for one specific entry */
	head = radix_tree_delete(&sg->host_to_rmap, vmaddr >> 12);
	gmap_for_each_rmap_safe(rmap, rnext, head) {
		bits = rmap->raddr & _SHADOW_RMAP_MASK;
		raddr = rmap->raddr ^ bits;
		switch (bits) {
		case _SHADOW_RMAP_REGION1:
			gmap_unshadow_r2t(sg, raddr);
			break;
		case _SHADOW_RMAP_REGION2:
			gmap_unshadow_r3t(sg, raddr);
			break;
		case _SHADOW_RMAP_REGION3:
			gmap_unshadow_sgt(sg, raddr);
			break;
		case _SHADOW_RMAP_SEGMENT:
			gmap_unshadow_pgt(sg, raddr);
			break;
		case _SHADOW_RMAP_PGTABLE:
			gmap_unshadow_page(sg, raddr);
			break;
		}
		kfree(rmap);
	}
	spin_unlock(&sg->guest_table_lock);
}

/**
 * ptep_notify - call all invalidation callbacks for a specific pte.
 * @mm: pointer to the process mm_struct
 * @vmaddr: virtual address in the process address space
 * @pte: pointer to the page table entry
 * @bits: bits from the pgste that caused the notify call
 *
 * This function is assumed to be called with the page table lock held
 * for the pte to notify.
 */
void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
		 pte_t *pte, unsigned long bits)
{
	unsigned long offset, gaddr;
	unsigned long *table;
	struct gmap *gmap, *sg, *next;

	offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
	offset = offset * (4096 / sizeof(pte_t));
	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		if (!list_empty(&gmap->children) && (bits & PGSTE_VSIE_BIT)) {
			spin_lock(&gmap->shadow_lock);
			list_for_each_entry_safe(sg, next,
						 &gmap->children, list)
				gmap_shadow_notify(sg, vmaddr, offset, pte);
			spin_unlock(&gmap->shadow_lock);
		}
		if (!(bits & PGSTE_IN_BIT))
			continue;
		spin_lock(&gmap->guest_table_lock);
		table = radix_tree_lookup(&gmap->host_to_guest,
					  vmaddr >> PMD_SHIFT);
		if (table)
			gaddr = __gmap_segment_gaddr(table) + offset;
		spin_unlock(&gmap->guest_table_lock);
		if (table)
			gmap_call_notifier(gmap, gaddr, gaddr + PAGE_SIZE - 1);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(ptep_notify);

static inline void thp_split_mm(struct mm_struct *mm)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct vm_area_struct *vma;
	unsigned long addr;

	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
		for (addr = vma->vm_start;
		     addr < vma->vm_end;
		     addr += PAGE_SIZE)
			follow_page(vma, addr, FOLL_SPLIT);
		vma->vm_flags &= ~VM_HUGEPAGE;
		vma->vm_flags |= VM_NOHUGEPAGE;
	}
	mm->def_flags |= VM_NOHUGEPAGE;
#endif
}

/*
 * Remove all empty zero pages from the mapping for lazy refaulting
 * - This must be called after mm->context.has_pgste is set, to avoid
 *   future creation of zero pages
 * - This must be called after THP was enabled
 */
static int __zap_zero_pages(pmd_t *pmd, unsigned long start,
			    unsigned long end, struct mm_walk *walk)
{
	unsigned long addr;

	for (addr = start; addr != end; addr += PAGE_SIZE) {
		pte_t *ptep;
		spinlock_t *ptl;

		ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
		if (is_zero_pfn(pte_pfn(*ptep)))
			ptep_xchg_direct(walk->mm, addr, ptep, __pte(_PAGE_INVALID));
		pte_unmap_unlock(ptep, ptl);
	}
	return 0;
}

static inline void zap_zero_pages(struct mm_struct *mm)
{
	struct mm_walk walk = { .pmd_entry = __zap_zero_pages };

	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);
}

/*
 * Switch on pgstes for the current userspace process (for KVM).
 */
int s390_enable_sie(void)
{
	struct mm_struct *mm = current->mm;

	/* Do we have pgstes? if yes, we are done */
	if (mm_has_pgste(mm))
		return 0;
	/* Fail if the page tables are 2K */
	if (!mm_alloc_pgste(mm))
		return -EINVAL;
	down_write(&mm->mmap_sem);
	mm->context.has_pgste = 1;
	/* split thp mappings and disable thp for future mappings */
	thp_split_mm(mm);
	zap_zero_pages(mm);
	up_write(&mm->mmap_sem);
	return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
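
/*
 * Example (an illustrative sketch, hypothetical helper): a hypervisor
 * creates its guest address spaces only after pgstes are enabled for the
 * current process, e.g. during VM creation. This assumes the gmap_create()
 * interface defined earlier in this file; the 2 GB limit is an arbitrary
 * example value, and the NULL return of gmap_create() on allocation
 * failure is passed through unchanged.
 */
static inline struct gmap *example_init_guest_mm(void)
{
	int rc;

	/* Allocate pgste-enabled page tables for current->mm, if needed. */
	rc = s390_enable_sie();
	if (rc)
		return ERR_PTR(rc);
	/* A guest address space can now be created on top of this mm. */
	return gmap_create(current->mm, (1UL << 31) - 1);
}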

/*
 * Enable storage key handling from now on and initialize the storage
 * keys with the default key.
 */
static int __s390_enable_skey(pte_t *pte, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	/* Clear storage key */
	ptep_zap_key(walk->mm, addr, pte);
	return 0;
}

int s390_enable_skey(void)
{
	struct mm_walk walk = { .pte_entry = __s390_enable_skey };
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc = 0;

	down_write(&mm->mmap_sem);
	if (mm_use_skey(mm))
		goto out_up;

	mm->context.use_skey = 1;
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
				MADV_UNMERGEABLE, &vma->vm_flags)) {
			mm->context.use_skey = 0;
			rc = -ENOMEM;
			goto out_up;
		}
	}
	mm->def_flags &= ~VM_MERGEABLE;

	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);

out_up:
	up_write(&mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(s390_enable_skey);

/*
 * Reset CMMA state, make all pages stable again.
 */
static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	ptep_zap_unused(walk->mm, addr, pte, 1);
	return 0;
}

void s390_reset_cmma(struct mm_struct *mm)
{
	struct mm_walk walk = { .pte_entry = __s390_reset_cmma };

	down_write(&mm->mmap_sem);
	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);
	up_write(&mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(s390_reset_cmma);