/* gaccess.c */
  1. /*
  2. * guest access functions
  3. *
  4. * Copyright IBM Corp. 2014
  5. *
  6. */
  7. #include <linux/vmalloc.h>
  8. #include <linux/err.h>
  9. #include <asm/pgtable.h>
  10. #include <asm/gmap.h>
  11. #include "kvm-s390.h"
  12. #include "gaccess.h"
  13. #include <asm/switch_to.h>
/*
 * Address-space-control element (ASCE): designates the highest-level
 * DAT translation table (or a real space) for an address space.
 */
union asce {
	unsigned long val;
	struct {
		unsigned long origin : 52; /* Region- or Segment-Table Origin */
		unsigned long    : 2;
		unsigned long g  : 1; /* Subspace Group Control */
		unsigned long p  : 1; /* Private Space Control */
		unsigned long s  : 1; /* Storage-Alteration-Event Control */
		unsigned long x  : 1; /* Space-Switch-Event Control */
		unsigned long r  : 1; /* Real-Space Control */
		unsigned long    : 1;
		unsigned long dt : 2; /* Designation-Type Control */
		unsigned long tl : 2; /* Region- or Segment-Table Length */
	};
};

/* Values of the ASCE designation-type (dt) field. */
enum {
	ASCE_TYPE_SEGMENT = 0,
	ASCE_TYPE_REGION3 = 1,
	ASCE_TYPE_REGION2 = 2,
	ASCE_TYPE_REGION1 = 3
};
/* Region-first-table entry; rto designates a region-second table. */
union region1_table_entry {
	unsigned long val;
	struct {
		unsigned long rto: 52;/* Region-Table Origin */
		unsigned long    : 2;
		unsigned long p  : 1; /* DAT-Protection Bit */
		unsigned long    : 1;
		unsigned long tf : 2; /* Region-Second-Table Offset */
		unsigned long i  : 1; /* Region-Invalid Bit */
		unsigned long    : 1;
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long tl : 2; /* Region-Second-Table Length */
	};
};

/* Region-second-table entry; rto designates a region-third table. */
union region2_table_entry {
	unsigned long val;
	struct {
		unsigned long rto: 52;/* Region-Table Origin */
		unsigned long    : 2;
		unsigned long p  : 1; /* DAT-Protection Bit */
		unsigned long    : 1;
		unsigned long tf : 2; /* Region-Third-Table Offset */
		unsigned long i  : 1; /* Region-Invalid Bit */
		unsigned long    : 1;
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long tl : 2; /* Region-Third-Table Length */
	};
};

/* Region-third-table entry, format control 0: designates a segment table. */
struct region3_table_entry_fc0 {
	unsigned long sto: 52;/* Segment-Table Origin */
	unsigned long    : 1;
	unsigned long fc : 1; /* Format-Control */
	unsigned long p  : 1; /* DAT-Protection Bit */
	unsigned long    : 1;
	unsigned long tf : 2; /* Segment-Table Offset */
	unsigned long i  : 1; /* Region-Invalid Bit */
	unsigned long cr : 1; /* Common-Region Bit */
	unsigned long tt : 2; /* Table-Type Bits */
	unsigned long tl : 2; /* Segment-Table Length */
};

/* Region-third-table entry, format control 1: maps a region frame directly. */
struct region3_table_entry_fc1 {
	unsigned long rfaa : 33; /* Region-Frame Absolute Address */
	unsigned long    : 14;
	unsigned long av : 1; /* ACCF-Validity Control */
	unsigned long acc: 4; /* Access-Control Bits */
	unsigned long f  : 1; /* Fetch-Protection Bit */
	unsigned long fc : 1; /* Format-Control */
	unsigned long p  : 1; /* DAT-Protection Bit */
	unsigned long co : 1; /* Change-Recording Override */
	unsigned long    : 2;
	unsigned long i  : 1; /* Region-Invalid Bit */
	unsigned long cr : 1; /* Common-Region Bit */
	unsigned long tt : 2; /* Table-Type Bits */
	unsigned long    : 2;
};

/*
 * Region-third-table entry; the anonymous struct exposes the fields that
 * are valid regardless of the format control (fc) value.
 */
union region3_table_entry {
	unsigned long val;
	struct region3_table_entry_fc0 fc0;
	struct region3_table_entry_fc1 fc1;
	struct {
		unsigned long    : 53;
		unsigned long fc : 1; /* Format-Control */
		unsigned long    : 4;
		unsigned long i  : 1; /* Region-Invalid Bit */
		unsigned long cr : 1; /* Common-Region Bit */
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long    : 2;
	};
};
/* Segment-table entry, format control 0: designates a page table. */
struct segment_entry_fc0 {
	unsigned long pto: 53;/* Page-Table Origin */
	unsigned long fc : 1; /* Format-Control */
	unsigned long p  : 1; /* DAT-Protection Bit */
	unsigned long    : 3;
	unsigned long i  : 1; /* Segment-Invalid Bit */
	unsigned long cs : 1; /* Common-Segment Bit */
	unsigned long tt : 2; /* Table-Type Bits */
	unsigned long    : 2;
};

/* Segment-table entry, format control 1: maps a segment frame directly. */
struct segment_entry_fc1 {
	unsigned long sfaa : 44; /* Segment-Frame Absolute Address */
	unsigned long    : 3;
	unsigned long av : 1; /* ACCF-Validity Control */
	unsigned long acc: 4; /* Access-Control Bits */
	unsigned long f  : 1; /* Fetch-Protection Bit */
	unsigned long fc : 1; /* Format-Control */
	unsigned long p  : 1; /* DAT-Protection Bit */
	unsigned long co : 1; /* Change-Recording Override */
	unsigned long    : 2;
	unsigned long i  : 1; /* Segment-Invalid Bit */
	unsigned long cs : 1; /* Common-Segment Bit */
	unsigned long tt : 2; /* Table-Type Bits */
	unsigned long    : 2;
};

/*
 * Segment-table entry; the anonymous struct exposes the fields valid for
 * both format-control variants.
 */
union segment_table_entry {
	unsigned long val;
	struct segment_entry_fc0 fc0;
	struct segment_entry_fc1 fc1;
	struct {
		unsigned long    : 53;
		unsigned long fc : 1; /* Format-Control */
		unsigned long    : 4;
		unsigned long i  : 1; /* Segment-Invalid Bit */
		unsigned long cs : 1; /* Common-Segment Bit */
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long    : 2;
	};
};

/* Values of the table-type (tt) field of region/segment table entries. */
enum {
	TABLE_TYPE_SEGMENT = 0,
	TABLE_TYPE_REGION3 = 1,
	TABLE_TYPE_REGION2 = 2,
	TABLE_TYPE_REGION1 = 3
};

/* Page-table entry: the last level of the DAT walk. */
union page_table_entry {
	unsigned long val;
	struct {
		unsigned long pfra : 52; /* Page-Frame Real Address */
		unsigned long z  : 1; /* Zero Bit */
		unsigned long i  : 1; /* Page-Invalid Bit */
		unsigned long p  : 1; /* DAT-Protection Bit */
		unsigned long co : 1; /* Change-Recording Override */
		unsigned long    : 8;
	};
};
/*
 * vaddress union in order to easily decode a virtual address into its
 * region first index, region second index etc. parts.
 *
 * The second anonymous struct exposes only the two leftmost bits of each
 * index (rfx01, ...), which are compared against the table-length and
 * table-offset fields during translation.
 */
union vaddress {
	unsigned long addr;
	struct {
		unsigned long rfx : 11; /* region first index */
		unsigned long rsx : 11; /* region second index */
		unsigned long rtx : 11; /* region third index */
		unsigned long sx  : 11; /* segment index */
		unsigned long px  : 8;  /* page index */
		unsigned long bx  : 12; /* byte index */
	};
	struct {
		unsigned long rfx01 : 2;
		unsigned long	    : 9;
		unsigned long rsx01 : 2;
		unsigned long	    : 9;
		unsigned long rtx01 : 2;
		unsigned long	    : 9;
		unsigned long sx01  : 2;
		unsigned long	    : 29;
	};
};

/*
 * raddress union which will contain the result (real or absolute address)
 * after a page table walk. The rfaa, sfaa and pfra members are used to
 * simply assign them the value of a region, segment or page table entry.
 */
union raddress {
	unsigned long addr;
	unsigned long rfaa : 33; /* Region-Frame Absolute Address */
	unsigned long sfaa : 44; /* Segment-Frame Absolute Address */
	unsigned long pfra : 52; /* Page-Frame Real Address */
};
/*
 * Structures used during access-register translation (ART):
 * access-list-entry token (ALET), access-list designation (ALD),
 * access-list entry (ALE) and ASN-second-table entry (ASTE).
 */
union alet {
	u32 val;
	struct {
		u32 reserved : 7;
		u32 p        : 1;  /* primary-list bit */
		u32 alesn    : 8;  /* ALE sequence number */
		u32 alen     : 16; /* access-list-entry number */
	};
};

union ald {
	u32 val;
	struct {
		u32     : 1;
		u32 alo : 24; /* access-list origin */
		u32 all : 7;  /* access-list length */
	};
};

struct ale {
	unsigned long i      : 1;  /* ALEN-Invalid Bit */
	unsigned long        : 5;
	unsigned long fo     : 1;  /* Fetch-Only Bit */
	unsigned long p      : 1;  /* Private Bit */
	unsigned long alesn  : 8;  /* Access-List-Entry Sequence Number */
	unsigned long aleax  : 16; /* Access-List-Entry Authorization Index */
	unsigned long        : 32;
	unsigned long        : 1;
	unsigned long asteo  : 25; /* ASN-Second-Table-Entry Origin */
	unsigned long        : 6;
	unsigned long astesn : 32; /* ASTE Sequence Number */
} __packed;

struct aste {
	unsigned long i      : 1;  /* ASX-Invalid Bit */
	unsigned long ato    : 29; /* Authority-Table Origin */
	unsigned long        : 1;
	unsigned long b      : 1;  /* Base-Space Bit */
	unsigned long ax     : 16; /* Authorization Index */
	unsigned long atl    : 12; /* Authority-Table Length */
	unsigned long        : 2;
	unsigned long ca     : 1;  /* Controlled-ASN Bit */
	unsigned long ra     : 1;  /* Reusable-ASN Bit */
	unsigned long asce   : 64; /* Address-Space-Control Element */
	unsigned long ald    : 32;
	unsigned long astesn : 32;
	/* .. more fields there */
} __packed;
  241. int ipte_lock_held(struct kvm_vcpu *vcpu)
  242. {
  243. if (vcpu->arch.sie_block->eca & 1) {
  244. int rc;
  245. read_lock(&vcpu->kvm->arch.sca_lock);
  246. rc = kvm_s390_get_ipte_control(vcpu->kvm)->kh != 0;
  247. read_unlock(&vcpu->kvm->arch.sca_lock);
  248. return rc;
  249. }
  250. return vcpu->kvm->arch.ipte_lock_count != 0;
  251. }
/*
 * Take the IPTE lock without the SIIF facility: a per-VM mutex plus a
 * lock count. The first locker also sets the k bit in the shared ipte
 * control word via a cmpxchg loop, waiting while another party holds it.
 */
static void ipte_lock_simple(struct kvm_vcpu *vcpu)
{
	union ipte_control old, new, *ic;

	mutex_lock(&vcpu->kvm->arch.ipte_mutex);
	vcpu->kvm->arch.ipte_lock_count++;
	/* nested lock: the k bit is already set, nothing more to do */
	if (vcpu->kvm->arch.ipte_lock_count > 1)
		goto out;
retry:
	read_lock(&vcpu->kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(vcpu->kvm);
	do {
		old = READ_ONCE(*ic);
		if (old.k) {
			/* lock held elsewhere: drop sca_lock and retry */
			read_unlock(&vcpu->kvm->arch.sca_lock);
			cond_resched();
			goto retry;
		}
		new = old;
		new.k = 1;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&vcpu->kvm->arch.sca_lock);
out:
	mutex_unlock(&vcpu->kvm->arch.ipte_mutex);
}
/*
 * Release the IPTE lock taken by ipte_lock_simple(). The last unlocker
 * clears the k bit in the ipte control word and wakes up waiters.
 */
static void ipte_unlock_simple(struct kvm_vcpu *vcpu)
{
	union ipte_control old, new, *ic;

	mutex_lock(&vcpu->kvm->arch.ipte_mutex);
	vcpu->kvm->arch.ipte_lock_count--;
	/* still locked by someone else on this VM */
	if (vcpu->kvm->arch.ipte_lock_count)
		goto out;
	read_lock(&vcpu->kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(vcpu->kvm);
	do {
		old = READ_ONCE(*ic);
		new = old;
		new.k = 0;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&vcpu->kvm->arch.sca_lock);
	wake_up(&vcpu->kvm->arch.ipte_wq);
out:
	mutex_unlock(&vcpu->kvm->arch.ipte_mutex);
}
/*
 * Take the IPTE lock via the shared ipte control word (SIIF case):
 * set the k bit and increment the hold count kh in one cmpxchg, waiting
 * while the guest holds the interlock (kg set).
 */
static void ipte_lock_siif(struct kvm_vcpu *vcpu)
{
	union ipte_control old, new, *ic;

retry:
	read_lock(&vcpu->kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(vcpu->kvm);
	do {
		old = READ_ONCE(*ic);
		if (old.kg) {
			/* guest holds the interlock: back off and retry */
			read_unlock(&vcpu->kvm->arch.sca_lock);
			cond_resched();
			goto retry;
		}
		new = old;
		new.k = 1;
		new.kh++;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&vcpu->kvm->arch.sca_lock);
}
/*
 * Release the IPTE lock taken by ipte_lock_siif(): decrement the hold
 * count kh; the last holder also clears k and wakes up waiters.
 */
static void ipte_unlock_siif(struct kvm_vcpu *vcpu)
{
	union ipte_control old, new, *ic;

	read_lock(&vcpu->kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(vcpu->kvm);
	do {
		old = READ_ONCE(*ic);
		new = old;
		new.kh--;
		if (!new.kh)
			new.k = 0;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&vcpu->kvm->arch.sca_lock);
	if (!new.kh)
		wake_up(&vcpu->kvm->arch.ipte_wq);
}
  330. void ipte_lock(struct kvm_vcpu *vcpu)
  331. {
  332. if (vcpu->arch.sie_block->eca & 1)
  333. ipte_lock_siif(vcpu);
  334. else
  335. ipte_lock_simple(vcpu);
  336. }
  337. void ipte_unlock(struct kvm_vcpu *vcpu)
  338. {
  339. if (vcpu->arch.sie_block->eca & 1)
  340. ipte_unlock_siif(vcpu);
  341. else
  342. ipte_unlock_simple(vcpu);
  343. }
/*
 * ar_translation - access-register translation
 * @vcpu: virtual cpu
 * @asce: where the resulting address-space-control element is stored
 * @ar: access register number designating the address space (0..15)
 * @mode: access mode; stores through fetch-only ALEs raise PGM_PROTECTION
 *
 * Performs the ALET -> ALE -> ASTE walk with the authority checks
 * required by the architecture in order to obtain the effective ASCE
 * for an access-register specified access.
 *
 * Returns zero (and sets *asce) on success, a positive program
 * interruption code on an architectural translation failure, or a
 * negative error code if guest memory could not be read.
 */
static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, ar_t ar,
			  enum gacc_mode mode)
{
	union alet alet;
	struct ale ale;
	struct aste aste;
	unsigned long ald_addr, authority_table_addr;
	union ald ald;
	int eax, rc;
	u8 authority_table;

	if (ar >= NUM_ACRS)
		return -EINVAL;

	/* make sure the guest's access registers are current */
	save_access_regs(vcpu->run->s.regs.acrs);
	alet.val = vcpu->run->s.regs.acrs[ar];

	/* ALET 0 designates the primary, ALET 1 the secondary space */
	if (ar == 0 || alet.val == 0) {
		asce->val = vcpu->arch.sie_block->gcr[1];
		return 0;
	} else if (alet.val == 1) {
		asce->val = vcpu->arch.sie_block->gcr[7];
		return 0;
	}

	if (alet.reserved)
		return PGM_ALET_SPECIFICATION;

	/* p selects the primary (cr5) vs. dispatchable-unit (cr2) list */
	if (alet.p)
		ald_addr = vcpu->arch.sie_block->gcr[5];
	else
		ald_addr = vcpu->arch.sie_block->gcr[2];
	ald_addr &= 0x7fffffc0;

	rc = read_guest_real(vcpu, ald_addr + 16, &ald.val, sizeof(union ald));
	if (rc)
		return rc;

	/* ALEN must lie within the access list length */
	if (alet.alen / 8 > ald.all)
		return PGM_ALEN_TRANSLATION;

	/* the ALE address must not wrap beyond 2G-1 */
	if (0x7fffffff - ald.alo * 128 < alet.alen * 16)
		return PGM_ADDRESSING;

	rc = read_guest_real(vcpu, ald.alo * 128 + alet.alen * 16, &ale,
			     sizeof(struct ale));
	if (rc)
		return rc;

	if (ale.i == 1)
		return PGM_ALEN_TRANSLATION;
	if (ale.alesn != alet.alesn)
		return PGM_ALE_SEQUENCE;

	rc = read_guest_real(vcpu, ale.asteo * 64, &aste, sizeof(struct aste));
	if (rc)
		return rc;

	if (aste.i)
		return PGM_ASTE_VALIDITY;
	if (aste.astesn != ale.astesn)
		return PGM_ASTE_SEQUENCE;

	/* private ALE: check the extended authorization index (cr8) */
	if (ale.p == 1) {
		eax = (vcpu->arch.sie_block->gcr[8] >> 16) & 0xffff;
		if (ale.aleax != eax) {
			if (eax / 16 > aste.atl)
				return PGM_EXTENDED_AUTHORITY;

			authority_table_addr = aste.ato * 4 + eax / 4;

			rc = read_guest_real(vcpu, authority_table_addr,
					     &authority_table,
					     sizeof(u8));
			if (rc)
				return rc;

			/* secondary-authority bit for this eax must be set */
			if ((authority_table & (0x40 >> ((eax & 3) * 2))) == 0)
				return PGM_EXTENDED_AUTHORITY;
		}
	}

	if (ale.fo == 1 && mode == GACC_STORE)
		return PGM_PROTECTION;

	asce->val = aste.asce;
	return 0;
}
/* Layout of the translation-exception identification (TEID). */
struct trans_exc_code_bits {
	unsigned long addr : 52; /* Translation-exception Address */
	unsigned long fsi  : 2;  /* Access Exception Fetch/Store Indication */
	unsigned long      : 6;
	unsigned long b60  : 1;
	unsigned long b61  : 1;
	unsigned long as   : 2;  /* ASCE Identifier */
};

/* Values of the fetch/store-indication (fsi) field. */
enum {
	FSI_UNKNOWN = 0, /* Unknown whether fetch or store */
	FSI_STORE   = 1, /* Exception was due to store operation */
	FSI_FETCH   = 2  /* Exception was due to fetch operation */
};

/* Origin of a protection exception, used to fill the TEID bits. */
enum prot_type {
	PROT_TYPE_LA   = 0, /* low-address protection */
	PROT_TYPE_KEYC = 1, /* key-controlled protection */
	PROT_TYPE_ALC  = 2, /* access-list-controlled protection */
	PROT_TYPE_DAT  = 3, /* DAT protection */
};
/*
 * trans_exc - prepare vcpu->arch.pgm for an access exception
 * @vcpu: virtual cpu
 * @code: program interruption code (PGM_*)
 * @gva: faulting guest virtual address
 * @ar: access register number, stored as exc_access_id where defined
 * @mode: access mode, used for the fetch/store indication
 * @prot: protection-exception origin, only used for PGM_PROTECTION
 *
 * Fills in the program-interrupt information to be injected for the
 * given exception and returns @code, so callers can simply write
 * "return trans_exc(...)". Codes not listed in the switch get only
 * pgm->code set.
 */
static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
		     ar_t ar, enum gacc_mode mode, enum prot_type prot)
{
	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
	struct trans_exc_code_bits *tec;

	memset(pgm, 0, sizeof(*pgm));
	pgm->code = code;
	tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code;

	switch (code) {
	case PGM_PROTECTION:
		switch (prot) {
		case PROT_TYPE_ALC:
			tec->b60 = 1;
			/* FALL THROUGH */
		case PROT_TYPE_DAT:
			tec->b61 = 1;
			break;
		default: /* LA and KEYC set b61 to 0, other params undefined */
			return code;
		}
		/* FALL THROUGH */
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		/*
		 * op_access_id only applies to MOVE_PAGE -> set bit 61
		 * exc_access_id has to be set to 0 for some instructions. Both
		 * cases have to be handled by the caller.
		 */
		tec->addr = gva >> PAGE_SHIFT;
		tec->fsi = mode == GACC_STORE ? FSI_STORE : FSI_FETCH;
		tec->as = psw_bits(vcpu->arch.sie_block->gpsw).as;
		/* FALL THROUGH */
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_ASTE_SEQUENCE:
	case PGM_EXTENDED_AUTHORITY:
		/*
		 * We can always store exc_access_id, as it is
		 * undefined for non-ar cases. It is undefined for
		 * most DAT protection exceptions.
		 */
		pgm->exc_access_id = ar;
		break;
	}
	return code;
}
/*
 * get_vcpu_asce - determine the effective ASCE for an access
 * @vcpu: virtual cpu
 * @asce: where the resulting ASCE is stored
 * @ga: guest address, forwarded to trans_exc on AR-translation failure
 * @ar: access register number for access-register mode
 * @mode: access mode
 *
 * Picks the ASCE according to the PSW address-space control: primary
 * (cr1), secondary (cr7), home (cr13) or access-register translation.
 * With DAT off a real-space ASCE is synthesized.
 *
 * Returns zero on success, a positive program interruption code (with
 * vcpu->arch.pgm set up) or a negative error code.
 */
static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce,
			 unsigned long ga, ar_t ar, enum gacc_mode mode)
{
	int rc;
	struct psw_bits psw = psw_bits(vcpu->arch.sie_block->gpsw);

	/* DAT off: use a real-space designation */
	if (!psw.t) {
		asce->val = 0;
		asce->r = 1;
		return 0;
	}

	/* instruction fetch is done from primary space, unless in home space */
	if (mode == GACC_IFETCH)
		psw.as = psw.as == PSW_AS_HOME ? PSW_AS_HOME : PSW_AS_PRIMARY;

	switch (psw.as) {
	case PSW_AS_PRIMARY:
		asce->val = vcpu->arch.sie_block->gcr[1];
		return 0;
	case PSW_AS_SECONDARY:
		asce->val = vcpu->arch.sie_block->gcr[7];
		return 0;
	case PSW_AS_HOME:
		asce->val = vcpu->arch.sie_block->gcr[13];
		return 0;
	case PSW_AS_ACCREG:
		rc = ar_translation(vcpu, asce, ar, mode);
		if (rc > 0)
			return trans_exc(vcpu, rc, ga, ar, mode, PROT_TYPE_ALC);
		return rc;
	}
	return 0;
}
  514. static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
  515. {
  516. return kvm_read_guest(kvm, gpa, val, sizeof(*val));
  517. }
/**
 * guest_translate - translate a guest virtual into a guest absolute address
 * @vcpu: virtual cpu
 * @gva: guest virtual address
 * @gpa: points to where guest physical (absolute) address should be stored
 * @asce: effective asce
 * @mode: indicates the access mode to be used
 *
 * Translate a guest virtual address into a guest absolute address by means
 * of dynamic address translation as specified by the architecture.
 * If the resulting absolute address is not available in the configuration
 * an addressing exception is indicated and @gpa will not be changed.
 *
 * Returns: - zero on success; @gpa contains the resulting absolute address
 *	    - a negative value if guest access failed due to e.g. broken
 *	      guest mapping
 *	    - a positive value if an access exception happened. In this case
 *	      the returned value is the program interruption code as defined
 *	      by the architecture
 *
 * NOTE(review): the return type is unsigned long although negative error
 * codes are returned; callers assign the result to an int so this works,
 * but an int return type would be clearer — confirm before changing.
 */
static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
				     unsigned long *gpa, const union asce asce,
				     enum gacc_mode mode)
{
	union vaddress vaddr = {.addr = gva};
	union raddress raddr = {.addr = gva};
	union page_table_entry pte;
	int dat_protection = 0;
	union ctlreg0 ctlreg0;
	unsigned long ptr;
	int edat1, edat2;

	ctlreg0.val = vcpu->arch.sie_block->gcr[0];
	/* enhanced-DAT: edat1 honors protection bits in higher-level
	 * entries and segment frames, edat2 additionally region frames */
	edat1 = ctlreg0.edat && test_kvm_facility(vcpu->kvm, 8);
	edat2 = edat1 && test_kvm_facility(vcpu->kvm, 78);
	/* real-space designation: no table walk, just prefixing */
	if (asce.r)
		goto real_address;
	ptr = asce.origin * 4096;
	/* first step: validate the indices against the top-level table
	 * length and compute the address of the first table entry */
	switch (asce.dt) {
	case ASCE_TYPE_REGION1:
		if (vaddr.rfx01 > asce.tl)
			return PGM_REGION_FIRST_TRANS;
		ptr += vaddr.rfx * 8;
		break;
	case ASCE_TYPE_REGION2:
		if (vaddr.rfx)
			return PGM_ASCE_TYPE;
		if (vaddr.rsx01 > asce.tl)
			return PGM_REGION_SECOND_TRANS;
		ptr += vaddr.rsx * 8;
		break;
	case ASCE_TYPE_REGION3:
		if (vaddr.rfx || vaddr.rsx)
			return PGM_ASCE_TYPE;
		if (vaddr.rtx01 > asce.tl)
			return PGM_REGION_THIRD_TRANS;
		ptr += vaddr.rtx * 8;
		break;
	case ASCE_TYPE_SEGMENT:
		if (vaddr.rfx || vaddr.rsx || vaddr.rtx)
			return PGM_ASCE_TYPE;
		if (vaddr.sx01 > asce.tl)
			return PGM_SEGMENT_TRANSLATION;
		ptr += vaddr.sx * 8;
		break;
	}
	/* second step: walk the tables; each case falls through to the
	 * next lower table level */
	switch (asce.dt) {
	case ASCE_TYPE_REGION1: {
		union region1_table_entry rfte;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &rfte.val))
			return -EFAULT;
		if (rfte.i)
			return PGM_REGION_FIRST_TRANS;
		if (rfte.tt != TABLE_TYPE_REGION1)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rsx01 < rfte.tf || vaddr.rsx01 > rfte.tl)
			return PGM_REGION_SECOND_TRANS;
		if (edat1)
			dat_protection |= rfte.p;
		ptr = rfte.rto * 4096 + vaddr.rsx * 8;
	}
		/* fallthrough */
	case ASCE_TYPE_REGION2: {
		union region2_table_entry rste;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &rste.val))
			return -EFAULT;
		if (rste.i)
			return PGM_REGION_SECOND_TRANS;
		if (rste.tt != TABLE_TYPE_REGION2)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rtx01 < rste.tf || vaddr.rtx01 > rste.tl)
			return PGM_REGION_THIRD_TRANS;
		if (edat1)
			dat_protection |= rste.p;
		ptr = rste.rto * 4096 + vaddr.rtx * 8;
	}
		/* fallthrough */
	case ASCE_TYPE_REGION3: {
		union region3_table_entry rtte;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &rtte.val))
			return -EFAULT;
		if (rtte.i)
			return PGM_REGION_THIRD_TRANS;
		if (rtte.tt != TABLE_TYPE_REGION3)
			return PGM_TRANSLATION_SPEC;
		if (rtte.cr && asce.p && edat2)
			return PGM_TRANSLATION_SPEC;
		/* format control set: entry maps a region frame directly */
		if (rtte.fc && edat2) {
			dat_protection |= rtte.fc1.p;
			raddr.rfaa = rtte.fc1.rfaa;
			goto absolute_address;
		}
		if (vaddr.sx01 < rtte.fc0.tf)
			return PGM_SEGMENT_TRANSLATION;
		if (vaddr.sx01 > rtte.fc0.tl)
			return PGM_SEGMENT_TRANSLATION;
		if (edat1)
			dat_protection |= rtte.fc0.p;
		ptr = rtte.fc0.sto * 4096 + vaddr.sx * 8;
	}
		/* fallthrough */
	case ASCE_TYPE_SEGMENT: {
		union segment_table_entry ste;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &ste.val))
			return -EFAULT;
		if (ste.i)
			return PGM_SEGMENT_TRANSLATION;
		if (ste.tt != TABLE_TYPE_SEGMENT)
			return PGM_TRANSLATION_SPEC;
		if (ste.cs && asce.p)
			return PGM_TRANSLATION_SPEC;
		/* format control set: entry maps a segment frame directly */
		if (ste.fc && edat1) {
			dat_protection |= ste.fc1.p;
			raddr.sfaa = ste.fc1.sfaa;
			goto absolute_address;
		}
		dat_protection |= ste.fc0.p;
		ptr = ste.fc0.pto * 2048 + vaddr.px * 8;
	}
	}
	/* last step: look up the page table entry */
	if (kvm_is_error_gpa(vcpu->kvm, ptr))
		return PGM_ADDRESSING;
	if (deref_table(vcpu->kvm, ptr, &pte.val))
		return -EFAULT;
	if (pte.i)
		return PGM_PAGE_TRANSLATION;
	if (pte.z)
		return PGM_TRANSLATION_SPEC;
	/* change-recording override requires edat1 */
	if (pte.co && !edat1)
		return PGM_TRANSLATION_SPEC;
	dat_protection |= pte.p;
	raddr.pfra = pte.pfra;
real_address:
	raddr.addr = kvm_s390_real_to_abs(vcpu, raddr.addr);
absolute_address:
	if (mode == GACC_STORE && dat_protection)
		return PGM_PROTECTION;
	if (kvm_is_error_gpa(vcpu->kvm, raddr.addr))
		return PGM_ADDRESSING;
	*gpa = raddr.addr;
	return 0;
}
  687. static inline int is_low_address(unsigned long ga)
  688. {
  689. /* Check for address ranges 0..511 and 4096..4607 */
  690. return (ga & ~0x11fful) == 0;
  691. }
  692. static int low_address_protection_enabled(struct kvm_vcpu *vcpu,
  693. const union asce asce)
  694. {
  695. union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};
  696. psw_t *psw = &vcpu->arch.sie_block->gpsw;
  697. if (!ctlreg0.lap)
  698. return 0;
  699. if (psw_bits(*psw).t && asce.p)
  700. return 0;
  701. return 1;
  702. }
/*
 * guest_page_range - translate a range of guest pages
 * @vcpu: virtual cpu
 * @ga: guest address, start of the range
 * @ar: access register number for access-register mode
 * @pages: array receiving one absolute address per page
 * @nr_pages: number of pages in the range
 * @asce: effective asce for the translation
 * @mode: access mode
 *
 * Translates each page of the range and stores the resulting absolute
 * addresses in @pages, setting up the proper access exception on failure.
 *
 * Returns zero on success, a negative error code, or the positive
 * program interruption code returned by trans_exc.
 */
static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar,
			    unsigned long *pages, unsigned long nr_pages,
			    const union asce asce, enum gacc_mode mode)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	int lap_enabled, rc = 0;

	lap_enabled = low_address_protection_enabled(vcpu, asce);
	while (nr_pages) {
		ga = kvm_s390_logical_to_effective(vcpu, ga);
		/* low-address protection applies to stores only */
		if (mode == GACC_STORE && lap_enabled && is_low_address(ga))
			return trans_exc(vcpu, PGM_PROTECTION, ga, ar, mode,
					 PROT_TYPE_LA);
		ga &= PAGE_MASK;
		if (psw_bits(*psw).t) {
			rc = guest_translate(vcpu, ga, pages, asce, mode);
			/* negative values are internal errors, pass through */
			if (rc < 0)
				return rc;
		} else {
			/* DAT off: only prefixing applies */
			*pages = kvm_s390_real_to_abs(vcpu, ga);
			if (kvm_is_error_gpa(vcpu->kvm, *pages))
				rc = PGM_ADDRESSING;
		}
		if (rc)
			return trans_exc(vcpu, rc, ga, ar, mode, PROT_TYPE_DAT);
		ga += PAGE_SIZE;
		pages++;
		nr_pages--;
	}
	return 0;
}
/*
 * access_guest - copy data to/from a guest logical address range
 * @vcpu: virtual cpu
 * @ga: guest logical start address
 * @ar: access register number for access-register mode
 * @data: source/destination buffer in kernel space
 * @len: number of bytes to copy
 * @mode: access mode, GACC_STORE writes to the guest, otherwise reads
 *
 * All pages of the range are translated up front, so an access exception
 * anywhere in the range is reported before any data is copied. The IPTE
 * lock is held over the copy when DAT is on and the ASCE is not a
 * real-space designation.
 *
 * Returns zero on success, a negative error code, or a positive program
 * interruption code (with vcpu->arch.pgm set up).
 */
int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
		 unsigned long len, enum gacc_mode mode)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	unsigned long _len, nr_pages, gpa, idx;
	unsigned long pages_array[2];
	unsigned long *pages;
	int need_ipte_lock;
	union asce asce;
	int rc;

	if (!len)
		return 0;
	ga = kvm_s390_logical_to_effective(vcpu, ga);
	rc = get_vcpu_asce(vcpu, &asce, ga, ar, mode);
	if (rc)
		return rc;
	/* number of 4K pages touched by [ga, ga + len) */
	nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1;
	pages = pages_array;
	/* small ranges use the on-stack array, larger ones vmalloc */
	if (nr_pages > ARRAY_SIZE(pages_array))
		pages = vmalloc(nr_pages * sizeof(unsigned long));
	if (!pages)
		return -ENOMEM;
	need_ipte_lock = psw_bits(*psw).t && !asce.r;
	if (need_ipte_lock)
		ipte_lock(vcpu);
	rc = guest_page_range(vcpu, ga, ar, pages, nr_pages, asce, mode);
	for (idx = 0; idx < nr_pages && !rc; idx++) {
		/* the page offset is non-zero only for the first page */
		gpa = *(pages + idx) + (ga & ~PAGE_MASK);
		_len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
		if (mode == GACC_STORE)
			rc = kvm_write_guest(vcpu->kvm, gpa, data, _len);
		else
			rc = kvm_read_guest(vcpu->kvm, gpa, data, _len);
		len -= _len;
		ga += _len;
		data += _len;
	}
	if (need_ipte_lock)
		ipte_unlock(vcpu);
	if (nr_pages > ARRAY_SIZE(pages_array))
		vfree(pages);
	return rc;
}
  776. int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
  777. void *data, unsigned long len, enum gacc_mode mode)
  778. {
  779. unsigned long _len, gpa;
  780. int rc = 0;
  781. while (len && !rc) {
  782. gpa = kvm_s390_real_to_abs(vcpu, gra);
  783. _len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
  784. if (mode)
  785. rc = write_guest_abs(vcpu, gpa, data, _len);
  786. else
  787. rc = read_guest_abs(vcpu, gpa, data, _len);
  788. len -= _len;
  789. gra += _len;
  790. data += _len;
  791. }
  792. return rc;
  793. }
  794. /**
  795. * guest_translate_address - translate guest logical into guest absolute address
  796. *
  797. * Parameter semantics are the same as the ones from guest_translate.
  798. * The memory contents at the guest address are not changed.
  799. *
  800. * Note: The IPTE lock is not taken during this function, so the caller
  801. * has to take care of this.
  802. */
  803. int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
  804. unsigned long *gpa, enum gacc_mode mode)
  805. {
  806. psw_t *psw = &vcpu->arch.sie_block->gpsw;
  807. union asce asce;
  808. int rc;
  809. gva = kvm_s390_logical_to_effective(vcpu, gva);
  810. rc = get_vcpu_asce(vcpu, &asce, gva, ar, mode);
  811. if (rc)
  812. return rc;
  813. if (is_low_address(gva) && low_address_protection_enabled(vcpu, asce)) {
  814. if (mode == GACC_STORE)
  815. return trans_exc(vcpu, PGM_PROTECTION, gva, 0,
  816. mode, PROT_TYPE_LA);
  817. }
  818. if (psw_bits(*psw).t && !asce.r) { /* Use DAT? */
  819. rc = guest_translate(vcpu, gva, gpa, asce, mode);
  820. if (rc > 0)
  821. return trans_exc(vcpu, rc, gva, 0, mode, PROT_TYPE_DAT);
  822. } else {
  823. *gpa = kvm_s390_real_to_abs(vcpu, gva);
  824. if (kvm_is_error_gpa(vcpu->kvm, *gpa))
  825. return trans_exc(vcpu, rc, gva, PGM_ADDRESSING, mode, 0);
  826. }
  827. return rc;
  828. }
  829. /**
  830. * check_gva_range - test a range of guest virtual addresses for accessibility
  831. */
  832. int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
  833. unsigned long length, enum gacc_mode mode)
  834. {
  835. unsigned long gpa;
  836. unsigned long currlen;
  837. int rc = 0;
  838. ipte_lock(vcpu);
  839. while (length > 0 && !rc) {
  840. currlen = min(length, PAGE_SIZE - (gva % PAGE_SIZE));
  841. rc = guest_translate_address(vcpu, gva, ar, &gpa, mode);
  842. gva += currlen;
  843. length -= currlen;
  844. }
  845. ipte_unlock(vcpu);
  846. return rc;
  847. }
  848. /**
  849. * kvm_s390_check_low_addr_prot_real - check for low-address protection
  850. * @gra: Guest real address
  851. *
  852. * Checks whether an address is subject to low-address protection and set
  853. * up vcpu->arch.pgm accordingly if necessary.
  854. *
  855. * Return: 0 if no protection exception, or PGM_PROTECTION if protected.
  856. */
  857. int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra)
  858. {
  859. union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};
  860. if (!ctlreg0.lap || !is_low_address(gra))
  861. return 0;
  862. return trans_exc(vcpu, PGM_PROTECTION, gra, 0, GACC_STORE, PROT_TYPE_LA);
  863. }
/**
 * kvm_s390_shadow_tables - walk the guest page table and create shadow tables
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @pgt: pointer to the page table address result
 * @dat_protection: set to 1 if any entry on the walk was write-protected
 * @fake: pgt references contiguous guest memory block, not a pgtable
 *
 * Walks the guest's own DAT tables (read via the parent gmap) for @saddr
 * and mirrors every visited level into the shadow gmap @sg. Returns 0 on
 * success, a positive program interruption code if the guest tables would
 * cause an exception, or a negative error from the gmap helpers.
 */
static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
				  unsigned long *pgt, int *dat_protection,
				  int *fake)
{
	struct gmap *parent;
	union asce asce;
	union vaddress vaddr;
	unsigned long ptr;
	int rc;

	*fake = 0;
	*dat_protection = 0;
	parent = sg->parent;
	vaddr.addr = saddr;
	asce.val = sg->orig_asce;
	ptr = asce.origin * 4096;
	if (asce.r) {
		/*
		 * Real-space designation: there are no guest tables to walk.
		 * Fake a region-first table over a contiguous block at 0.
		 */
		*fake = 1;
		ptr = 0;
		asce.dt = ASCE_TYPE_REGION1;
	}
	/* Check the address against the table length of the top level. */
	switch (asce.dt) {
	case ASCE_TYPE_REGION1:
		if (vaddr.rfx01 > asce.tl && !*fake)
			return PGM_REGION_FIRST_TRANS;
		break;
	case ASCE_TYPE_REGION2:
		if (vaddr.rfx)
			return PGM_ASCE_TYPE;
		if (vaddr.rsx01 > asce.tl)
			return PGM_REGION_SECOND_TRANS;
		break;
	case ASCE_TYPE_REGION3:
		if (vaddr.rfx || vaddr.rsx)
			return PGM_ASCE_TYPE;
		if (vaddr.rtx01 > asce.tl)
			return PGM_REGION_THIRD_TRANS;
		break;
	case ASCE_TYPE_SEGMENT:
		if (vaddr.rfx || vaddr.rsx || vaddr.rtx)
			return PGM_ASCE_TYPE;
		if (vaddr.sx01 > asce.tl)
			return PGM_SEGMENT_TRANSLATION;
		break;
	}

	/*
	 * Walk downwards from the ASCE's designation type. Each case shadows
	 * its own level and deliberately falls through to the next lower
	 * level. In fake mode the "table entry" is synthesized from the
	 * virtual address instead of being read from guest memory.
	 */
	switch (asce.dt) {
	case ASCE_TYPE_REGION1: {
		union region1_table_entry rfte;

		if (*fake) {
			/* offset in 16E guest memory block */
			ptr += (unsigned long) vaddr.rfx << 53;
			rfte.val = ptr;
			goto shadow_r2t;
		}
		rc = gmap_read_table(parent, ptr + vaddr.rfx * 8, &rfte.val);
		if (rc)
			return rc;
		if (rfte.i)
			return PGM_REGION_FIRST_TRANS;
		if (rfte.tt != TABLE_TYPE_REGION1)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rsx01 < rfte.tf || vaddr.rsx01 > rfte.tl)
			return PGM_REGION_SECOND_TRANS;
		/* entry-level protection bits are only valid with EDAT1 */
		if (sg->edat_level >= 1)
			*dat_protection |= rfte.p;
		ptr = rfte.rto << 12UL;
shadow_r2t:
		rc = gmap_shadow_r2t(sg, saddr, rfte.val, *fake);
		if (rc)
			return rc;
		/* fallthrough */
	}
	case ASCE_TYPE_REGION2: {
		union region2_table_entry rste;

		if (*fake) {
			/* offset in 8P guest memory block */
			ptr += (unsigned long) vaddr.rsx << 42;
			rste.val = ptr;
			goto shadow_r3t;
		}
		rc = gmap_read_table(parent, ptr + vaddr.rsx * 8, &rste.val);
		if (rc)
			return rc;
		if (rste.i)
			return PGM_REGION_SECOND_TRANS;
		if (rste.tt != TABLE_TYPE_REGION2)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rtx01 < rste.tf || vaddr.rtx01 > rste.tl)
			return PGM_REGION_THIRD_TRANS;
		if (sg->edat_level >= 1)
			*dat_protection |= rste.p;
		ptr = rste.rto << 12UL;
shadow_r3t:
		/* propagate the protection collected on the way down */
		rste.p |= *dat_protection;
		rc = gmap_shadow_r3t(sg, saddr, rste.val, *fake);
		if (rc)
			return rc;
		/* fallthrough */
	}
	case ASCE_TYPE_REGION3: {
		union region3_table_entry rtte;

		if (*fake) {
			/* offset in 4T guest memory block */
			ptr += (unsigned long) vaddr.rtx << 31;
			rtte.val = ptr;
			goto shadow_sgt;
		}
		rc = gmap_read_table(parent, ptr + vaddr.rtx * 8, &rtte.val);
		if (rc)
			return rc;
		if (rtte.i)
			return PGM_REGION_THIRD_TRANS;
		if (rtte.tt != TABLE_TYPE_REGION3)
			return PGM_TRANSLATION_SPEC;
		if (rtte.cr && asce.p && sg->edat_level >= 2)
			return PGM_TRANSLATION_SPEC;
		if (rtte.fc && sg->edat_level >= 2) {
			/* large (2G) frame: fake the segment table below it */
			*dat_protection |= rtte.fc0.p;
			*fake = 1;
			ptr = rtte.fc1.rfaa << 31UL;
			rtte.val = ptr;
			goto shadow_sgt;
		}
		if (vaddr.sx01 < rtte.fc0.tf || vaddr.sx01 > rtte.fc0.tl)
			return PGM_SEGMENT_TRANSLATION;
		if (sg->edat_level >= 1)
			*dat_protection |= rtte.fc0.p;
		ptr = rtte.fc0.sto << 12UL;
shadow_sgt:
		rtte.fc0.p |= *dat_protection;
		rc = gmap_shadow_sgt(sg, saddr, rtte.val, *fake);
		if (rc)
			return rc;
		/* fallthrough */
	}
	case ASCE_TYPE_SEGMENT: {
		union segment_table_entry ste;

		if (*fake) {
			/* offset in 2G guest memory block */
			ptr += (unsigned long) vaddr.sx << 20;
			ste.val = ptr;
			goto shadow_pgt;
		}
		rc = gmap_read_table(parent, ptr + vaddr.sx * 8, &ste.val);
		if (rc)
			return rc;
		if (ste.i)
			return PGM_SEGMENT_TRANSLATION;
		if (ste.tt != TABLE_TYPE_SEGMENT)
			return PGM_TRANSLATION_SPEC;
		if (ste.cs && asce.p)
			return PGM_TRANSLATION_SPEC;
		*dat_protection |= ste.fc0.p;
		if (ste.fc && sg->edat_level >= 1) {
			/* large (1M) page with EDAT1: fake the page table */
			*fake = 1;
			ptr = ste.fc1.sfaa << 20UL;
			ste.val = ptr;
			goto shadow_pgt;
		}
		ptr = ste.fc0.pto << 11UL;
shadow_pgt:
		ste.fc0.p |= *dat_protection;
		rc = gmap_shadow_pgt(sg, saddr, ste.val, *fake);
		if (rc)
			return rc;
	}
	}
	/* Return the parent address of the page table */
	*pgt = ptr;
	return 0;
}
/**
 * kvm_s390_shadow_fault - handle fault on a shadow page table
 * @vcpu: virtual cpu
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 *
 * Returns: - 0 if the shadow fault was successfully resolved
 *	    - > 0 (pgm exception code) on exceptions while faulting
 *	    - -EAGAIN if the caller can retry immediately
 *	    - -EFAULT when accessing invalid guest addresses
 *	    - -ENOMEM if out of memory
 */
int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
			  unsigned long saddr)
{
	union vaddress vaddr;
	union page_table_entry pte;
	unsigned long pgt;
	int dat_protection, fake;
	int rc;

	down_read(&sg->mm->mmap_sem);
	/*
	 * We don't want any guest-2 tables to change - so the parent
	 * tables/pointers we read stay valid - unshadowing is however
	 * always possible - only guest_table_lock protects us.
	 */
	ipte_lock(vcpu);

	/* fast path: the page table for saddr may already be shadowed */
	rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection, &fake);
	if (rc)
		rc = kvm_s390_shadow_tables(sg, saddr, &pgt, &dat_protection,
					    &fake);

	vaddr.addr = saddr;
	if (fake) {
		/* offset in 1MB guest memory block */
		pte.val = pgt + ((unsigned long) vaddr.px << 12UL);
		goto shadow_page;
	}
	if (!rc)
		rc = gmap_read_table(sg->parent, pgt + vaddr.px * 8, &pte.val);
	if (!rc && pte.i)
		rc = PGM_PAGE_TRANSLATION;
	if (!rc && (pte.z || (pte.co && sg->edat_level < 1)))
		rc = PGM_TRANSLATION_SPEC;
shadow_page:
	/* protection found at any upper level makes the page read-only */
	pte.p |= dat_protection;
	if (!rc)
		rc = gmap_shadow_page(sg, saddr, __pte(pte.val));
	ipte_unlock(vcpu);
	up_read(&sg->mm->mmap_sem);
	return rc;
}