
/*
 * kvm_vcpu.c: handling all virtual cpu related things.
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Shaofan Li (Susue Li) <susie.li@intel.com>
 * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 * Xiantao Zhang <xiantao.zhang@intel.com>
 */

#include <linux/kvm_host.h>
#include <linux/types.h>

#include <asm/processor.h>
#include <asm/ia64regs.h>
#include <asm/gcc_intrin.h>
#include <asm/kregs.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

#include "asm-offsets.h"
#include "vcpu.h"

/*
 * Special notes:
 * - Index by it/dt/rt sequence
 * - Only existing mode transitions are allowed in this table
 * - RSE is placed at lazy mode when emulating guest partial mode
 * - If gva happens to be rr0 and rr4, only allowed case is identity
 *   mapping (gva=gpa), or panic! (How?)
 */
int mm_switch_table[8][8] = {
	/* 2004/09/12(Kevin): Allow switch to self */
	/*
	 * (it,dt,rt): (0,0,0) -> (1,1,1)
	 * This kind of transition usually occurs in the very early
	 * stage of Linux boot up procedure. Another case is in efi
	 * and pal calls. (see "arch/ia64/kernel/head.S")
	 *
	 * (it,dt,rt): (0,0,0) -> (0,1,1)
	 * This kind of transition is found when OSYa exits efi boot
	 * service. Due to gva = gpa in this case (Same region),
	 * data access can be satisfied though itlb entry for physical
	 * emulation is hit.
	 */
	{SW_SELF, 0, 0, SW_NOP, 0, 0, 0, SW_P2V},
	{0, 0, 0, 0, 0, 0, 0, 0},
	{0, 0, 0, 0, 0, 0, 0, 0},
	/*
	 * (it,dt,rt): (0,1,1) -> (1,1,1)
	 * This kind of transition is found in OSYa.
	 *
	 * (it,dt,rt): (0,1,1) -> (0,0,0)
	 * This kind of transition is found in OSYa.
	 */
	{SW_NOP, 0, 0, SW_SELF, 0, 0, 0, SW_P2V},
	/* (1,0,0)->(1,1,1) */
	{0, 0, 0, 0, 0, 0, 0, SW_P2V},
	/*
	 * (it,dt,rt): (1,0,1) -> (1,1,1)
	 * This kind of transition usually occurs when Linux returns
	 * from the low level TLB miss handlers.
	 * (see "arch/ia64/kernel/ivt.S")
	 */
	{0, 0, 0, 0, 0, SW_SELF, 0, SW_P2V},
	{0, 0, 0, 0, 0, 0, 0, 0},
	/*
	 * (it,dt,rt): (1,1,1) -> (1,0,1)
	 * This kind of transition usually occurs in Linux low level
	 * TLB miss handler. (see "arch/ia64/kernel/ivt.S")
	 *
	 * (it,dt,rt): (1,1,1) -> (0,0,0)
	 * This kind of transition usually occurs in pal and efi calls,
	 * which requires running in physical mode.
	 * (see "arch/ia64/kernel/head.S")
	 * (1,1,1)->(1,0,0)
	 */
	{SW_V2P, 0, 0, 0, SW_V2P, SW_V2P, 0, SW_SELF},
};
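
/*
 * Worked example (a sketch; it assumes MODE_IND() packs the psr.it/dt/rt
 * bits into a 3-bit index, as the (it,dt,rt) comments above describe):
 * a guest in fully virtual mode (1,1,1) that clears it/dt/rt, e.g. before
 * a PAL call, looks up
 *
 *	mm_switch_table[7][0] == SW_V2P
 *
 * so switch_mm_mode() calls switch_to_physical_rid(), while the reverse
 * transition (0,0,0) -> (1,1,1) yields mm_switch_table[0][7] == SW_P2V.
 */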

void physical_mode_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mode_flags = GUEST_IN_PHY;
}

void switch_to_physical_rid(struct kvm_vcpu *vcpu)
{
	unsigned long psr;

	/* Save original virtual mode rr[0] and rr[4] */
	psr = ia64_clear_ic();
	ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_rr0);
	ia64_srlz_d();
	ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->arch.metaphysical_rr4);
	ia64_srlz_d();

	ia64_set_psr(psr);
	return;
}

void switch_to_virtual_rid(struct kvm_vcpu *vcpu)
{
	unsigned long psr;

	psr = ia64_clear_ic();
	ia64_set_rr(VRN0 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0);
	ia64_srlz_d();
	ia64_set_rr(VRN4 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4);
	ia64_srlz_d();
	ia64_set_psr(psr);
	return;
}

static int mm_switch_action(struct ia64_psr opsr, struct ia64_psr npsr)
{
	return mm_switch_table[MODE_IND(opsr)][MODE_IND(npsr)];
}

void switch_mm_mode(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
					struct ia64_psr new_psr)
{
	int act;

	act = mm_switch_action(old_psr, new_psr);
	switch (act) {
	case SW_V2P:
		/*printk("V -> P mode transition: (0x%lx -> 0x%lx)\n",
		old_psr.val, new_psr.val);*/
		switch_to_physical_rid(vcpu);
		/*
		 * Set rse to enforced lazy, to prevent active rse
		 * save/restore when in guest physical mode.
		 */
		vcpu->arch.mode_flags |= GUEST_IN_PHY;
		break;
	case SW_P2V:
		switch_to_virtual_rid(vcpu);
		/*
		 * recover old mode which is saved when entering
		 * guest physical mode
		 */
		vcpu->arch.mode_flags &= ~GUEST_IN_PHY;
		break;
	case SW_SELF:
		break;
	case SW_NOP:
		break;
	default:
		/* Sanity check */
		break;
	}
	return;
}

/*
 * In physical mode, inserting a tc/tr for region 0 or 4 uses RID[0] and
 * RID[4], which are used for physical mode emulation. However, what those
 * inserted tc/tr entries want is the rid for virtual mode, so the original
 * virtual rid needs to be restored before the insert.
 *
 * Operations which require such a switch include:
 * - insertions (itc.*, itr.*)
 * - purges (ptc.* and ptr.*)
 * - tpa
 * - tak
 * - thash?, ttag?
 * All of the above need the actual virtual rid for the destination entry.
 */
void check_mm_mode_switch(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
					struct ia64_psr new_psr)
{
	if ((old_psr.dt != new_psr.dt)
			|| (old_psr.it != new_psr.it)
			|| (old_psr.rt != new_psr.rt))
		switch_mm_mode(vcpu, old_psr, new_psr);

	return;
}

/*
 * In physical mode, inserting a tc/tr for region 0 or 4 uses RID[0] and
 * RID[4], which are used for physical mode emulation. However, what those
 * inserted tc/tr entries want is the rid for virtual mode, so the original
 * virtual rid needs to be restored before the insert.
 *
 * Operations which require such a switch include:
 * - insertions (itc.*, itr.*)
 * - purges (ptc.* and ptr.*)
 * - tpa
 * - tak
 * - thash?, ttag?
 * All of the above need the actual virtual rid for the destination entry.
 */
void prepare_if_physical_mode(struct kvm_vcpu *vcpu)
{
	if (is_physical_mode(vcpu)) {
		vcpu->arch.mode_flags |= GUEST_PHY_EMUL;
		switch_to_virtual_rid(vcpu);
	}
	return;
}

/* Recover always follows prepare */
void recover_if_physical_mode(struct kvm_vcpu *vcpu)
{
	if (is_physical_mode(vcpu))
		switch_to_physical_rid(vcpu);
	vcpu->arch.mode_flags &= ~GUEST_PHY_EMUL;
	return;
}

#define RPT(x)	((u16) &((struct kvm_pt_regs *)0)->x)

static u16 gr_info[32] = {
	0,	/* r0 is read-only : WE SHOULD NEVER GET THIS */
	RPT(r1), RPT(r2), RPT(r3),
	RPT(r4), RPT(r5), RPT(r6), RPT(r7),
	RPT(r8), RPT(r9), RPT(r10), RPT(r11),
	RPT(r12), RPT(r13), RPT(r14), RPT(r15),
	RPT(r16), RPT(r17), RPT(r18), RPT(r19),
	RPT(r20), RPT(r21), RPT(r22), RPT(r23),
	RPT(r24), RPT(r25), RPT(r26), RPT(r27),
	RPT(r28), RPT(r29), RPT(r30), RPT(r31)
};

#define IA64_FIRST_STACKED_GR	32
#define IA64_FIRST_ROTATING_FR	32

static inline unsigned long
rotate_reg(unsigned long sor, unsigned long rrb, unsigned long reg)
{
	reg += rrb;
	if (reg >= sor)
		reg -= sor;
	return reg;
}

/*
 * Return the (rotated) index of floating point register REGNUM
 * (REGNUM must be in the range 32-127; the result is in the
 * range 0-95).
 */
static inline unsigned long fph_index(struct kvm_pt_regs *regs,
						long regnum)
{
	unsigned long rrb_fr = (regs->cr_ifs >> 25) & 0x7f;
	return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR));
}
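
/*
 * Example: with rrb.fr == 10 in cr.ifs, guest f34 is rotating slot 2, so
 * fph_index() returns rotate_reg(96, 10, 2) == 12 and the access is
 * redirected to physical f44 (32 + 12) by the fp register accessors below.
 */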

/*
 * The inverse of the above: given bspstore and the number of
 * registers, calculate ar.bsp.
 */
static inline unsigned long *kvm_rse_skip_regs(unsigned long *addr,
							long num_regs)
{
	long delta = ia64_rse_slot_num(addr) + num_regs;
	int i = 0;

	if (num_regs < 0)
		delta -= 0x3e;
	if (delta < 0) {
		while (delta <= -0x3f) {
			i--;
			delta += 0x3f;
		}
	} else {
		while (delta >= 0x3f) {
			i++;
			delta -= 0x3f;
		}
	}
	return addr + num_regs + i;
}
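
/*
 * Illustrative example: the RSE places an RNAT collection slot after every
 * 63 register slots, and kvm_rse_skip_regs() accounts for those when it
 * steps num_regs slots away from addr. E.g. if ia64_rse_slot_num(addr) is
 * 60 and num_regs is 10, delta is 70, one RNAT slot is crossed (i == 1),
 * and the result is addr + 10 + 1.
 */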

static void get_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
					unsigned long *val, int *nat)
{
	unsigned long *bsp, *addr, *rnat_addr, *bspstore;
	unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
	unsigned long nat_mask;
	unsigned long old_rsc, new_rsc;
	long sof = (regs->cr_ifs) & 0x7f;
	long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
	long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
	long ridx = r1 - 32;

	if (ridx < sor)
		ridx = rotate_reg(sor, rrb_gr, ridx);

	old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
	new_rsc = old_rsc&(~(0x3));
	ia64_setreg(_IA64_REG_AR_RSC, new_rsc);

	bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
	bsp = kbs + (regs->loadrs >> 19);

	addr = kvm_rse_skip_regs(bsp, -sof + ridx);
	nat_mask = 1UL << ia64_rse_slot_num(addr);
	rnat_addr = ia64_rse_rnat_addr(addr);

	if (addr >= bspstore) {
		ia64_flushrs();
		ia64_mf();
		bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
	}
	*val = *addr;
	if (nat) {
		if (bspstore < rnat_addr)
			*nat = (int)!!(ia64_getreg(_IA64_REG_AR_RNAT)
							& nat_mask);
		else
			*nat = (int)!!((*rnat_addr) & nat_mask);
		ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
	}
}

void set_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
				unsigned long val, unsigned long nat)
{
	unsigned long *bsp, *bspstore, *addr, *rnat_addr;
	unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
	unsigned long nat_mask;
	unsigned long old_rsc, new_rsc, psr;
	unsigned long rnat;
	long sof = (regs->cr_ifs) & 0x7f;
	long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
	long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
	long ridx = r1 - 32;

	if (ridx < sor)
		ridx = rotate_reg(sor, rrb_gr, ridx);

	old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
	/* put RSC to lazy mode, and set loadrs 0 */
	new_rsc = old_rsc & (~0x3fff0003);
	ia64_setreg(_IA64_REG_AR_RSC, new_rsc);
	bsp = kbs + (regs->loadrs >> 19); /* 16 + 3 */

	addr = kvm_rse_skip_regs(bsp, -sof + ridx);
	nat_mask = 1UL << ia64_rse_slot_num(addr);
	rnat_addr = ia64_rse_rnat_addr(addr);

	local_irq_save(psr);
	bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
	if (addr >= bspstore) {
		ia64_flushrs();
		ia64_mf();
		*addr = val;
		bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
		rnat = ia64_getreg(_IA64_REG_AR_RNAT);
		if (bspstore < rnat_addr)
			rnat = rnat & (~nat_mask);
		else
			*rnat_addr = (*rnat_addr)&(~nat_mask);

		ia64_mf();
		ia64_loadrs();
		ia64_setreg(_IA64_REG_AR_RNAT, rnat);
	} else {
		rnat = ia64_getreg(_IA64_REG_AR_RNAT);
		*addr = val;
		if (bspstore < rnat_addr)
			rnat = rnat&(~nat_mask);
		else
			*rnat_addr = (*rnat_addr) & (~nat_mask);

		ia64_setreg(_IA64_REG_AR_BSPSTORE, (unsigned long)bspstore);
		ia64_setreg(_IA64_REG_AR_RNAT, rnat);
	}
	local_irq_restore(psr);
	ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
}

void getreg(unsigned long regnum, unsigned long *val,
				int *nat, struct kvm_pt_regs *regs)
{
	unsigned long addr, *unat;

	if (regnum >= IA64_FIRST_STACKED_GR) {
		get_rse_reg(regs, regnum, val, nat);
		return;
	}

	/*
	 * Now look at registers in [0-31] range and init correct UNAT
	 */
	addr = (unsigned long)regs;
	unat = &regs->eml_unat;

	addr += gr_info[regnum];

	*val = *(unsigned long *)addr;
	/*
	 * do it only when requested
	 */
	if (nat)
		*nat = (*unat >> ((addr >> 3) & 0x3f)) & 0x1UL;
}

void setreg(unsigned long regnum, unsigned long val,
			int nat, struct kvm_pt_regs *regs)
{
	unsigned long addr;
	unsigned long bitmask;
	unsigned long *unat;

	/*
	 * First takes care of stacked registers
	 */
	if (regnum >= IA64_FIRST_STACKED_GR) {
		set_rse_reg(regs, regnum, val, nat);
		return;
	}

	/*
	 * Now look at registers in [0-31] range and init correct UNAT
	 */
	addr = (unsigned long)regs;
	unat = &regs->eml_unat;
	/*
	 * add offset from base of struct
	 * and do it !
	 */
	addr += gr_info[regnum];

	*(unsigned long *)addr = val;

	/*
	 * We need to clear the corresponding UNAT bit to fully emulate the load
	 * UNAT bit_pos = GR[r3]{8:3} from EAS-2.4
	 */
	bitmask = 1UL << ((addr >> 3) & 0x3f);
	if (nat)
		*unat |= bitmask;
	else
		*unat &= ~bitmask;
}

u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg)
{
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	unsigned long val;

	if (!reg)
		return 0;
	getreg(reg, &val, 0, regs);
	return val;
}

void vcpu_set_gr(struct kvm_vcpu *vcpu, unsigned long reg, u64 value, int nat)
{
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	long sof = (regs->cr_ifs) & 0x7f;

	if (!reg)
		return;
	if (reg >= sof + 32)
		return;
	setreg(reg, value, nat, regs);	/* FIXME: handle NATs later*/
}
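
/*
 * Usage note: vcpu_get_gr()/vcpu_set_gr() are the entry points the emulation
 * handlers below use for guest general registers. Writes to r0 are silently
 * dropped, and so are writes outside the current register frame: with
 * cr.ifs.sof == 8, for instance, r32-r39 are valid targets but a write to
 * r40 is ignored (reg >= sof + 32).
 */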

void getfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
				struct kvm_pt_regs *regs)
{
	/* Take floating register rotation into consideration*/
	if (regnum >= IA64_FIRST_ROTATING_FR)
		regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);

#define CASE_FIXED_FP(reg)			\
	case (reg) :				\
		ia64_stf_spill(fpval, reg);	\
		break

	switch (regnum) {
		CASE_FIXED_FP(0);
		CASE_FIXED_FP(1);
		CASE_FIXED_FP(2);
		CASE_FIXED_FP(3);
		CASE_FIXED_FP(4);
		CASE_FIXED_FP(5);
		CASE_FIXED_FP(6);
		CASE_FIXED_FP(7);
		CASE_FIXED_FP(8);
		CASE_FIXED_FP(9);
		CASE_FIXED_FP(10);
		CASE_FIXED_FP(11);
		CASE_FIXED_FP(12);
		CASE_FIXED_FP(13);
		CASE_FIXED_FP(14);
		CASE_FIXED_FP(15);
		CASE_FIXED_FP(16);
		CASE_FIXED_FP(17);
		CASE_FIXED_FP(18);
		CASE_FIXED_FP(19);
		CASE_FIXED_FP(20);
		CASE_FIXED_FP(21);
		CASE_FIXED_FP(22);
		CASE_FIXED_FP(23);
		CASE_FIXED_FP(24);
		CASE_FIXED_FP(25);
		CASE_FIXED_FP(26);
		CASE_FIXED_FP(27);
		CASE_FIXED_FP(28);
		CASE_FIXED_FP(29);
		CASE_FIXED_FP(30);
		CASE_FIXED_FP(31);
		CASE_FIXED_FP(32);
		CASE_FIXED_FP(33);
		CASE_FIXED_FP(34);
		CASE_FIXED_FP(35);
		CASE_FIXED_FP(36);
		CASE_FIXED_FP(37);
		CASE_FIXED_FP(38);
		CASE_FIXED_FP(39);
		CASE_FIXED_FP(40);
		CASE_FIXED_FP(41);
		CASE_FIXED_FP(42);
		CASE_FIXED_FP(43);
		CASE_FIXED_FP(44);
		CASE_FIXED_FP(45);
		CASE_FIXED_FP(46);
		CASE_FIXED_FP(47);
		CASE_FIXED_FP(48);
		CASE_FIXED_FP(49);
		CASE_FIXED_FP(50);
		CASE_FIXED_FP(51);
		CASE_FIXED_FP(52);
		CASE_FIXED_FP(53);
		CASE_FIXED_FP(54);
		CASE_FIXED_FP(55);
		CASE_FIXED_FP(56);
		CASE_FIXED_FP(57);
		CASE_FIXED_FP(58);
		CASE_FIXED_FP(59);
		CASE_FIXED_FP(60);
		CASE_FIXED_FP(61);
		CASE_FIXED_FP(62);
		CASE_FIXED_FP(63);
		CASE_FIXED_FP(64);
		CASE_FIXED_FP(65);
		CASE_FIXED_FP(66);
		CASE_FIXED_FP(67);
		CASE_FIXED_FP(68);
		CASE_FIXED_FP(69);
		CASE_FIXED_FP(70);
		CASE_FIXED_FP(71);
		CASE_FIXED_FP(72);
		CASE_FIXED_FP(73);
		CASE_FIXED_FP(74);
		CASE_FIXED_FP(75);
		CASE_FIXED_FP(76);
		CASE_FIXED_FP(77);
		CASE_FIXED_FP(78);
		CASE_FIXED_FP(79);
		CASE_FIXED_FP(80);
		CASE_FIXED_FP(81);
		CASE_FIXED_FP(82);
		CASE_FIXED_FP(83);
		CASE_FIXED_FP(84);
		CASE_FIXED_FP(85);
		CASE_FIXED_FP(86);
		CASE_FIXED_FP(87);
		CASE_FIXED_FP(88);
		CASE_FIXED_FP(89);
		CASE_FIXED_FP(90);
		CASE_FIXED_FP(91);
		CASE_FIXED_FP(92);
		CASE_FIXED_FP(93);
		CASE_FIXED_FP(94);
		CASE_FIXED_FP(95);
		CASE_FIXED_FP(96);
		CASE_FIXED_FP(97);
		CASE_FIXED_FP(98);
		CASE_FIXED_FP(99);
		CASE_FIXED_FP(100);
		CASE_FIXED_FP(101);
		CASE_FIXED_FP(102);
		CASE_FIXED_FP(103);
		CASE_FIXED_FP(104);
		CASE_FIXED_FP(105);
		CASE_FIXED_FP(106);
		CASE_FIXED_FP(107);
		CASE_FIXED_FP(108);
		CASE_FIXED_FP(109);
		CASE_FIXED_FP(110);
		CASE_FIXED_FP(111);
		CASE_FIXED_FP(112);
		CASE_FIXED_FP(113);
		CASE_FIXED_FP(114);
		CASE_FIXED_FP(115);
		CASE_FIXED_FP(116);
		CASE_FIXED_FP(117);
		CASE_FIXED_FP(118);
		CASE_FIXED_FP(119);
		CASE_FIXED_FP(120);
		CASE_FIXED_FP(121);
		CASE_FIXED_FP(122);
		CASE_FIXED_FP(123);
		CASE_FIXED_FP(124);
		CASE_FIXED_FP(125);
		CASE_FIXED_FP(126);
		CASE_FIXED_FP(127);
	}
#undef CASE_FIXED_FP
}

void setfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
					struct kvm_pt_regs *regs)
{
	/* Take floating register rotation into consideration*/
	if (regnum >= IA64_FIRST_ROTATING_FR)
		regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);

#define CASE_FIXED_FP(reg)			\
	case (reg) :				\
		ia64_ldf_fill(reg, fpval);	\
		break

	switch (regnum) {
		CASE_FIXED_FP(2);
		CASE_FIXED_FP(3);
		CASE_FIXED_FP(4);
		CASE_FIXED_FP(5);
		CASE_FIXED_FP(6);
		CASE_FIXED_FP(7);
		CASE_FIXED_FP(8);
		CASE_FIXED_FP(9);
		CASE_FIXED_FP(10);
		CASE_FIXED_FP(11);
		CASE_FIXED_FP(12);
		CASE_FIXED_FP(13);
		CASE_FIXED_FP(14);
		CASE_FIXED_FP(15);
		CASE_FIXED_FP(16);
		CASE_FIXED_FP(17);
		CASE_FIXED_FP(18);
		CASE_FIXED_FP(19);
		CASE_FIXED_FP(20);
		CASE_FIXED_FP(21);
		CASE_FIXED_FP(22);
		CASE_FIXED_FP(23);
		CASE_FIXED_FP(24);
		CASE_FIXED_FP(25);
		CASE_FIXED_FP(26);
		CASE_FIXED_FP(27);
		CASE_FIXED_FP(28);
		CASE_FIXED_FP(29);
		CASE_FIXED_FP(30);
		CASE_FIXED_FP(31);
		CASE_FIXED_FP(32);
		CASE_FIXED_FP(33);
		CASE_FIXED_FP(34);
		CASE_FIXED_FP(35);
		CASE_FIXED_FP(36);
		CASE_FIXED_FP(37);
		CASE_FIXED_FP(38);
		CASE_FIXED_FP(39);
		CASE_FIXED_FP(40);
		CASE_FIXED_FP(41);
		CASE_FIXED_FP(42);
		CASE_FIXED_FP(43);
		CASE_FIXED_FP(44);
		CASE_FIXED_FP(45);
		CASE_FIXED_FP(46);
		CASE_FIXED_FP(47);
		CASE_FIXED_FP(48);
		CASE_FIXED_FP(49);
		CASE_FIXED_FP(50);
		CASE_FIXED_FP(51);
		CASE_FIXED_FP(52);
		CASE_FIXED_FP(53);
		CASE_FIXED_FP(54);
		CASE_FIXED_FP(55);
		CASE_FIXED_FP(56);
		CASE_FIXED_FP(57);
		CASE_FIXED_FP(58);
		CASE_FIXED_FP(59);
		CASE_FIXED_FP(60);
		CASE_FIXED_FP(61);
		CASE_FIXED_FP(62);
		CASE_FIXED_FP(63);
		CASE_FIXED_FP(64);
		CASE_FIXED_FP(65);
		CASE_FIXED_FP(66);
		CASE_FIXED_FP(67);
		CASE_FIXED_FP(68);
		CASE_FIXED_FP(69);
		CASE_FIXED_FP(70);
		CASE_FIXED_FP(71);
		CASE_FIXED_FP(72);
		CASE_FIXED_FP(73);
		CASE_FIXED_FP(74);
		CASE_FIXED_FP(75);
		CASE_FIXED_FP(76);
		CASE_FIXED_FP(77);
		CASE_FIXED_FP(78);
		CASE_FIXED_FP(79);
		CASE_FIXED_FP(80);
		CASE_FIXED_FP(81);
		CASE_FIXED_FP(82);
		CASE_FIXED_FP(83);
		CASE_FIXED_FP(84);
		CASE_FIXED_FP(85);
		CASE_FIXED_FP(86);
		CASE_FIXED_FP(87);
		CASE_FIXED_FP(88);
		CASE_FIXED_FP(89);
		CASE_FIXED_FP(90);
		CASE_FIXED_FP(91);
		CASE_FIXED_FP(92);
		CASE_FIXED_FP(93);
		CASE_FIXED_FP(94);
		CASE_FIXED_FP(95);
		CASE_FIXED_FP(96);
		CASE_FIXED_FP(97);
		CASE_FIXED_FP(98);
		CASE_FIXED_FP(99);
		CASE_FIXED_FP(100);
		CASE_FIXED_FP(101);
		CASE_FIXED_FP(102);
		CASE_FIXED_FP(103);
		CASE_FIXED_FP(104);
		CASE_FIXED_FP(105);
		CASE_FIXED_FP(106);
		CASE_FIXED_FP(107);
		CASE_FIXED_FP(108);
		CASE_FIXED_FP(109);
		CASE_FIXED_FP(110);
		CASE_FIXED_FP(111);
		CASE_FIXED_FP(112);
		CASE_FIXED_FP(113);
		CASE_FIXED_FP(114);
		CASE_FIXED_FP(115);
		CASE_FIXED_FP(116);
		CASE_FIXED_FP(117);
		CASE_FIXED_FP(118);
		CASE_FIXED_FP(119);
		CASE_FIXED_FP(120);
		CASE_FIXED_FP(121);
		CASE_FIXED_FP(122);
		CASE_FIXED_FP(123);
		CASE_FIXED_FP(124);
		CASE_FIXED_FP(125);
		CASE_FIXED_FP(126);
		CASE_FIXED_FP(127);
	}
}

void vcpu_get_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
						struct ia64_fpreg *val)
{
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	getfpreg(reg, val, regs);	/* FIXME: handle NATs later*/
}

void vcpu_set_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
						struct ia64_fpreg *val)
{
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	if (reg > 1)
		setfpreg(reg, val, regs);	/* FIXME: handle NATs later*/
}

/*
 * The Altix RTC is mapped specially here for the vmm module
 */
#define SN_RTC_BASE	(u64 *)(KVM_VMM_BASE+(1UL<<KVM_VMM_SHIFT))
static long kvm_get_itc(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
	struct kvm *kvm = (struct kvm *)KVM_VM_BASE;

	if (kvm->arch.is_sn2)
		return (*SN_RTC_BASE);
	else
#endif
		return ia64_getreg(_IA64_REG_AR_ITC);
}

/************************************************************************
 * lsapic timer
 ***********************************************************************/
u64 vcpu_get_itc(struct kvm_vcpu *vcpu)
{
	unsigned long guest_itc;

	guest_itc = VMX(vcpu, itc_offset) + kvm_get_itc(vcpu);

	if (guest_itc >= VMX(vcpu, last_itc)) {
		VMX(vcpu, last_itc) = guest_itc;
		return guest_itc;
	} else
		return VMX(vcpu, last_itc);
}
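
/*
 * Example of the offset arithmetic: vcpu_set_itc(vcpu, X) below records
 * itc_offset = X - host_itc, so the next vcpu_get_itc() returns roughly X
 * plus the cycles elapsed since then; last_itc only keeps the value the
 * guest observes from moving backwards.
 */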

static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val);

static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
{
	struct kvm_vcpu *v;
	struct kvm *kvm;
	int i;
	long itc_offset = val - kvm_get_itc(vcpu);
	unsigned long vitv = VCPU(vcpu, itv);

	kvm = (struct kvm *)KVM_VM_BASE;

	if (kvm_vcpu_is_bsp(vcpu)) {
		for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) {
			v = (struct kvm_vcpu *)((char *)vcpu +
					sizeof(struct kvm_vcpu_data) * i);
			VMX(v, itc_offset) = itc_offset;
			VMX(v, last_itc) = 0;
		}
	}
	VMX(vcpu, last_itc) = 0;
	if (VCPU(vcpu, itm) <= val) {
		VMX(vcpu, itc_check) = 0;
		vcpu_unpend_interrupt(vcpu, vitv);
	} else {
		VMX(vcpu, itc_check) = 1;
		vcpu_set_itm(vcpu, VCPU(vcpu, itm));
	}
}

static inline u64 vcpu_get_itm(struct kvm_vcpu *vcpu)
{
	return ((u64)VCPU(vcpu, itm));
}

static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val)
{
	unsigned long vitv = VCPU(vcpu, itv);

	VCPU(vcpu, itm) = val;

	if (val > vcpu_get_itc(vcpu)) {
		VMX(vcpu, itc_check) = 1;
		vcpu_unpend_interrupt(vcpu, vitv);
		VMX(vcpu, timer_pending) = 0;
	} else
		VMX(vcpu, itc_check) = 0;
}

#define ITV_VECTOR(itv)		(itv&0xff)
#define ITV_IRQ_MASK(itv)	(itv&(1<<16))

static inline void vcpu_set_itv(struct kvm_vcpu *vcpu, u64 val)
{
	VCPU(vcpu, itv) = val;
	if (!ITV_IRQ_MASK(val) && vcpu->arch.timer_pending) {
		vcpu_pend_interrupt(vcpu, ITV_VECTOR(val));
		vcpu->arch.timer_pending = 0;
	}
}

static inline void vcpu_set_eoi(struct kvm_vcpu *vcpu, u64 val)
{
	int vec;

	vec = highest_inservice_irq(vcpu);
	if (vec == NULL_VECTOR)
		return;
	VMX(vcpu, insvc[vec >> 6]) &= ~(1UL << (vec & 63));
	VCPU(vcpu, eoi) = 0;
	vcpu->arch.irq_new_pending = 1;
}

/* See Table 5-8 in SDM vol2 for the definition */
int irq_masked(struct kvm_vcpu *vcpu, int h_pending, int h_inservice)
{
	union ia64_tpr vtpr;

	vtpr.val = VCPU(vcpu, tpr);

	if (h_inservice == NMI_VECTOR)
		return IRQ_MASKED_BY_INSVC;

	if (h_pending == NMI_VECTOR) {
		/* Non Maskable Interrupt */
		return IRQ_NO_MASKED;
	}

	if (h_inservice == ExtINT_VECTOR)
		return IRQ_MASKED_BY_INSVC;

	if (h_pending == ExtINT_VECTOR) {
		if (vtpr.mmi) {
			/* mask all external IRQ */
			return IRQ_MASKED_BY_VTPR;
		} else
			return IRQ_NO_MASKED;
	}

	if (is_higher_irq(h_pending, h_inservice)) {
		if (is_higher_class(h_pending, vtpr.mic + (vtpr.mmi << 4)))
			return IRQ_NO_MASKED;
		else
			return IRQ_MASKED_BY_VTPR;
	} else {
		return IRQ_MASKED_BY_INSVC;
	}
}
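
/*
 * Example (a sketch, assuming is_higher_class() compares vec >> 4 against
 * the class value it is given): with vtpr.mic == 3 and vtpr.mmi == 0, a
 * pending vector 0x35 (class 3) is reported as IRQ_MASKED_BY_VTPR, while
 * vector 0x45 (class 4) is IRQ_NO_MASKED provided it also outranks the
 * highest in-service vector.
 */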

void vcpu_pend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
{
	long spsr;
	int ret;

	local_irq_save(spsr);
	ret = test_and_set_bit(vec, &VCPU(vcpu, irr[0]));
	local_irq_restore(spsr);

	vcpu->arch.irq_new_pending = 1;
}

void vcpu_unpend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
{
	long spsr;
	int ret;

	local_irq_save(spsr);
	ret = test_and_clear_bit(vec, &VCPU(vcpu, irr[0]));
	local_irq_restore(spsr);
	if (ret) {
		vcpu->arch.irq_new_pending = 1;
		wmb();
	}
}

void update_vhpi(struct kvm_vcpu *vcpu, int vec)
{
	u64 vhpi;

	if (vec == NULL_VECTOR)
		vhpi = 0;
	else if (vec == NMI_VECTOR)
		vhpi = 32;
	else if (vec == ExtINT_VECTOR)
		vhpi = 16;
	else
		vhpi = vec >> 4;

	VCPU(vcpu, vhpi) = vhpi;
	if (VCPU(vcpu, vac).a_int)
		ia64_call_vsa(PAL_VPS_SET_PENDING_INTERRUPT,
				(u64)vcpu->arch.vpd, 0, 0, 0, 0, 0, 0);
}

u64 vcpu_get_ivr(struct kvm_vcpu *vcpu)
{
	int vec, h_inservice, mask;

	vec = highest_pending_irq(vcpu);
	h_inservice = highest_inservice_irq(vcpu);
	mask = irq_masked(vcpu, vec, h_inservice);
	if (vec == NULL_VECTOR || mask == IRQ_MASKED_BY_INSVC) {
		if (VCPU(vcpu, vhpi))
			update_vhpi(vcpu, NULL_VECTOR);
		return IA64_SPURIOUS_INT_VECTOR;
	}
	if (mask == IRQ_MASKED_BY_VTPR) {
		update_vhpi(vcpu, vec);
		return IA64_SPURIOUS_INT_VECTOR;
	}
	VMX(vcpu, insvc[vec >> 6]) |= (1UL << (vec & 63));
	vcpu_unpend_interrupt(vcpu, vec);
	return (u64)vec;
}

/**************************************************************************
  Privileged operation emulation routines
 **************************************************************************/
u64 vcpu_thash(struct kvm_vcpu *vcpu, u64 vadr)
{
	union ia64_pta vpta;
	union ia64_rr vrr;
	u64 pval;
	u64 vhpt_offset;

	vpta.val = vcpu_get_pta(vcpu);
	vrr.val = vcpu_get_rr(vcpu, vadr);
	vhpt_offset = ((vadr >> vrr.ps) << 3) & ((1UL << (vpta.size)) - 1);
	if (vpta.vf) {
		pval = ia64_call_vsa(PAL_VPS_THASH, vadr, vrr.val,
				vpta.val, 0, 0, 0, 0);
	} else {
		pval = (vadr & VRN_MASK) | vhpt_offset |
			(vpta.val << 3 >> (vpta.size + 3) << (vpta.size));
	}
	return pval;
}
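
/*
 * Layout note for the !vpta.vf branch above: the computed address combines
 * the region bits of vadr (vadr & VRN_MASK), the hashed offset
 * ((vadr >> vrr.ps) << 3, wrapped to the 2^vpta.size table), and the PTA
 * base with its low vpta.size bits cleared
 * (vpta.val << 3 >> (vpta.size + 3) << vpta.size).
 */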

u64 vcpu_ttag(struct kvm_vcpu *vcpu, u64 vadr)
{
	union ia64_rr vrr;
	union ia64_pta vpta;
	u64 pval;

	vpta.val = vcpu_get_pta(vcpu);
	vrr.val = vcpu_get_rr(vcpu, vadr);
	if (vpta.vf) {
		pval = ia64_call_vsa(PAL_VPS_TTAG, vadr, vrr.val,
						0, 0, 0, 0, 0);
	} else
		pval = 1;

	return pval;
}

u64 vcpu_tak(struct kvm_vcpu *vcpu, u64 vadr)
{
	struct thash_data *data;
	union ia64_pta vpta;
	u64 key;

	vpta.val = vcpu_get_pta(vcpu);
	if (vpta.vf == 0) {
		key = 1;
		return key;
	}
	data = vtlb_lookup(vcpu, vadr, D_TLB);
	if (!data || !data->p)
		key = 1;
	else
		key = data->key;

	return key;
}

void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long thash, vadr;

	vadr = vcpu_get_gr(vcpu, inst.M46.r3);
	thash = vcpu_thash(vcpu, vadr);
	vcpu_set_gr(vcpu, inst.M46.r1, thash, 0);
}

void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long tag, vadr;

	vadr = vcpu_get_gr(vcpu, inst.M46.r3);
	tag = vcpu_ttag(vcpu, vadr);
	vcpu_set_gr(vcpu, inst.M46.r1, tag, 0);
}

int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, unsigned long *padr)
{
	struct thash_data *data;
	union ia64_isr visr, pt_isr;
	struct kvm_pt_regs *regs;
	struct ia64_psr vpsr;

	regs = vcpu_regs(vcpu);
	pt_isr.val = VMX(vcpu, cr_isr);
	visr.val = 0;
	visr.ei = pt_isr.ei;
	visr.ir = pt_isr.ir;
	vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
	visr.na = 1;

	data = vhpt_lookup(vadr);
	if (data) {
		if (data->p == 0) {
			vcpu_set_isr(vcpu, visr.val);
			data_page_not_present(vcpu, vadr);
			return IA64_FAULT;
		} else if (data->ma == VA_MATTR_NATPAGE) {
			vcpu_set_isr(vcpu, visr.val);
			dnat_page_consumption(vcpu, vadr);
			return IA64_FAULT;
		} else {
			*padr = (data->gpaddr >> data->ps << data->ps) |
				(vadr & (PSIZE(data->ps) - 1));
			return IA64_NO_FAULT;
		}
	}

	data = vtlb_lookup(vcpu, vadr, D_TLB);
	if (data) {
		if (data->p == 0) {
			vcpu_set_isr(vcpu, visr.val);
			data_page_not_present(vcpu, vadr);
			return IA64_FAULT;
		} else if (data->ma == VA_MATTR_NATPAGE) {
			vcpu_set_isr(vcpu, visr.val);
			dnat_page_consumption(vcpu, vadr);
			return IA64_FAULT;
		} else {
			*padr = ((data->ppn >> (data->ps - 12)) << data->ps)
				| (vadr & (PSIZE(data->ps) - 1));
			return IA64_NO_FAULT;
		}
	}
	if (!vhpt_enabled(vcpu, vadr, NA_REF)) {
		if (vpsr.ic) {
			vcpu_set_isr(vcpu, visr.val);
			alt_dtlb(vcpu, vadr);
			return IA64_FAULT;
		} else {
			nested_dtlb(vcpu);
			return IA64_FAULT;
		}
	} else {
		if (vpsr.ic) {
			vcpu_set_isr(vcpu, visr.val);
			dvhpt_fault(vcpu, vadr);
			return IA64_FAULT;
		} else {
			nested_dtlb(vcpu);
			return IA64_FAULT;
		}
	}

	return IA64_NO_FAULT;
}

int kvm_tpa(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r1, r3;

	r3 = vcpu_get_gr(vcpu, inst.M46.r3);

	if (vcpu_tpa(vcpu, r3, &r1))
		return IA64_FAULT;

	vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
	return(IA64_NO_FAULT);
}

void kvm_tak(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r1, r3;

	r3 = vcpu_get_gr(vcpu, inst.M46.r3);
	r1 = vcpu_tak(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
}

/************************************
 * Insert/Purge translation register/cache
 ************************************/
void vcpu_itc_i(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
{
	thash_purge_and_insert(vcpu, pte, itir, ifa, I_TLB);
}

void vcpu_itc_d(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
{
	thash_purge_and_insert(vcpu, pte, itir, ifa, D_TLB);
}

void vcpu_itr_i(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
{
	u64 ps, va, rid;
	struct thash_data *p_itr;

	ps = itir_ps(itir);
	va = PAGEALIGN(ifa, ps);
	pte &= ~PAGE_FLAGS_RV_MASK;
	rid = vcpu_get_rr(vcpu, ifa);
	rid = rid & RR_RID_MASK;
	p_itr = (struct thash_data *)&vcpu->arch.itrs[slot];
	vcpu_set_tr(p_itr, pte, itir, va, rid);
	vcpu_quick_region_set(VMX(vcpu, itr_regions), va);
}

void vcpu_itr_d(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
{
	u64 gpfn;
	u64 ps, va, rid;
	struct thash_data *p_dtr;

	ps = itir_ps(itir);
	va = PAGEALIGN(ifa, ps);
	pte &= ~PAGE_FLAGS_RV_MASK;

	if (ps != _PAGE_SIZE_16M)
		thash_purge_entries(vcpu, va, ps);
	gpfn = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
	if (__gpfn_is_io(gpfn))
		pte |= VTLB_PTE_IO;
	rid = vcpu_get_rr(vcpu, va);
	rid = rid & RR_RID_MASK;
	p_dtr = (struct thash_data *)&vcpu->arch.dtrs[slot];
	vcpu_set_tr((struct thash_data *)&vcpu->arch.dtrs[slot],
							pte, itir, va, rid);
	vcpu_quick_region_set(VMX(vcpu, dtr_regions), va);
}

void vcpu_ptr_d(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
{
	int index;
	u64 va;

	va = PAGEALIGN(ifa, ps);
	while ((index = vtr_find_overlap(vcpu, va, ps, D_TLB)) >= 0)
		vcpu->arch.dtrs[index].page_flags = 0;

	thash_purge_entries(vcpu, va, ps);
}

void vcpu_ptr_i(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
{
	int index;
	u64 va;

	va = PAGEALIGN(ifa, ps);
	while ((index = vtr_find_overlap(vcpu, va, ps, I_TLB)) >= 0)
		vcpu->arch.itrs[index].page_flags = 0;

	thash_purge_entries(vcpu, va, ps);
}

void vcpu_ptc_l(struct kvm_vcpu *vcpu, u64 va, u64 ps)
{
	va = PAGEALIGN(va, ps);
	thash_purge_entries(vcpu, va, ps);
}

void vcpu_ptc_e(struct kvm_vcpu *vcpu, u64 va)
{
	thash_purge_all(vcpu);
}

void vcpu_ptc_ga(struct kvm_vcpu *vcpu, u64 va, u64 ps)
{
	struct exit_ctl_data *p = &vcpu->arch.exit_data;
	long psr;

	local_irq_save(psr);
	p->exit_reason = EXIT_REASON_PTC_G;

	p->u.ptc_g_data.rr = vcpu_get_rr(vcpu, va);
	p->u.ptc_g_data.vaddr = va;
	p->u.ptc_g_data.ps = ps;
	vmm_transition(vcpu);
	/* Do Local Purge Here*/
	vcpu_ptc_l(vcpu, va, ps);
	local_irq_restore(psr);
}

void vcpu_ptc_g(struct kvm_vcpu *vcpu, u64 va, u64 ps)
{
	vcpu_ptc_ga(vcpu, va, ps);
}

void kvm_ptc_e(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long ifa;

	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
	vcpu_ptc_e(vcpu, ifa);
}

void kvm_ptc_g(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long ifa, itir;

	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
	itir = vcpu_get_gr(vcpu, inst.M45.r2);
	vcpu_ptc_g(vcpu, ifa, itir_ps(itir));
}

void kvm_ptc_ga(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long ifa, itir;

	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
	itir = vcpu_get_gr(vcpu, inst.M45.r2);
	vcpu_ptc_ga(vcpu, ifa, itir_ps(itir));
}

void kvm_ptc_l(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long ifa, itir;

	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
	itir = vcpu_get_gr(vcpu, inst.M45.r2);
	vcpu_ptc_l(vcpu, ifa, itir_ps(itir));
}

void kvm_ptr_d(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long ifa, itir;

	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
	itir = vcpu_get_gr(vcpu, inst.M45.r2);
	vcpu_ptr_d(vcpu, ifa, itir_ps(itir));
}

void kvm_ptr_i(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long ifa, itir;

	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
	itir = vcpu_get_gr(vcpu, inst.M45.r2);
	vcpu_ptr_i(vcpu, ifa, itir_ps(itir));
}

void kvm_itr_d(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long itir, ifa, pte, slot;

	slot = vcpu_get_gr(vcpu, inst.M45.r3);
	pte = vcpu_get_gr(vcpu, inst.M45.r2);
	itir = vcpu_get_itir(vcpu);
	ifa = vcpu_get_ifa(vcpu);
	vcpu_itr_d(vcpu, slot, pte, itir, ifa);
}

void kvm_itr_i(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long itir, ifa, pte, slot;

	slot = vcpu_get_gr(vcpu, inst.M45.r3);
	pte = vcpu_get_gr(vcpu, inst.M45.r2);
	itir = vcpu_get_itir(vcpu);
	ifa = vcpu_get_ifa(vcpu);
	vcpu_itr_i(vcpu, slot, pte, itir, ifa);
}

void kvm_itc_d(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long itir, ifa, pte;

	itir = vcpu_get_itir(vcpu);
	ifa = vcpu_get_ifa(vcpu);
	pte = vcpu_get_gr(vcpu, inst.M45.r2);
	vcpu_itc_d(vcpu, pte, itir, ifa);
}

void kvm_itc_i(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long itir, ifa, pte;

	itir = vcpu_get_itir(vcpu);
	ifa = vcpu_get_ifa(vcpu);
	pte = vcpu_get_gr(vcpu, inst.M45.r2);
	vcpu_itc_i(vcpu, pte, itir, ifa);
}

/*************************************
 * Moves to semi-privileged registers
 *************************************/
void kvm_mov_to_ar_imm(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long imm;

	if (inst.M30.s)
		imm = -inst.M30.imm;
	else
		imm = inst.M30.imm;

	vcpu_set_itc(vcpu, imm);
}

void kvm_mov_to_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r2;

	r2 = vcpu_get_gr(vcpu, inst.M29.r2);
	vcpu_set_itc(vcpu, r2);
}

void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r1;

	r1 = vcpu_get_itc(vcpu);
	vcpu_set_gr(vcpu, inst.M31.r1, r1, 0);
}

/**************************************************************************
  struct kvm_vcpu protection key register access routines
 **************************************************************************/
unsigned long vcpu_get_pkr(struct kvm_vcpu *vcpu, unsigned long reg)
{
	return ((unsigned long)ia64_get_pkr(reg));
}

void vcpu_set_pkr(struct kvm_vcpu *vcpu, unsigned long reg, unsigned long val)
{
	ia64_set_pkr(reg, val);
}

/********************************
 * Moves to privileged registers
 ********************************/
unsigned long vcpu_set_rr(struct kvm_vcpu *vcpu, unsigned long reg,
					unsigned long val)
{
	union ia64_rr oldrr, newrr;
	unsigned long rrval;
	struct exit_ctl_data *p = &vcpu->arch.exit_data;
	unsigned long psr;

	oldrr.val = vcpu_get_rr(vcpu, reg);
	newrr.val = val;
	vcpu->arch.vrr[reg >> VRN_SHIFT] = val;

	switch ((unsigned long)(reg >> VRN_SHIFT)) {
	case VRN6:
		vcpu->arch.vmm_rr = vrrtomrr(val);
		local_irq_save(psr);
		p->exit_reason = EXIT_REASON_SWITCH_RR6;
		vmm_transition(vcpu);
		local_irq_restore(psr);
		break;
	case VRN4:
		rrval = vrrtomrr(val);
		vcpu->arch.metaphysical_saved_rr4 = rrval;
		if (!is_physical_mode(vcpu))
			ia64_set_rr(reg, rrval);
		break;
	case VRN0:
		rrval = vrrtomrr(val);
		vcpu->arch.metaphysical_saved_rr0 = rrval;
		if (!is_physical_mode(vcpu))
			ia64_set_rr(reg, rrval);
		break;
	default:
		ia64_set_rr(reg, vrrtomrr(val));
		break;
	}

	return (IA64_NO_FAULT);
}

void kvm_mov_to_rr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r2;

	r3 = vcpu_get_gr(vcpu, inst.M42.r3);
	r2 = vcpu_get_gr(vcpu, inst.M42.r2);
	vcpu_set_rr(vcpu, r3, r2);
}

void kvm_mov_to_dbr(struct kvm_vcpu *vcpu, INST64 inst)
{
}

void kvm_mov_to_ibr(struct kvm_vcpu *vcpu, INST64 inst)
{
}

void kvm_mov_to_pmc(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r2;

	r3 = vcpu_get_gr(vcpu, inst.M42.r3);
	r2 = vcpu_get_gr(vcpu, inst.M42.r2);
	vcpu_set_pmc(vcpu, r3, r2);
}

void kvm_mov_to_pmd(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r2;

	r3 = vcpu_get_gr(vcpu, inst.M42.r3);
	r2 = vcpu_get_gr(vcpu, inst.M42.r2);
	vcpu_set_pmd(vcpu, r3, r2);
}

void kvm_mov_to_pkr(struct kvm_vcpu *vcpu, INST64 inst)
{
	u64 r3, r2;

	r3 = vcpu_get_gr(vcpu, inst.M42.r3);
	r2 = vcpu_get_gr(vcpu, inst.M42.r2);
	vcpu_set_pkr(vcpu, r3, r2);
}

void kvm_mov_from_rr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r1;

	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
	r1 = vcpu_get_rr(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
}

void kvm_mov_from_pkr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r1;

	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
	r1 = vcpu_get_pkr(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
}

void kvm_mov_from_dbr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r1;

	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
	r1 = vcpu_get_dbr(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
}

void kvm_mov_from_ibr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r1;

	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
	r1 = vcpu_get_ibr(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
}

void kvm_mov_from_pmc(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r1;

	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
	r1 = vcpu_get_pmc(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
}

unsigned long vcpu_get_cpuid(struct kvm_vcpu *vcpu, unsigned long reg)
{
	/* FIXME: This could get called as a result of a rsvd-reg fault */
	if (reg > (ia64_get_cpuid(3) & 0xff))
		return 0;
	else
		return ia64_get_cpuid(reg);
}

void kvm_mov_from_cpuid(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r1;

	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
	r1 = vcpu_get_cpuid(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
}

void vcpu_set_tpr(struct kvm_vcpu *vcpu, unsigned long val)
{
	VCPU(vcpu, tpr) = val;
	vcpu->arch.irq_check = 1;
}

unsigned long kvm_mov_to_cr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r2;

	r2 = vcpu_get_gr(vcpu, inst.M32.r2);
	VCPU(vcpu, vcr[inst.M32.cr3]) = r2;

	switch (inst.M32.cr3) {
	case 0:
		vcpu_set_dcr(vcpu, r2);
		break;
	case 1:
		vcpu_set_itm(vcpu, r2);
		break;
	case 66:
		vcpu_set_tpr(vcpu, r2);
		break;
	case 67:
		vcpu_set_eoi(vcpu, r2);
		break;
	default:
		break;
	}

	return 0;
}

unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long tgt = inst.M33.r1;
	unsigned long val;

	switch (inst.M33.cr3) {
	case 65:
		val = vcpu_get_ivr(vcpu);
		vcpu_set_gr(vcpu, tgt, val, 0);
		break;
	case 67:
		vcpu_set_gr(vcpu, tgt, 0L, 0);
		break;
	default:
		val = VCPU(vcpu, vcr[inst.M33.cr3]);
		vcpu_set_gr(vcpu, tgt, val, 0);
		break;
	}

	return 0;
}

void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val)
{
	unsigned long mask;
	struct kvm_pt_regs *regs;
	struct ia64_psr old_psr, new_psr;

	old_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);

	regs = vcpu_regs(vcpu);
	/* We only support a guest with:
	 *	vpsr.pk = 0
	 *	vpsr.is = 0
	 * Otherwise panic
	 */
	if (val & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM))
		panic_vm(vcpu, "Only support guests with vpsr.pk =0 "
				"& vpsr.is=0\n");

	/*
	 * For those IA64_PSR bits: id/da/dd/ss/ed/ia
	 * Since these bits will become 0 after successful execution of
	 * each instruction, we will set them to mIA64_PSR.
	 */
	VCPU(vcpu, vpsr) = val
		& (~(IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD |
			IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA));

	if (!old_psr.i && (val & IA64_PSR_I)) {
		/* vpsr.i 0->1 */
		vcpu->arch.irq_check = 1;
	}
	new_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);

	/*
	 * All vIA64_PSR bits shall go to mPSR (v->tf->tf_special.psr),
	 * except for the following bits:
	 * ic/i/dt/si/rt/mc/it/bn/vm
	 */
	mask = IA64_PSR_IC + IA64_PSR_I + IA64_PSR_DT + IA64_PSR_SI +
		IA64_PSR_RT + IA64_PSR_MC + IA64_PSR_IT + IA64_PSR_BN +
		IA64_PSR_VM;

	regs->cr_ipsr = (regs->cr_ipsr & mask) | (val & (~mask));

	check_mm_mode_switch(vcpu, old_psr, new_psr);

	return;
}

unsigned long vcpu_cover(struct kvm_vcpu *vcpu)
{
	struct ia64_psr vpsr;
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);

	if (!vpsr.ic)
		VCPU(vcpu, ifs) = regs->cr_ifs;
	regs->cr_ifs = IA64_IFS_V;
	return (IA64_NO_FAULT);
}

/**************************************************************************
  VCPU banked general register access routines
 **************************************************************************/
#define vcpu_bsw0_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT)	\
	do {								\
		__asm__ __volatile__ (					\
				";;extr.u %0 = %3,%6,16;;\n"		\
				"dep %1 = %0, %1, 0, 16;;\n"		\
				"st8 [%4] = %1\n"			\
				"extr.u %0 = %2, 16, 16;;\n"		\
				"dep %3 = %0, %3, %6, 16;;\n"		\
				"st8 [%5] = %3\n"			\
				::"r"(i), "r"(*b1unat), "r"(*b0unat),	\
				"r"(*runat), "r"(b1unat), "r"(runat),	\
				"i"(VMM_PT_REGS_R16_SLOT) : "memory");	\
	} while (0)

void vcpu_bsw0(struct kvm_vcpu *vcpu)
{
	unsigned long i;
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	unsigned long *r = &regs->r16;
	unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
	unsigned long *b1 = &VCPU(vcpu, vgr[0]);
	unsigned long *runat = &regs->eml_unat;
	unsigned long *b0unat = &VCPU(vcpu, vbnat);
	unsigned long *b1unat = &VCPU(vcpu, vnat);

	if (VCPU(vcpu, vpsr) & IA64_PSR_BN) {
		for (i = 0; i < 16; i++) {
			*b1++ = *r;
			*r++ = *b0++;
		}
		vcpu_bsw0_unat(i, b0unat, b1unat, runat,
				VMM_PT_REGS_R16_SLOT);
		VCPU(vcpu, vpsr) &= ~IA64_PSR_BN;
	}
}
  1496. #define vcpu_bsw1_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT) \
  1497. do { \
  1498. __asm__ __volatile__ (";;extr.u %0 = %3, %6, 16;;\n" \
  1499. "dep %1 = %0, %1, 16, 16;;\n" \
  1500. "st8 [%4] = %1\n" \
  1501. "extr.u %0 = %2, 0, 16;;\n" \
  1502. "dep %3 = %0, %3, %6, 16;;\n" \
  1503. "st8 [%5] = %3\n" \
  1504. ::"r"(i), "r"(*b0unat), "r"(*b1unat), \
  1505. "r"(*runat), "r"(b0unat), "r"(runat), \
  1506. "i"(VMM_PT_REGS_R16_SLOT) : "memory"); \
  1507. } while (0)
void vcpu_bsw1(struct kvm_vcpu *vcpu)
{
        unsigned long i;

        struct kvm_pt_regs *regs = vcpu_regs(vcpu);
        unsigned long *r = &regs->r16;
        unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
        unsigned long *b1 = &VCPU(vcpu, vgr[0]);
        unsigned long *runat = &regs->eml_unat;
        unsigned long *b0unat = &VCPU(vcpu, vbnat);
        unsigned long *b1unat = &VCPU(vcpu, vnat);

        if (!(VCPU(vcpu, vpsr) & IA64_PSR_BN)) {
                for (i = 0; i < 16; i++) {
                        *b0++ = *r;
                        *r++ = *b1++;
                }
                vcpu_bsw1_unat(i, b0unat, b1unat, runat,
                                VMM_PT_REGS_R16_SLOT);
                VCPU(vcpu, vpsr) |= IA64_PSR_BN;
        }
}
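
/*
 * Emulate "rfi": select the register bank recorded in the saved ipsr.bn,
 * restore the guest PSR from the virtual ipsr, reload the current frame
 * marker from the virtual IFS when its valid bit (bit 63) is set, and
 * resume execution at the saved iip.
 */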
void vcpu_rfi(struct kvm_vcpu *vcpu)
{
        unsigned long ifs, psr;
        struct kvm_pt_regs *regs = vcpu_regs(vcpu);

        psr = VCPU(vcpu, ipsr);
        if (psr & IA64_PSR_BN)
                vcpu_bsw1(vcpu);
        else
                vcpu_bsw0(vcpu);
        vcpu_set_psr(vcpu, psr);
        ifs = VCPU(vcpu, ifs);
        if (ifs >> 63)
                regs->cr_ifs = ifs;
        regs->cr_iip = VCPU(vcpu, iip);
}
/*
 * VPSR cannot keep track of the guest PSR bits listed in the mask below
 * (be/up/ac/mfl/mfh/cpl/ri); this function assembles the full guest PSR
 * from vpsr and the machine cr.ipsr.
 */
unsigned long vcpu_get_psr(struct kvm_vcpu *vcpu)
{
        unsigned long mask;
        struct kvm_pt_regs *regs = vcpu_regs(vcpu);

        mask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
                IA64_PSR_MFH | IA64_PSR_CPL | IA64_PSR_RI;
        return (VCPU(vcpu, vpsr) & ~mask) | (regs->cr_ipsr & mask);
}
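
/*
 * rsm/ssm emulation: the 24-bit immediate is reassembled from the i, i2 and
 * imm fields of the M44 encoding.  kvm_rsm clears the selected PSR bits and
 * kvm_ssm sets them; both go through vcpu_get_psr()/vcpu_set_psr() so the
 * split between vpsr and the machine PSR stays consistent.
 */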
void kvm_rsm(struct kvm_vcpu *vcpu, INST64 inst)
{
        unsigned long vpsr;
        unsigned long imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21)
                                | inst.M44.imm;

        vpsr = vcpu_get_psr(vcpu);
        vpsr &= (~imm24);
        vcpu_set_psr(vcpu, vpsr);
}

void kvm_ssm(struct kvm_vcpu *vcpu, INST64 inst)
{
        unsigned long vpsr;
        unsigned long imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21)
                                | inst.M44.imm;

        vpsr = vcpu_get_psr(vcpu);
        vpsr |= imm24;
        vcpu_set_psr(vcpu, vpsr);
}
/* Generate Mask
 * Parameters:
 *   bit -- starting bit
 *   len -- how many bits
 */
#define MASK(bit, len) \
({ \
        __u64 ret; \
 \
        __asm __volatile("dep %0=-1, r0, %1, %2" \
                        : "=r" (ret) : \
                          "M" (bit), \
                          "M" (len)); \
        ret; \
})
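
/*
 * Example: MASK(0, 32) evaluates to 0x00000000ffffffff and MASK(32, 32) to
 * 0xffffffff00000000, so vcpu_set_psr_l() below replaces only the lower 32
 * PSR bits with the supplied value while preserving the upper half.
 */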
void vcpu_set_psr_l(struct kvm_vcpu *vcpu, unsigned long val)
{
        val = (val & MASK(0, 32)) | (vcpu_get_psr(vcpu) & MASK(32, 32));
        vcpu_set_psr(vcpu, val);
}

void kvm_mov_to_psr(struct kvm_vcpu *vcpu, INST64 inst)
{
        unsigned long val;

        val = vcpu_get_gr(vcpu, inst.M35.r2);
        vcpu_set_psr_l(vcpu, val);
}

void kvm_mov_from_psr(struct kvm_vcpu *vcpu, INST64 inst)
{
        unsigned long val;

        val = vcpu_get_psr(vcpu);
        val = (val & MASK(0, 32)) | (val & MASK(35, 2));
        vcpu_set_gr(vcpu, inst.M33.r1, val, 0);
}
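
/*
 * ipsr.ri holds the slot number (0-2) of the next instruction within the
 * current 16-byte bundle.  Advancing past slot 2 wraps the slot back to 0
 * and moves cr.iip to the next bundle; decrementing does the reverse.
 */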
void vcpu_increment_iip(struct kvm_vcpu *vcpu)
{
        struct kvm_pt_regs *regs = vcpu_regs(vcpu);
        struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;

        if (ipsr->ri == 2) {
                ipsr->ri = 0;
                regs->cr_iip += 16;
        } else
                ipsr->ri++;
}

void vcpu_decrement_iip(struct kvm_vcpu *vcpu)
{
        struct kvm_pt_regs *regs = vcpu_regs(vcpu);
        struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;

        if (ipsr->ri == 0) {
                ipsr->ri = 2;
                regs->cr_iip -= 16;
        } else
                ipsr->ri--;
}
/** Emulate a privileged operation.
 *
 * @param vcpu  virtual cpu
 * @param regs  saved register context of the faulting instruction
 *
 * The cause and opcode of the virtualization fault are taken from
 * VMX(vcpu, cause) and VMX(vcpu, opcode).
 */
void kvm_emulate(struct kvm_vcpu *vcpu, struct kvm_pt_regs *regs)
{
        unsigned long status, cause, opcode;
        INST64 inst;

        status = IA64_NO_FAULT;
        cause = VMX(vcpu, cause);
        opcode = VMX(vcpu, opcode);
        inst.inst = opcode;
        /*
         * Switch to the actual virtual rid in rr0 and rr4,
         * which is required by some TLB-related instructions.
         */
        prepare_if_physical_mode(vcpu);

        switch (cause) {
        case EVENT_RSM:
                kvm_rsm(vcpu, inst);
                break;
        case EVENT_SSM:
                kvm_ssm(vcpu, inst);
                break;
        case EVENT_MOV_TO_PSR:
                kvm_mov_to_psr(vcpu, inst);
                break;
        case EVENT_MOV_FROM_PSR:
                kvm_mov_from_psr(vcpu, inst);
                break;
        case EVENT_MOV_FROM_CR:
                kvm_mov_from_cr(vcpu, inst);
                break;
        case EVENT_MOV_TO_CR:
                kvm_mov_to_cr(vcpu, inst);
                break;
        case EVENT_BSW_0:
                vcpu_bsw0(vcpu);
                break;
        case EVENT_BSW_1:
                vcpu_bsw1(vcpu);
                break;
        case EVENT_COVER:
                vcpu_cover(vcpu);
                break;
        case EVENT_RFI:
                vcpu_rfi(vcpu);
                break;
        case EVENT_ITR_D:
                kvm_itr_d(vcpu, inst);
                break;
        case EVENT_ITR_I:
                kvm_itr_i(vcpu, inst);
                break;
        case EVENT_PTR_D:
                kvm_ptr_d(vcpu, inst);
                break;
        case EVENT_PTR_I:
                kvm_ptr_i(vcpu, inst);
                break;
        case EVENT_ITC_D:
                kvm_itc_d(vcpu, inst);
                break;
        case EVENT_ITC_I:
                kvm_itc_i(vcpu, inst);
                break;
        case EVENT_PTC_L:
                kvm_ptc_l(vcpu, inst);
                break;
        case EVENT_PTC_G:
                kvm_ptc_g(vcpu, inst);
                break;
        case EVENT_PTC_GA:
                kvm_ptc_ga(vcpu, inst);
                break;
        case EVENT_PTC_E:
                kvm_ptc_e(vcpu, inst);
                break;
        case EVENT_MOV_TO_RR:
                kvm_mov_to_rr(vcpu, inst);
                break;
        case EVENT_MOV_FROM_RR:
                kvm_mov_from_rr(vcpu, inst);
                break;
        case EVENT_THASH:
                kvm_thash(vcpu, inst);
                break;
        case EVENT_TTAG:
                kvm_ttag(vcpu, inst);
                break;
        case EVENT_TPA:
                status = kvm_tpa(vcpu, inst);
                break;
        case EVENT_TAK:
                kvm_tak(vcpu, inst);
                break;
        case EVENT_MOV_TO_AR_IMM:
                kvm_mov_to_ar_imm(vcpu, inst);
                break;
        case EVENT_MOV_TO_AR:
                kvm_mov_to_ar_reg(vcpu, inst);
                break;
        case EVENT_MOV_FROM_AR:
                kvm_mov_from_ar_reg(vcpu, inst);
                break;
        case EVENT_MOV_TO_DBR:
                kvm_mov_to_dbr(vcpu, inst);
                break;
        case EVENT_MOV_TO_IBR:
                kvm_mov_to_ibr(vcpu, inst);
                break;
        case EVENT_MOV_TO_PMC:
                kvm_mov_to_pmc(vcpu, inst);
                break;
        case EVENT_MOV_TO_PMD:
                kvm_mov_to_pmd(vcpu, inst);
                break;
        case EVENT_MOV_TO_PKR:
                kvm_mov_to_pkr(vcpu, inst);
                break;
        case EVENT_MOV_FROM_DBR:
                kvm_mov_from_dbr(vcpu, inst);
                break;
        case EVENT_MOV_FROM_IBR:
                kvm_mov_from_ibr(vcpu, inst);
                break;
        case EVENT_MOV_FROM_PMC:
                kvm_mov_from_pmc(vcpu, inst);
                break;
        case EVENT_MOV_FROM_PKR:
                kvm_mov_from_pkr(vcpu, inst);
                break;
        case EVENT_MOV_FROM_CPUID:
                kvm_mov_from_cpuid(vcpu, inst);
                break;
        case EVENT_VMSW:
                status = IA64_FAULT;
                break;
        default:
                break;
        }

        /* Most handlers report no status; assume IA64_NO_FAULT unless set above. */
        if (status == IA64_NO_FAULT && cause != EVENT_RFI)
                vcpu_increment_iip(vcpu);

        recover_if_physical_mode(vcpu);
}
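
/*
 * Reset-style defaults for a freshly created vcpu: the guest starts in
 * physical (metaphysical) mode, all virtual region registers are 0x38,
 * register bank 1 is selected, and the interruption vectors (itv, pmv, cmcv,
 * lrr0/1) are masked.  Writing 0x10000 sets bit 16, which is the mask bit of
 * those vectors and the mmi bit of the TPR.
 */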
void init_vcpu(struct kvm_vcpu *vcpu)
{
        int i;

        vcpu->arch.mode_flags = GUEST_IN_PHY;
        VMX(vcpu, vrr[0]) = 0x38;
        VMX(vcpu, vrr[1]) = 0x38;
        VMX(vcpu, vrr[2]) = 0x38;
        VMX(vcpu, vrr[3]) = 0x38;
        VMX(vcpu, vrr[4]) = 0x38;
        VMX(vcpu, vrr[5]) = 0x38;
        VMX(vcpu, vrr[6]) = 0x38;
        VMX(vcpu, vrr[7]) = 0x38;
        VCPU(vcpu, vpsr) = IA64_PSR_BN;
        VCPU(vcpu, dcr) = 0;
        /* pta.size must not be 0.  The minimum is 15 (32k). */
        VCPU(vcpu, pta) = 15 << 2;
        VCPU(vcpu, itv) = 0x10000;
        VCPU(vcpu, itm) = 0;
        VMX(vcpu, last_itc) = 0;
        VCPU(vcpu, lid) = VCPU_LID(vcpu);
        VCPU(vcpu, ivr) = 0;
        VCPU(vcpu, tpr) = 0x10000;
        VCPU(vcpu, eoi) = 0;
        VCPU(vcpu, irr[0]) = 0;
        VCPU(vcpu, irr[1]) = 0;
        VCPU(vcpu, irr[2]) = 0;
        VCPU(vcpu, irr[3]) = 0;
        VCPU(vcpu, pmv) = 0x10000;
        VCPU(vcpu, cmcv) = 0x10000;
        VCPU(vcpu, lrr0) = 0x10000;     /* default reset value? */
        VCPU(vcpu, lrr1) = 0x10000;     /* default reset value? */
        update_vhpi(vcpu, NULL_VECTOR);
        VLSAPIC_XTP(vcpu) = 0x80;       /* disabled */

        for (i = 0; i < 4; i++)
                VLSAPIC_INSVC(vcpu, i) = 0;
}
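
/*
 * Load the machine region registers from the guest's virtual rr values
 * (translated by vrrtomrr()).  rr0 and rr4 are special: when the guest is in
 * physical mode they are loaded with the metaphysical mappings instead of
 * the saved virtual values.  rr6 is left untouched here, presumably because
 * that region is reserved for the VMM's own mapping.
 */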
void kvm_init_all_rr(struct kvm_vcpu *vcpu)
{
        unsigned long psr;

        local_irq_save(psr);

        /* WARNING: virtual mode and physical mode must not coexist
         * in the same region.
         */
        vcpu->arch.metaphysical_saved_rr0 = vrrtomrr(VMX(vcpu, vrr[VRN0]));
        vcpu->arch.metaphysical_saved_rr4 = vrrtomrr(VMX(vcpu, vrr[VRN4]));

        if (is_physical_mode(vcpu)) {
                if (vcpu->arch.mode_flags & GUEST_PHY_EMUL)
                        panic_vm(vcpu, "Machine Status conflicts!\n");
                ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rr0);
                ia64_dv_serialize_data();
                ia64_set_rr((VRN4 << VRN_SHIFT), vcpu->arch.metaphysical_rr4);
                ia64_dv_serialize_data();
        } else {
                ia64_set_rr((VRN0 << VRN_SHIFT),
                                vcpu->arch.metaphysical_saved_rr0);
                ia64_dv_serialize_data();
                ia64_set_rr((VRN4 << VRN_SHIFT),
                                vcpu->arch.metaphysical_saved_rr4);
                ia64_dv_serialize_data();
        }
        ia64_set_rr((VRN1 << VRN_SHIFT),
                        vrrtomrr(VMX(vcpu, vrr[VRN1])));
        ia64_dv_serialize_data();
        ia64_set_rr((VRN2 << VRN_SHIFT),
                        vrrtomrr(VMX(vcpu, vrr[VRN2])));
        ia64_dv_serialize_data();
        ia64_set_rr((VRN3 << VRN_SHIFT),
                        vrrtomrr(VMX(vcpu, vrr[VRN3])));
        ia64_dv_serialize_data();
        ia64_set_rr((VRN5 << VRN_SHIFT),
                        vrrtomrr(VMX(vcpu, vrr[VRN5])));
        ia64_dv_serialize_data();
        ia64_set_rr((VRN7 << VRN_SHIFT),
                        vrrtomrr(VMX(vcpu, vrr[VRN7])));
        ia64_dv_serialize_data();
        ia64_srlz_d();
        ia64_set_psr(psr);
}
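
/*
 * Entry point on the VMM side: restore the per-vcpu VPD state through the
 * PAL_VPS_RESTORE service, initialize the virtual TLB and VHPT, reset the
 * vcpu's architectural state and region registers, then branch to the guest
 * reset entry via vmm_reset_entry().
 */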
int vmm_entry(void)
{
        struct kvm_vcpu *v;
        v = current_vcpu;

        ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)v->arch.vpd,
                        0, 0, 0, 0, 0, 0);
        kvm_init_vtlb(v);
        kvm_init_vhpt(v);
        init_vcpu(v);
        kvm_init_all_rr(v);
        vmm_reset_entry();

        return 0;
}
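
/*
 * Dump the interruption state and scratch registers of the faulting context;
 * called from panic_vm() below.
 */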
static void kvm_show_registers(struct kvm_pt_regs *regs)
{
        unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri;
        struct kvm_vcpu *vcpu = current_vcpu;

        if (vcpu != NULL)
                printk("vcpu 0x%p vcpu %d\n",
                                vcpu, vcpu->vcpu_id);

        printk("psr : %016lx ifs : %016lx ip : [<%016lx>]\n",
                        regs->cr_ipsr, regs->cr_ifs, ip);
        printk("unat: %016lx pfs : %016lx rsc : %016lx\n",
                        regs->ar_unat, regs->ar_pfs, regs->ar_rsc);
        printk("rnat: %016lx bspstore: %016lx pr : %016lx\n",
                        regs->ar_rnat, regs->ar_bspstore, regs->pr);
        printk("ldrs: %016lx ccv : %016lx fpsr: %016lx\n",
                        regs->loadrs, regs->ar_ccv, regs->ar_fpsr);
        printk("csd : %016lx ssd : %016lx\n", regs->ar_csd, regs->ar_ssd);
        printk("b0 : %016lx b6 : %016lx b7 : %016lx\n", regs->b0,
                        regs->b6, regs->b7);
        printk("f6 : %05lx%016lx f7 : %05lx%016lx\n",
                        regs->f6.u.bits[1], regs->f6.u.bits[0],
                        regs->f7.u.bits[1], regs->f7.u.bits[0]);
        printk("f8 : %05lx%016lx f9 : %05lx%016lx\n",
                        regs->f8.u.bits[1], regs->f8.u.bits[0],
                        regs->f9.u.bits[1], regs->f9.u.bits[0]);
        printk("f10 : %05lx%016lx f11 : %05lx%016lx\n",
                        regs->f10.u.bits[1], regs->f10.u.bits[0],
                        regs->f11.u.bits[1], regs->f11.u.bits[0]);
        printk("r1 : %016lx r2 : %016lx r3 : %016lx\n", regs->r1,
                        regs->r2, regs->r3);
        printk("r8 : %016lx r9 : %016lx r10 : %016lx\n", regs->r8,
                        regs->r9, regs->r10);
        printk("r11 : %016lx r12 : %016lx r13 : %016lx\n", regs->r11,
                        regs->r12, regs->r13);
        printk("r14 : %016lx r15 : %016lx r16 : %016lx\n", regs->r14,
                        regs->r15, regs->r16);
        printk("r17 : %016lx r18 : %016lx r19 : %016lx\n", regs->r17,
                        regs->r18, regs->r19);
        printk("r20 : %016lx r21 : %016lx r22 : %016lx\n", regs->r20,
                        regs->r21, regs->r22);
        printk("r23 : %016lx r24 : %016lx r25 : %016lx\n", regs->r23,
                        regs->r24, regs->r25);
        printk("r26 : %016lx r27 : %016lx r28 : %016lx\n", regs->r26,
                        regs->r27, regs->r28);
        printk("r29 : %016lx r30 : %016lx r31 : %016lx\n", regs->r29,
                        regs->r30, regs->r31);
}
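
/*
 * Fatal error path for the VMM: format and print the message, dump the
 * register state, record EXIT_REASON_VM_PANIC in the exit data and switch
 * back to the host via vmm_transition().  The trailing loop should be
 * unreachable; it only guards against an unexpected return.
 */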
void panic_vm(struct kvm_vcpu *v, const char *fmt, ...)
{
        va_list args;
        char buf[256];

        struct kvm_pt_regs *regs = vcpu_regs(v);
        struct exit_ctl_data *p = &v->arch.exit_data;

        va_start(args, fmt);
        vsnprintf(buf, sizeof(buf), fmt, args);
        va_end(args);
        printk("%s", buf);      /* explicit format string; never pass buf as the format */
        kvm_show_registers(regs);
        p->exit_reason = EXIT_REASON_VM_PANIC;
        vmm_transition(v);
        /* Never returns */
        while (1);
}