/*
 * arch/ia64/kvm/optvfault.S
 * optimize virtualization fault handler
 *
 * Copyright (C) 2006 Intel Co
 *	Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
 * Copyright (C) 2008 Intel Co
 *	Add the support for Tukwila processors.
 *	Xiantao Zhang <xiantao.zhang@intel.com>
 */

#include <asm/asmmacro.h>
#include <asm/processor.h>
#include <asm/kvm_host.h>

#include "vti.h"
#include "asm-offsets.h"

#define ACCE_MOV_FROM_AR
#define ACCE_MOV_FROM_RR
#define ACCE_MOV_TO_RR
#define ACCE_RSM
#define ACCE_SSM
#define ACCE_MOV_TO_PSR
#define ACCE_THASH
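
/*
 * VMX_VPS_SYNC_READ: call the PAL VPS sync-read service for this vcpu.
 * b0, r24, r25 and r31 are preserved in r17-r20, r25 is pointed at the
 * VPD, r24 is set to the bundle following the call (ip + 0x20), and
 * control branches to kvm_vps_sync_read; the saved registers are then
 * restored.
 */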
#define VMX_VPS_SYNC_READ \
        add r16=VMM_VPD_BASE_OFFSET,r21; \
        mov r17 = b0; \
        mov r18 = r24; \
        mov r19 = r25; \
        mov r20 = r31; \
        ;; \
{.mii; \
        ld8 r16 = [r16]; \
        nop 0x0; \
        mov r24 = ip; \
        ;; \
}; \
{.mmb; \
        add r24=0x20, r24; \
        mov r25 = r16; \
        br.sptk.many kvm_vps_sync_read; \
}; \
        mov b0 = r17; \
        mov r24 = r18; \
        mov r25 = r19; \
        mov r31 = r20
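
/*
 * kvm_vps_entry: common trampoline into the PAL VPS services.  r30
 * carries the service offset (e.g. PAL_VPS_SYNC_READ); it is added to
 * the vcpu's VSA base (VMM_VCPU_VSA_BASE_OFFSET) and control branches
 * to the resulting handler address.
 */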
ENTRY(kvm_vps_entry)
        adds r29 = VMM_VCPU_VSA_BASE_OFFSET,r21
        ;;
        ld8 r29 = [r29]
        ;;
        add r29 = r29, r30
        ;;
        mov b0 = r29
        br.sptk.many b0
END(kvm_vps_entry)

/*
 *	Inputs:
 *	r24 : return address
 *	r25 : vpd
 *	r29 : scratch
 *
 */
GLOBAL_ENTRY(kvm_vps_sync_read)
        movl r30 = PAL_VPS_SYNC_READ
        ;;
        br.sptk.many kvm_vps_entry
END(kvm_vps_sync_read)

/*
 *	Inputs:
 *	r24 : return address
 *	r25 : vpd
 *	r29 : scratch
 *
 */
GLOBAL_ENTRY(kvm_vps_sync_write)
        movl r30 = PAL_VPS_SYNC_WRITE
        ;;
        br.sptk.many kvm_vps_entry
END(kvm_vps_sync_write)

/*
 *	Inputs:
 *	r23 : pr
 *	r24 : guest b0
 *	r25 : vpd
 *
 */
GLOBAL_ENTRY(kvm_vps_resume_normal)
        movl r30 = PAL_VPS_RESUME_NORMAL
        ;;
        mov pr=r23,-2
        br.sptk.many kvm_vps_entry
END(kvm_vps_resume_normal)

/*
 *	Inputs:
 *	r23 : pr
 *	r24 : guest b0
 *	r25 : vpd
 *	r17 : isr
 */
GLOBAL_ENTRY(kvm_vps_resume_handler)
        movl r30 = PAL_VPS_RESUME_HANDLER
        ;;
        ld8 r26=[r25]
        shr r17=r17,IA64_ISR_IR_BIT
        ;;
        dep r26=r17,r26,63,1    // bit 63 of r26 indicates whether to enable CFLE
        mov pr=r23,-2
        br.sptk.many kvm_vps_entry
END(kvm_vps_resume_handler)
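
/*
 * kvm_asm_mov_from_ar: emulate a guest read of ar.itc.  The guest ITC
 * value is the host ar.itc plus the per-vcpu ITC offset; the result is
 * also cached in the vcpu's last_itc field before branching into the
 * asm_mov_to_reg table to write the destination register.
 */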
//mov r1=ar3
GLOBAL_ENTRY(kvm_asm_mov_from_ar)
#ifndef ACCE_MOV_FROM_AR
        br.many kvm_virtualization_fault_back
#endif
        add r18=VMM_VCPU_ITC_OFS_OFFSET, r21
        add r16=VMM_VCPU_LAST_ITC_OFFSET,r21
        extr.u r17=r25,6,7
        ;;
        ld8 r18=[r18]
        mov r19=ar.itc
        mov r24=b0
        ;;
        add r19=r19,r18
        addl r20=@gprel(asm_mov_to_reg),gp
        ;;
        st8 [r16] = r19
        adds r30=kvm_resume_to_guest-asm_mov_to_reg,r20
        shladd r17=r17,4,r20
        ;;
        mov b0=r17
        br.sptk.few b0
        ;;
END(kvm_asm_mov_from_ar)

/*
 * Special SGI SN2 optimized version of mov_from_ar using the SN2 RTC
 * clock as its source for emulating the ITC.  This version will be
 * copied on top of the original version if the host is determined to
 * be an SN2.
 */
GLOBAL_ENTRY(kvm_asm_mov_from_ar_sn2)
        add r18=VMM_VCPU_ITC_OFS_OFFSET, r21
        movl r19 = (KVM_VMM_BASE+(1<<KVM_VMM_SHIFT))
        add r16=VMM_VCPU_LAST_ITC_OFFSET,r21
        extr.u r17=r25,6,7
        mov r24=b0
        ;;
        ld8 r18=[r18]
        ld8 r19=[r19]
        addl r20=@gprel(asm_mov_to_reg),gp
        ;;
        add r19=r19,r18
        shladd r17=r17,4,r20
        ;;
        adds r30=kvm_resume_to_guest-asm_mov_to_reg,r20
        st8 [r16] = r19
        mov b0=r17
        br.sptk.few b0
        ;;
END(kvm_asm_mov_from_ar_sn2)
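
/*
 * kvm_asm_mov_from_rr: emulate "mov r1=rr[r3]".  The index register is
 * read through the asm_mov_from_reg table, its top three bits select
 * the region, and the value cached in vcpu->arch.vrr[region] is
 * returned through the asm_mov_to_reg table.
 */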
// mov r1=rr[r3]
GLOBAL_ENTRY(kvm_asm_mov_from_rr)
#ifndef ACCE_MOV_FROM_RR
        br.many kvm_virtualization_fault_back
#endif
        extr.u r16=r25,20,7
        extr.u r17=r25,6,7
        addl r20=@gprel(asm_mov_from_reg),gp
        ;;
        adds r30=kvm_asm_mov_from_rr_back_1-asm_mov_from_reg,r20
        shladd r16=r16,4,r20
        mov r24=b0
        ;;
        add r27=VMM_VCPU_VRR0_OFFSET,r21
        mov b0=r16
        br.many b0
        ;;
kvm_asm_mov_from_rr_back_1:
        adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20
        adds r22=asm_mov_to_reg-asm_mov_from_reg,r20
        shr.u r26=r19,61
        ;;
        shladd r17=r17,4,r22
        shladd r27=r26,3,r27
        ;;
        ld8 r19=[r27]
        mov b0=r17
        br.many b0
END(kvm_asm_mov_from_rr)
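
/*
 * kvm_asm_mov_to_rr: emulate "mov rr[r3]=r2".  Region 6 is left to the
 * C fallback handler.  The guest value is stored in vcpu->arch.vrr[],
 * a machine region-register value is built from it (VMM rid, ve set,
 * page size clamped to 14) and written to the hardware RR, except that
 * for regions 0/4 with the guest in metaphysical mode only the saved
 * copy is updated.
 */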
// mov rr[r3]=r2
GLOBAL_ENTRY(kvm_asm_mov_to_rr)
#ifndef ACCE_MOV_TO_RR
        br.many kvm_virtualization_fault_back
#endif
        extr.u r16=r25,20,7
        extr.u r17=r25,13,7
        addl r20=@gprel(asm_mov_from_reg),gp
        ;;
        adds r30=kvm_asm_mov_to_rr_back_1-asm_mov_from_reg,r20
        shladd r16=r16,4,r20
        mov r22=b0
        ;;
        add r27=VMM_VCPU_VRR0_OFFSET,r21
        mov b0=r16
        br.many b0
        ;;
kvm_asm_mov_to_rr_back_1:
        adds r30=kvm_asm_mov_to_rr_back_2-asm_mov_from_reg,r20
        shr.u r23=r19,61
        shladd r17=r17,4,r20
        ;;
        //if rr6, go back
        cmp.eq p6,p0=6,r23
        mov b0=r22
(p6)    br.cond.dpnt.many kvm_virtualization_fault_back
        ;;
        mov r28=r19
        mov b0=r17
        br.many b0
kvm_asm_mov_to_rr_back_2:
        adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20
        shladd r27=r23,3,r27
        ;; // vrr.rid<<4 |0xe
        st8 [r27]=r19
        mov b0=r30
        ;;
        extr.u r16=r19,8,26
        extr.u r18=r19,2,6
        mov r17=0xe
        ;;
        shladd r16 = r16, 4, r17
        extr.u r19=r19,0,8
        ;;
        shl r16 = r16,8
        ;;
        add r19 = r19, r16
        ;; //set ve 1
        dep r19=-1,r19,0,1
        cmp.lt p6,p0=14,r18
        ;;
(p6)    mov r18=14
        ;;
(p6)    dep r19=r18,r19,2,6
        ;;
        cmp.eq p6,p0=0,r23
        ;;
        cmp.eq.or p6,p0=4,r23
        ;;
        adds r16=VMM_VCPU_MODE_FLAGS_OFFSET,r21
(p6)    adds r17=VMM_VCPU_META_SAVED_RR0_OFFSET,r21
        ;;
        ld4 r16=[r16]
        cmp.eq p7,p0=r0,r0
(p6)    shladd r17=r23,1,r17
        ;;
(p6)    st8 [r17]=r19
(p6)    tbit.nz p6,p7=r16,0
        ;;
(p7)    mov rr[r28]=r19
        mov r24=r22
        br.many b0
END(kvm_asm_mov_to_rr)
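
/*
 * kvm_asm_rsm: emulate "rsm imm24".  The 24-bit immediate is
 * reassembled from the opcode in r25 and the selected bits are cleared
 * in the virtual PSR; they are also cleared in cr.ipsr, except for
 * ic/i/dt/si, which the VMM keeps set.  If the guest clears psr.dt and
 * is not already in metaphysical mode, rr0/rr4 are switched to the
 * metaphysical region registers.
 */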
//rsm
GLOBAL_ENTRY(kvm_asm_rsm)
#ifndef ACCE_RSM
        br.many kvm_virtualization_fault_back
#endif
        VMX_VPS_SYNC_READ
        ;;
        extr.u r26=r25,6,21
        extr.u r27=r25,31,2
        ;;
        extr.u r28=r25,36,1
        dep r26=r27,r26,21,2
        ;;
        add r17=VPD_VPSR_START_OFFSET,r16
        add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21
        //r26 is imm24
        dep r26=r28,r26,23,1
        ;;
        ld8 r18=[r17]
        movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI
        ld4 r23=[r22]
        sub r27=-1,r26
        mov r24=b0
        ;;
        mov r20=cr.ipsr
        or r28=r27,r28
        and r19=r18,r27
        ;;
        st8 [r17]=r19
        and r20=r20,r28
/* Commented out due to lack of fp lazy algorithm support
        adds r27=IA64_VCPU_FP_PSR_OFFSET,r21
        ;;
        ld8 r27=[r27]
        ;;
        tbit.nz p8,p0=r27,IA64_PSR_DFH_BIT
        ;;
(p8)    dep r20=-1,r20,IA64_PSR_DFH_BIT,1
*/
        ;;
        mov cr.ipsr=r20
        tbit.nz p6,p0=r23,0
        ;;
        tbit.z.or p6,p0=r26,IA64_PSR_DT_BIT
(p6)    br.dptk kvm_resume_to_guest_with_sync
        ;;
        add r26=VMM_VCPU_META_RR0_OFFSET,r21
        add r27=VMM_VCPU_META_RR0_OFFSET+8,r21
        dep r23=-1,r23,0,1
        ;;
        ld8 r26=[r26]
        ld8 r27=[r27]
        st4 [r22]=r23
        dep.z r28=4,61,3
        ;;
        mov rr[r0]=r26
        ;;
        mov rr[r28]=r27
        ;;
        srlz.d
        br.many kvm_resume_to_guest_with_sync
END(kvm_asm_rsm)
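
/*
 * kvm_asm_ssm: emulate "ssm imm24".  The selected bits are set in the
 * virtual PSR and in cr.ipsr.  Once dt/rt/it are all set the vcpu is
 * switched back from the metaphysical to the virtual region registers,
 * and if psr.i becomes newly set while the pending interrupt (vhpi)
 * exceeds the vtpr mask, the fault is handed to
 * kvm_asm_dispatch_vexirq.
 */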
//ssm
GLOBAL_ENTRY(kvm_asm_ssm)
#ifndef ACCE_SSM
        br.many kvm_virtualization_fault_back
#endif
        VMX_VPS_SYNC_READ
        ;;
        extr.u r26=r25,6,21
        extr.u r27=r25,31,2
        ;;
        extr.u r28=r25,36,1
        dep r26=r27,r26,21,2
        ;;  //r26 is imm24
        add r27=VPD_VPSR_START_OFFSET,r16
        dep r26=r28,r26,23,1
        ;;  //r19 vpsr
        ld8 r29=[r27]
        mov r24=b0
        ;;
        add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21
        mov r20=cr.ipsr
        or r19=r29,r26
        ;;
        ld4 r23=[r22]
        st8 [r27]=r19
        or r20=r20,r26
        ;;
        mov cr.ipsr=r20
        movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT
        ;;
        and r19=r28,r19
        tbit.z p6,p0=r23,0
        ;;
        cmp.ne.or p6,p0=r28,r19
(p6)    br.dptk kvm_asm_ssm_1
        ;;
        add r26=VMM_VCPU_META_SAVED_RR0_OFFSET,r21
        add r27=VMM_VCPU_META_SAVED_RR0_OFFSET+8,r21
        dep r23=0,r23,0,1
        ;;
        ld8 r26=[r26]
        ld8 r27=[r27]
        st4 [r22]=r23
        dep.z r28=4,61,3
        ;;
        mov rr[r0]=r26
        ;;
        mov rr[r28]=r27
        ;;
        srlz.d
        ;;
kvm_asm_ssm_1:
        tbit.nz p6,p0=r29,IA64_PSR_I_BIT
        ;;
        tbit.z.or p6,p0=r19,IA64_PSR_I_BIT
(p6)    br.dptk kvm_resume_to_guest_with_sync
        ;;
        add r29=VPD_VTPR_START_OFFSET,r16
        add r30=VPD_VHPI_START_OFFSET,r16
        ;;
        ld8 r29=[r29]
        ld8 r30=[r30]
        ;;
        extr.u r17=r29,4,4
        extr.u r18=r29,16,1
        ;;
        dep r17=r18,r17,4,1
        ;;
        cmp.gt p6,p0=r30,r17
(p6)    br.dpnt.few kvm_asm_dispatch_vexirq
        br.many kvm_resume_to_guest_with_sync
END(kvm_asm_ssm)
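
/*
 * kvm_asm_mov_to_psr: emulate "mov psr.l=r2".  The low 32 bits of the
 * virtual PSR are replaced with the source value and cr.ipsr is
 * updated accordingly (ic/i/dt/si/rt stay forced on for the host).
 * A change of the dt/rt/it bits switches the vcpu between the
 * metaphysical and virtual region registers; if psr.i becomes newly
 * set while the pending interrupt (vhpi) exceeds the vtpr mask, the
 * virtual external interrupt is dispatched.
 */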
//mov psr.l=r2
GLOBAL_ENTRY(kvm_asm_mov_to_psr)
#ifndef ACCE_MOV_TO_PSR
        br.many kvm_virtualization_fault_back
#endif
        VMX_VPS_SYNC_READ
        ;;
        extr.u r26=r25,13,7 //r2
        addl r20=@gprel(asm_mov_from_reg),gp
        ;;
        adds r30=kvm_asm_mov_to_psr_back-asm_mov_from_reg,r20
        shladd r26=r26,4,r20
        mov r24=b0
        ;;
        add r27=VPD_VPSR_START_OFFSET,r16
        mov b0=r26
        br.many b0
        ;;
kvm_asm_mov_to_psr_back:
        ld8 r17=[r27]
        add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21
        dep r19=0,r19,32,32
        ;;
        ld4 r23=[r22]
        dep r18=0,r17,0,32
        ;;
        add r30=r18,r19
        movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT
        ;;
        st8 [r27]=r30
        and r27=r28,r30
        and r29=r28,r17
        ;;
        cmp.eq p5,p0=r29,r27
        cmp.eq p6,p7=r28,r27
(p5)    br.many kvm_asm_mov_to_psr_1
        ;;
        //virtual to physical
(p7)    add r26=VMM_VCPU_META_RR0_OFFSET,r21
(p7)    add r27=VMM_VCPU_META_RR0_OFFSET+8,r21
(p7)    dep r23=-1,r23,0,1
        ;;
        //physical to virtual
(p6)    add r26=VMM_VCPU_META_SAVED_RR0_OFFSET,r21
(p6)    add r27=VMM_VCPU_META_SAVED_RR0_OFFSET+8,r21
(p6)    dep r23=0,r23,0,1
        ;;
        ld8 r26=[r26]
        ld8 r27=[r27]
        st4 [r22]=r23
        dep.z r28=4,61,3
        ;;
        mov rr[r0]=r26
        ;;
        mov rr[r28]=r27
        ;;
        srlz.d
        ;;
kvm_asm_mov_to_psr_1:
        mov r20=cr.ipsr
        movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI+IA64_PSR_RT
        ;;
        or r19=r19,r28
        dep r20=0,r20,0,32
        ;;
        add r20=r19,r20
        mov b0=r24
        ;;
/* Commented out due to lack of fp lazy algorithm support
        adds r27=IA64_VCPU_FP_PSR_OFFSET,r21
        ;;
        ld8 r27=[r27]
        ;;
        tbit.nz p8,p0=r27,IA64_PSR_DFH_BIT
        ;;
(p8)    dep r20=-1,r20,IA64_PSR_DFH_BIT,1
        ;;
*/
        mov cr.ipsr=r20
        cmp.ne p6,p0=r0,r0
        ;;
        tbit.nz.or p6,p0=r17,IA64_PSR_I_BIT
        tbit.z.or p6,p0=r30,IA64_PSR_I_BIT
(p6)    br.dpnt.few kvm_resume_to_guest_with_sync
        ;;
        add r29=VPD_VTPR_START_OFFSET,r16
        add r30=VPD_VHPI_START_OFFSET,r16
        ;;
        ld8 r29=[r29]
        ld8 r30=[r30]
        ;;
        extr.u r17=r29,4,4
        extr.u r18=r29,16,1
        ;;
        dep r17=r18,r17,4,1
        ;;
        cmp.gt p6,p0=r30,r17
(p6)    br.dpnt.few kvm_asm_dispatch_vexirq
        br.many kvm_resume_to_guest_with_sync
END(kvm_asm_mov_to_psr)

ENTRY(kvm_asm_dispatch_vexirq)
//increment iip
        mov r17 = b0
        mov r18 = r31
{.mii
        add r25=VMM_VPD_BASE_OFFSET,r21
        nop 0x0
        mov r24 = ip
        ;;
}
{.mmb
        add r24 = 0x20, r24
        ld8 r25 = [r25]
        br.sptk.many kvm_vps_sync_write
}
        mov b0 = r17
        mov r16=cr.ipsr
        mov r31 = r18
        mov r19 = 37
        ;;
        extr.u r17=r16,IA64_PSR_RI_BIT,2
        tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1
        ;;
(p6)    mov r18=cr.iip
(p6)    mov r17=r0
(p7)    add r17=1,r17
        ;;
(p6)    add r18=0x10,r18
        dep r16=r17,r16,IA64_PSR_RI_BIT,2
        ;;
(p6)    mov cr.iip=r18
        mov cr.ipsr=r16
        mov r30 = 1
        br.many kvm_dispatch_vexirq
END(kvm_asm_dispatch_vexirq)

// thash
// TODO: add support when pta.vf = 1
GLOBAL_ENTRY(kvm_asm_thash)
#ifndef ACCE_THASH
        br.many kvm_virtualization_fault_back
#endif
        extr.u r17=r25,20,7             // get r3 from opcode in r25
        extr.u r18=r25,6,7              // get r1 from opcode in r25
        addl r20=@gprel(asm_mov_from_reg),gp
        ;;
        adds r30=kvm_asm_thash_back1-asm_mov_from_reg,r20
        shladd r17=r17,4,r20            // get addr of MOVE_FROM_REG(r17)
        adds r16=VMM_VPD_BASE_OFFSET,r21        // get vcpu.arch.priveregs
        ;;
        mov r24=b0
        ;;
        ld8 r16=[r16]                   // get VPD addr
        mov b0=r17
        br.many b0                      // r19 return value
        ;;
kvm_asm_thash_back1:
        shr.u r23=r19,61                // get RR number
        adds r28=VMM_VCPU_VRR0_OFFSET,r21       // get vcpu->arch.vrr[0]'s addr
        adds r16=VMM_VPD_VPTA_OFFSET,r16        // get vpta
        ;;
        shladd r27=r23,3,r28            // get vcpu->arch.vrr[r23]'s addr
        ld8 r17=[r16]                   // get PTA
        mov r26=1
        ;;
        extr.u r29=r17,2,6              // get pta.size
        ld8 r28=[r27]                   // get vcpu->arch.vrr[r23]'s value
        ;;
        mov b0=r24
        //Fallback to C if pta.vf is set
        tbit.nz p6,p0=r17,8
        ;;
(p6)    mov r24=EVENT_THASH
(p6)    br.cond.dpnt.many kvm_virtualization_fault_back
        extr.u r28=r28,2,6              // get rr.ps
        shl r22=r26,r29                 // 1UL << pta.size
        ;;
        shr.u r23=r19,r28               // vaddr >> rr.ps
        adds r26=3,r29                  // pta.size + 3
        shl r27=r17,3                   // pta << 3
        ;;
        shl r23=r23,3                   // (vaddr >> rr.ps) << 3
        shr.u r27=r27,r26               // (pta << 3) >> (pta.size+3)
        movl r16=7<<61
        ;;
        adds r22=-1,r22                 // (1UL << pta.size) - 1
        shl r27=r27,r29                 // ((pta<<3)>>(pta.size+3))<<pta.size
        and r19=r19,r16                 // vaddr & VRN_MASK
        ;;
        and r22=r22,r23                 // vhpt_offset
        or r19=r19,r27  // (vadr&VRN_MASK)|(((pta<<3)>>(pta.size + 3))<<pta.size)
        adds r26=asm_mov_to_reg-asm_mov_from_reg,r20
        ;;
        or r19=r19,r22                  // calc pval
        shladd r17=r18,4,r26
        adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20
        ;;
        mov b0=r17
        br.many b0
END(kvm_asm_thash)
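
/*
 * Register move dispatch tables.  asm_mov_from_reg and asm_mov_to_reg
 * are arrays of 16-byte bundles, one per general register: callers
 * compute base + reg*16, set r30 to the return target and branch in;
 * the selected bundle moves r<n> to or from r19 and branches back
 * through r30.  Registers r16-r31 go through the *_BANK0_REG helpers,
 * which switch register banks with bsw.1/bsw.0 to reach the guest's
 * banked copies.
 */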
#define MOV_TO_REG0 \
{; \
        nop.b 0x0; \
        nop.b 0x0; \
        nop.b 0x0; \
        ;; \
};

#define MOV_TO_REG(n) \
{; \
        mov r##n##=r19; \
        mov b0=r30; \
        br.sptk.many b0; \
        ;; \
};

#define MOV_FROM_REG(n) \
{; \
        mov r19=r##n##; \
        mov b0=r30; \
        br.sptk.many b0; \
        ;; \
};

#define MOV_TO_BANK0_REG(n) \
ENTRY_MIN_ALIGN(asm_mov_to_bank0_reg##n##); \
{; \
        mov r26=r2; \
        mov r2=r19; \
        bsw.1; \
        ;; \
}; \
{; \
        mov r##n##=r2; \
        nop.b 0x0; \
        bsw.0; \
        ;; \
}; \
{; \
        mov r2=r26; \
        mov b0=r30; \
        br.sptk.many b0; \
        ;; \
}; \
END(asm_mov_to_bank0_reg##n##)

#define MOV_FROM_BANK0_REG(n) \
ENTRY_MIN_ALIGN(asm_mov_from_bank0_reg##n##); \
{; \
        mov r26=r2; \
        nop.b 0x0; \
        bsw.1; \
        ;; \
}; \
{; \
        mov r2=r##n##; \
        nop.b 0x0; \
        bsw.0; \
        ;; \
}; \
{; \
        mov r19=r2; \
        mov r2=r26; \
        mov b0=r30; \
}; \
{; \
        nop.b 0x0; \
        nop.b 0x0; \
        br.sptk.many b0; \
        ;; \
}; \
END(asm_mov_from_bank0_reg##n##)

#define JMP_TO_MOV_TO_BANK0_REG(n) \
{; \
        nop.b 0x0; \
        nop.b 0x0; \
        br.sptk.many asm_mov_to_bank0_reg##n##; \
        ;; \
}

#define JMP_TO_MOV_FROM_BANK0_REG(n) \
{; \
        nop.b 0x0; \
        nop.b 0x0; \
        br.sptk.many asm_mov_from_bank0_reg##n##; \
        ;; \
}

MOV_FROM_BANK0_REG(16)
MOV_FROM_BANK0_REG(17)
MOV_FROM_BANK0_REG(18)
MOV_FROM_BANK0_REG(19)
MOV_FROM_BANK0_REG(20)
MOV_FROM_BANK0_REG(21)
MOV_FROM_BANK0_REG(22)
MOV_FROM_BANK0_REG(23)
MOV_FROM_BANK0_REG(24)
MOV_FROM_BANK0_REG(25)
MOV_FROM_BANK0_REG(26)
MOV_FROM_BANK0_REG(27)
MOV_FROM_BANK0_REG(28)
MOV_FROM_BANK0_REG(29)
MOV_FROM_BANK0_REG(30)
MOV_FROM_BANK0_REG(31)

// mov from reg table
ENTRY(asm_mov_from_reg)
        MOV_FROM_REG(0)
        MOV_FROM_REG(1)
        MOV_FROM_REG(2)
        MOV_FROM_REG(3)
        MOV_FROM_REG(4)
        MOV_FROM_REG(5)
        MOV_FROM_REG(6)
        MOV_FROM_REG(7)
        MOV_FROM_REG(8)
        MOV_FROM_REG(9)
        MOV_FROM_REG(10)
        MOV_FROM_REG(11)
        MOV_FROM_REG(12)
        MOV_FROM_REG(13)
        MOV_FROM_REG(14)
        MOV_FROM_REG(15)
        JMP_TO_MOV_FROM_BANK0_REG(16)
        JMP_TO_MOV_FROM_BANK0_REG(17)
        JMP_TO_MOV_FROM_BANK0_REG(18)
        JMP_TO_MOV_FROM_BANK0_REG(19)
        JMP_TO_MOV_FROM_BANK0_REG(20)
        JMP_TO_MOV_FROM_BANK0_REG(21)
        JMP_TO_MOV_FROM_BANK0_REG(22)
        JMP_TO_MOV_FROM_BANK0_REG(23)
        JMP_TO_MOV_FROM_BANK0_REG(24)
        JMP_TO_MOV_FROM_BANK0_REG(25)
        JMP_TO_MOV_FROM_BANK0_REG(26)
        JMP_TO_MOV_FROM_BANK0_REG(27)
        JMP_TO_MOV_FROM_BANK0_REG(28)
        JMP_TO_MOV_FROM_BANK0_REG(29)
        JMP_TO_MOV_FROM_BANK0_REG(30)
        JMP_TO_MOV_FROM_BANK0_REG(31)
        MOV_FROM_REG(32)
        MOV_FROM_REG(33)
        MOV_FROM_REG(34)
        MOV_FROM_REG(35)
        MOV_FROM_REG(36)
        MOV_FROM_REG(37)
        MOV_FROM_REG(38)
        MOV_FROM_REG(39)
        MOV_FROM_REG(40)
        MOV_FROM_REG(41)
        MOV_FROM_REG(42)
        MOV_FROM_REG(43)
        MOV_FROM_REG(44)
        MOV_FROM_REG(45)
        MOV_FROM_REG(46)
        MOV_FROM_REG(47)
        MOV_FROM_REG(48)
        MOV_FROM_REG(49)
        MOV_FROM_REG(50)
        MOV_FROM_REG(51)
        MOV_FROM_REG(52)
        MOV_FROM_REG(53)
        MOV_FROM_REG(54)
        MOV_FROM_REG(55)
        MOV_FROM_REG(56)
        MOV_FROM_REG(57)
        MOV_FROM_REG(58)
        MOV_FROM_REG(59)
        MOV_FROM_REG(60)
        MOV_FROM_REG(61)
        MOV_FROM_REG(62)
        MOV_FROM_REG(63)
        MOV_FROM_REG(64)
        MOV_FROM_REG(65)
        MOV_FROM_REG(66)
        MOV_FROM_REG(67)
        MOV_FROM_REG(68)
        MOV_FROM_REG(69)
        MOV_FROM_REG(70)
        MOV_FROM_REG(71)
        MOV_FROM_REG(72)
        MOV_FROM_REG(73)
        MOV_FROM_REG(74)
        MOV_FROM_REG(75)
        MOV_FROM_REG(76)
        MOV_FROM_REG(77)
        MOV_FROM_REG(78)
        MOV_FROM_REG(79)
        MOV_FROM_REG(80)
        MOV_FROM_REG(81)
        MOV_FROM_REG(82)
        MOV_FROM_REG(83)
        MOV_FROM_REG(84)
        MOV_FROM_REG(85)
        MOV_FROM_REG(86)
        MOV_FROM_REG(87)
        MOV_FROM_REG(88)
        MOV_FROM_REG(89)
        MOV_FROM_REG(90)
        MOV_FROM_REG(91)
        MOV_FROM_REG(92)
        MOV_FROM_REG(93)
        MOV_FROM_REG(94)
        MOV_FROM_REG(95)
        MOV_FROM_REG(96)
        MOV_FROM_REG(97)
        MOV_FROM_REG(98)
        MOV_FROM_REG(99)
        MOV_FROM_REG(100)
        MOV_FROM_REG(101)
        MOV_FROM_REG(102)
        MOV_FROM_REG(103)
        MOV_FROM_REG(104)
        MOV_FROM_REG(105)
        MOV_FROM_REG(106)
        MOV_FROM_REG(107)
        MOV_FROM_REG(108)
        MOV_FROM_REG(109)
        MOV_FROM_REG(110)
        MOV_FROM_REG(111)
        MOV_FROM_REG(112)
        MOV_FROM_REG(113)
        MOV_FROM_REG(114)
        MOV_FROM_REG(115)
        MOV_FROM_REG(116)
        MOV_FROM_REG(117)
        MOV_FROM_REG(118)
        MOV_FROM_REG(119)
        MOV_FROM_REG(120)
        MOV_FROM_REG(121)
        MOV_FROM_REG(122)
        MOV_FROM_REG(123)
        MOV_FROM_REG(124)
        MOV_FROM_REG(125)
        MOV_FROM_REG(126)
        MOV_FROM_REG(127)
END(asm_mov_from_reg)

/* must be in bank 0
 * parameter:
 * r31: pr
 * r24: b0
 */
ENTRY(kvm_resume_to_guest_with_sync)
        adds r19=VMM_VPD_BASE_OFFSET,r21
        mov r16 = r31
        mov r17 = r24
        ;;
{.mii
        ld8 r25 =[r19]
        nop 0x0
        mov r24 = ip
        ;;
}
{.mmb
        add r24 =0x20, r24
        nop 0x0
        br.sptk.many kvm_vps_sync_write
}
        mov r31 = r16
        mov r24 = r17
        ;;
        br.sptk.many kvm_resume_to_guest
END(kvm_resume_to_guest_with_sync)
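
/*
 * kvm_resume_to_guest: step the guest past the emulated instruction by
 * advancing cr.ipsr.ri (wrapping to the next bundle via cr.iip after
 * the last slot), then return to the guest through
 * PAL_VPS_RESUME_NORMAL, or PAL_VPS_RESUME_HANDLER when vpsr.ic is
 * off.
 */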
ENTRY(kvm_resume_to_guest)
        adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
        ;;
        ld8 r1 =[r16]
        adds r20 = VMM_VCPU_VSA_BASE_OFFSET,r21
        ;;
        mov r16=cr.ipsr
        ;;
        ld8 r20 = [r20]
        adds r19=VMM_VPD_BASE_OFFSET,r21
        ;;
        ld8 r25=[r19]
        extr.u r17=r16,IA64_PSR_RI_BIT,2
        tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1
        ;;
(p6)    mov r18=cr.iip
(p6)    mov r17=r0
        ;;
(p6)    add r18=0x10,r18
(p7)    add r17=1,r17
        ;;
(p6)    mov cr.iip=r18
        dep r16=r17,r16,IA64_PSR_RI_BIT,2
        ;;
        mov cr.ipsr=r16
        adds r19= VPD_VPSR_START_OFFSET,r25
        add r28=PAL_VPS_RESUME_NORMAL,r20
        add r29=PAL_VPS_RESUME_HANDLER,r20
        ;;
        ld8 r19=[r19]
        mov b0=r29
        mov r27=cr.isr
        ;;
        tbit.z p6,p7 = r19,IA64_PSR_IC_BIT      // p7=vpsr.ic
        shr r27=r27,IA64_ISR_IR_BIT
        ;;
(p6)    ld8 r26=[r25]
(p7)    mov b0=r28
        ;;
(p6)    dep r26=r27,r26,63,1
        mov pr=r31,-2
        br.sptk.many b0                 // call pal service
        ;;
END(kvm_resume_to_guest)

MOV_TO_BANK0_REG(16)
MOV_TO_BANK0_REG(17)
MOV_TO_BANK0_REG(18)
MOV_TO_BANK0_REG(19)
MOV_TO_BANK0_REG(20)
MOV_TO_BANK0_REG(21)
MOV_TO_BANK0_REG(22)
MOV_TO_BANK0_REG(23)
MOV_TO_BANK0_REG(24)
MOV_TO_BANK0_REG(25)
MOV_TO_BANK0_REG(26)
MOV_TO_BANK0_REG(27)
MOV_TO_BANK0_REG(28)
MOV_TO_BANK0_REG(29)
MOV_TO_BANK0_REG(30)
MOV_TO_BANK0_REG(31)

// mov to reg table
ENTRY(asm_mov_to_reg)
        MOV_TO_REG0
        MOV_TO_REG(1)
        MOV_TO_REG(2)
        MOV_TO_REG(3)
        MOV_TO_REG(4)
        MOV_TO_REG(5)
        MOV_TO_REG(6)
        MOV_TO_REG(7)
        MOV_TO_REG(8)
        MOV_TO_REG(9)
        MOV_TO_REG(10)
        MOV_TO_REG(11)
        MOV_TO_REG(12)
        MOV_TO_REG(13)
        MOV_TO_REG(14)
        MOV_TO_REG(15)
        JMP_TO_MOV_TO_BANK0_REG(16)
        JMP_TO_MOV_TO_BANK0_REG(17)
        JMP_TO_MOV_TO_BANK0_REG(18)
        JMP_TO_MOV_TO_BANK0_REG(19)
        JMP_TO_MOV_TO_BANK0_REG(20)
        JMP_TO_MOV_TO_BANK0_REG(21)
        JMP_TO_MOV_TO_BANK0_REG(22)
        JMP_TO_MOV_TO_BANK0_REG(23)
        JMP_TO_MOV_TO_BANK0_REG(24)
        JMP_TO_MOV_TO_BANK0_REG(25)
        JMP_TO_MOV_TO_BANK0_REG(26)
        JMP_TO_MOV_TO_BANK0_REG(27)
        JMP_TO_MOV_TO_BANK0_REG(28)
        JMP_TO_MOV_TO_BANK0_REG(29)
        JMP_TO_MOV_TO_BANK0_REG(30)
        JMP_TO_MOV_TO_BANK0_REG(31)
        MOV_TO_REG(32)
        MOV_TO_REG(33)
        MOV_TO_REG(34)
        MOV_TO_REG(35)
        MOV_TO_REG(36)
        MOV_TO_REG(37)
        MOV_TO_REG(38)
        MOV_TO_REG(39)
        MOV_TO_REG(40)
        MOV_TO_REG(41)
        MOV_TO_REG(42)
        MOV_TO_REG(43)
        MOV_TO_REG(44)
        MOV_TO_REG(45)
        MOV_TO_REG(46)
        MOV_TO_REG(47)
        MOV_TO_REG(48)
        MOV_TO_REG(49)
        MOV_TO_REG(50)
        MOV_TO_REG(51)
        MOV_TO_REG(52)
        MOV_TO_REG(53)
        MOV_TO_REG(54)
        MOV_TO_REG(55)
        MOV_TO_REG(56)
        MOV_TO_REG(57)
        MOV_TO_REG(58)
        MOV_TO_REG(59)
        MOV_TO_REG(60)
        MOV_TO_REG(61)
        MOV_TO_REG(62)
        MOV_TO_REG(63)
        MOV_TO_REG(64)
        MOV_TO_REG(65)
        MOV_TO_REG(66)
        MOV_TO_REG(67)
        MOV_TO_REG(68)
        MOV_TO_REG(69)
        MOV_TO_REG(70)
        MOV_TO_REG(71)
        MOV_TO_REG(72)
        MOV_TO_REG(73)
        MOV_TO_REG(74)
        MOV_TO_REG(75)
        MOV_TO_REG(76)
        MOV_TO_REG(77)
        MOV_TO_REG(78)
        MOV_TO_REG(79)
        MOV_TO_REG(80)
        MOV_TO_REG(81)
        MOV_TO_REG(82)
        MOV_TO_REG(83)
        MOV_TO_REG(84)
        MOV_TO_REG(85)
        MOV_TO_REG(86)
        MOV_TO_REG(87)
        MOV_TO_REG(88)
        MOV_TO_REG(89)
        MOV_TO_REG(90)
        MOV_TO_REG(91)
        MOV_TO_REG(92)
        MOV_TO_REG(93)
        MOV_TO_REG(94)
        MOV_TO_REG(95)
        MOV_TO_REG(96)
        MOV_TO_REG(97)
        MOV_TO_REG(98)
        MOV_TO_REG(99)
        MOV_TO_REG(100)
        MOV_TO_REG(101)
        MOV_TO_REG(102)
        MOV_TO_REG(103)
        MOV_TO_REG(104)
        MOV_TO_REG(105)
        MOV_TO_REG(106)
        MOV_TO_REG(107)
        MOV_TO_REG(108)
        MOV_TO_REG(109)
        MOV_TO_REG(110)
        MOV_TO_REG(111)
        MOV_TO_REG(112)
        MOV_TO_REG(113)
        MOV_TO_REG(114)
        MOV_TO_REG(115)
        MOV_TO_REG(116)
        MOV_TO_REG(117)
        MOV_TO_REG(118)
        MOV_TO_REG(119)
        MOV_TO_REG(120)
        MOV_TO_REG(121)
        MOV_TO_REG(122)
        MOV_TO_REG(123)
        MOV_TO_REG(124)
        MOV_TO_REG(125)
        MOV_TO_REG(126)
        MOV_TO_REG(127)
END(asm_mov_to_reg)