// arch/ia64/kvm/vmm_ivt.S — IA-64 KVM interruption vector table
// (scrape artifact removed here: a duplicated per-line index dump that was
//  not part of the original source file.)
  1. /*
  2. * arch/ia64/kvm/vmm_ivt.S
  3. *
  4. * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
  5. * Stephane Eranian <eranian@hpl.hp.com>
  6. * David Mosberger <davidm@hpl.hp.com>
  7. * Copyright (C) 2000, 2002-2003 Intel Co
  8. * Asit Mallick <asit.k.mallick@intel.com>
  9. * Suresh Siddha <suresh.b.siddha@intel.com>
  10. * Kenneth Chen <kenneth.w.chen@intel.com>
  11. * Fenghua Yu <fenghua.yu@intel.com>
  12. *
  13. *
  14. * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling
  15. * for SMP
  16. * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB
  17. * handler now uses virtual PT.
  18. *
  19. * 07/6/20 Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
  20. * Supporting Intel virtualization architecture
  21. *
  22. */
  23. /*
  24. * This file defines the interruption vector table used by the CPU.
  25. * It does not include one entry per possible cause of interruption.
  26. *
  27. * The first 20 entries of the table contain 64 bundles each while the
  28. * remaining 48 entries contain only 16 bundles each.
  29. *
  30. * The 64 bundles are used to allow inlining the whole handler for
  31. * critical
  32. * interruptions like TLB misses.
  33. *
  34. * For each entry, the comment is as follows:
  35. *
 * // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
 *    entry offset ----/     /         /                  /
 *    entry number ---------/         /                  /
 *    size of the entry -------------/                  /
 *    vector name -------------------------------------/
 *    interruptions triggering this vector ------------/
  48. *
  49. * The table is 32KB in size and must be aligned on 32KB
  50. * boundary.
  51. * (The CPU ignores the 15 lower bits of the address)
  52. *
  53. * Table is based upon EAS2.6 (Oct 1999)
  54. */
  55. #include <asm/asmmacro.h>
  56. #include <asm/cache.h>
  57. #include <asm/pgtable.h>
  58. #include "asm-offsets.h"
  59. #include "vcpu.h"
  60. #include "kvm_minstate.h"
  61. #include "vti.h"
/*
 * Default PSR bits the VMM runs with.  Kept as psr.ac (alignment-check)
 * here; flip the #if to 0 to run with no default bits set.
 */
#if 1
# define PSR_DEFAULT_BITS	psr.ac
#else
# define PSR_DEFAULT_BITS	0
#endif
  67. #define KVM_FAULT(n) \
  68. kvm_fault_##n:; \
  69. mov r19=n;; \
  70. br.sptk.many kvm_vmm_panic; \
  71. ;; \
/*
 * KVM_REFLECT(n): reflect an interruption back to the guest.
 * r29 = cr.ipsr; if psr.vm is set (p7) the fault came from guest context
 * and is dispatched to kvm_dispatch_reflection; otherwise the fault
 * happened inside the VMM itself and we panic.
 */
#define KVM_REFLECT(n)	\
	mov r31=pr;	\
	mov r19=n;	/* prepare to save predicates */ \
	mov r29=cr.ipsr;	\
	;;	\
	tbit.z p6,p7=r29,IA64_PSR_VM_BIT;	\
(p7)	br.sptk.many kvm_dispatch_reflection;	\
	br.sptk.many kvm_vmm_panic;
/*
 * kvm_vmm_panic: fatal-error path for faults taken inside the VMM.
 * Saves minimal + full state, re-enables interruption collection, then
 * calls vmm_panic_handler(r15) with ia64_leave_hypervisor as return point.
 */
GLOBAL_ENTRY(kvm_vmm_panic)
	KVM_SAVE_MIN_WITH_COVER_R19
	alloc r14=ar.pfs,0,0,1,0
	mov out0=r15
	adds r3=8,r2			// set up second base pointer
	;;
	ssm psr.ic
	;;
	srlz.i		// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i	// restore psr.i
	addl r14=@gprel(ia64_leave_hypervisor),gp
	;;
	KVM_SAVE_REST
	mov rp=r14
	;;
	br.call.sptk.many b6=vmm_panic_handler;
END(kvm_vmm_panic)
	.section .text..ivt,"ax"

	.align 32768	// align on 32KB boundary (CPU ignores low 15 addr bits)
	.global kvm_ia64_ivt
kvm_ia64_ivt:
///////////////////////////////////////////////////////////////
// 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
// VHPT walking is not used by this VMM, so hitting this vector is fatal.
ENTRY(kvm_vhpt_miss)
	KVM_FAULT(0)
END(kvm_vhpt_miss)
	.org kvm_ia64_ivt+0x400
////////////////////////////////////////////////////////////////
// 0x0400 Entry 1 (size 64 bundles) ITLB (21)
// Guest-mode miss (psr.vm clear → p6) goes to the alternate handler;
// otherwise dispatch to the C page-fault path with r19 = vector 1.
ENTRY(kvm_itlb_miss)
	mov r31 = pr
	mov r29=cr.ipsr;
	;;
	tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
(p6)	br.sptk kvm_alt_itlb_miss
	mov r19 = 1
	br.sptk kvm_itlb_miss_dispatch
	KVM_FAULT(1);
END(kvm_itlb_miss)
	.org kvm_ia64_ivt+0x0800
//////////////////////////////////////////////////////////////////
// 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
// Same split as the ITLB vector: psr.vm clear → alternate miss handler,
// otherwise dispatch to the C page-fault path.
ENTRY(kvm_dtlb_miss)
	mov r31 = pr
	mov r29=cr.ipsr;
	;;
	tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
(p6)	br.sptk kvm_alt_dtlb_miss
	br.sptk kvm_dtlb_miss_dispatch
END(kvm_dtlb_miss)
	.org kvm_ia64_ivt+0x0c00
////////////////////////////////////////////////////////////////////
// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
// Identity-map the faulting address with kernel attributes: mask the
// address down to a granule-aligned physical frame, OR in PAGE_KERNEL,
// and insert it into the instruction TLB, then rfi back.
ENTRY(kvm_alt_itlb_miss)
	mov r16=cr.ifa		// get address that caused the TLB miss
	;;
	movl r17=PAGE_KERNEL
	mov r24=cr.ipsr
	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
	;;
	and r19=r19,r16		// clear ed, reserved bits, and PTE control bits
	;;
	or r19=r17,r19		// insert PTE control bits into r19
	;;
	movl r20=IA64_GRANULE_SHIFT<<2	// page size for the insert (itir.ps)
	;;
	mov cr.itir=r20
	;;
	itc.i r19		// insert the TLB entry
	mov pr=r31,-1		// restore all predicates
	rfi
END(kvm_alt_itlb_miss)
	.org kvm_ia64_ivt+0x1000
/////////////////////////////////////////////////////////////////////
// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
// Data-side twin of kvm_alt_itlb_miss: identity-map the faulting address
// with PAGE_KERNEL attributes and insert it into the data TLB.
ENTRY(kvm_alt_dtlb_miss)
	mov r16=cr.ifa		// get address that caused the TLB miss
	;;
	movl r17=PAGE_KERNEL
	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
	mov r24=cr.ipsr
	;;
	and r19=r19,r16		// clear ed, reserved bits, and PTE control bits
	;;
	or r19=r19,r17		// insert PTE control bits into r19
	;;
	movl r20=IA64_GRANULE_SHIFT<<2	// page size for the insert (itir.ps)
	;;
	mov cr.itir=r20
	;;
	itc.d r19		// insert the TLB entry
	mov pr=r31,-1		// restore all predicates
	rfi
END(kvm_alt_dtlb_miss)
	.org kvm_ia64_ivt+0x1400
//////////////////////////////////////////////////////////////////////
// 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
// A nested miss inside the VMM is unrecoverable here.
ENTRY(kvm_nested_dtlb_miss)
	KVM_FAULT(5)
END(kvm_nested_dtlb_miss)

	.org kvm_ia64_ivt+0x1800
/////////////////////////////////////////////////////////////////////
// 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
// Reflected straight back to the guest.
ENTRY(kvm_ikey_miss)
	KVM_REFLECT(6)
END(kvm_ikey_miss)

	.org kvm_ia64_ivt+0x1c00
/////////////////////////////////////////////////////////////////////
// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
ENTRY(kvm_dkey_miss)
	KVM_REFLECT(7)
END(kvm_dkey_miss)
	.org kvm_ia64_ivt+0x2000
////////////////////////////////////////////////////////////////////
// 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
// Dirty/access-bit faults are all reflected back to the guest.
ENTRY(kvm_dirty_bit)
	KVM_REFLECT(8)
END(kvm_dirty_bit)

	.org kvm_ia64_ivt+0x2400
////////////////////////////////////////////////////////////////////
// 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
ENTRY(kvm_iaccess_bit)
	KVM_REFLECT(9)
END(kvm_iaccess_bit)

	.org kvm_ia64_ivt+0x2800
///////////////////////////////////////////////////////////////////
// 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
ENTRY(kvm_daccess_bit)
	KVM_REFLECT(10)
END(kvm_daccess_bit)
	.org kvm_ia64_ivt+0x2c00
/////////////////////////////////////////////////////////////////
// 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
// Saves full state and calls
// kvm_ia64_handle_break(cr.ifa, pt_regs*, cr.isr, cr.iim),
// returning through ia64_leave_hypervisor.
ENTRY(kvm_break_fault)
	mov r31=pr
	mov r19=11		// vector number for KVM_SAVE_MIN
	mov r29=cr.ipsr
	;;
	KVM_SAVE_MIN_WITH_COVER_R19
	;;
	alloc r14=ar.pfs,0,0,4,0	//(must be first in insn group!)
	mov out0=cr.ifa
	mov out2=cr.isr		// FIXME: pity to make this slow access twice
	mov out3=cr.iim		// FIXME: pity to make this slow access twice
	adds r3=8,r2		// set up second base pointer
	;;
	ssm psr.ic
	;;
	srlz.i			// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i		// restore psr.i
	addl r14=@gprel(ia64_leave_hypervisor),gp
	;;
	KVM_SAVE_REST
	mov rp=r14
	;;
	adds out1=16,sp		// out1 = &pt_regs on the stack
	br.call.sptk.many b6=kvm_ia64_handle_break
	;;
END(kvm_break_fault)
	.org kvm_ia64_ivt+0x3000
/////////////////////////////////////////////////////////////////
// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
// If the interrupt arrived while in guest context (psr.vm set → p7) it is
// handed to kvm_dispatch_interrupt.  Otherwise the interrupt hit the VMM
// itself: build a pt_regs frame inline (open-coded minstate save), switch
// to bank-1 registers, and call kvm_ia64_handle_irq(vcpu), returning via
// ia64_leave_nested.  p15 records whether psr.i was set at entry.
ENTRY(kvm_interrupt)
	mov r31=pr		// prepare to save predicates
	mov r19=12
	mov r29=cr.ipsr
	;;
	tbit.z p6,p7=r29,IA64_PSR_VM_BIT
	tbit.z p0,p15=r29,IA64_PSR_I_BIT
	;;
(p7)	br.sptk kvm_dispatch_interrupt
	;;
	mov r27=ar.rsc		/* M */
	mov r20=r1		/* A */
	mov r25=ar.unat		/* M */
	mov r26=ar.pfs		/* I */
	mov r28=cr.iip		/* M */
	cover			/* B (or nothing) */
	;;
	mov r1=sp
	;;
	invala			/* M */
	mov r30=cr.ifs
	;;
	addl r1=-VMM_PT_REGS_SIZE,r1	// carve pt_regs out of the stack
	;;
	adds r17=2*L1_CACHE_BYTES,r1	/* really: biggest cache-line size */
	adds r16=PT(CR_IPSR),r1
	;;
	lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES
	st8 [r16]=r29		/* save cr.ipsr */
	;;
	lfetch.fault.excl.nt1 [r17]
	mov r29=b0
	;;
	adds r16=PT(R8),r1	/* initialize first base pointer */
	adds r17=PT(R9),r1	/* initialize second base pointer */
	mov r18=r0		/* make sure r18 isn't NaT */
	;;
.mem.offset 0,0; st8.spill [r16]=r8,16
.mem.offset 8,0; st8.spill [r17]=r9,16
	;;
.mem.offset 0,0; st8.spill [r16]=r10,24
.mem.offset 8,0; st8.spill [r17]=r11,24
	;;
	st8 [r16]=r28,16	/* save cr.iip */
	st8 [r17]=r30,16	/* save cr.ifs */
	mov r8=ar.fpsr		/* M */
	mov r9=ar.csd
	mov r10=ar.ssd
	movl r11=FPSR_DEFAULT	/* L-unit */
	;;
	st8 [r16]=r25,16	/* save ar.unat */
	st8 [r17]=r26,16	/* save ar.pfs */
	shl r18=r18,16		/* compute ar.rsc to be used for "loadrs" */
	;;
	st8 [r16]=r27,16	/* save ar.rsc */
	adds r17=16,r17		/* skip over ar_rnat field */
	;;
	st8 [r17]=r31,16	/* save predicates */
	adds r16=16,r16		/* skip over ar_bspstore field */
	;;
	st8 [r16]=r29,16	/* save b0 */
	st8 [r17]=r18,16	/* save ar.rsc value for "loadrs" */
	;;
.mem.offset 0,0; st8.spill [r16]=r20,16	/* save original r1 */
.mem.offset 8,0; st8.spill [r17]=r12,16
	adds r12=-16,r1
	/* switch to kernel memory stack (with 16 bytes of scratch) */
	;;
.mem.offset 0,0; st8.spill [r16]=r13,16
.mem.offset 8,0; st8.spill [r17]=r8,16	/* save ar.fpsr */
	;;
.mem.offset 0,0; st8.spill [r16]=r15,16
.mem.offset 8,0; st8.spill [r17]=r14,16
	dep r14=-1,r0,60,4
	;;
.mem.offset 0,0; st8.spill [r16]=r2,16
.mem.offset 8,0; st8.spill [r17]=r3,16
	adds r2=VMM_PT_REGS_R16_OFFSET,r1
	adds r14 = VMM_VCPU_GP_OFFSET,r13
	;;
	mov r8=ar.ccv
	ld8 r14 = [r14]
	;;
	mov r1=r14		/* establish kernel global pointer */
	// NOTE(review): stray line-continuation '\' on the next line looks
	// like macro-expansion residue — verify against upstream vmm_ivt.S.
	;; \
	bsw.1
	;;
	alloc r14=ar.pfs,0,0,1,0	// must be first in an insn group
	mov out0=r13		// out0 = vcpu (r13)
	;;
	ssm psr.ic
	;;
	srlz.i
	;;
	//(p15) ssm psr.i
	adds r3=8,r2		// set up second base pointer for SAVE_REST
	srlz.i			// ensure everybody knows psr.ic is back on
	;;
	// spill the bank-1 scratch registers r16-r31
.mem.offset 0,0; st8.spill [r2]=r16,16
.mem.offset 8,0; st8.spill [r3]=r17,16
	;;
.mem.offset 0,0; st8.spill [r2]=r18,16
.mem.offset 8,0; st8.spill [r3]=r19,16
	;;
.mem.offset 0,0; st8.spill [r2]=r20,16
.mem.offset 8,0; st8.spill [r3]=r21,16
	mov r18=b6
	;;
.mem.offset 0,0; st8.spill [r2]=r22,16
.mem.offset 8,0; st8.spill [r3]=r23,16
	mov r19=b7
	;;
.mem.offset 0,0; st8.spill [r2]=r24,16
.mem.offset 8,0; st8.spill [r3]=r25,16
	;;
.mem.offset 0,0; st8.spill [r2]=r26,16
.mem.offset 8,0; st8.spill [r3]=r27,16
	;;
.mem.offset 0,0; st8.spill [r2]=r28,16
.mem.offset 8,0; st8.spill [r3]=r29,16
	;;
.mem.offset 0,0; st8.spill [r2]=r30,16
.mem.offset 8,0; st8.spill [r3]=r31,32
	;;
	mov ar.fpsr=r11		/* M-unit */
	st8 [r2]=r8,8		/* ar.ccv */
	adds r24=PT(B6)-PT(F7),r3
	;;
	stf.spill [r2]=f6,32
	stf.spill [r3]=f7,32
	;;
	stf.spill [r2]=f8,32
	stf.spill [r3]=f9,32
	;;
	stf.spill [r2]=f10
	stf.spill [r3]=f11
	adds r25=PT(B7)-PT(F11),r3
	;;
	st8 [r24]=r18,16	/* b6 */
	st8 [r25]=r19,16	/* b7 */
	;;
	st8 [r24]=r9		/* ar.csd */
	st8 [r25]=r10		/* ar.ssd */
	;;
	srlz.d			// make sure we see the effect of cr.ivr
	addl r14=@gprel(ia64_leave_nested),gp
	;;
	mov rp=r14
	br.call.sptk.many b6=kvm_ia64_handle_irq
	;;
END(kvm_interrupt)
	.global kvm_dispatch_vexirq
	.org kvm_ia64_ivt+0x3400
//////////////////////////////////////////////////////////////////////
// 0x3400 Entry 13 (size 64 bundles) Reserved
// Used by this VMM to inject virtual external interrupts.  The
// kvm_dispatch_vexirq label is also entered from elsewhere with r30 == 1,
// in which case r1 is first reloaded from the vcpu's saved gp.
ENTRY(kvm_virtual_exirq)
	mov r31=pr
	mov r19=13
	mov r30 =r0		// r30 == 0: entered via the vector itself
	;;
kvm_dispatch_vexirq:
	cmp.eq p6,p0 = 1,r30
	;;
(p6)	add r29 = VMM_VCPU_SAVED_GP_OFFSET,r21
	;;
(p6)	ld8 r1 = [r29]		// restore gp saved at fault time
	;;
	KVM_SAVE_MIN_WITH_COVER_R19
	alloc r14=ar.pfs,0,0,1,0
	mov out0=r13		// out0 = vcpu
	ssm psr.ic
	;;
	srlz.i			// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i		// restore psr.i
	adds r3=8,r2		// set up second base pointer
	;;
	KVM_SAVE_REST
	addl r14=@gprel(ia64_leave_hypervisor),gp
	;;
	mov rp=r14
	br.call.sptk.many b6=kvm_vexirq
END(kvm_virtual_exirq)
	.org kvm_ia64_ivt+0x3800
/////////////////////////////////////////////////////////////////////
// 0x3800 Entry 14 (size 64 bundles) Reserved
	KVM_FAULT(14)
	// this code segment is from 2.6.16.13

	.org kvm_ia64_ivt+0x3c00
///////////////////////////////////////////////////////////////////////
// 0x3c00 Entry 15 (size 64 bundles) Reserved
	KVM_FAULT(15)

	.org kvm_ia64_ivt+0x4000
///////////////////////////////////////////////////////////////////////
// 0x4000 Entry 16 (size 64 bundles) Reserved
	KVM_FAULT(16)

	.org kvm_ia64_ivt+0x4400
//////////////////////////////////////////////////////////////////////
// 0x4400 Entry 17 (size 64 bundles) Reserved
	KVM_FAULT(17)

	.org kvm_ia64_ivt+0x4800
//////////////////////////////////////////////////////////////////////
// 0x4800 Entry 18 (size 64 bundles) Reserved
	KVM_FAULT(18)

	.org kvm_ia64_ivt+0x4c00
//////////////////////////////////////////////////////////////////////
// 0x4c00 Entry 19 (size 64 bundles) Reserved
	KVM_FAULT(19)
	.org kvm_ia64_ivt+0x5000
//////////////////////////////////////////////////////////////////////
// 0x5000 Entry 20 (size 16 bundles) Page Not Present
// From here on the entries are 16 bundles each; all of these are
// reflected back to the guest.
ENTRY(kvm_page_not_present)
	KVM_REFLECT(20)
END(kvm_page_not_present)

	.org kvm_ia64_ivt+0x5100
///////////////////////////////////////////////////////////////////////
// 0x5100 Entry 21 (size 16 bundles) Key Permission vector
ENTRY(kvm_key_permission)
	KVM_REFLECT(21)
END(kvm_key_permission)

	.org kvm_ia64_ivt+0x5200
//////////////////////////////////////////////////////////////////////
// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
ENTRY(kvm_iaccess_rights)
	KVM_REFLECT(22)
END(kvm_iaccess_rights)

	.org kvm_ia64_ivt+0x5300
//////////////////////////////////////////////////////////////////////
// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
ENTRY(kvm_daccess_rights)
	KVM_REFLECT(23)
END(kvm_daccess_rights)
	.org kvm_ia64_ivt+0x5400
/////////////////////////////////////////////////////////////////////
// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
ENTRY(kvm_general_exception)
	KVM_REFLECT(24)
	// NOTE(review): KVM_REFLECT ends in an unconditional branch, so the
	// KVM_FAULT(24) below appears unreachable; kept as in the original.
	KVM_FAULT(24)
END(kvm_general_exception)
	.org kvm_ia64_ivt+0x5500
//////////////////////////////////////////////////////////////////////
// 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
ENTRY(kvm_disabled_fp_reg)
	KVM_REFLECT(25)
END(kvm_disabled_fp_reg)

	.org kvm_ia64_ivt+0x5600
////////////////////////////////////////////////////////////////////
// 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
ENTRY(kvm_nat_consumption)
	KVM_REFLECT(26)
END(kvm_nat_consumption)

	.org kvm_ia64_ivt+0x5700
/////////////////////////////////////////////////////////////////////
// 0x5700 Entry 27 (size 16 bundles) Speculation (40)
ENTRY(kvm_speculation_vector)
	KVM_REFLECT(27)
END(kvm_speculation_vector)

	.org kvm_ia64_ivt+0x5800
/////////////////////////////////////////////////////////////////////
// 0x5800 Entry 28 (size 16 bundles) Reserved
	KVM_FAULT(28)
	.org kvm_ia64_ivt+0x5900
///////////////////////////////////////////////////////////////////
// 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
// Debug faults are not supported by this VMM (fatal), while the
// remaining trap vectors below are reflected back to the guest.
ENTRY(kvm_debug_vector)
	KVM_FAULT(29)
END(kvm_debug_vector)

	.org kvm_ia64_ivt+0x5a00
///////////////////////////////////////////////////////////////
// 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
ENTRY(kvm_unaligned_access)
	KVM_REFLECT(30)
END(kvm_unaligned_access)

	.org kvm_ia64_ivt+0x5b00
//////////////////////////////////////////////////////////////////////
// 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
ENTRY(kvm_unsupported_data_reference)
	KVM_REFLECT(31)
END(kvm_unsupported_data_reference)

	.org kvm_ia64_ivt+0x5c00
////////////////////////////////////////////////////////////////////
// 0x5c00 Entry 32 (size 16 bundles) Floating Point FAULT (65)
ENTRY(kvm_floating_point_fault)
	KVM_REFLECT(32)
END(kvm_floating_point_fault)

	.org kvm_ia64_ivt+0x5d00
/////////////////////////////////////////////////////////////////////
// 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
ENTRY(kvm_floating_point_trap)
	KVM_REFLECT(33)
END(kvm_floating_point_trap)

	.org kvm_ia64_ivt+0x5e00
//////////////////////////////////////////////////////////////////////
// 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
ENTRY(kvm_lower_privilege_trap)
	KVM_REFLECT(34)
END(kvm_lower_privilege_trap)

	.org kvm_ia64_ivt+0x5f00
//////////////////////////////////////////////////////////////////////
// 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
ENTRY(kvm_taken_branch_trap)
	KVM_REFLECT(35)
END(kvm_taken_branch_trap)

	.org kvm_ia64_ivt+0x6000
////////////////////////////////////////////////////////////////////
// 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
ENTRY(kvm_single_step_trap)
	KVM_REFLECT(36)
END(kvm_single_step_trap)
	.global kvm_virtualization_fault_back
	.org kvm_ia64_ivt+0x6100
/////////////////////////////////////////////////////////////////////
// 0x6100 Entry 37 (size 16 bundles) Virtualization Fault
// Fast path: r24 holds the event cause, r25 the opcode (set by hardware/
// earlier code — values stored below as cause/opcode).  Common privileged
// operations branch directly to hand-written asm emulators; any emulator
// that cannot handle its case returns to kvm_virtualization_fault_back,
// which falls through to the slow C dispatch path.
// r21 is the vcpu pointer (per the VMM_VCPU_*_OFFSET accesses below).
ENTRY(kvm_virtualization_fault)
	mov r31=pr
	adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
	;;
	st8 [r16] = r1		// stash the guest's gp
	adds r17 = VMM_VCPU_GP_OFFSET, r21
	;;
	ld8 r1 = [r17]		// switch to the VMM's gp
	cmp.eq p6,p0=EVENT_MOV_FROM_AR,r24
	cmp.eq p7,p0=EVENT_MOV_FROM_RR,r24
	cmp.eq p8,p0=EVENT_MOV_TO_RR,r24
	cmp.eq p9,p0=EVENT_RSM,r24
	cmp.eq p10,p0=EVENT_SSM,r24
	cmp.eq p11,p0=EVENT_MOV_TO_PSR,r24
	cmp.eq p12,p0=EVENT_THASH,r24
(p6)	br.dptk.many kvm_asm_mov_from_ar
(p7)	br.dptk.many kvm_asm_mov_from_rr
(p8)	br.dptk.many kvm_asm_mov_to_rr
(p9)	br.dptk.many kvm_asm_rsm
(p10)	br.dptk.many kvm_asm_ssm
(p11)	br.dptk.many kvm_asm_mov_to_psr
(p12)	br.dptk.many kvm_asm_thash
	;;
kvm_virtualization_fault_back:
	adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
	;;
	ld8 r1 = [r16]		// restore the gp saved above
	;;
	mov r19=37		// vector number for the dispatcher
	adds r16 = VMM_VCPU_CAUSE_OFFSET,r21
	adds r17 = VMM_VCPU_OPCODE_OFFSET,r21
	;;
	st8 [r16] = r24		// record cause for the C handler
	st8 [r17] = r25		// record opcode for the C handler
	;;
	cmp.ne p6,p0=EVENT_RFI, r24
(p6)	br.sptk kvm_dispatch_virtualization_fault
	;;
	// EVENT_RFI: check vifs.v in the VPD
	adds r18=VMM_VPD_BASE_OFFSET,r21
	;;
	ld8 r18=[r18]
	;;
	adds r18=VMM_VPD_VIFS_OFFSET,r18
	;;
	ld8 r18=[r18]
	;;
	tbit.z p6,p0=r18,63
(p6)	br.sptk kvm_dispatch_virtualization_fault
	;;
	//if vifs.v=1 desert current register frame
	alloc r18=ar.pfs,0,0,0,0
	br.sptk kvm_dispatch_virtualization_fault
END(kvm_virtualization_fault)
	.org kvm_ia64_ivt+0x6200
//////////////////////////////////////////////////////////////
// 0x6200 Entry 38 (size 16 bundles) Reserved
	KVM_FAULT(38)

	.org kvm_ia64_ivt+0x6300
/////////////////////////////////////////////////////////////////
// 0x6300 Entry 39 (size 16 bundles) Reserved
	KVM_FAULT(39)

	.org kvm_ia64_ivt+0x6400
/////////////////////////////////////////////////////////////////
// 0x6400 Entry 40 (size 16 bundles) Reserved
	KVM_FAULT(40)

	.org kvm_ia64_ivt+0x6500
//////////////////////////////////////////////////////////////////
// 0x6500 Entry 41 (size 16 bundles) Reserved
	KVM_FAULT(41)

	.org kvm_ia64_ivt+0x6600
//////////////////////////////////////////////////////////////////
// 0x6600 Entry 42 (size 16 bundles) Reserved
	KVM_FAULT(42)

	.org kvm_ia64_ivt+0x6700
//////////////////////////////////////////////////////////////////
// 0x6700 Entry 43 (size 16 bundles) Reserved
	KVM_FAULT(43)

	.org kvm_ia64_ivt+0x6800
//////////////////////////////////////////////////////////////////
// 0x6800 Entry 44 (size 16 bundles) Reserved
	KVM_FAULT(44)

	.org kvm_ia64_ivt+0x6900
///////////////////////////////////////////////////////////////////
// 0x6900 Entry 45 (size 16 bundles) IA-32 Exeception
//(17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
// IA-32 execution is not supported by this VMM.
ENTRY(kvm_ia32_exception)
	KVM_FAULT(45)
END(kvm_ia32_exception)
	.org kvm_ia64_ivt+0x6a00
////////////////////////////////////////////////////////////////////
// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
ENTRY(kvm_ia32_intercept)
	// NOTE(review): fault id 47 under Entry 46 — only affects the panic
	// diagnostic value in r19; verify against upstream vmm_ivt.S.
	KVM_FAULT(47)
END(kvm_ia32_intercept)
	// NOTE(review): offsets 0x6b00 (Entry 47) and 0x7000 (Entry 52) have
	// no handler in this copy — an interruption delivered there would land
	// in .org padding.  Verify against upstream vmm_ivt.S before relying
	// on this. Also note Entry 51 uses fault id 52 (diagnostic value only).
	.org kvm_ia64_ivt+0x6c00
/////////////////////////////////////////////////////////////////////
// 0x6c00 Entry 48 (size 16 bundles) Reserved
	KVM_FAULT(48)

	.org kvm_ia64_ivt+0x6d00
//////////////////////////////////////////////////////////////////////
// 0x6d00 Entry 49 (size 16 bundles) Reserved
	KVM_FAULT(49)

	.org kvm_ia64_ivt+0x6e00
//////////////////////////////////////////////////////////////////////
// 0x6e00 Entry 50 (size 16 bundles) Reserved
	KVM_FAULT(50)

	.org kvm_ia64_ivt+0x6f00
/////////////////////////////////////////////////////////////////////
// 0x6f00 Entry 51 (size 16 bundles) Reserved
	KVM_FAULT(52)

	.org kvm_ia64_ivt+0x7100
////////////////////////////////////////////////////////////////////
// 0x7100 Entry 53 (size 16 bundles) Reserved
	KVM_FAULT(53)

	.org kvm_ia64_ivt+0x7200
/////////////////////////////////////////////////////////////////////
// 0x7200 Entry 54 (size 16 bundles) Reserved
	KVM_FAULT(54)

	.org kvm_ia64_ivt+0x7300
////////////////////////////////////////////////////////////////////
// 0x7300 Entry 55 (size 16 bundles) Reserved
	KVM_FAULT(55)

	.org kvm_ia64_ivt+0x7400
////////////////////////////////////////////////////////////////////
// 0x7400 Entry 56 (size 16 bundles) Reserved
	KVM_FAULT(56)

	.org kvm_ia64_ivt+0x7500
/////////////////////////////////////////////////////////////////////
// 0x7500 Entry 57 (size 16 bundles) Reserved
	KVM_FAULT(57)

	.org kvm_ia64_ivt+0x7600
/////////////////////////////////////////////////////////////////////
// 0x7600 Entry 58 (size 16 bundles) Reserved
	KVM_FAULT(58)

	.org kvm_ia64_ivt+0x7700
////////////////////////////////////////////////////////////////////
// 0x7700 Entry 59 (size 16 bundles) Reserved
	KVM_FAULT(59)

	.org kvm_ia64_ivt+0x7800
////////////////////////////////////////////////////////////////////
// 0x7800 Entry 60 (size 16 bundles) Reserved
	KVM_FAULT(60)

	.org kvm_ia64_ivt+0x7900
/////////////////////////////////////////////////////////////////////
// 0x7900 Entry 61 (size 16 bundles) Reserved
	KVM_FAULT(61)

	.org kvm_ia64_ivt+0x7a00
/////////////////////////////////////////////////////////////////////
// 0x7a00 Entry 62 (size 16 bundles) Reserved
	KVM_FAULT(62)

	.org kvm_ia64_ivt+0x7b00
/////////////////////////////////////////////////////////////////////
// 0x7b00 Entry 63 (size 16 bundles) Reserved
	KVM_FAULT(63)

	.org kvm_ia64_ivt+0x7c00
////////////////////////////////////////////////////////////////////
// 0x7c00 Entry 64 (size 16 bundles) Reserved
	KVM_FAULT(64)

	.org kvm_ia64_ivt+0x7d00
/////////////////////////////////////////////////////////////////////
// 0x7d00 Entry 65 (size 16 bundles) Reserved
	KVM_FAULT(65)

	.org kvm_ia64_ivt+0x7e00
/////////////////////////////////////////////////////////////////////
// 0x7e00 Entry 66 (size 16 bundles) Reserved
	KVM_FAULT(66)

	.org kvm_ia64_ivt+0x7f00
////////////////////////////////////////////////////////////////////
// 0x7f00 Entry 67 (size 16 bundles) Reserved
	KVM_FAULT(67)
	.org kvm_ia64_ivt+0x8000
// There is no particular reason for this code to be here, other than that
// there happens to be space here that would go unused otherwise. If this
// fault ever gets "unreserved", simply moved the following code to a more
// suitable spot...

// Slow path for guest DTLB misses: full state save, then
// kvm_page_fault(cr.ifa, r15, pt_regs*) via ia64_leave_hypervisor_prepare.
ENTRY(kvm_dtlb_miss_dispatch)
	mov r19 = 2		// vector number for KVM_SAVE_MIN
	KVM_SAVE_MIN_WITH_COVER_R19
	alloc r14=ar.pfs,0,0,3,0
	mov out0=cr.ifa
	mov out1=r15
	adds r3=8,r2		// set up second base pointer
	;;
	ssm psr.ic
	;;
	srlz.i			// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i		// restore psr.i
	addl r14=@gprel(ia64_leave_hypervisor_prepare),gp
	;;
	KVM_SAVE_REST
	KVM_SAVE_EXTRA
	mov rp=r14
	;;
	adds out2=16,r12	// out2 = &pt_regs on the stack
	br.call.sptk.many b6=kvm_page_fault
END(kvm_dtlb_miss_dispatch)
  754. ENTRY(kvm_itlb_miss_dispatch)
  755. KVM_SAVE_MIN_WITH_COVER_R19
  756. alloc r14=ar.pfs,0,0,3,0
  757. mov out0=cr.ifa
  758. mov out1=r15
  759. adds r3=8,r2 // set up second base pointer
  760. ;;
  761. ssm psr.ic
  762. ;;
  763. srlz.i // guarantee that interruption collection is on
  764. ;;
  765. (p15) ssm psr.i // restore psr.i
  766. addl r14=@gprel(ia64_leave_hypervisor),gp
  767. ;;
  768. KVM_SAVE_REST
  769. mov rp=r14
  770. ;;
  771. adds out2=16,r12
  772. br.call.sptk.many b6=kvm_page_fault
  773. END(kvm_itlb_miss_dispatch)
  774. ENTRY(kvm_dispatch_reflection)
  775. /*
  776. * Input:
  777. * psr.ic: off
  778. * r19: intr type (offset into ivt, see ia64_int.h)
  779. * r31: contains saved predicates (pr)
  780. */
  781. KVM_SAVE_MIN_WITH_COVER_R19
  782. alloc r14=ar.pfs,0,0,5,0
  783. mov out0=cr.ifa
  784. mov out1=cr.isr
  785. mov out2=cr.iim
  786. mov out3=r15
  787. adds r3=8,r2 // set up second base pointer
  788. ;;
  789. ssm psr.ic
  790. ;;
  791. srlz.i // guarantee that interruption collection is on
  792. ;;
  793. (p15) ssm psr.i // restore psr.i
  794. addl r14=@gprel(ia64_leave_hypervisor),gp
  795. ;;
  796. KVM_SAVE_REST
  797. mov rp=r14
  798. ;;
  799. adds out4=16,r12
  800. br.call.sptk.many b6=reflect_interruption
  801. END(kvm_dispatch_reflection)
  802. ENTRY(kvm_dispatch_virtualization_fault)
  803. adds r16 = VMM_VCPU_CAUSE_OFFSET,r21
  804. adds r17 = VMM_VCPU_OPCODE_OFFSET,r21
  805. ;;
  806. st8 [r16] = r24
  807. st8 [r17] = r25
  808. ;;
  809. KVM_SAVE_MIN_WITH_COVER_R19
  810. ;;
  811. alloc r14=ar.pfs,0,0,2,0 // (must be first in insn group!)
  812. mov out0=r13 //vcpu
  813. adds r3=8,r2 // set up second base pointer
  814. ;;
  815. ssm psr.ic
  816. ;;
  817. srlz.i // guarantee that interruption collection is on
  818. ;;
  819. (p15) ssm psr.i // restore psr.i
  820. addl r14=@gprel(ia64_leave_hypervisor_prepare),gp
  821. ;;
  822. KVM_SAVE_REST
  823. KVM_SAVE_EXTRA
  824. mov rp=r14
  825. ;;
  826. adds out1=16,sp //regs
  827. br.call.sptk.many b6=kvm_emulate
  828. END(kvm_dispatch_virtualization_fault)
  829. ENTRY(kvm_dispatch_interrupt)
  830. KVM_SAVE_MIN_WITH_COVER_R19 // uses r31; defines r2 and r3
  831. ;;
  832. alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group
  833. adds r3=8,r2 // set up second base pointer for SAVE_REST
  834. ;;
  835. ssm psr.ic
  836. ;;
  837. srlz.i
  838. ;;
  839. (p15) ssm psr.i
  840. addl r14=@gprel(ia64_leave_hypervisor),gp
  841. ;;
  842. KVM_SAVE_REST
  843. mov rp=r14
  844. ;;
  845. mov out0=r13 // pass pointer to pt_regs as second arg
  846. br.call.sptk.many b6=kvm_ia64_handle_irq
  847. END(kvm_dispatch_interrupt)
  848. GLOBAL_ENTRY(ia64_leave_nested)
  849. rsm psr.i
  850. ;;
  851. adds r21=PT(PR)+16,r12
  852. ;;
  853. lfetch [r21],PT(CR_IPSR)-PT(PR)
  854. adds r2=PT(B6)+16,r12
  855. adds r3=PT(R16)+16,r12
  856. ;;
  857. lfetch [r21]
  858. ld8 r28=[r2],8 // load b6
  859. adds r29=PT(R24)+16,r12
  860. ld8.fill r16=[r3]
  861. adds r3=PT(AR_CSD)-PT(R16),r3
  862. adds r30=PT(AR_CCV)+16,r12
  863. ;;
  864. ld8.fill r24=[r29]
  865. ld8 r15=[r30] // load ar.ccv
  866. ;;
  867. ld8 r29=[r2],16 // load b7
  868. ld8 r30=[r3],16 // load ar.csd
  869. ;;
  870. ld8 r31=[r2],16 // load ar.ssd
  871. ld8.fill r8=[r3],16
  872. ;;
  873. ld8.fill r9=[r2],16
  874. ld8.fill r10=[r3],PT(R17)-PT(R10)
  875. ;;
  876. ld8.fill r11=[r2],PT(R18)-PT(R11)
  877. ld8.fill r17=[r3],16
  878. ;;
  879. ld8.fill r18=[r2],16
  880. ld8.fill r19=[r3],16
  881. ;;
  882. ld8.fill r20=[r2],16
  883. ld8.fill r21=[r3],16
  884. mov ar.csd=r30
  885. mov ar.ssd=r31
  886. ;;
  887. rsm psr.i | psr.ic
  888. // initiate turning off of interrupt and interruption collection
  889. invala // invalidate ALAT
  890. ;;
  891. srlz.i
  892. ;;
  893. ld8.fill r22=[r2],24
  894. ld8.fill r23=[r3],24
  895. mov b6=r28
  896. ;;
  897. ld8.fill r25=[r2],16
  898. ld8.fill r26=[r3],16
  899. mov b7=r29
  900. ;;
  901. ld8.fill r27=[r2],16
  902. ld8.fill r28=[r3],16
  903. ;;
  904. ld8.fill r29=[r2],16
  905. ld8.fill r30=[r3],24
  906. ;;
  907. ld8.fill r31=[r2],PT(F9)-PT(R31)
  908. adds r3=PT(F10)-PT(F6),r3
  909. ;;
  910. ldf.fill f9=[r2],PT(F6)-PT(F9)
  911. ldf.fill f10=[r3],PT(F8)-PT(F10)
  912. ;;
  913. ldf.fill f6=[r2],PT(F7)-PT(F6)
  914. ;;
  915. ldf.fill f7=[r2],PT(F11)-PT(F7)
  916. ldf.fill f8=[r3],32
  917. ;;
  918. srlz.i // ensure interruption collection is off
  919. mov ar.ccv=r15
  920. ;;
  921. bsw.0 // switch back to bank 0 (no stop bit required beforehand...)
  922. ;;
  923. ldf.fill f11=[r2]
  924. // mov r18=r13
  925. // mov r21=r13
  926. adds r16=PT(CR_IPSR)+16,r12
  927. adds r17=PT(CR_IIP)+16,r12
  928. ;;
  929. ld8 r29=[r16],16 // load cr.ipsr
  930. ld8 r28=[r17],16 // load cr.iip
  931. ;;
  932. ld8 r30=[r16],16 // load cr.ifs
  933. ld8 r25=[r17],16 // load ar.unat
  934. ;;
  935. ld8 r26=[r16],16 // load ar.pfs
  936. ld8 r27=[r17],16 // load ar.rsc
  937. cmp.eq p9,p0=r0,r0
  938. // set p9 to indicate that we should restore cr.ifs
  939. ;;
  940. ld8 r24=[r16],16 // load ar.rnat (may be garbage)
  941. ld8 r23=[r17],16// load ar.bspstore (may be garbage)
  942. ;;
  943. ld8 r31=[r16],16 // load predicates
  944. ld8 r22=[r17],16 // load b0
  945. ;;
  946. ld8 r19=[r16],16 // load ar.rsc value for "loadrs"
  947. ld8.fill r1=[r17],16 // load r1
  948. ;;
  949. ld8.fill r12=[r16],16
  950. ld8.fill r13=[r17],16
  951. ;;
  952. ld8 r20=[r16],16 // ar.fpsr
  953. ld8.fill r15=[r17],16
  954. ;;
  955. ld8.fill r14=[r16],16
  956. ld8.fill r2=[r17]
  957. ;;
  958. ld8.fill r3=[r16]
  959. ;;
  960. mov r16=ar.bsp // get existing backing store pointer
  961. ;;
  962. mov b0=r22
  963. mov ar.pfs=r26
  964. mov cr.ifs=r30
  965. mov cr.ipsr=r29
  966. mov ar.fpsr=r20
  967. mov cr.iip=r28
  968. ;;
  969. mov ar.rsc=r27
  970. mov ar.unat=r25
  971. mov pr=r31,-1
  972. rfi
  973. END(ia64_leave_nested)
  974. GLOBAL_ENTRY(ia64_leave_hypervisor_prepare)
  975. /*
  976. * work.need_resched etc. mustn't get changed
  977. *by this CPU before it returns to
  978. * user- or fsys-mode, hence we disable interrupts early on:
  979. */
  980. adds r2 = PT(R4)+16,r12
  981. adds r3 = PT(R5)+16,r12
  982. adds r8 = PT(EML_UNAT)+16,r12
  983. ;;
  984. ld8 r8 = [r8]
  985. ;;
  986. mov ar.unat=r8
  987. ;;
  988. ld8.fill r4=[r2],16 //load r4
  989. ld8.fill r5=[r3],16 //load r5
  990. ;;
  991. ld8.fill r6=[r2] //load r6
  992. ld8.fill r7=[r3] //load r7
  993. ;;
  994. END(ia64_leave_hypervisor_prepare)
  995. //fall through
  996. GLOBAL_ENTRY(ia64_leave_hypervisor)
  997. rsm psr.i
  998. ;;
  999. br.call.sptk.many b0=leave_hypervisor_tail
  1000. ;;
  1001. adds r20=PT(PR)+16,r12
  1002. adds r8=PT(EML_UNAT)+16,r12
  1003. ;;
  1004. ld8 r8=[r8]
  1005. ;;
  1006. mov ar.unat=r8
  1007. ;;
  1008. lfetch [r20],PT(CR_IPSR)-PT(PR)
  1009. adds r2 = PT(B6)+16,r12
  1010. adds r3 = PT(B7)+16,r12
  1011. ;;
  1012. lfetch [r20]
  1013. ;;
  1014. ld8 r24=[r2],16 /* B6 */
  1015. ld8 r25=[r3],16 /* B7 */
  1016. ;;
  1017. ld8 r26=[r2],16 /* ar_csd */
  1018. ld8 r27=[r3],16 /* ar_ssd */
  1019. mov b6 = r24
  1020. ;;
  1021. ld8.fill r8=[r2],16
  1022. ld8.fill r9=[r3],16
  1023. mov b7 = r25
  1024. ;;
  1025. mov ar.csd = r26
  1026. mov ar.ssd = r27
  1027. ;;
  1028. ld8.fill r10=[r2],PT(R15)-PT(R10)
  1029. ld8.fill r11=[r3],PT(R14)-PT(R11)
  1030. ;;
  1031. ld8.fill r15=[r2],PT(R16)-PT(R15)
  1032. ld8.fill r14=[r3],PT(R17)-PT(R14)
  1033. ;;
  1034. ld8.fill r16=[r2],16
  1035. ld8.fill r17=[r3],16
  1036. ;;
  1037. ld8.fill r18=[r2],16
  1038. ld8.fill r19=[r3],16
  1039. ;;
  1040. ld8.fill r20=[r2],16
  1041. ld8.fill r21=[r3],16
  1042. ;;
  1043. ld8.fill r22=[r2],16
  1044. ld8.fill r23=[r3],16
  1045. ;;
  1046. ld8.fill r24=[r2],16
  1047. ld8.fill r25=[r3],16
  1048. ;;
  1049. ld8.fill r26=[r2],16
  1050. ld8.fill r27=[r3],16
  1051. ;;
  1052. ld8.fill r28=[r2],16
  1053. ld8.fill r29=[r3],16
  1054. ;;
  1055. ld8.fill r30=[r2],PT(F6)-PT(R30)
  1056. ld8.fill r31=[r3],PT(F7)-PT(R31)
  1057. ;;
  1058. rsm psr.i | psr.ic
  1059. // initiate turning off of interrupt and interruption collection
  1060. invala // invalidate ALAT
  1061. ;;
  1062. srlz.i // ensure interruption collection is off
  1063. ;;
  1064. bsw.0
  1065. ;;
  1066. adds r16 = PT(CR_IPSR)+16,r12
  1067. adds r17 = PT(CR_IIP)+16,r12
  1068. mov r21=r13 // get current
  1069. ;;
  1070. ld8 r31=[r16],16 // load cr.ipsr
  1071. ld8 r30=[r17],16 // load cr.iip
  1072. ;;
  1073. ld8 r29=[r16],16 // load cr.ifs
  1074. ld8 r28=[r17],16 // load ar.unat
  1075. ;;
  1076. ld8 r27=[r16],16 // load ar.pfs
  1077. ld8 r26=[r17],16 // load ar.rsc
  1078. ;;
  1079. ld8 r25=[r16],16 // load ar.rnat
  1080. ld8 r24=[r17],16 // load ar.bspstore
  1081. ;;
  1082. ld8 r23=[r16],16 // load predicates
  1083. ld8 r22=[r17],16 // load b0
  1084. ;;
  1085. ld8 r20=[r16],16 // load ar.rsc value for "loadrs"
  1086. ld8.fill r1=[r17],16 //load r1
  1087. ;;
  1088. ld8.fill r12=[r16],16 //load r12
  1089. ld8.fill r13=[r17],PT(R2)-PT(R13) //load r13
  1090. ;;
  1091. ld8 r19=[r16],PT(R3)-PT(AR_FPSR) //load ar_fpsr
  1092. ld8.fill r2=[r17],PT(AR_CCV)-PT(R2) //load r2
  1093. ;;
  1094. ld8.fill r3=[r16] //load r3
  1095. ld8 r18=[r17] //load ar_ccv
  1096. ;;
  1097. mov ar.fpsr=r19
  1098. mov ar.ccv=r18
  1099. shr.u r18=r20,16
  1100. ;;
  1101. kvm_rbs_switch:
  1102. mov r19=96
  1103. kvm_dont_preserve_current_frame:
  1104. /*
  1105. * To prevent leaking bits between the hypervisor and guest domain,
  1106. * we must clear the stacked registers in the "invalid" partition here.
  1107. * 5 registers/cycle on McKinley).
  1108. */
  1109. # define pRecurse p6
  1110. # define pReturn p7
  1111. # define Nregs 14
  1112. alloc loc0=ar.pfs,2,Nregs-2,2,0
  1113. shr.u loc1=r18,9 // RNaTslots <= floor(dirtySize / (64*8))
  1114. sub r19=r19,r18 // r19 = (physStackedSize + 8) - dirtySize
  1115. ;;
  1116. mov ar.rsc=r20 // load ar.rsc to be used for "loadrs"
  1117. shladd in0=loc1,3,r19
  1118. mov in1=0
  1119. ;;
  1120. TEXT_ALIGN(32)
  1121. kvm_rse_clear_invalid:
  1122. alloc loc0=ar.pfs,2,Nregs-2,2,0
  1123. cmp.lt pRecurse,p0=Nregs*8,in0
  1124. // if more than Nregs regs left to clear, (re)curse
  1125. add out0=-Nregs*8,in0
  1126. add out1=1,in1 // increment recursion count
  1127. mov loc1=0
  1128. mov loc2=0
  1129. ;;
  1130. mov loc3=0
  1131. mov loc4=0
  1132. mov loc5=0
  1133. mov loc6=0
  1134. mov loc7=0
  1135. (pRecurse) br.call.dptk.few b0=kvm_rse_clear_invalid
  1136. ;;
  1137. mov loc8=0
  1138. mov loc9=0
  1139. cmp.ne pReturn,p0=r0,in1
  1140. // if recursion count != 0, we need to do a br.ret
  1141. mov loc10=0
  1142. mov loc11=0
  1143. (pReturn) br.ret.dptk.many b0
  1144. # undef pRecurse
  1145. # undef pReturn
  1146. // loadrs has already been shifted
  1147. alloc r16=ar.pfs,0,0,0,0 // drop current register frame
  1148. ;;
  1149. loadrs
  1150. ;;
  1151. mov ar.bspstore=r24
  1152. ;;
  1153. mov ar.unat=r28
  1154. mov ar.rnat=r25
  1155. mov ar.rsc=r26
  1156. ;;
  1157. mov cr.ipsr=r31
  1158. mov cr.iip=r30
  1159. mov cr.ifs=r29
  1160. mov ar.pfs=r27
  1161. adds r18=VMM_VPD_BASE_OFFSET,r21
  1162. ;;
  1163. ld8 r18=[r18] //vpd
  1164. adds r17=VMM_VCPU_ISR_OFFSET,r21
  1165. ;;
  1166. ld8 r17=[r17]
  1167. adds r19=VMM_VPD_VPSR_OFFSET,r18
  1168. ;;
  1169. ld8 r19=[r19] //vpsr
  1170. mov r25=r18
  1171. adds r16= VMM_VCPU_GP_OFFSET,r21
  1172. ;;
  1173. ld8 r16= [r16] // Put gp in r24
  1174. movl r24=@gprel(ia64_vmm_entry) // calculate return address
  1175. ;;
  1176. add r24=r24,r16
  1177. ;;
  1178. br.sptk.many kvm_vps_sync_write // call the service
  1179. ;;
  1180. END(ia64_leave_hypervisor)
  1181. // fall through
  1182. GLOBAL_ENTRY(ia64_vmm_entry)
  1183. /*
  1184. * must be at bank 0
  1185. * parameter:
  1186. * r17:cr.isr
  1187. * r18:vpd
  1188. * r19:vpsr
  1189. * r22:b0
  1190. * r23:predicate
  1191. */
  1192. mov r24=r22
  1193. mov r25=r18
  1194. tbit.nz p1,p2 = r19,IA64_PSR_IC_BIT // p1=vpsr.ic
  1195. (p1) br.cond.sptk.few kvm_vps_resume_normal
  1196. (p2) br.cond.sptk.many kvm_vps_resume_handler
  1197. ;;
  1198. END(ia64_vmm_entry)
  1199. /*
  1200. * extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2,
  1201. * u64 arg3, u64 arg4, u64 arg5,
  1202. * u64 arg6, u64 arg7);
  1203. *
  1204. * XXX: The currently defined services use only 4 args at the max. The
  1205. * rest are not consumed.
  1206. */
  1207. GLOBAL_ENTRY(ia64_call_vsa)
  1208. .regstk 4,4,0,0
  1209. rpsave = loc0
  1210. pfssave = loc1
  1211. psrsave = loc2
  1212. entry = loc3
  1213. hostret = r24
  1214. alloc pfssave=ar.pfs,4,4,0,0
  1215. mov rpsave=rp
  1216. adds entry=VMM_VCPU_VSA_BASE_OFFSET, r13
  1217. ;;
  1218. ld8 entry=[entry]
  1219. 1: mov hostret=ip
  1220. mov r25=in1 // copy arguments
  1221. mov r26=in2
  1222. mov r27=in3
  1223. mov psrsave=psr
  1224. ;;
  1225. tbit.nz p6,p0=psrsave,14 // IA64_PSR_I
  1226. tbit.nz p7,p0=psrsave,13 // IA64_PSR_IC
  1227. ;;
  1228. add hostret=2f-1b,hostret // calculate return address
  1229. add entry=entry,in0
  1230. ;;
  1231. rsm psr.i | psr.ic
  1232. ;;
  1233. srlz.i
  1234. mov b6=entry
  1235. br.cond.sptk b6 // call the service
  1236. 2:
  1237. // Architectural sequence for enabling interrupts if necessary
  1238. (p7) ssm psr.ic
  1239. ;;
  1240. (p7) srlz.i
  1241. ;;
  1242. (p6) ssm psr.i
  1243. ;;
  1244. mov rp=rpsave
  1245. mov ar.pfs=pfssave
  1246. mov r8=r31
  1247. ;;
  1248. srlz.d
  1249. br.ret.sptk rp
  1250. END(ia64_call_vsa)
  1251. #define INIT_BSPSTORE ((4<<30)-(12<<20)-0x100)
  1252. GLOBAL_ENTRY(vmm_reset_entry)
  1253. //set up ipsr, iip, vpd.vpsr, dcr
  1254. // For IPSR: it/dt/rt=1, i/ic=1, si=1, vm/bn=1
  1255. // For DCR: all bits 0
  1256. bsw.0
  1257. ;;
  1258. mov r21 =r13
  1259. adds r14=-VMM_PT_REGS_SIZE, r12
  1260. ;;
  1261. movl r6=0x501008826000 // IPSR dt/rt/it:1;i/ic:1, si:1, vm/bn:1
  1262. movl r10=0x8000000000000000
  1263. adds r16=PT(CR_IIP), r14
  1264. adds r20=PT(R1), r14
  1265. ;;
  1266. rsm psr.ic | psr.i
  1267. ;;
  1268. srlz.i
  1269. ;;
  1270. mov ar.rsc = 0
  1271. ;;
  1272. flushrs
  1273. ;;
  1274. mov ar.bspstore = 0
  1275. // clear BSPSTORE
  1276. ;;
  1277. mov cr.ipsr=r6
  1278. mov cr.ifs=r10
  1279. ld8 r4 = [r16] // Set init iip for first run.
  1280. ld8 r1 = [r20]
  1281. ;;
  1282. mov cr.iip=r4
  1283. adds r16=VMM_VPD_BASE_OFFSET,r13
  1284. ;;
  1285. ld8 r18=[r16]
  1286. ;;
  1287. adds r19=VMM_VPD_VPSR_OFFSET,r18
  1288. ;;
  1289. ld8 r19=[r19]
  1290. mov r17=r0
  1291. mov r22=r0
  1292. mov r23=r0
  1293. br.cond.sptk ia64_vmm_entry
  1294. br.ret.sptk b0
  1295. END(vmm_reset_entry)