book3s_64_slb.S

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#define SHADOW_SLB_ESID(num)	(SLBSHADOW_SAVEAREA + (num * 0x10))
#define SHADOW_SLB_VSID(num)	(SLBSHADOW_SAVEAREA + (num * 0x10) + 0x8)
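
/*
 * Each slot in the SLB shadow save area is 0x10 bytes wide: the ESID
 * doubleword sits at offset 0x0 and the matching VSID doubleword at
 * offset 0x8, which is what the two accessors above encode.
 */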

#define UNBOLT_SLB_ENTRY(num) \
	ld	r9, SHADOW_SLB_ESID(num)(r12); \
	/* Invalid? Skip. */; \
	rldicl. r0, r9, 37, 63; \
	beq	slb_entry_skip_ ## num; \
	xoris	r9, r9, SLB_ESID_V@h; \
	std	r9, SHADOW_SLB_ESID(num)(r12); \
slb_entry_skip_ ## num:
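
/*
 * The rldicl. rotates the ESID left by 37 bits and keeps only the lowest
 * bit, which leaves SLB_ESID_V in the LSB of r0 and sets CR0 accordingly;
 * invalid (unused) bolted slots are skipped.  The xoris then clears the
 * valid bit, which is known to be set at that point.
 */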

#define REBOLT_SLB_ENTRY(num) \
	ld	r10, SHADOW_SLB_ESID(num)(r11); \
	cmpdi	r10, 0; \
	beq	slb_exit_skip_ ## num; \
	oris	r10, r10, SLB_ESID_V@h; \
	ld	r9, SHADOW_SLB_VSID(num)(r11); \
	slbmte	r9, r10; \
	std	r10, SHADOW_SLB_ESID(num)(r11); \
slb_exit_skip_ ## num:
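
/*
 * UNBOLT_SLB_ENTRY marks a bolted slot invalid in the SLB shadow save area
 * while the guest's SLB contents are loaded; REBOLT_SLB_ENTRY is its
 * counterpart on the way out, setting SLB_ESID_V again, reinstalling the
 * entry with slbmte and writing the valid ESID back to the save area.
 * Slots whose ESID is zero are left alone.
 */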

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.macro LOAD_GUEST_SEGMENTS

	/* Required state:
	 *
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * R2 = host R2
	 * R3 = shadow vcpu
	 * all other volatile GPRS = free except R4, R6
	 * SVCPU[CR]  = guest CR
	 * SVCPU[XER] = guest XER
	 * SVCPU[CTR] = guest CTR
	 * SVCPU[LR]  = guest LR
	 */

	/* Remove LPAR shadow entries */

#if SLB_NUM_BOLTED == 3

	ld	r12, PACA_SLBSHADOWPTR(r13)

	/* Save off the first entry so we can slbie it later */
	ld	r10, SHADOW_SLB_ESID(0)(r12)
	ld	r11, SHADOW_SLB_VSID(0)(r12)

	/* Remove bolted entries */
	UNBOLT_SLB_ENTRY(0)
	UNBOLT_SLB_ENTRY(1)
	UNBOLT_SLB_ENTRY(2)

#else
#error unknown number of bolted entries
#endif

	/* Flush SLB */

	slbia
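
	/*
	 * slbia leaves SLB entry 0 alone, so the segment that was bolted in
	 * shadow slot 0 (its ESID/VSID were saved in r10/r11 above) still has
	 * to be thrown out with an explicit slbie below.
	 */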
	/* r10 = esid & ESID_MASK */
	rldicr	r10, r10, 0, 35
	/* r10 |= CLASS_BIT(VSID) */
	rldic	r12, r11, 56 - 36, 36
	or	r10, r10, r12
	slbie	r10

	isync

	/* Fill SLB with our shadow */

	lbz	r12, SVCPU_SLB_MAX(r3)
	mulli	r12, r12, 16
	addi	r12, r12, SVCPU_SLB
	add	r12, r12, r3
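	/* r12 now points one past the last used guest SLB entry
	 * (svcpu->slb + slb_max * 16) and serves as the loop bound below. */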

	/* for (r11 = kvm_slb; r11 < kvm_slb + kvm_slb_size; r11+=slb_entry) */
	li	r11, SVCPU_SLB
	add	r11, r11, r3

slb_loop_enter:

	ld	r10, 0(r11)

	rldicl. r0, r10, 37, 63
	beq	slb_loop_enter_skip

	ld	r9, 8(r11)
	slbmte	r9, r10

slb_loop_enter_skip:
	addi	r11, r11, 16
	cmpd	cr0, r11, r12
	blt	slb_loop_enter

slb_do_enter:

.endm

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

.macro LOAD_HOST_SEGMENTS

	/* Register usage at this point:
	 *
	 * R1         = host R1
	 * R2         = host R2
	 * R12        = exit handler id
	 * R13        = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64]
	 * SVCPU.*    = guest *
	 * SVCPU[CR]  = guest CR
	 * SVCPU[XER] = guest XER
	 * SVCPU[CTR] = guest CTR
	 * SVCPU[LR]  = guest LR
	 *
	 */

	/* Restore bolted entries from the shadow and fix them up along the way */

	/* We don't store anything in entry 0, so we don't need to take care of it */
	slbia
	isync
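
	/*
	 * The guest SLB entries are gone after the slbia above; now re-enter
	 * the host's bolted entries from the SLB shadow save area.
	 */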

#if SLB_NUM_BOLTED == 3

	ld	r11, PACA_SLBSHADOWPTR(r13)

	REBOLT_SLB_ENTRY(0)
	REBOLT_SLB_ENTRY(1)
	REBOLT_SLB_ENTRY(2)

#else
#error unknown number of bolted entries
#endif

slb_do_exit:

.endm