htab.c

/*
 * iSeries hashtable management.
 * Derived from pSeries_htab.c
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <asm/machdep.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/abs_addr.h>
#include <linux/spinlock.h>

#include "call_hpt.h"
static spinlock_t iSeries_hlocks[64] __cacheline_aligned_in_smp;

/*
 * Very primitive algorithm for picking up a lock
 */
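/*
 * Assumed intent of the bit fiddling below: bit 3 of a slot (or group)
 * index is bit 0 of the hash, and the secondary hash is the ones'
 * complement of the primary, so complementing indexes that have bit 3
 * set makes the primary and secondary group of the same page pick the
 * same entry of iSeries_hlocks[].
 */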
static inline void iSeries_hlock(unsigned long slot)
{
	if (slot & 0x8)
		slot = ~slot;
	spin_lock(&iSeries_hlocks[(slot >> 4) & 0x3f]);
}

static inline void iSeries_hunlock(unsigned long slot)
{
	if (slot & 0x8)
		slot = ~slot;
	spin_unlock(&iSeries_hlocks[(slot >> 4) & 0x3f]);
}
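/*
 * Insert an HPTE mapping va -> pa into the group at hpte_group.
 * As with the other hpte_insert back ends, the return value is assumed
 * to be -1 if no slot was free, otherwise the slot index within the
 * group (0-7), with bit 3 set when the entry landed in the secondary
 * group.
 */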
static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
				unsigned long pa, unsigned long rflags,
				unsigned long vflags, int psize, int ssize)
{
	long slot;
	struct hash_pte lhpte;
	int secondary = 0;

	BUG_ON(psize != MMU_PAGE_4K);

	/*
	 * The hypervisor tries both primary and secondary.
	 * If we are being called to insert in the secondary,
	 * it means we have already tried both primary and secondary,
	 * so we return failure immediately.
	 */
	if (vflags & HPTE_V_SECONDARY)
		return -1;

	iSeries_hlock(hpte_group);

	slot = HvCallHpt_findValid(&lhpte, va >> HW_PAGE_SHIFT);
	if (unlikely(lhpte.v & HPTE_V_VALID)) {
		if (vflags & HPTE_V_BOLTED) {
			HvCallHpt_setSwBits(slot, 0x10, 0);
			HvCallHpt_setPp(slot, PP_RWXX);
			iSeries_hunlock(hpte_group);
			if (slot < 0)
				return 0x8 | (slot & 7);
			else
				return slot & 7;
		}
		BUG();
	}

	if (slot == -1) { /* No available entry found in either group */
		iSeries_hunlock(hpte_group);
		return -1;
	}

	if (slot < 0) {		/* MSB set means secondary group */
		vflags |= HPTE_V_SECONDARY;
		secondary = 1;
		slot &= 0x7fffffffffffffff;
	}

	lhpte.v = hpte_encode_v(va, MMU_PAGE_4K, MMU_SEGSIZE_256M) |
		vflags | HPTE_V_VALID;
	lhpte.r = hpte_encode_r(phys_to_abs(pa), MMU_PAGE_4K) | rflags;

	/* Now fill in the actual HPTE */
	HvCallHpt_addValidate(slot, secondary, &lhpte);

	iSeries_hunlock(hpte_group);

	return (secondary << 3) | (slot & 7);
}
static unsigned long iSeries_hpte_getword0(unsigned long slot)
{
	struct hash_pte hpte;

	HvCallHpt_get(&hpte, slot);
	return hpte.v;
}
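/*
 * Evict one non-bolted entry from hpte_group so a subsequent insert can
 * succeed.  The starting offset is randomised (from the timebase) so
 * evictions spread across the group.  Returns a non-negative value on
 * success, or -1 if every entry in the group is bolted.
 */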
static long iSeries_hpte_remove(unsigned long hpte_group)
{
	unsigned long slot_offset;
	int i;
	unsigned long hpte_v;

	/* Pick a random slot to start at */
	slot_offset = mftb() & 0x7;

	iSeries_hlock(hpte_group);

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hpte_v = iSeries_hpte_getword0(hpte_group + slot_offset);

		if (!(hpte_v & HPTE_V_BOLTED)) {
			HvCallHpt_invalidateSetSwBitsGet(hpte_group +
							 slot_offset, 0, 0);
			iSeries_hunlock(hpte_group);
			return i;
		}

		slot_offset++;
		slot_offset &= 0x7;
	}

	iSeries_hunlock(hpte_group);

	return -1;
}
/*
 * The HyperVisor expects the "flags" argument in this form:
 *	bits  0..59 : reserved
 *	bit      60 : N
 *	bits 61..63 : PP2,PP1,PP0
 */
static long iSeries_hpte_updatepp(unsigned long slot, unsigned long newpp,
				  unsigned long va, int psize, int ssize,
				  int local)
{
	struct hash_pte hpte;
	unsigned long want_v;

	iSeries_hlock(slot);

	HvCallHpt_get(&hpte, slot);
	want_v = hpte_encode_v(va, MMU_PAGE_4K, MMU_SEGSIZE_256M);

	if (HPTE_V_COMPARE(hpte.v, want_v) && (hpte.v & HPTE_V_VALID)) {
		/*
		 * Hypervisor expects bits as NPPP, which is
		 * different from how they are mapped in our PP.
		 */
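		/*
		 * (Assumed layout: newpp carries PP0/PP1 in bits 0-1 and
		 * the no-execute N bit, HPTE_R_N = 0x4, in bit 2; keep PP
		 * in place and shift N up into bit 3 where the hypervisor
		 * wants it, leaving PP2 clear.)
		 */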
		HvCallHpt_setPp(slot, (newpp & 0x3) | ((newpp & 0x4) << 1));
		iSeries_hunlock(slot);
		return 0;
	}
	iSeries_hunlock(slot);

	return -1;
}
/*
 * Functions used to find the PTE for a particular virtual address.
 * Only used during boot when bolting pages.
 *
 * Input : vpn      : virtual page number
 * Output: PTE index within the page table of the entry
 *         (-1 on failure)
 */
static long iSeries_hpte_find(unsigned long vpn)
{
	struct hash_pte hpte;
	long slot;

	/*
	 * The HvCallHpt_findValid interface is as follows:
	 * 0xffffffffffffffff : No entry found.
	 * 0x00000000xxxxxxxx : Entry found in primary group, slot x
	 * 0x80000000xxxxxxxx : Entry found in secondary group, slot x
	 */
	slot = HvCallHpt_findValid(&hpte, vpn);
	if (hpte.v & HPTE_V_VALID) {
		if (slot < 0) {
			slot &= 0x7fffffffffffffff;
			slot = -slot;
		}
	} else
		slot = -1;
	return slot;
}
/*
 * Update the page protection bits. Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 * Does not work on large pages.
 *
 * No need to lock here because we should be the only user.
 */
static void iSeries_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
					int psize, int ssize)
{
	unsigned long vsid, va, vpn;
	long slot;

	BUG_ON(psize != MMU_PAGE_4K);

	vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M);
	va = (vsid << 28) | (ea & 0x0fffffff);
	vpn = va >> HW_PAGE_SHIFT;
	slot = iSeries_hpte_find(vpn);
	if (slot == -1)
		panic("updateboltedpp: Could not find page to bolt\n");
	HvCallHpt_setPp(slot, newpp);
}
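/*
 * Invalidate the HPTE at `slot' if it still maps `va'.  The valid word
 * carries only the abbreviated VPN, which should be va >> 23 for 4K
 * pages (i.e. what hpte_encode_v() stores), so that is what is compared
 * before tearing the entry down.
 */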
static void iSeries_hpte_invalidate(unsigned long slot, unsigned long va,
				    int psize, int ssize, int local)
{
	unsigned long hpte_v;
	unsigned long avpn = va >> 23;
	unsigned long flags;

	local_irq_save(flags);

	iSeries_hlock(slot);

	hpte_v = iSeries_hpte_getword0(slot);

	if ((HPTE_V_AVPN_VAL(hpte_v) == avpn) && (hpte_v & HPTE_V_VALID))
		HvCallHpt_invalidateSetSwBitsGet(slot, 0, 0);

	iSeries_hunlock(slot);

	local_irq_restore(flags);
}
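/*
 * Initialise the hash-group locks and hook the hypervisor-backed
 * primitives above into ppc_md, so the generic hash-MMU code uses them
 * on legacy iSeries.
 */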
void __init hpte_init_iSeries(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(iSeries_hlocks); i++)
		spin_lock_init(&iSeries_hlocks[i]);

	ppc_md.hpte_invalidate     = iSeries_hpte_invalidate;
	ppc_md.hpte_updatepp       = iSeries_hpte_updatepp;
	ppc_md.hpte_updateboltedpp = iSeries_hpte_updateboltedpp;
	ppc_md.hpte_insert         = iSeries_hpte_insert;
	ppc_md.hpte_remove         = iSeries_hpte_remove;
}