/*
 * pageattr.c
 *
 * Copyright IBM Corp. 2011
 * Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
 */
#include <linux/hugetlb.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/facility.h>
#include <asm/pgtable.h>
#include <asm/page.h>
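
/*
 * Set the storage key of the 1 MB frame at @addr with the SSKE
 * instruction. The m3 field value 9 selects the nonquiescing and
 * multiple-block controls, so a single SSKE covers every 4 KB block
 * of the frame and returns the address of the next frame.
 */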
static inline unsigned long sske_frame(unsigned long addr, unsigned char skey)
{
	asm volatile(".insn rrf,0xb22b0000,%[skey],%[addr],9,0"
		     : [addr] "+a" (addr) : [skey] "d" (skey));
	return addr;
}
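
/*
 * Initialize the storage keys of a physical address range to
 * PAGE_DEFAULT_KEY. With EDAT1 whole 1 MB frames are keyed with a
 * single SSKE each; pages that do not fill a complete frame fall
 * back to page_set_storage_key().
 */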
void __storage_key_init_range(unsigned long start, unsigned long end)
{
	unsigned long boundary, size;

	if (!PAGE_DEFAULT_KEY)
		return;
	while (start < end) {
		if (MACHINE_HAS_EDAT1) {
			/* set storage keys for a 1MB frame */
			size = 1UL << 20;
			boundary = (start + size) & ~(size - 1);
			if (boundary <= end) {
				do {
					start = sske_frame(start, PAGE_DEFAULT_KEY);
				} while (start < boundary);
				continue;
			}
		}
		page_set_storage_key(start, PAGE_DEFAULT_KEY, 0);
		start += PAGE_SIZE;
	}
}

#ifdef CONFIG_PROC_FS
atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];
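
/*
 * Report how much of the kernel direct mapping is backed by 4 KB,
 * 1 MB and 2 GB mappings. The counters hold numbers of mappings of
 * each size; the shifts (<< 2, << 10, << 21) convert those counts
 * to kilobytes.
 */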
void arch_report_meminfo(struct seq_file *m)
{
	seq_printf(m, "DirectMap4k:    %8lu kB\n",
		   atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_4K]) << 2);
	seq_printf(m, "DirectMap1M:    %8lu kB\n",
		   atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_1M]) << 10);
	seq_printf(m, "DirectMap2G:    %8lu kB\n",
		   atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_2G]) << 21);
}
#endif /* CONFIG_PROC_FS */
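
/*
 * Exchange a page table entry while other CPUs may still be using
 * the old one: with EDAT2 use CRDTE on the containing table, with
 * the IDTE facility use compare-and-swap-and-purge on the whole
 * 64-bit entry (cspg), and otherwise csp, which can only swap the
 * lower word of the entry.
 */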
static void pgt_set(unsigned long *old, unsigned long new, unsigned long addr,
		    unsigned long dtt)
{
	unsigned long table, mask;

	mask = 0;
	if (MACHINE_HAS_EDAT2) {
		switch (dtt) {
		case CRDTE_DTT_REGION3:
			mask = ~(PTRS_PER_PUD * sizeof(pud_t) - 1);
			break;
		case CRDTE_DTT_SEGMENT:
			mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
			break;
		case CRDTE_DTT_PAGE:
			mask = ~(PTRS_PER_PTE * sizeof(pte_t) - 1);
			break;
		}
		table = (unsigned long)old & mask;
		crdte(*old, new, table, dtt, addr, S390_lowcore.kernel_asce);
	} else if (MACHINE_HAS_IDTE) {
		cspg(old, *old, new);
	} else {
		csp((unsigned int *)old + 1, *old, new);
	}
}
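
/*
 * Attribute change request passed down the page table walkers.
 * Callers set exactly one of the two flags.
 */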
struct cpa {
	unsigned int set_ro	: 1;
	unsigned int clear_ro	: 1;
};

static int walk_pte_level(pmd_t *pmdp, unsigned long addr, unsigned long end,
			  struct cpa cpa)
{
	pte_t *ptep, new;

	ptep = pte_offset(pmdp, addr);
	do {
		if (pte_none(*ptep))
			return -EINVAL;
		if (cpa.set_ro)
			new = pte_wrprotect(*ptep);
		else if (cpa.clear_ro)
			new = pte_mkwrite(pte_mkdirty(*ptep));
		pgt_set((unsigned long *)ptep, pte_val(new), addr, CRDTE_DTT_PAGE);
		ptep++;
		addr += PAGE_SIZE;
		cond_resched();
	} while (addr < end);
	return 0;
}
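
/*
 * Split a large 1 MB segment mapping into a page table of
 * PTRS_PER_PTE 4 KB entries, preserving the protection of the
 * original mapping, and install the new page table with pgt_set().
 */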
static int split_pmd_page(pmd_t *pmdp, unsigned long addr)
{
	unsigned long pte_addr, prot;
	pte_t *pt_dir, *ptep;
	pmd_t new;
	int i, ro;

	pt_dir = vmem_pte_alloc();
	if (!pt_dir)
		return -ENOMEM;
	pte_addr = pmd_pfn(*pmdp) << PAGE_SHIFT;
	ro = !!(pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT);
	prot = pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
	ptep = pt_dir;
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte_val(*ptep) = pte_addr | prot;
		pte_addr += PAGE_SIZE;
		ptep++;
	}
	pmd_val(new) = __pa(pt_dir) | _SEGMENT_ENTRY;
	pgt_set((unsigned long *)pmdp, pmd_val(new), addr, CRDTE_DTT_SEGMENT);
	update_page_count(PG_DIRECT_MAP_4K, PTRS_PER_PTE);
	update_page_count(PG_DIRECT_MAP_1M, -1);
	return 0;
}

static void modify_pmd_page(pmd_t *pmdp, unsigned long addr, struct cpa cpa)
{
	pmd_t new;

	if (cpa.set_ro)
		new = pmd_wrprotect(*pmdp);
	else if (cpa.clear_ro)
		new = pmd_mkwrite(pmd_mkdirty(*pmdp));
	pgt_set((unsigned long *)pmdp, pmd_val(new), addr, CRDTE_DTT_SEGMENT);
}

static int walk_pmd_level(pud_t *pudp, unsigned long addr, unsigned long end,
			  struct cpa cpa)
{
	unsigned long next;
	pmd_t *pmdp;
	int rc = 0;

	pmdp = pmd_offset(pudp, addr);
	do {
		if (pmd_none(*pmdp))
			return -EINVAL;
		next = pmd_addr_end(addr, end);
		if (pmd_large(*pmdp)) {
			/* split if the range covers only part of the segment */
			if (addr & ~PMD_MASK || addr + PMD_SIZE > next) {
				rc = split_pmd_page(pmdp, addr);
				if (rc)
					return rc;
				continue;
			}
			modify_pmd_page(pmdp, addr, cpa);
		} else {
			rc = walk_pte_level(pmdp, addr, next, cpa);
			if (rc)
				return rc;
		}
		pmdp++;
		addr = next;
		cond_resched();
	} while (addr < end);
	return rc;
}
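
/*
 * Split a large 2 GB region-third mapping into PTRS_PER_PMD 1 MB
 * segment entries, preserving the protection of the original
 * mapping, analogous to split_pmd_page().
 */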
static int split_pud_page(pud_t *pudp, unsigned long addr)
{
	unsigned long pmd_addr, prot;
	pmd_t *pm_dir, *pmdp;
	pud_t new;
	int i, ro;

	pm_dir = vmem_pmd_alloc();
	if (!pm_dir)
		return -ENOMEM;
	pmd_addr = pud_pfn(*pudp) << PAGE_SHIFT;
	ro = !!(pud_val(*pudp) & _REGION_ENTRY_PROTECT);
	prot = pgprot_val(ro ? SEGMENT_KERNEL_RO : SEGMENT_KERNEL);
	pmdp = pm_dir;
	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd_val(*pmdp) = pmd_addr | prot;
		pmd_addr += PMD_SIZE;
		pmdp++;
	}
	pud_val(new) = __pa(pm_dir) | _REGION3_ENTRY;
	pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3);
	update_page_count(PG_DIRECT_MAP_1M, PTRS_PER_PMD);
	update_page_count(PG_DIRECT_MAP_2G, -1);
	return 0;
}

static void modify_pud_page(pud_t *pudp, unsigned long addr, struct cpa cpa)
{
	pud_t new;

	if (cpa.set_ro)
		new = pud_wrprotect(*pudp);
	else if (cpa.clear_ro)
		new = pud_mkwrite(pud_mkdirty(*pudp));
	pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3);
}

static int walk_pud_level(pgd_t *pgd, unsigned long addr, unsigned long end,
			  struct cpa cpa)
{
	unsigned long next;
	pud_t *pudp;
	int rc = 0;

	pudp = pud_offset(pgd, addr);
	do {
		if (pud_none(*pudp))
			return -EINVAL;
		next = pud_addr_end(addr, end);
		if (pud_large(*pudp)) {
			/* split if the range covers only part of the region */
			if (addr & ~PUD_MASK || addr + PUD_SIZE > next) {
				rc = split_pud_page(pudp, addr);
				if (rc)
					break;
				continue;
			}
			modify_pud_page(pudp, addr, cpa);
		} else {
			rc = walk_pmd_level(pudp, addr, next, cpa);
		}
		pudp++;
		addr = next;
		cond_resched();
	} while (addr < end && !rc);
	return rc;
}
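
/*
 * Walk the kernel page tables for [addr, end) and apply the requested
 * attribute change. cpa_mutex serializes concurrent attribute changes;
 * large mappings are split on demand so that only the requested range
 * changes protection.
 */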
static DEFINE_MUTEX(cpa_mutex);

static int change_page_attr(unsigned long addr, unsigned long end,
			    struct cpa cpa)
{
	unsigned long next;
	int rc = -EINVAL;
	pgd_t *pgdp;

	if (addr == end)
		return 0;
	if (end >= MODULES_END)
		return -EINVAL;
	mutex_lock(&cpa_mutex);
	pgdp = pgd_offset_k(addr);
	do {
		if (pgd_none(*pgdp))
			break;
		next = pgd_addr_end(addr, end);
		rc = walk_pud_level(pgdp, addr, next, cpa);
		if (rc)
			break;
		cond_resched();
	} while (pgdp++, addr = next, addr < end && !rc);
	mutex_unlock(&cpa_mutex);
	return rc;
}

int set_memory_ro(unsigned long addr, int numpages)
{
	struct cpa cpa = {
		.set_ro = 1,
	};

	addr &= PAGE_MASK;
	return change_page_attr(addr, addr + numpages * PAGE_SIZE, cpa);
}

int set_memory_rw(unsigned long addr, int numpages)
{
	struct cpa cpa = {
		.clear_ro = 1,
	};

	addr &= PAGE_MASK;
	return change_page_attr(addr, addr + numpages * PAGE_SIZE, cpa);
}
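
/*
 * Typical use is a pair of calls on a kernel address range, e.g.
 * (illustrative sketch; "buf" and "nr" are not from this file):
 *
 *	set_memory_ro((unsigned long)buf, nr);
 *	...
 *	set_memory_rw((unsigned long)buf, nr);
 */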

/*
 * Not possible: these page table entries carry no execute-protection
 * bit, so set_memory_nx/set_memory_x are no-ops that report success.
 */
int set_memory_nx(unsigned long addr, int numpages)
{
	return 0;
}

int set_memory_x(unsigned long addr, int numpages)
{
	return 0;
}

#ifdef CONFIG_DEBUG_PAGEALLOC
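
/*
 * Invalidate @nr page table entries starting at @address. If the
 * IPTE-range facility (facility bit 13) is available, a single
 * __ptep_ipte_range() flushes the whole run; otherwise each entry
 * is invalidated individually.
 */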
static void ipte_range(pte_t *pte, unsigned long address, int nr)
{
	int i;

	if (test_facility(13)) {
		__ptep_ipte_range(address, nr - 1, pte, IPTE_GLOBAL);
		return;
	}
	for (i = 0; i < nr; i++) {
		__ptep_ipte(address, pte, IPTE_GLOBAL);
		address += PAGE_SIZE;
		pte++;
	}
}
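
/*
 * DEBUG_PAGEALLOC hook: re-create the kernel mapping of pages when
 * they are allocated (@enable) and invalidate their PTEs when they
 * are freed, so stray accesses to free pages fault. The "nr"
 * computation clamps each step to the end of the current page table,
 * so ipte_range() never crosses a page table boundary.
 */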
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long address;
	int nr, i, j;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	for (i = 0; i < numpages;) {
		address = page_to_phys(page + i);
		pgd = pgd_offset_k(address);
		pud = pud_offset(pgd, address);
		pmd = pmd_offset(pud, address);
		pte = pte_offset_kernel(pmd, address);
		nr = (unsigned long)pte >> ilog2(sizeof(long));
		nr = PTRS_PER_PTE - (nr & (PTRS_PER_PTE - 1));
		nr = min(numpages - i, nr);
		if (enable) {
			for (j = 0; j < nr; j++) {
				pte_val(*pte) = address | pgprot_val(PAGE_KERNEL);
				address += PAGE_SIZE;
				pte++;
			}
		} else {
			ipte_range(pte, address, nr);
		}
		i += nr;
	}
}

#ifdef CONFIG_HIBERNATION
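
/*
 * LRA (load real address) sets condition code 0 only if the virtual
 * address translates successfully, so it can be used to test whether
 * a page is currently present in the kernel mapping.
 */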
bool kernel_page_present(struct page *page)
{
	unsigned long addr;
	int cc;

	addr = page_to_phys(page);
	asm volatile(
		"	lra	%1,0(%1)\n"
		"	ipm	%0\n"
		"	srl	%0,28"
		: "=d" (cc), "+a" (addr) : : "cc");
	return cc == 0;
}
#endif /* CONFIG_HIBERNATION */
#endif /* CONFIG_DEBUG_PAGEALLOC */