/*
 * vdso setup for s390
 *
 *  Copyright IBM Corp. 2008
 *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/bootmem.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/vdso.h>
#include <asm/facility.h>

#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
extern char vdso32_start, vdso32_end;
static void *vdso32_kbase = &vdso32_start;
static unsigned int vdso32_pages;
static struct page **vdso32_pagelist;
#endif

#ifdef CONFIG_64BIT
extern char vdso64_start, vdso64_end;
static void *vdso64_kbase = &vdso64_start;
static unsigned int vdso64_pages;
static struct page **vdso64_pagelist;
#endif /* CONFIG_64BIT */

/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso_enabled = 1;

static int __init vdso_setup(char *s)
{
	unsigned long val;
	int rc;

	rc = 0;
	if (strncmp(s, "on", 3) == 0)
		vdso_enabled = 1;
	else if (strncmp(s, "off", 4) == 0)
		vdso_enabled = 0;
	else {
		rc = strict_strtoul(s, 0, &val);
		vdso_enabled = rc ? 0 : !!val;
	}
	return !rc;
}
__setup("vdso=", vdso_setup);

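/*
 * Example usage: with the __setup() hook above, the vdso can be toggled
 * from the kernel command line, e.g. "vdso=off" or "vdso=0" to disable
 * it, "vdso=on" or "vdso=1" (the default) to keep it enabled.
 */
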
/*
 * The vdso data page
 */
static union {
	struct vdso_data data;
	u8 page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;

/*
 * Setup vdso data page.
 */
static void vdso_init_data(struct vdso_data *vd)
{
	/*
	 * The ECTG-based fast path is usable only if the per-cpu vdso
	 * data is mapped, i.e. the kernel does not run in home space
	 * mode, and the ECTG facility (facility bit 31) is installed.
	 */
	vd->ectg_available = user_mode != HOME_SPACE_MODE && test_facility(31);
}

#ifdef CONFIG_64BIT
/*
 * Allocate/free per cpu vdso data.
 */
#define SEGMENT_ORDER	2

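/*
 * A sketch of the per-cpu structure built by vdso_alloc_per_cpu() below:
 * a 16 KB (order 2) segment table whose first entry points to a page
 * table with 256 entries, whose first entry in turn maps one read-only
 * data page. The second half of the page-table page holds the psal/aste
 * words that hook this structure into access-register translation via
 * the paste array in the lowcore.
 */
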
int vdso_alloc_per_cpu(struct _lowcore *lowcore)
{
	unsigned long segment_table, page_table, page_frame;
	u32 *psal, *aste;
	int i;

	lowcore->vdso_per_cpu_data = __LC_PASTE;

	if (user_mode == HOME_SPACE_MODE || !vdso_enabled)
		return 0;

	segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
	page_table = get_zeroed_page(GFP_KERNEL | GFP_DMA);
	page_frame = get_zeroed_page(GFP_KERNEL);
	if (!segment_table || !page_table || !page_frame)
		goto out;

	clear_table((unsigned long *) segment_table, _SEGMENT_ENTRY_EMPTY,
		    PAGE_SIZE << SEGMENT_ORDER);
	clear_table((unsigned long *) page_table, _PAGE_TYPE_EMPTY,
		    256*sizeof(unsigned long));

	*(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
	*(unsigned long *) page_table = _PAGE_RO + page_frame;

	psal = (u32 *) (page_table + 256*sizeof(unsigned long));
	aste = psal + 32;

	for (i = 4; i < 32; i += 4)
		psal[i] = 0x80000000;

	lowcore->paste[4] = (u32)(addr_t) psal;
	psal[0] = 0x20000000;
	psal[2] = (u32)(addr_t) aste;
	*(unsigned long *) (aste + 2) = segment_table +
		_ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT;
	aste[4] = (u32)(addr_t) psal;
	lowcore->vdso_per_cpu_data = page_frame;

	return 0;

out:
	free_page(page_frame);
	free_page(page_table);
	free_pages(segment_table, SEGMENT_ORDER);
	return -ENOMEM;
}

void vdso_free_per_cpu(struct _lowcore *lowcore)
{
	unsigned long segment_table, page_table, page_frame;
	u32 *psal, *aste;

	if (user_mode == HOME_SPACE_MODE || !vdso_enabled)
		return;

	psal = (u32 *)(addr_t) lowcore->paste[4];
	aste = (u32 *)(addr_t) psal[2];
	segment_table = *(unsigned long *)(aste + 2) & PAGE_MASK;
	page_table = *(unsigned long *) segment_table;
	page_frame = *(unsigned long *) page_table;

	free_page(page_frame);
	free_page(page_table);
	free_pages(segment_table, SEGMENT_ORDER);
}

static void vdso_init_cr5(void)
{
	unsigned long cr5;

	if (user_mode == HOME_SPACE_MODE || !vdso_enabled)
		return;
	/*
	 * Point control register 5 at the paste array in the lowcore so
	 * that access-register translation can find the per-cpu area.
	 */
	cr5 = offsetof(struct _lowcore, paste);
	__ctl_load(cr5, 5, 5);
}
#endif /* CONFIG_64BIT */

/*
 * This is called from binfmt_elf. We create the special vma for the
 * vDSO and insert it into the mm struct tree.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	struct page **vdso_pagelist;
	unsigned long vdso_pages;
	unsigned long vdso_base;
	int rc;

	if (!vdso_enabled)
		return 0;
	/*
	 * Only map the vdso for dynamically linked elf binaries.
	 */
	if (!uses_interp)
		return 0;
#ifdef CONFIG_64BIT
	vdso_pagelist = vdso64_pagelist;
	vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT
	if (is_compat_task()) {
		vdso_pagelist = vdso32_pagelist;
		vdso_pages = vdso32_pages;
	}
#endif
#else
	vdso_pagelist = vdso32_pagelist;
	vdso_pages = vdso32_pages;
#endif
	/*
	 * The vDSO has a problem and was disabled; just don't "enable"
	 * it for the process.
	 */
	if (vdso_pages == 0)
		return 0;

	current->mm->context.vdso_base = 0;

	/*
	 * Pick a base address for the vDSO in process space. There is no
	 * preferred address here, so let get_unmapped_area() choose one.
	 */
	down_write(&mm->mmap_sem);
	vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		rc = vdso_base;
		goto out_up;
	}

	/*
	 * Put the vDSO base into the mm struct. We need to do this before
	 * calling install_special_mapping or the perf counter mmap tracking
	 * code will fail to recognise it as a vDSO (since arch_vma_name
	 * fails).
	 */
	current->mm->context.vdso_base = vdso_base;

	/*
	 * Our vma flags don't have VM_WRITE, so by default the process
	 * isn't allowed to write those pages.
	 * gdb can break that with the ptrace interface and thus trigger
	 * COW on those pages, but it is then your responsibility never to
	 * do that on the "data" page of the vDSO, or you'll stop getting
	 * kernel updates and your nice userland gettimeofday will be
	 * totally dead. It's fine to use that for setting breakpoints in
	 * the vDSO code pages though.
	 */
	rc = install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
				     VM_READ|VM_EXEC|
				     VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				     vdso_pagelist);
	if (rc)
		current->mm->context.vdso_base = 0;
out_up:
	up_write(&mm->mmap_sem);
	return rc;
}

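/*
 * The mapping installed above shows up as "[vdso]" in /proc/<pid>/maps,
 * courtesy of arch_vma_name() below.
 */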
const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base)
		return "[vdso]";
	return NULL;
}

static int __init vdso_init(void)
{
	int i;

	if (!vdso_enabled)
		return 0;
	vdso_init_data(vdso_data);
#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
	/*
	 * Calculate the size of the 32 bit vDSO, plus one page for the
	 * shared vdso data page appended as the last list entry.
	 */
	vdso32_pages = ((&vdso32_end - &vdso32_start
			 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

	/* Make sure pages are in the correct state */
	vdso32_pagelist = kzalloc(sizeof(struct page *) * (vdso32_pages + 1),
				  GFP_KERNEL);
	BUG_ON(vdso32_pagelist == NULL);
	for (i = 0; i < vdso32_pages - 1; i++) {
		struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
		ClearPageReserved(pg);
		get_page(pg);
		vdso32_pagelist[i] = pg;
	}
	vdso32_pagelist[vdso32_pages - 1] = virt_to_page(vdso_data);
	vdso32_pagelist[vdso32_pages] = NULL;
#endif

#ifdef CONFIG_64BIT
	/*
	 * Calculate the size of the 64 bit vDSO, again with one extra
	 * page for the shared vdso data page.
	 */
	vdso64_pages = ((&vdso64_end - &vdso64_start
			 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

	/* Make sure pages are in the correct state */
	vdso64_pagelist = kzalloc(sizeof(struct page *) * (vdso64_pages + 1),
				  GFP_KERNEL);
	BUG_ON(vdso64_pagelist == NULL);
	for (i = 0; i < vdso64_pages - 1; i++) {
		struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
		ClearPageReserved(pg);
		get_page(pg);
		vdso64_pagelist[i] = pg;
	}
	vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
	vdso64_pagelist[vdso64_pages] = NULL;
	if (vdso_alloc_per_cpu(&S390_lowcore))
		BUG();
	vdso_init_cr5();
#endif /* CONFIG_64BIT */

	get_page(virt_to_page(vdso_data));

	smp_wmb();

	return 0;
}
early_initcall(vdso_init);

/*
 * The vdso is mapped per process, so s390 has no gate area.
 */
int in_gate_area_no_mm(unsigned long addr)
{
	return 0;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	return 0;
}

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return NULL;
}