vma.c

/*
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 *
 * This contains most of the x86 vDSO kernel-side code.
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <linux/ptrace.h>
#include <asm/pvclock.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/cpufeature.h>
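
/*
 * Non-zero if the 64-bit vdso is mapped into new processes; controlled
 * by the "vdso=" kernel parameter (see vdso_setup() below).
 */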
#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
#endif

void __init init_vdso_image(const struct vdso_image *image)
{
        BUG_ON(image->size % PAGE_SIZE != 0);

        apply_alternatives((struct alt_instr *)(image->data + image->alt),
                           (struct alt_instr *)(image->data + image->alt +
                                                image->alt_len));
}

struct linux_binprm;
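
/*
 * Fault handler for the [vdso] mapping: return the page of the kernel-side
 * vdso image blob that backs the faulting offset.
 */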
static int vdso_fault(const struct vm_special_mapping *sm,
                      struct vm_area_struct *vma, struct vm_fault *vmf)
{
        const struct vdso_image *image = vma->vm_mm->context.vdso_image;

        if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
                return VM_FAULT_SIGBUS;

        vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
        get_page(vmf->page);
        return 0;
}
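
/*
 * If a 32-bit syscall is in flight while the vdso is moved, the saved user
 * IP may point into the old int80 landing pad (see do_fast_syscall_32());
 * redirect it to the landing pad's new location.
 */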
static void vdso_fix_landing(const struct vdso_image *image,
                             struct vm_area_struct *new_vma)
{
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
        if (in_ia32_syscall() && image == &vdso_image_32) {
                struct pt_regs *regs = current_pt_regs();
                unsigned long vdso_land = image->sym_int80_landing_pad;
                unsigned long old_land_addr = vdso_land +
                        (unsigned long)current->mm->context.vdso;

                /* Fix up the userspace landing address; see do_fast_syscall_32(). */
                if (regs->ip == old_land_addr)
                        regs->ip = new_vma->vm_start + vdso_land;
        }
#endif
}
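
/*
 * mremap() handler for the [vdso] mapping: record the new location in
 * mm->context and fix up any in-progress 32-bit syscall landing.
 */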
static int vdso_mremap(const struct vm_special_mapping *sm,
                       struct vm_area_struct *new_vma)
{
        unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
        const struct vdso_image *image = current->mm->context.vdso_image;

        if (image->size != new_size)
                return -EINVAL;

        if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
                return -EFAULT;

        vdso_fix_landing(image, new_vma);
        current->mm->context.vdso = (void __user *)new_vma->vm_start;

        return 0;
}
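
/*
 * Fault handler for the [vvar] mapping: PFN-map the shared kernel data
 * pages (the vvar page and, when pvclock is in use, the pvclock page)
 * into the process.
 */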
static int vvar_fault(const struct vm_special_mapping *sm,
                      struct vm_area_struct *vma, struct vm_fault *vmf)
{
        const struct vdso_image *image = vma->vm_mm->context.vdso_image;
        long sym_offset;
        int ret = -EFAULT;

        if (!image)
                return VM_FAULT_SIGBUS;

        sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) +
                image->sym_vvar_start;

        /*
         * Sanity check: a symbol offset of zero means that the page
         * does not exist for this vdso image, not that the page is at
         * offset zero relative to the text mapping.  This should be
         * impossible here, because sym_offset should only be zero for
         * the page past the end of the vvar mapping.
         */
        if (sym_offset == 0)
                return VM_FAULT_SIGBUS;

        if (sym_offset == image->sym_vvar_page) {
                ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
                                    __pa_symbol(&__vvar_page) >> PAGE_SHIFT);
        } else if (sym_offset == image->sym_pvclock_page) {
                struct pvclock_vsyscall_time_info *pvti =
                        pvclock_pvti_cpu0_va();

                if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
                        ret = vm_insert_pfn(
                                vma,
                                (unsigned long)vmf->virtual_address,
                                __pa(pvti) >> PAGE_SHIFT);
                }
        }

        if (ret == 0 || ret == -EBUSY)
                return VM_FAULT_NOPAGE;

        return VM_FAULT_SIGBUS;
}

static const struct vm_special_mapping vdso_mapping = {
        .name = "[vdso]",
        .fault = vdso_fault,
        .mremap = vdso_mremap,
};
static const struct vm_special_mapping vvar_mapping = {
        .name = "[vvar]",
        .fault = vvar_fault,
};

/*
 * Add vdso and vvar mappings to the current process.
 * @image - blob to map
 * @addr - request a specific address (zero means map at a free address)
 */
static int map_vdso(const struct vdso_image *image, unsigned long addr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long text_start;
        int ret = 0;

        if (down_write_killable(&mm->mmap_sem))
                return -EINTR;

        addr = get_unmapped_area(NULL, addr,
                                 image->size - image->sym_vvar_start, 0, 0);
        if (IS_ERR_VALUE(addr)) {
                ret = addr;
                goto up_fail;
        }

        text_start = addr - image->sym_vvar_start;
        current->mm->context.vdso = (void __user *)text_start;
        current->mm->context.vdso_image = image;

        /*
         * MAYWRITE to allow gdb to COW and set breakpoints
         */
        vma = _install_special_mapping(mm,
                                       text_start,
                                       image->size,
                                       VM_READ|VM_EXEC|
                                       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
                                       &vdso_mapping);

        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto up_fail;
        }

        vma = _install_special_mapping(mm,
                                       addr,
                                       -image->sym_vvar_start,
                                       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
                                       VM_PFNMAP,
                                       &vvar_mapping);

        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                do_munmap(mm, text_start, image->size);
        }

up_fail:
        if (ret) {
                current->mm->context.vdso = NULL;
                current->mm->context.vdso_image = NULL;
        }

        up_write(&mm->mmap_sem);
        return ret;
}

#ifdef CONFIG_X86_64
/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of the address space.
 * To save memory, make sure it is still in the same PTE as the stack
 * top.  This doesn't give that many random bits.
 *
 * Note that this algorithm is imperfect: the distribution of the vdso
 * start address within a PMD is biased toward the end.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
        unsigned long addr, end;
        unsigned offset;

        /*
         * Round up the start address.  It can start out unaligned as a result
         * of stack start randomization.
         */
        start = PAGE_ALIGN(start);

        /* Round the lowest possible end address up to a PMD boundary. */
        end = (start + len + PMD_SIZE - 1) & PMD_MASK;
        if (end >= TASK_SIZE_MAX)
                end = TASK_SIZE_MAX;
        end -= len;

        if (end > start) {
                offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
                addr = start + (offset << PAGE_SHIFT);
        } else {
                addr = start;
        }

        /*
         * Forcibly align the final address in case we have a hardware
         * issue that requires alignment for performance reasons.
         */
        addr = align_vdso_addr(addr);

        return addr;
}
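
/* Map the vdso at a randomized address near the top of the stack. */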
static int map_vdso_randomized(const struct vdso_image *image)
{
        unsigned long addr = vdso_addr(current->mm->start_stack,
                                       image->size - image->sym_vvar_start);

        return map_vdso(image, addr);
}
#endif
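
/*
 * Map the vdso blob exactly once per mm, at a caller-supplied address;
 * intended for explicit requests such as the ARCH_MAP_VDSO_* prctl() path.
 * Fails with -EEXIST if a vdso or vvar mapping is already present.
 */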
int map_vdso_once(const struct vdso_image *image, unsigned long addr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;

        down_write(&mm->mmap_sem);
        /*
         * Check if we have already mapped the vdso blob - fail to prevent
         * userspace from abusing install_special_mapping, which may not
         * do accounting and rlimits right.
         * We could search for a VMA near context.vdso, but this is a slow
         * path, so let's explicitly check all VMAs to be completely sure.
         */
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                if (vma_is_special_mapping(vma, &vdso_mapping) ||
                    vma_is_special_mapping(vma, &vvar_mapping)) {
                        up_write(&mm->mmap_sem);
                        return -EEXIST;
                }
        }
        up_write(&mm->mmap_sem);

        return map_vdso(image, addr);
}

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static int load_vdso32(void)
{
        if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
                return 0;

        return map_vdso(&vdso_image_32, 0);
}
#endif

#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        if (!vdso64_enabled)
                return 0;

        return map_vdso_randomized(&vdso_image_64);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
                                       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
        if (test_thread_flag(TIF_X32)) {
                if (!vdso64_enabled)
                        return 0;

                return map_vdso_randomized(&vdso_image_x32);
        }
#endif
#ifdef CONFIG_IA32_EMULATION
        return load_vdso32();
#else
        return 0;
#endif
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        return load_vdso32();
}
#endif

#ifdef CONFIG_X86_64
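/*
 * Parse the "vdso=" boot parameter: 0 disables the 64-bit vdso, any
 * non-zero value leaves it enabled.
 */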
static __init int vdso_setup(char *s)
{
        vdso64_enabled = simple_strtoul(s, NULL, 0);
        return 0;
}
__setup("vdso=", vdso_setup);
#endif

#ifdef CONFIG_X86_64
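/*
 * Set up the per-CPU GDT segment (and, when RDTSCP is available, the
 * TSC_AUX MSR) from which user space can cheaply read its CPU and node
 * numbers in vgetcpu.
 */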
static void vgetcpu_cpu_init(void *arg)
{
        int cpu = smp_processor_id();
        struct desc_struct d = { };
        unsigned long node = 0;
#ifdef CONFIG_NUMA
        node = cpu_to_node(cpu);
#endif
        if (static_cpu_has(X86_FEATURE_RDTSCP))
                write_rdtscp_aux((node << 12) | cpu);

        /*
         * Store the CPU number in the segment limit so that it can be
         * loaded quickly in user space in vgetcpu.  (12 bits for the CPU
         * and 8 bits for the node)
         */
        d.limit0 = cpu | ((node & 0xf) << 12);
        d.limit = node >> 4;
        d.type = 5;             /* RO data, expand down, accessed */
        d.dpl = 3;              /* Visible to user code */
        d.s = 1;                /* Not a system segment */
        d.p = 1;                /* Present */
        d.d = 1;                /* 32-bit */

        write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}

static int vgetcpu_online(unsigned int cpu)
{
        return smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1);
}
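
/*
 * Patch alternatives into the vdso images and register a CPU hotplug
 * callback so each CPU's vgetcpu segment is initialized as it comes online.
 */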
static int __init init_vdso(void)
{
        init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
        init_vdso_image(&vdso_image_x32);
#endif

        /* notifier priority > KVM */
        return cpuhp_setup_state(CPUHP_AP_X86_VDSO_VMA_ONLINE,
                                 "AP_X86_VDSO_VMA_ONLINE", vgetcpu_online, NULL);
}
subsys_initcall(init_vdso);
#endif /* CONFIG_X86_64 */