vma.c

/*
 * Set up the VMAs to tell the VM about the vDSO.
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <asm/vsyscall.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/page.h>

unsigned int __read_mostly vdso_enabled = 1;

extern char vdso_start[], vdso_end[];
extern unsigned short vdso_sync_cpuid;

extern struct page *vdso_pages[];
static unsigned vdso_size;

#ifdef CONFIG_X86_X32_ABI
extern char vdsox32_start[], vdsox32_end[];
extern struct page *vdsox32_pages[];
static unsigned vdsox32_size;

static void __init patch_vdsox32(void *vdso, size_t len)
{
        Elf32_Ehdr *hdr = vdso;
        Elf32_Shdr *sechdrs, *alt_sec = NULL;
        char *secstrings;
        void *alt_data;
        int i;

        BUG_ON(len < sizeof(Elf32_Ehdr));
        BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0);

        sechdrs = (void *)hdr + hdr->e_shoff;
        secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

        for (i = 1; i < hdr->e_shnum; i++) {
                Elf32_Shdr *shdr = &sechdrs[i];
                if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) {
                        alt_sec = shdr;
                        goto found;
                }
        }

        /* If we get here, it's probably a bug. */
        pr_warning("patch_vdsox32: .altinstructions not found\n");
        return; /* nothing to patch */

found:
        alt_data = (void *)hdr + alt_sec->sh_offset;
        apply_alternatives(alt_data, alt_data + alt_sec->sh_size);
}
#endif

static void __init patch_vdso64(void *vdso, size_t len)
{
        Elf64_Ehdr *hdr = vdso;
        Elf64_Shdr *sechdrs, *alt_sec = NULL;
        char *secstrings;
        void *alt_data;
        int i;

        BUG_ON(len < sizeof(Elf64_Ehdr));
        BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0);

        sechdrs = (void *)hdr + hdr->e_shoff;
        secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

        for (i = 1; i < hdr->e_shnum; i++) {
                Elf64_Shdr *shdr = &sechdrs[i];
                if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) {
                        alt_sec = shdr;
                        goto found;
                }
        }

        /* If we get here, it's probably a bug. */
        pr_warning("patch_vdso64: .altinstructions not found\n");
        return; /* nothing to patch */

found:
        alt_data = (void *)hdr + alt_sec->sh_offset;
        apply_alternatives(alt_data, alt_data + alt_sec->sh_size);
}
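
/*
 * What apply_alternatives() does here, roughly: it walks the
 * struct alt_instr records collected in .altinstructions and, when the
 * boot CPU reports the required feature, rewrites the original
 * instructions in place with the replacement sequence (NOP-padded if
 * the replacement is shorter). Patching the vDSO image once at boot is
 * sufficient because the same physical pages are later mapped into
 * every process.
 */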

static int __init init_vdso(void)
{
        /* Round the image size up to a whole number of pages. */
        int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
        int i;

        patch_vdso64(vdso_start, vdso_end - vdso_start);
        vdso_size = npages << PAGE_SHIFT;
        for (i = 0; i < npages; i++)
                vdso_pages[i] = virt_to_page(vdso_start + i*PAGE_SIZE);

#ifdef CONFIG_X86_X32_ABI
        patch_vdsox32(vdsox32_start, vdsox32_end - vdsox32_start);
        npages = (vdsox32_end - vdsox32_start + PAGE_SIZE - 1) / PAGE_SIZE;
        vdsox32_size = npages << PAGE_SHIFT;
        for (i = 0; i < npages; i++)
                vdsox32_pages[i] = virt_to_page(vdsox32_start + i*PAGE_SIZE);
#endif

        return 0;
}
subsys_initcall(init_vdso);
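
/*
 * A worked example of the rounding in init_vdso(), with a hypothetical
 * image size: a 6500-byte vDSO rounds up to npages = 2, so vdso_size =
 * 8192 with 4 KB pages; the remainder of the second page is still
 * mapped but unused.
 */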

struct linux_binprm;

/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset. This way there is no hole in the middle of address space.
 * To save memory make sure it is still in the same PTE as the stack
 * top. This doesn't give that many random bits.
 *
 * Note that this algorithm is imperfect: the distribution of the vdso
 * start address within a PMD is biased toward the end.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
        unsigned long addr, end;
        unsigned offset;

        /*
         * Round up the start address. It can start out unaligned as a
         * result of stack start randomization.
         */
        start = PAGE_ALIGN(start);

        /* Round the lowest possible end address up to a PMD boundary. */
        end = (start + len + PMD_SIZE - 1) & PMD_MASK;
        if (end >= TASK_SIZE_MAX)
                end = TASK_SIZE_MAX;
        end -= len;

        if (end > start) {
                /* Choose a random page offset in [0, (end - start) >> PAGE_SHIFT]. */
                offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
                addr = start + (offset << PAGE_SHIFT);
        } else {
                addr = start;
        }

        /*
         * addr is already page-aligned at this point; additionally align
         * it the same way get_unmapped_area() would, so the hint is not
         * moved again later.
         */
        addr = PAGE_ALIGN(addr);
        addr = align_addr(addr, NULL, ALIGN_VDSO);

        return addr;
}
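
/*
 * Rough worked example, with hypothetical numbers: assume PAGE_SIZE =
 * 4 KB, PMD_SIZE = 2 MB, len = 8 KB (two pages), and a page-aligned,
 * 2 MB-aligned start S. Then end rounds up to S + 2 MB, and after
 * "end -= len" the window is [S, S + 0x1fe000], i.e. 511 possible
 * page-aligned start addresses (about 9 bits of entropy), matching
 * "doesn't give that many random bits" above.
 */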

/*
 * Set up a VMA at program startup for the vsyscall page.
 * Not called for compat tasks.
 */
static int setup_additional_pages(struct linux_binprm *bprm,
                                  int uses_interp,
                                  struct page **pages,
                                  unsigned size)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr;
        int ret;

        if (!vdso_enabled)
                return 0;

        down_write(&mm->mmap_sem);
        addr = vdso_addr(mm->start_stack, size);
        addr = get_unmapped_area(NULL, addr, size, 0, 0);
        if (IS_ERR_VALUE(addr)) {
                ret = addr;
                goto up_fail;
        }

        mm->context.vdso = (void *)addr;

        ret = install_special_mapping(mm, addr, size,
                                      VM_READ|VM_EXEC|
                                      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
                                      pages);
        if (ret)
                mm->context.vdso = NULL;

up_fail:
        up_write(&mm->mmap_sem);
        return ret;
}
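
/*
 * Note the protection bits above: the mapping itself is read + execute
 * only, but VM_MAYWRITE is left set. As far as I can tell this is what
 * lets a debugger write breakpoints into the vDSO via ptrace (the write
 * then COWs a private copy for the traced process) without making the
 * shared pages writable for normal userspace.
 */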

int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        return setup_additional_pages(bprm, uses_interp, vdso_pages,
                                      vdso_size);
}

#ifdef CONFIG_X86_X32_ABI
int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        return setup_additional_pages(bprm, uses_interp, vdsox32_pages,
                                      vdsox32_size);
}
#endif

static __init int vdso_setup(char *s)
{
        vdso_enabled = simple_strtoul(s, NULL, 0);
        return 1; /* __setup handlers return 1 once the option is consumed */
}
__setup("vdso=", vdso_setup);