fault.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995 - 2000 by Ralf Baechle
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>

#include <asm/branch.h>
#include <asm/mmu_context.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/highmem.h>	/* For VMALLOC_END */
#include <linux/kdebug.h>

/*
 * This routine handles page faults. It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long write,
	unsigned long address)
{
	struct vm_area_struct *vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	const int field = sizeof(unsigned long) * 2;
	siginfo_t info;
	int fault;
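	/*
	 * Let the fault handler drop mmap_sem and retry once, and allow a
	 * fatal signal to interrupt the wait while it does so.
	 */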
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
			     (write ? FAULT_FLAG_WRITE : 0);

#if 0
	printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", raw_smp_processor_id(),
	       current->comm, current->pid, field, address, write,
	       field, regs->cp0_epc);
#endif

#ifdef CONFIG_KPROBES
	/*
	 * Notify any kprobes fault handler. The exception code is
	 * redundant, as it is also carried in regs, but we pass it anyway.
	 */
	if (notify_die(DIE_PAGE_FAULT, "page fault", regs, -1,
		       (regs->cp0_cause >> 2) & 0x1f, SIGSEGV) == NOTIFY_STOP)
		return;
#endif

	info.si_code = SEGV_MAPERR;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
#ifdef CONFIG_64BIT
# define VMALLOC_FAULT_TARGET no_context
#else
# define VMALLOC_FAULT_TARGET vmalloc_fault
#endif
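	/*
	 * On 64-bit kernels no lazy page-table synchronization is needed
	 * for the vmalloc range, so a fault there means the mapping is
	 * genuinely absent and is treated as a kernel bug.
	 */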
	if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END))
		goto VMALLOC_FAULT_TARGET;
#ifdef MODULE_START
	if (unlikely(address >= MODULE_START && address < MODULE_END))
		goto VMALLOC_FAULT_TARGET;
#endif

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto bad_area_nosemaphore;

retry:
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
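	/*
	 * The address lies below the closest vma, so it can only be valid
	 * if that vma is a downward-growing stack we can expand.
	 */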
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;

/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	info.si_code = SEGV_ACCERR;

	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
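		/*
		 * With the RI/XI (read-inhibit/execute-inhibit) extension,
		 * illegal instruction fetches and illegal loads both arrive
		 * here as read faults; comparing the faulting address with
		 * the EPC tells the two cases apart.
		 */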
		if (kernel_uses_smartmips_rixi) {
			if (address == regs->cp0_epc && !(vma->vm_flags & VM_EXEC)) {
#if 0
				pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] XI violation\n",
					  raw_smp_processor_id(),
					  current->comm, current->pid,
					  field, address, write,
					  field, regs->cp0_epc);
#endif
				goto bad_area;
			}
			if (!(vma->vm_flags & VM_READ)) {
#if 0
				pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n",
					  raw_smp_processor_id(),
					  current->comm, current->pid,
					  field, address, write,
					  field, regs->cp0_epc);
#endif
				goto bad_area;
			}
		} else {
			if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
				goto bad_area;
		}
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);
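
	/*
	 * On VM_FAULT_RETRY, mmap_sem has already been released inside
	 * __lock_page_or_retry(), so a pending fatal signal lets us return
	 * without a matching up_read().
	 */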
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
			tsk->maj_flt++;
		} else {
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
			tsk->min_flt++;
		}
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;

			/*
			 * No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */
			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		tsk->thread.cp0_badvaddr = address;
		tsk->thread.error_code = write;
#if 0
		printk("do_page_fault() #2: sending SIGSEGV to %s for "
		       "invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
		       tsk->comm,
		       write ? "write access to" : "read access from",
		       field, address,
		       field, (unsigned long) regs->cp0_epc,
		       field, (unsigned long) regs->regs[31]);
#endif
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void __user *) address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:
	/*
	 * Are we prepared to handle this kernel fault? If the faulting
	 * instruction has an entry in the exception table, fixup_exception()
	 * rewrites the EPC to point at its fixup handler.
	 */
	if (fixup_exception(regs)) {
		current->thread.cp0_baduaddr = address;
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	printk(KERN_ALERT "CPU %d Unable to handle kernel paging request at "
	       "virtual address %0*lx, epc == %0*lx, ra == %0*lx\n",
	       raw_smp_processor_id(), field, address, field, regs->cp0_epc,
	       field, regs->regs[31]);
	die("Oops", regs);

out_of_memory:
	/*
	 * We ran out of memory, call the OOM killer, and return to userspace
	 * (which will retry the fault, or kill us if we got oom-killed).
	 */
	up_read(&mm->mmap_sem);
	pagefault_out_of_memory();
	return;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
#if 0
	printk("do_page_fault() #3: sending SIGBUS to %s for "
	       "invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
	       tsk->comm,
	       write ? "write access to" : "read access from",
	       field, address,
	       field, (unsigned long) regs->cp0_epc,
	       field, (unsigned long) regs->regs[31]);
#endif
	tsk->thread.cp0_badvaddr = address;
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void __user *) address;
	force_sig_info(SIGBUS, &info, tsk);
	return;

#ifndef CONFIG_64BIT
vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int offset = __pgd_offset(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;
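
		/*
		 * pgd_current[] holds the top-level page table the TLB
		 * refill handler uses on each CPU; copy any kernel entries
		 * missing from it out of init_mm.pgd, level by level.
		 */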
		pgd = (pgd_t *) pgd_current[raw_smp_processor_id()] + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd_k))
			goto no_context;
		set_pgd(pgd, *pgd_k);

		pud = pud_offset(pgd, address);
		pud_k = pud_offset(pgd_k, address);
		if (!pud_present(*pud_k))
			goto no_context;

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);

		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;
		return;
	}
#endif
}