/*
 *  linux/arch/m32r/mm/fault.c
 *
 *  Copyright (c) 2001, 2002  Hitoshi Yamamoto, and H. Kondo
 *  Copyright (c) 2004  Naoto Sugai, NIIBE Yutaka
 *
 *  Some code taken from i386 version.
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>		/* For unblank_screen() */
#include <linux/highmem.h>
#include <linux/module.h>

#include <asm/m32r.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

extern void die(const char *, struct pt_regs *, long);
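
/*
 * Note: tlb_entry_i/tlb_entry_d hold the round-robin replacement index
 * that update_mmu_cache() falls back on when every ITLB/DTLB slot is
 * valid.  On SMP each CPU keeps its own index, since the TLBs are
 * per-CPU.
 */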
#ifndef CONFIG_SMP
asmlinkage unsigned int tlb_entry_i_dat;
asmlinkage unsigned int tlb_entry_d_dat;
#define tlb_entry_i tlb_entry_i_dat
#define tlb_entry_d tlb_entry_d_dat
#else
unsigned int tlb_entry_i_dat[NR_CPUS];
unsigned int tlb_entry_d_dat[NR_CPUS];
#define tlb_entry_i tlb_entry_i_dat[smp_processor_id()]
#define tlb_entry_d tlb_entry_d_dat[smp_processor_id()]
#endif

extern void init_tlb(void);

/*======================================================================*
 * do_page_fault()
 *======================================================================*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * ARGUMENT:
 *  regs       : M32R SP reg.
 *  error_code : See below
 *  address    : M32R MMU MDEVA reg. (Operand ACE)
 *             : M32R BPC reg. (Instruction ACE)
 *
 * error_code :
 *  bit 0 == 0 means no page found, 1 means protection fault
 *  bit 1 == 0 means read, 1 means write
 *  bit 2 == 0 means kernel, 1 means user-mode
 *  bit 3 == 0 means data, 1 means instruction
 *======================================================================*/
#define ACE_PROTECTION		1
#define ACE_WRITE		2
#define ACE_USERMODE		4
#define ACE_INSTRUCTION		8

asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
			      unsigned long address)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long page, addr;
	int write;
	int fault;
	siginfo_t info;

	/*
	 * If the BPSW IE bit was set when the exception was taken,
	 * re-enable interrupts (set the PSW IE bit).
	 */
	if (regs->psw & M32R_PSW_BIE)
		local_irq_enable();

	tsk = current;

	info.si_code = SEGV_MAPERR;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * (error_code & ACE_USERMODE) == 0, and that the fault was not a
	 * protection error (error_code & ACE_PROTECTION) == 0.
	 */
	if (address >= TASK_SIZE && !(error_code & ACE_USERMODE))
		goto vmalloc_fault;

	mm = tsk->mm;

	/*
	 * If we're in an interrupt or have no user context or are running
	 * in an atomic region then we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto bad_area_nosemaphore;

	/* When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in the
	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space.  Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space, if we cannot we then validate the
	 * source.  If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if ((error_code & ACE_USERMODE) == 0 &&
		    !search_exception_tables(regs->psw))
			goto bad_area_nosemaphore;
		down_read(&mm->mmap_sem);
	}

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;

	if (error_code & ACE_USERMODE) {
		/*
		 * accessing the stack below "spu" is always a bug.
		 * The "+ 4" is there due to the push instruction
		 * doing pre-decrement on the stack and that
		 * doesn't show up until later..
		 */
		if (address + 4 < regs->spu)
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	info.si_code = SEGV_ACCERR;
	write = 0;
	switch (error_code & (ACE_WRITE|ACE_PROTECTION)) {
	default:		/* 3: write, present */
		/* fall through */
	case ACE_WRITE:		/* write, not present */
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		write++;
		break;
	case ACE_PROTECTION:	/* read, present */
	case 0:			/* read, not present */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/*
	 * For an instruction access exception, check that the area
	 * is executable.
	 */
	if ((error_code & ACE_INSTRUCTION) && !(vma->vm_flags & VM_EXEC))
		goto bad_area;

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	addr = (address & PAGE_MASK);
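	/*
	 * Stash the fault type for update_mmu_cache(), which runs on the
	 * way out of the fault and uses ACE_INSTRUCTION to decide whether
	 * the ITLB or the DTLB gets the new translation.
	 */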
	set_thread_fault_code(error_code);
	fault = handle_mm_fault(mm, vma, addr, write ? FAULT_FLAG_WRITE : 0);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;
	set_thread_fault_code(0);
	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (error_code & ACE_USERMODE) {
		tsk->thread.address = address;
		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
		tsk->thread.trap_no = 14;
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void __user *)address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request");
	printk(" at virtual address %08lx\n", address);
	printk(KERN_ALERT " printing bpc:\n");
	printk("%08lx\n", regs->bpc);

	page = *(unsigned long *)MPTB;
	page = ((unsigned long *) page)[address >> PGDIR_SHIFT];
	printk(KERN_ALERT "*pde = %08lx\n", page);
	if (page & _PAGE_PRESENT) {
		page &= PAGE_MASK;
		address &= 0x003ff000;
		page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
		printk(KERN_ALERT "*pte = %08lx\n", page);
	}
	die("Oops", regs, error_code);
	bust_spinlocks(0);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (!(error_code & ACE_USERMODE))
		goto no_context;
	pagefault_out_of_memory();
	return;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exception or die */
	if (!(error_code & ACE_USERMODE))
		goto no_context;

	tsk->thread.address = address;
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 14;
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGBUS, &info, tsk);
	return;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		pgd = (pgd_t *)*(unsigned long *)MPTB;
		pgd = offset + (pgd_t *)pgd;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd_k))
			goto no_context;

		/*
		 * set_pgd(pgd, *pgd_k); here would be useless on PAE
		 * and redundant with the set_pmd() on non-PAE.
		 */
		pmd = pmd_offset(pgd, address);
		pmd_k = pmd_offset(pgd_k, address);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);

		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;

		addr = (address & PAGE_MASK);
		set_thread_fault_code(error_code);
		update_mmu_cache(NULL, addr, pte_k);
		set_thread_fault_code(0);
		return;
	}
}

/*======================================================================*
 * update_mmu_cache()
 *======================================================================*/
#define TLB_MASK	(NR_TLB_ENTRIES - 1)
#define ITLB_END	(unsigned long *)(ITLB_BASE + (NR_TLB_ENTRIES * 8))
#define DTLB_END	(unsigned long *)(DTLB_BASE + (NR_TLB_ENTRIES * 8))
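
/*
 * Each TLB slot is a pair of 32-bit words: a tag (page-aligned virtual
 * address ORed with the ASID) followed by a data word holding the pte,
 * whose bit 1 is the valid bit -- hence the "* 8" above and the
 * two-word stepping in the loops below.
 */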
void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr,
		      pte_t *ptep)
{
	volatile unsigned long *entry1, *entry2;
	unsigned long pte_data, flags;
	unsigned int *entry_dat;
	int inst = get_thread_fault_code() & ACE_INSTRUCTION;
	int i;

	/* Ptrace may call this routine. */
	if (vma && current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);
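	/*
	 * TLB tags are matched on page number plus ASID, so fold the
	 * current ASID into the lookup key.
	 */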
	vaddr = (vaddr & PAGE_MASK) | get_asid();

	pte_data = pte_val(*ptep);

#ifdef CONFIG_CHIP_OPSP
	entry1 = (unsigned long *)ITLB_BASE;
	for (i = 0; i < NR_TLB_ENTRIES; i++) {
		if (*entry1++ == vaddr) {
			set_tlb_data(entry1, pte_data);
			break;
		}
		entry1++;
	}
	entry2 = (unsigned long *)DTLB_BASE;
	for (i = 0; i < NR_TLB_ENTRIES; i++) {
		if (*entry2++ == vaddr) {
			set_tlb_data(entry2, pte_data);
			break;
		}
		entry2++;
	}
#else
	/*
	 * Update TLB entries using the hardware entry search (register
	 * roles here are read off the operand names): store the tagged
	 * vaddr to MSVA, kick the search by storing 1 to MTOP, poll
	 * MTOP until it clears, then read back the matched entry
	 * addresses and store pte_data into each entry's data word.
	 *	entry1: ITLB entry address
	 *	entry2: DTLB entry address
	 */
	__asm__ __volatile__ (
		"seth	%0, #high(%4)	\n\t"
		"st	%2, @(%5, %0)	\n\t"
		"ldi	%1, #1		\n\t"
		"st	%1, @(%6, %0)	\n\t"
		"add3	r4, %0, %7	\n\t"
		".fillinsn		\n"
		"1:			\n\t"
		"ld	%1, @(%6, %0)	\n\t"
		"bnez	%1, 1b		\n\t"
		"ld	%0, @r4+	\n\t"
		"ld	%1, @r4		\n\t"
		"st	%3, @+%0	\n\t"
		"st	%3, @+%1	\n\t"
		: "=&r" (entry1), "=&r" (entry2)
		: "r" (vaddr), "r" (pte_data), "i" (MMU_REG_BASE),
		  "i" (MSVA_offset), "i" (MTOP_offset), "i" (MIDXI_offset)
		: "r4", "memory"
	);
#endif

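	/*
	 * Both search variants leave the entry pointer at or past the end
	 * of the table when no tag matched, so the check below treats a
	 * pointer beyond ITLB_END/DTLB_END as "no match" and allocates a
	 * fresh entry.
	 */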
	if ((!inst && entry2 >= DTLB_END) || (inst && entry1 >= ITLB_END))
		goto notfound;

found:
	local_irq_restore(flags);
	return;

	/* Valid entry not found */
notfound:
	/*
	 * Update ITLB or DTLB entry
	 *	entry1: TLB entry address
	 *	entry2: TLB base address
	 */
	if (!inst) {
		entry2 = (unsigned long *)DTLB_BASE;
		entry_dat = &tlb_entry_d;
	} else {
		entry2 = (unsigned long *)ITLB_BASE;
		entry_dat = &tlb_entry_i;
	}
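	/*
	 * Scan backwards from just below the round-robin index for a slot
	 * whose valid bit is clear; if every slot is in use, evict the
	 * slot at the index and advance the index.
	 */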
	entry1 = entry2 + (((*entry_dat - 1) & TLB_MASK) << 1);

	for (i = 0; i < NR_TLB_ENTRIES; i++) {
		if (!(entry1[1] & 2))	/* Valid bit check */
			break;

		if (entry1 != entry2)
			entry1 -= 2;
		else
			entry1 += TLB_MASK << 1;
	}

	if (i >= NR_TLB_ENTRIES) {	/* Empty entry not found */
		entry1 = entry2 + (*entry_dat << 1);
		*entry_dat = (*entry_dat + 1) & TLB_MASK;
	}

	*entry1++ = vaddr;	/* Set TLB tag */
	set_tlb_data(entry1, pte_data);

	goto found;
}

/*======================================================================*
 * flush_tlb_page() : flushes one page
 *======================================================================*/
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	if (vma->vm_mm && mm_context(vma->vm_mm) != NO_CONTEXT) {
		unsigned long flags;

		local_irq_save(flags);
		page &= PAGE_MASK;
		page |= (mm_context(vma->vm_mm) & MMU_CONTEXT_ASID_MASK);
		__flush_tlb_page(page);
		local_irq_restore(flags);
	}
}

/*======================================================================*
 * flush_tlb_range() : flushes a range of pages
 *======================================================================*/
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	struct mm_struct *mm;

	mm = vma->vm_mm;
	if (mm_context(mm) != NO_CONTEXT) {
		unsigned long flags;
		int size;

		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		if (size > (NR_TLB_ENTRIES / 4)) {
			/*
			 * Too many entries to flush one by one;
			 * invalidate the whole context instead.
			 */
			mm_context(mm) = NO_CONTEXT;
			if (mm == current->mm)
				activate_context(mm);
		} else {
			unsigned long asid;

			asid = mm_context(mm) & MMU_CONTEXT_ASID_MASK;
			start &= PAGE_MASK;
			end += (PAGE_SIZE - 1);
			end &= PAGE_MASK;

			start |= asid;
			end |= asid;
			while (start < end) {
				__flush_tlb_page(start);
				start += PAGE_SIZE;
			}
		}
		local_irq_restore(flags);
	}
}

/*======================================================================*
 * flush_tlb_mm() : flushes the specified mm context TLB's
 *======================================================================*/
void local_flush_tlb_mm(struct mm_struct *mm)
{
	/*
	 * Invalidate all TLB entries of this process.  Instead of
	 * invalidating each entry, we get a new MMU context.
	 */
	if (mm_context(mm) != NO_CONTEXT) {
		unsigned long flags;

		local_irq_save(flags);
		mm_context(mm) = NO_CONTEXT;
		if (mm == current->mm)
			activate_context(mm);
		local_irq_restore(flags);
	}
}

/*======================================================================*
 * flush_tlb_all() : flushes all processes TLBs
 *======================================================================*/
void local_flush_tlb_all(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__flush_tlb_all();
	local_irq_restore(flags);
}

/*======================================================================*
 * init_mmu()
 *======================================================================*/
void __init init_mmu(void)
{
	tlb_entry_i = 0;
	tlb_entry_d = 0;
	mmu_context_cache = MMU_CONTEXT_FIRST_VERSION;
	set_asid(mmu_context_cache & MMU_CONTEXT_ASID_MASK);
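	/* Point the MMU's page-table base register at the kernel master page table. */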
	*(volatile unsigned long *)MPTB = (unsigned long)swapper_pg_dir;
}