/*
 *  linux/arch/arm/mm/fault.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/page-flags.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/perf_event.h>
#ifdef CONFIG_TIMA_RKP
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/spinlock_types.h>
#endif

#include <asm/exception.h>
#include <asm/pgtable.h>
#include <asm/system_misc.h>
#include <asm/system_info.h>
#include <asm/tlbflush.h>
#include <asm/cputype.h>
#if defined(CONFIG_ARCH_MSM_SCORPION) && !defined(CONFIG_MSM_SMP)
#include <asm/io.h>
#include <mach/msm_iomap.h>
#endif

#include "fault.h"

#define CREATE_TRACE_POINTS
#include <trace/events/exception.h>

#ifdef CONFIG_MMU

#ifdef CONFIG_KPROBES
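/*
 * If the fault was taken while a kprobe was active, give the kprobe
 * fault handler first crack at it; the probe itself may have caused
 * the fault (e.g. while single-stepping the probed instruction).
 */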
static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
{
        int ret = 0;

        if (!user_mode(regs)) {
                /* kprobe_running() needs smp_processor_id() */
                preempt_disable();
                if (kprobe_running() && kprobe_fault_handler(regs, fsr))
                        ret = 1;
                preempt_enable();
        }

        return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
{
        return 0;
}
#endif
/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
void show_pte(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;

        if (!mm)
                mm = &init_mm;

        printk(KERN_ALERT "pgd = %p\n", mm->pgd);
        pgd = pgd_offset(mm, addr);
        printk(KERN_ALERT "[%08lx] *pgd=%08llx",
               addr, (long long)pgd_val(*pgd));

        do {
                pud_t *pud;
                pmd_t *pmd;
                pte_t *pte;

                if (pgd_none(*pgd))
                        break;

                if (pgd_bad(*pgd)) {
                        printk("(bad)");
                        break;
                }

                pud = pud_offset(pgd, addr);
                if (PTRS_PER_PUD != 1)
                        printk(", *pud=%08llx", (long long)pud_val(*pud));

                if (pud_none(*pud))
                        break;

                if (pud_bad(*pud)) {
                        printk("(bad)");
                        break;
                }

                pmd = pmd_offset(pud, addr);
                if (PTRS_PER_PMD != 1)
                        printk(", *pmd=%08llx", (long long)pmd_val(*pmd));

                if (pmd_none(*pmd))
                        break;

                if (pmd_bad(*pmd)) {
                        printk("(bad)");
                        break;
                }

                /* We must not map this if we have highmem enabled */
                if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
                        break;

                pte = pte_offset_map(pmd, addr);
#ifndef CONFIG_TIMA_RKP
                printk(", *pte=%08llx", (long long)pte_val(*pte));
#ifndef CONFIG_ARM_LPAE
                printk(", *ppte=%08llx",
                       (long long)pte_val(pte[PTE_HWTABLE_PTRS]));
#endif
#endif
                pte_unmap(pte);
        } while (0);

        printk("\n");
}
#else /* CONFIG_MMU */
void show_pte(struct mm_struct *mm, unsigned long addr)
{ }
#endif /* CONFIG_MMU */
#ifdef CONFIG_TIMA_RKP
#if 0
inline void tima_dump_log2(void)
{
        char *tima_log = (char *)0xde100000, *ptr, *ptr1;
        int line_ctr = 0;

        return; /* WARNING: THIS FUNCTION HAS BEEN DISABLED */
        /*
         * After the move to the new memory address there is no virtual
         * address for the log; this function is disabled pending the
         * availability of one.
         */
        ptr = tima_log;
        ptr1 = ptr;
        while (line_ctr < 100) {
                line_ctr++;
                while (*ptr1 != '\n')
                        ptr1++;
                *ptr1 = '\0';
                printk(KERN_EMERG "%s\n", ptr);
                *ptr1 = '\n';
                ptr1++;
                if (*ptr1 == '\0')
                        break;
                ptr = ptr1;
        }
}
#endif
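/*
 * Debug helper (effectively disabled by the early return below): walk
 * init_mm's page tables by hand for the page containing 'pmdp' and
 * warn if the hardware PTE protection bits do not match the expected
 * read-only/read-write state requested via 'rd_only'.
 */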
inline void tima_verify_state(unsigned long pmdp, unsigned long val,
                              unsigned long rd_only, unsigned long caller)
{
        unsigned long pmdp_addr = (unsigned long)pmdp;
        unsigned long init_pgd, pgd_val;
        unsigned long init_pte;
        unsigned long pte_val;
        unsigned long npte_val;
        static unsigned long call_count = 0;

        return; /* WARNING: THIS FUNCTION HAS BEEN DISABLED BECAUSE
                 * SECT_TO_PGT CAN NO LONGER BE ACCESSED VIA VIRTUAL
                 * MEMORY MAPPING */

        if ((pmdp >> 24) == 0xc0)
                return;
        call_count++;

        init_pgd = (unsigned long)init_mm.pgd;
        init_pgd += (pmdp_addr >> 20) << 2;
        pgd_val = (unsigned long)*(unsigned long *)init_pgd;
        if ((pgd_val & 0x3) != 0x1) {
                printk(KERN_ERR "TIMA: Entry is not L2 page. VA:%lx, PGD=%lx\n",
                       pmdp, pgd_val);
                return;
        }
        init_pte = (unsigned long)__va(pgd_val & (~0x3ff));
        init_pte += ((pmdp_addr >> 12) & 0xff) << 2;
        pte_val = *(unsigned long *)init_pte;
        invalidate_caches(init_pte, 4, __pa(init_pte));
        npte_val = *(unsigned long *)init_pte;

        if (rd_only) {
                if ((pte_val & 0x230) != 0x210) { /* Page is RO */
                        printk(KERN_ERR "Page is NOT RO, CALLER=%lx VA=%lx, PTE=%lx FLUSHED PTE=%lx PA=%lx\n",
                               caller, pmdp_addr, pte_val, npte_val,
                               __pa(pmdp_addr));
                        /* tima_send_cmd(pmdp_addr, 0x0e); */
                        /* tima_dump_log2(); */
                }
        } else {
                if ((pte_val & 0x230) != 0x010) { /* Page is RW */
                        printk(KERN_ERR "Page is NOT RW, CALLER=%lx VA=%lx, PTE=%lx FLUSHED PTE=%lx PA=%lx\n",
                               caller, pmdp_addr, pte_val, npte_val,
                               __pa(pmdp_addr));
                        /* tima_send_cmd(pmdp_addr, 0x0e); */
                        /* tima_dump_log2(); */
                }
        }
}
/*
 * Check if a certain va has been made read-only by TIMA.
 * Return: -1 error, 0 writable, 1 read-only.
 */
extern unsigned long tima_switch_count;
#ifdef CONFIG_TIMA_RKP_30
extern unsigned long pgt_bit_array[];
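/*
 * RKP 3.0 evidently keeps a bitmap with one bit per physical page
 * frame; a set bit means the page was marked read-only by the secure
 * world. Look the bit up by PFN, invalidating the cache line first so
 * we observe the secure side's latest update.
 */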
int tima_is_pg_protected(unsigned long va)
{
        unsigned long paddr = __pa(va);
        unsigned long index = paddr >> PAGE_SHIFT;
        unsigned long *p = (unsigned long *)pgt_bit_array;
        unsigned long tmp = index >> 5;
        unsigned long rindex;
        unsigned long val;

        p += tmp;
#ifndef CONFIG_TIMA_RKP_COHERENT_TT
        /* DCIMVAC: invalidate the cache line holding the bitmap word */
        asm volatile("mcr p15, 0, %0, c7, c6, 1\n"
                     "dsb\n"
                     "isb\n"
                     : : "r" (p));
#endif
        rindex = index % 32;
        val = ((*p) & (1 << rindex)) ? 1 : 0;

        return val;
}
#else
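/*
 * Without the RKP 3.0 bitmap, ask the MMU instead: perform a
 * privileged-write stage 1 address translation (ATS1CPW) on the va
 * and read the result back from the PAR. A failed translation
 * (PAR bit 0 set) means the page is not writable from the kernel,
 * i.e. it was made read-only.
 */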
static DEFINE_RAW_SPINLOCK(par_lock);

int tima_is_pg_protected(unsigned long va)
{
        unsigned long par;
        unsigned long flags;

        /*
         * Translate the page as a privileged write. Failing means a
         * read-only page (the translation itself was confirmed by the
         * previous step).
         */
        raw_spin_lock_irqsave(&par_lock, flags);
        __asm__ __volatile__ (
                "mcr p15, 0, %1, c7, c8, 1\n"
                "dsb\n"
                "isb\n"
                "mrc p15, 0, %0, c7, c4, 0\n"
                : "=r" (par) : "r" (va));
        raw_spin_unlock_irqrestore(&par_lock, flags);

        if (par & 0x1)
                return 1;
        return 0;
}
#endif /* CONFIG_TIMA_RKP_30 */
EXPORT_SYMBOL(tima_is_pg_protected);
#endif

#ifdef CONFIG_TIMA_RKP
#if defined(CONFIG_TIMA_RKP_30) || defined(CONFIG_ARCH_MSM8974)
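/*
 * The two ARM store encodings used by cpu_v7_set_pte_ext to write a
 * PTE: 0xe5801000 is "str r1, [r0]" and 0xe5a03800 is
 * "str r3, [r0, #0x800]!".
 */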
#define INS_STR_R1 0xe5801000
#define INS_STR_R3 0xe5a03800
extern void *cpu_v7_set_pte_ext_proc_end;
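/*
 * A write to a page table page protected by RKP traps here. If the
 * faulting instruction is one of the two known stores inside
 * cpu_v7_set_pte_ext, emulate it: hand the PTE write to the secure
 * world (tima_send_cmd2) and step the PC past the store. Anything
 * else is refused.
 */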
static unsigned int rkp_fixup(unsigned long addr, struct pt_regs *regs)
{
        unsigned long inst = *((unsigned long *)regs->ARM_pc);
        unsigned long reg_val = 0;
        unsigned long emulate = 0;

        if (regs->ARM_pc < (long)cpu_v7_set_pte_ext ||
            regs->ARM_pc > (long)&cpu_v7_set_pte_ext_proc_end) {
                printk(KERN_ERR
                       "RKP -> Inst %lx out of cpu_v7_set_pte_ext range from %lx to %lx\n",
                       (unsigned long)regs->ARM_pc, (long)cpu_v7_set_pte_ext,
                       (long)&cpu_v7_set_pte_ext_proc_end);
                return false;
        }

        if (inst == INS_STR_R1) {
                reg_val = regs->ARM_r1;
                emulate = 1;
        } else if (inst == INS_STR_R3) {
                reg_val = regs->ARM_r3;
                emulate = 1;
        }

        if (emulate) {
                printk(KERN_ERR "Emulating RKP instruction %lx at %p\n",
                       inst, (unsigned long *)regs->ARM_pc);
#ifndef CONFIG_TIMA_RKP_COHERENT_TT
                /* DCCIMVAC: clean and invalidate the PTE's cache line */
                asm volatile("mcr p15, 0, %0, c7, c14, 1\n"
                             "dsb\n"
                             "isb\n"
                             : : "r" (addr));
#endif
                tima_send_cmd2(__pa(addr), reg_val, 0x08);
#ifndef CONFIG_TIMA_RKP_COHERENT_TT
                /* DCIMVAC: invalidate so we re-read what the secure side wrote */
                asm volatile("mcr p15, 0, %0, c7, c6, 1\n"
                             "dsb\n"
                             "isb\n"
                             : : "r" (addr));
#endif
                regs->ARM_pc += 4;
                return true;
        }

        printk(KERN_ERR "CANNOT emulate RKP instruction %lx at %p\n",
               inst, (unsigned long *)regs->ARM_pc);
        return false;
}
#endif
#endif
/*
 * Oops. The kernel tried to access some page that wasn't present.
 */
static void
__do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
                  struct pt_regs *regs)
{
        /*
         * Are we prepared to handle this kernel fault?
         */
        if (fixup_exception(regs))
                return;

#ifdef CONFIG_TIMA_RKP
#if defined(CONFIG_TIMA_RKP_30) || defined(CONFIG_ARCH_MSM8974)
        if (addr >= 0xc0000000 && (fsr & FSR_WRITE)) {
                if (rkp_fixup(addr, regs))
                        return;
        }
#else
        printk(KERN_ERR "TIMA:====> %lx, [%lx]\n", addr, tima_switch_count);
        if (addr >= 0xc0000000 && (fsr & FSR_WRITE)) {
                printk(KERN_ERR "TIMA:==> Handling fault for %lx\n", addr);
                tima_send_cmd(addr, 0x21);
                /* TLBIALLIS: flush stale translations on all cores */
                __asm__ ("mcr p15, 0, %0, c8, c3, 0\n"
                         "isb"
                         : : "r" (0));
                if (tima_is_pg_protected(addr) == 1) {
                        /* Is the page still read-only even after we freed it? */
                        printk(KERN_ERR "TIMA ==> Err freeing page %lx\n", addr);
                } else {
                        return;
                }
        }
#endif
#endif
        /*
         * No handler, we'll have to terminate things with extreme prejudice.
         */
        bust_spinlocks(1);
        printk(KERN_ALERT
               "Unable to handle kernel %s at virtual address %08lx\n",
               (addr < PAGE_SIZE) ? "NULL pointer dereference" :
               "paging request", addr);

        show_pte(mm, addr);
#ifdef CONFIG_TIMA_RKP
        if (tima_is_pg_protected(addr) == 1)
                printk(KERN_ERR "RKP ==> Address %lx is RO by RKP\n", addr);
        tima_send_cmd(addr, 0x0e);
#endif
        die("Oops", regs, fsr);
        bust_spinlocks(0);
        do_exit(SIGKILL);
}
/*
 * Something tried to access memory that isn't in our memory map..
 * User mode accesses just cause a SIGSEGV
 */
static void
__do_user_fault(struct task_struct *tsk, unsigned long addr,
                unsigned int fsr, unsigned int sig, int code,
                struct pt_regs *regs)
{
        struct siginfo si;

        trace_user_fault(tsk, addr, fsr);
#ifdef CONFIG_DEBUG_USER
        if (((user_debug & UDBG_SEGV) && (sig == SIGSEGV)) ||
            ((user_debug & UDBG_BUS) && (sig == SIGBUS))) {
                printk(KERN_DEBUG "%s: unhandled page fault (%d) at 0x%08lx, code 0x%03x\n",
                       tsk->comm, sig, addr, fsr);
                show_pte(tsk->mm, addr);
                show_regs(regs);
        }
#endif

        tsk->thread.address = addr;
        tsk->thread.error_code = fsr;
        tsk->thread.trap_no = 14;
        si.si_signo = sig;
        si.si_errno = 0;
        si.si_code = code;
        si.si_addr = (void __user *)addr;
        force_sig_info(sig, &si, tsk);
}

void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->active_mm;

        /*
         * If we are in kernel mode at this point, we
         * have no context to handle this fault with.
         */
        if (user_mode(regs))
                __do_user_fault(tsk, addr, fsr, SIGSEGV, SEGV_MAPERR, regs);
        else
                __do_kernel_fault(mm, addr, fsr, regs);
}

#ifdef CONFIG_MMU
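/*
 * ARM-private fault codes returned by __do_page_fault, chosen above
 * the generic VM_FAULT_* flags so the two ranges cannot collide.
 */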
#define VM_FAULT_BADMAP    0x010000
#define VM_FAULT_BADACCESS 0x020000
/*
 * Check that the permissions on the VMA allow for the fault which
 * occurred. If we encountered a write fault, we must have write
 * permission, otherwise we allow any permission.
 */
static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
{
        unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;

        if (fsr & FSR_WRITE)
                mask = VM_WRITE;
        if (fsr & FSR_LNX_PF)
                mask = VM_EXEC;

        return vma->vm_flags & mask ? false : true;
}

static int __kprobes
__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
                unsigned int flags, struct task_struct *tsk)
{
        struct vm_area_struct *vma;
        int fault;

        vma = find_vma(mm, addr);
        fault = VM_FAULT_BADMAP;
        if (unlikely(!vma))
                goto out;
        if (unlikely(vma->vm_start > addr))
                goto check_stack;

        /*
         * Ok, we have a good vm_area for this
         * memory access, so we can handle it.
         */
good_area:
        if (access_error(fsr, vma)) {
                fault = VM_FAULT_BADACCESS;
                goto out;
        }

        return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);

check_stack:
        /* Don't allow expansion below FIRST_USER_ADDRESS */
        if (vma->vm_flags & VM_GROWSDOWN &&
            addr >= FIRST_USER_ADDRESS && !expand_stack(vma, addr))
                goto good_area;
out:
        return fault;
}
static int __kprobes
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
        struct task_struct *tsk;
        struct mm_struct *mm;
        int fault, sig, code;
        int write = fsr & FSR_WRITE;
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
                                (write ? FAULT_FLAG_WRITE : 0);

        if (notify_page_fault(regs, fsr))
                return 0;

        tsk = current;
        mm = tsk->mm;

        /* Enable interrupts if they were enabled in the parent context. */
        if (interrupts_enabled(regs))
                local_irq_enable();

        /*
         * If we're in an interrupt, or have no irqs, or have no user
         * context, we must not take the fault..
         */
        if (in_atomic() || irqs_disabled() || !mm)
                goto no_context;

        /*
         * As per x86, we may deadlock here. However, since the kernel only
         * validly references user space from well defined areas of the code,
         * we can bug out early if this is from code which shouldn't.
         */
        if (!down_read_trylock(&mm->mmap_sem)) {
                if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
                        goto no_context;
retry:
                down_read(&mm->mmap_sem);
        } else {
                /*
                 * The above down_read_trylock() might have succeeded in
                 * which case, we'll have missed the might_sleep() from
                 * down_read()
                 */
                might_sleep();
#ifdef CONFIG_DEBUG_VM
                if (!user_mode(regs) &&
                    !search_exception_tables(regs->ARM_pc))
                        goto no_context;
#endif
        }

        fault = __do_page_fault(mm, addr, fsr, flags, tsk);

        /* If we need to retry but a fatal signal is pending, handle the
         * signal first. We do not need to release the mmap_sem because
         * it would already be released in __lock_page_or_retry in
         * mm/filemap.c. */
        if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
                return 0;

        /*
         * Major/minor page fault accounting is only done on the
         * initial attempt. If we go through a retry, it is extremely
         * likely that the page will be found in page cache at that point.
         */
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
        if (!(fault & VM_FAULT_ERROR) && flags & FAULT_FLAG_ALLOW_RETRY) {
                if (fault & VM_FAULT_MAJOR) {
                        tsk->maj_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
                                      regs, addr);
                } else {
                        tsk->min_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
                                      regs, addr);
                }
                if (fault & VM_FAULT_RETRY) {
                        /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
                         * of starvation. */
                        flags &= ~FAULT_FLAG_ALLOW_RETRY;
                        goto retry;
                }
        }

        up_read(&mm->mmap_sem);

        /*
         * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
         */
        if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
                return 0;

        if (fault & VM_FAULT_OOM) {
                /*
                 * We ran out of memory, call the OOM killer, and return to
                 * userspace (which will retry the fault, or kill us if we
                 * got oom-killed)
                 */
                pagefault_out_of_memory();
                return 0;
        }

        /*
         * If we are in kernel mode at this point, we
         * have no context to handle this fault with.
         */
        if (!user_mode(regs))
                goto no_context;

        if (fault & VM_FAULT_SIGBUS) {
                /*
                 * We had some memory, but were unable to
                 * successfully fix up this page fault.
                 */
                sig = SIGBUS;
                code = BUS_ADRERR;
        } else {
                /*
                 * Something tried to access memory that
                 * isn't in our memory map..
                 */
                sig = SIGSEGV;
                code = fault == VM_FAULT_BADACCESS ?
                        SEGV_ACCERR : SEGV_MAPERR;
        }

        __do_user_fault(tsk, addr, fsr, sig, code, regs);
        return 0;

no_context:
        __do_kernel_fault(mm, addr, fsr, regs);
        return 0;
}
#else /* CONFIG_MMU */
static int
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
        return 0;
}
#endif /* CONFIG_MMU */
/*
 * First Level Translation Fault Handler
 *
 * We enter here because the first level page table doesn't contain
 * a valid entry for the address.
 *
 * If the address is in kernel space (>= TASK_SIZE), then we are
 * probably faulting in the vmalloc() area.
 *
 * If the init_task's first level page tables contain the relevant
 * entry, we copy it to this task. If not, we send the process
 * a signal, fixup the exception, or oops the kernel.
 *
 * NOTE! We MUST NOT take any locks for this case. We may be in an
 * interrupt or a critical region, and should only copy the information
 * from the master page table, nothing more.
 */
#ifdef CONFIG_MMU
static int __kprobes
do_translation_fault(unsigned long addr, unsigned int fsr,
                     struct pt_regs *regs)
{
        unsigned int index;
        pgd_t *pgd, *pgd_k;
        pud_t *pud, *pud_k;
        pmd_t *pmd, *pmd_k;

        if (addr < TASK_SIZE)
                return do_page_fault(addr, fsr, regs);

        if (user_mode(regs))
                goto bad_area;

        index = pgd_index(addr);

        /*
         * FIXME: CP15 C1 is write only on ARMv3 architectures.
         */
        pgd = cpu_get_pgd() + index;
        pgd_k = init_mm.pgd + index;

        if (pgd_none(*pgd_k))
                goto bad_area;
        if (!pgd_present(*pgd))
                set_pgd(pgd, *pgd_k);

        pud = pud_offset(pgd, addr);
        pud_k = pud_offset(pgd_k, addr);

        if (pud_none(*pud_k))
                goto bad_area;
        if (!pud_present(*pud))
                set_pud(pud, *pud_k);

        pmd = pmd_offset(pud, addr);
        pmd_k = pmd_offset(pud_k, addr);

#ifdef CONFIG_ARM_LPAE
        /*
         * Only one hardware entry per PMD with LPAE.
         */
        index = 0;
#else
        /*
         * On ARM one Linux PGD entry contains two hardware entries (see page
         * tables layout in pgtable.h). We normally guarantee that we always
         * fill both L1 entries. But create_mapping() doesn't follow the rule.
         * It can create individual L1 entries, so here we have to call the
         * pmd_none() check on the entry that really corresponds to the
         * address, not on the first entry of the pair.
         */
        index = (addr >> SECTION_SHIFT) & 1;
#endif
        if (pmd_none(pmd_k[index]))
                goto bad_area;

        copy_pmd(pmd, pmd_k);
        return 0;

bad_area:
        do_bad_area(addr, fsr, regs);
        return 0;
}
#else /* CONFIG_MMU */
static int
do_translation_fault(unsigned long addr, unsigned int fsr,
                     struct pt_regs *regs)
{
        return 0;
}
#endif /* CONFIG_MMU */

/*
 * Some section permission faults need to be handled gracefully.
 * They can happen due to a __{get,put}_user during an oops.
 */
static int
do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
        do_bad_area(addr, fsr, regs);
        return 0;
}

/*
 * This abort handler always returns "fault".
 */
static int
do_bad(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
        return 1;
}

#if defined(CONFIG_ARCH_MSM_SCORPION) && !defined(CONFIG_MSM_SMP)
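/*
 * Read and print one implementation-defined cp15 register; the token
 * pasting gives each register dump its own local variable.
 */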
#define __str(x) #x
#define MRC(x, v1, v2, v4, v5, v6) do {                            \
        unsigned int __##x;                                        \
        asm("mrc " __str(v1) ", " __str(v2) ", %0, " __str(v4) ", " \
            __str(v5) ", " __str(v6) "\n"                          \
            : "=r" (__##x));                                       \
        pr_info("%s: %s = 0x%.8x\n", __func__, #x, __##x);         \
} while (0)

#define MSM_TCSR_SPARE2 (MSM_TCSR_BASE + 0x60)
#endif

int
do_imprecise_ext(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
#if defined(CONFIG_ARCH_MSM_SCORPION) && !defined(CONFIG_MSM_SMP)
        MRC(ADFSR, p15, 0, c5, c1, 0);
        MRC(DFSR, p15, 0, c5, c0, 0);
        MRC(ACTLR, p15, 0, c1, c0, 1);
        MRC(EFSR, p15, 7, c15, c0, 1);
        MRC(L2SR, p15, 3, c15, c1, 0);
        MRC(L2CR0, p15, 3, c15, c0, 1);
        MRC(L2CPUESR, p15, 3, c15, c1, 1);
        MRC(L2CPUCR, p15, 3, c15, c0, 2);
        MRC(SPESR, p15, 1, c9, c7, 0);
        MRC(SPCR, p15, 0, c9, c7, 0);
        MRC(DMACHSR, p15, 1, c11, c0, 0);
        MRC(DMACHESR, p15, 1, c11, c0, 1);
        MRC(DMACHCR, p15, 0, c11, c0, 2);

        /* clear out EFSR and ADFSR after fault */
        asm volatile ("mcr p15, 7, %0, c15, c0, 1\n\t"
                      "mcr p15, 0, %0, c5, c1, 0"
                      : : "r" (0));

        pr_info("%s: TCSR_SPARE2 = 0x%.8x\n", __func__, readl(MSM_TCSR_SPARE2));
#endif
        return 1;
}

struct fsr_info {
        int (*fn)(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
        int sig;
        int code;
        const char *name;
};

/* FSR definition */
#ifdef CONFIG_ARM_LPAE
#include "fsr-3level.c"
#else
#include "fsr-2level.c"
#endif

void __init
hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
                int sig, int code, const char *name)
{
        if (nr < 0 || nr >= ARRAY_SIZE(fsr_info))
                BUG();

        fsr_info[nr].fn = fn;
        fsr_info[nr].sig = sig;
        fsr_info[nr].code = code;
        fsr_info[nr].name = name;
}

#ifdef CONFIG_MSM_KRAIT_TBB_ABORT_HANDLER
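/*
 * On Krait (CPU ID 0x510F04Dx) a conditional Thumb-2 instruction inside
 * an IT block can, it appears, report an abort even when its condition
 * check failed and it should have executed as a NOP. Recompute the
 * condition from the CPSR IT-state bits; if the instruction would not
 * have run, skip it instead of delivering the abort.
 */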
static int krait_tbb_fixup(unsigned int fsr, struct pt_regs *regs)
{
        int base_cond, cond = 0;
        unsigned int p1, cpsr_z, cpsr_c, cpsr_n, cpsr_v;

        if ((read_cpuid_id() & 0xFFFFFFFC) != 0x510F04D0)
                return 0;

        if (!thumb_mode(regs))
                return 0;

        /* If ITSTATE is 0, return quickly */
        if ((regs->ARM_cpsr & PSR_IT_MASK) == 0)
                return 0;

        cpsr_n = (regs->ARM_cpsr & PSR_N_BIT) ? 1 : 0;
        cpsr_z = (regs->ARM_cpsr & PSR_Z_BIT) ? 1 : 0;
        cpsr_c = (regs->ARM_cpsr & PSR_C_BIT) ? 1 : 0;
        cpsr_v = (regs->ARM_cpsr & PSR_V_BIT) ? 1 : 0;
        p1 = (regs->ARM_cpsr & BIT(12)) ? 1 : 0;

        base_cond = (regs->ARM_cpsr >> 13) & 0x07;

        switch (base_cond) {
        case 0x0:       /* equal */
                cond = cpsr_z;
                break;
        case 0x1:       /* carry set */
                cond = cpsr_c;
                break;
        case 0x2:       /* minus / negative */
                cond = cpsr_n;
                break;
        case 0x3:       /* overflow */
                cond = cpsr_v;
                break;
        case 0x4:       /* unsigned higher */
                cond = (cpsr_c == 1) && (cpsr_z == 0);
                break;
        case 0x5:       /* signed greater / equal */
                cond = (cpsr_n == cpsr_v);
                break;
        case 0x6:       /* signed greater */
                cond = (cpsr_z == 0) && (cpsr_n == cpsr_v);
                break;
        case 0x7:       /* always */
                cond = 1;
                break;
        }

        if (cond == p1) {
                pr_debug("Conditional abort fixup, PC=%08x, base=%d, cond=%d\n",
                         (unsigned int)regs->ARM_pc, base_cond, cond);
                regs->ARM_pc += 2;
                return 1;
        }
        return 0;
}
#endif
/*
 * Dispatch a data abort to the relevant handler.
 */
asmlinkage void __exception
do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
        const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
        struct siginfo info;

#ifdef CONFIG_MSM_KRAIT_TBB_ABORT_HANDLER
        if (krait_tbb_fixup(fsr, regs))
                return;
#endif

        if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
                return;

        trace_unhandled_abort(regs, addr, fsr);

        printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
               inf->name, fsr, addr);

        info.si_signo = inf->sig;
        info.si_errno = 0;
        info.si_code = inf->code;
        info.si_addr = (void __user *)addr;
        arm_notify_die("", regs, &info, fsr, 0);
}

void __init
hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
                 int sig, int code, const char *name)
{
        if (nr < 0 || nr >= ARRAY_SIZE(ifsr_info))
                BUG();

        ifsr_info[nr].fn = fn;
        ifsr_info[nr].sig = sig;
        ifsr_info[nr].code = code;
        ifsr_info[nr].name = name;
}

asmlinkage void __exception
do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
{
        const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
        struct siginfo info;

        if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
                return;

        trace_unhandled_abort(regs, addr, ifsr);

        printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
               inf->name, ifsr, addr);

        info.si_signo = inf->sig;
        info.si_errno = 0;
        info.si_code = inf->code;
        info.si_addr = (void __user *)addr;
        arm_notify_die("", regs, &info, ifsr, 0);
}

#ifndef CONFIG_ARM_LPAE
static int __init exceptions_init(void)
{
        if (cpu_architecture() >= CPU_ARCH_ARMv6) {
                hook_fault_code(4, do_translation_fault, SIGSEGV, SEGV_MAPERR,
                                "I-cache maintenance fault");
        }

        if (cpu_architecture() >= CPU_ARCH_ARMv7) {
                /*
                 * TODO: Access flag faults introduced in ARMv6K.
                 * Runtime check for 'K' extension is needed.
                 */
                hook_fault_code(3, do_bad, SIGSEGV, SEGV_MAPERR,
                                "section access flag fault");
                hook_fault_code(6, do_bad, SIGSEGV, SEGV_MAPERR,
                                "section access flag fault");
        }

        return 0;
}

arch_initcall(exceptions_init);
#endif