/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 * From i386 code copyright (C) 1995 Linus Torvalds
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>		/* For unblank_screen() */
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/hugetlb.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>

#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/traps.h>
#include <asm/syscalls.h>

#include <arch/interrupts.h>
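
/*
 * Deliver a fault signal to the given task, or panic if the fault
 * targets a process we can't afford to lose (init or the idle task).
 */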
static noinline void force_sig_info_fault(const char *type, int si_signo,
                                          int si_code, unsigned long address,
                                          int fault_num,
                                          struct task_struct *tsk,
                                          struct pt_regs *regs)
{
        siginfo_t info;

        if (unlikely(tsk->pid < 2)) {
                panic("Signal %d (code %d) at %#lx sent to %s!",
                      si_signo, si_code & 0xffff, address,
                      is_idle_task(tsk) ? "the idle task" : "init");
        }

        /* Zero first so we don't leak uninitialized padding to user space. */
        memset(&info, 0, sizeof(info));
        info.si_signo = si_signo;
        info.si_errno = 0;
        info.si_code = si_code;
        info.si_addr = (void __user *)address;
        info.si_trapno = fault_num;
        trace_unhandled_signal(type, regs, address, si_signo);
        force_sig_info(si_signo, &info, tsk);
}

#ifndef __tilegx__
/*
 * Synthesize the fault a PL0 process would get by doing a word-load of
 * an unaligned address or a high kernel address.
 */
SYSCALL_DEFINE2(cmpxchg_badaddr, unsigned long, address,
                struct pt_regs *, regs)
{
        if (address >= PAGE_OFFSET)
                force_sig_info_fault("atomic segfault", SIGSEGV, SEGV_MAPERR,
                                     address, INT_DTLB_MISS, current, regs);
        else
                force_sig_info_fault("atomic alignment fault", SIGBUS,
                                     BUS_ADRALN, address,
                                     INT_UNALIGN_DATA, current, regs);

        /*
         * Adjust pc to point at the actual faulting instruction, which is
         * unusual for a syscall, but appropriate here since we are claiming
         * that the syscall swint1 itself caused the page fault or bus error.
         */
        regs->pc -= 8;

        /*
         * Mark this as a caller-save interrupt, like a normal page fault,
         * so that when we go through the signal handler path we will
         * properly restore r0, r1, and r2 for the signal handler arguments.
         */
        regs->flags |= PT_FLAGS_CALLER_SAVES;

        return 0;
}
#endif
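
/*
 * Copy the kernel pmd covering 'address' from the reference init_mm
 * page table into the supplied pgd.  Returns the kernel pmd, or NULL
 * if init_mm has no mapping at that address.
 */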
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
        unsigned index = pgd_index(address);
        pgd_t *pgd_k;
        pud_t *pud, *pud_k;
        pmd_t *pmd, *pmd_k;

        pgd += index;
        pgd_k = init_mm.pgd + index;

        if (!pgd_present(*pgd_k))
                return NULL;

        pud = pud_offset(pgd, address);
        pud_k = pud_offset(pgd_k, address);
        if (!pud_present(*pud_k))
                return NULL;

        pmd = pmd_offset(pud, address);
        pmd_k = pmd_offset(pud_k, address);
        if (!pmd_present(*pmd_k))
                return NULL;

        if (!pmd_present(*pmd)) {
                set_pmd(pmd, *pmd_k);
                arch_flush_lazy_mmu_mode();
        } else
                BUG_ON(pmd_ptfn(*pmd) != pmd_ptfn(*pmd_k));

        return pmd_k;
}

/*
 * Handle a fault on the vmalloc area.
 */
static inline int vmalloc_fault(pgd_t *pgd, unsigned long address)
{
        pmd_t *pmd_k;
        pte_t *pte_k;

        /* Make sure we are in vmalloc area */
        if (!(address >= VMALLOC_START && address < VMALLOC_END))
                return -1;

        /*
         * Synchronize this task's top level page-table
         * with the 'reference' page table.
         */
        pmd_k = vmalloc_sync_one(pgd, address);
        if (!pmd_k)
                return -1;
        if (pmd_huge(*pmd_k))
                return 0;	/* support TILE huge_vmap() API */
        pte_k = pte_offset_kernel(pmd_k, address);
        if (!pte_present(*pte_k))
                return -1;
        return 0;
}

/* Wait until this PTE has completed migration. */
static void wait_for_migration(pte_t *pte)
{
        if (pte_migrating(*pte)) {
                /*
                 * Wait until the migrator fixes up this pte.
                 * We scale the loop count by the clock rate so we'll wait for
                 * a few seconds here.
                 */
                int retries = 0;
                int bound = get_clock_rate();
                while (pte_migrating(*pte)) {
                        barrier();
                        if (++retries > bound)
                                panic("Hit migrating PTE (%#llx) and"
                                      " page PFN %#lx still migrating",
                                      pte->val, pte_pfn(*pte));
                }
        }
}

/*
 * It's not generally safe to use "current" to get the page table pointer,
 * since we might be running an oprofile interrupt in the middle of a
 * task switch.
 */
static pgd_t *get_current_pgd(void)
{
        HV_Context ctx = hv_inquire_context();
        unsigned long pgd_pfn = ctx.page_table >> PAGE_SHIFT;
        struct page *pgd_page = pfn_to_page(pgd_pfn);
        BUG_ON(PageHighMem(pgd_page));	/* oops, HIGHPTE? */
        return (pgd_t *) __va(ctx.page_table);
}

/*
 * We can receive a page fault from a migrating PTE at any time.
 * Handle it by just waiting until the fault resolves.
 *
 * It's also possible to get a migrating kernel PTE that resolves
 * itself during the downcall from hypervisor to Linux.  We just check
 * here to see if the PTE seems valid, and if so we retry it.
 *
 * NOTE! We MUST NOT take any locks for this case.  We may be in an
 * interrupt or a critical region, and must do as little as possible.
 * Similarly, we can't use atomic ops here, since we may be handling a
 * fault caused by an atomic op access.
 *
 * If we find a migrating PTE while we're in an NMI context, and we're
 * at a PC that has a registered exception handler, we don't wait,
 * since this thread may (e.g.) have been interrupted while migrating
 * its own stack, which would then cause us to self-deadlock.
 */
static int handle_migrating_pte(pgd_t *pgd, int fault_num,
                                unsigned long address, unsigned long pc,
                                int is_kernel_mode, int write)
{
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        pte_t pteval;

        if (pgd_addr_invalid(address))
                return 0;

        pgd += pgd_index(address);
        pud = pud_offset(pgd, address);
        if (!pud || !pud_present(*pud))
                return 0;
        pmd = pmd_offset(pud, address);
        if (!pmd || !pmd_present(*pmd))
                return 0;
        pte = pmd_huge_page(*pmd) ? ((pte_t *)pmd) :
                pte_offset_kernel(pmd, address);
        pteval = *pte;
        if (pte_migrating(pteval)) {
                if (in_nmi() && search_exception_tables(pc))
                        return 0;
                wait_for_migration(pte);
                return 1;
        }
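
        /*
         * A kernel PTE may have been left valid by a migration that
         * completed during the hypervisor downcall (see comment above);
         * if it now permits the access that faulted, just retry it.
         */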
        if (!is_kernel_mode || !pte_present(pteval))
                return 0;
        if (fault_num == INT_ITLB_MISS) {
                if (pte_exec(pteval))
                        return 1;
        } else if (write) {
                if (pte_write(pteval))
                        return 1;
        } else {
                if (pte_read(pteval))
                        return 1;
        }

        return 0;
}

/*
 * This routine is responsible for faulting in user pages.
 * It passes the work off to one of the appropriate routines.
 * It returns true if the fault was successfully handled.
 */
static int handle_page_fault(struct pt_regs *regs,
                             int fault_num,
                             int is_page_fault,
                             unsigned long address,
                             int write)
{
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        unsigned long stack_offset;
        int fault;
        int si_code;
        int is_kernel_mode;
        pgd_t *pgd;

        /* on TILE, protection faults are always writes */
        if (!is_page_fault)
                write = 1;

        is_kernel_mode = (EX1_PL(regs->ex1) != USER_PL);

        tsk = validate_current();

        /*
         * Check to see if we might be overwriting the stack, and bail
         * out if so.  The page fault code is a relatively likely
         * place to get trapped in an infinite regress, and once we
         * overwrite the whole stack, it becomes very hard to recover.
         */
        stack_offset = stack_pointer & (THREAD_SIZE-1);
        if (stack_offset < THREAD_SIZE / 8) {
                pr_alert("Potential stack overrun: sp %#lx\n",
                         stack_pointer);
                show_regs(regs);
                pr_alert("Killing current process %d/%s\n",
                         tsk->pid, tsk->comm);
                do_group_exit(SIGKILL);
        }

        /*
         * Early on, we need to check for migrating PTE entries;
         * see homecache.c.  If we find a migrating PTE, we wait until
         * the backing page claims to be done migrating, then we proceed.
         * For kernel PTEs, we rewrite the PTE and return and retry.
         * Otherwise, we treat the fault like a normal "no PTE" fault,
         * rather than trying to patch up the existing PTE.
         */
        pgd = get_current_pgd();
        if (handle_migrating_pte(pgd, fault_num, address, regs->pc,
                                 is_kernel_mode, write))
                return 1;
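
        /* Default to "address not mapped"; refined to SEGV_ACCERR below. */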
        si_code = SEGV_MAPERR;

        /*
         * We fault-in kernel-space virtual memory on-demand.  The
         * 'reference' page table is init_mm.pgd.
         *
         * NOTE! We MUST NOT take any locks for this case.  We may
         * be in an interrupt or a critical region, and should
         * only copy the information from the master page table,
         * nothing more.
         *
         * This verifies that the fault happens in kernel space
         * and that the fault was not a protection fault.
         */
        if (unlikely(address >= TASK_SIZE &&
                     !is_arch_mappable_range(address, 0))) {
                if (is_kernel_mode && is_page_fault &&
                    vmalloc_fault(pgd, address) >= 0)
                        return 1;
                /*
                 * Don't take the mm semaphore here.  If we fixup a prefetch
                 * fault we could otherwise deadlock.
                 */
                mm = NULL;	/* happy compiler */
                vma = NULL;
                goto bad_area_nosemaphore;
        }

        /*
         * If we're trying to touch user-space addresses, we must
         * be either at PL0, or else with interrupts enabled in the
         * kernel, so either way we can re-enable interrupts here
         * unless we are doing atomic access to user space with
         * interrupts disabled.
         */
        if (!(regs->flags & PT_FLAGS_DISABLE_IRQ))
                local_irq_enable();

        mm = tsk->mm;

        /*
         * If we're in an interrupt, have no user context, or are running
         * in an atomic region, then we must not take the fault.
         */
        if (in_atomic() || !mm) {
                vma = NULL;	/* happy compiler */
                goto bad_area_nosemaphore;
        }

        /*
         * When running in the kernel we expect faults to occur only to
         * addresses in user space.  All other faults represent errors in
         * the kernel and should generate an OOPS.  Unfortunately, in the
         * case of an erroneous fault occurring in a code path which already
         * holds mmap_sem we will deadlock attempting to validate the fault
         * against the address space.  Luckily the kernel only validly
         * references user space from well defined areas of code, which are
         * listed in the exceptions table.
         *
         * As the vast majority of faults will be valid we will only perform
         * the source reference check when there is a possibility of a
         * deadlock.  Attempt to lock the address space; if we cannot, then
         * validate the source.  If the source is invalid we can skip the
         * address space check, thus avoiding the deadlock.
         */
        if (!down_read_trylock(&mm->mmap_sem)) {
                if (is_kernel_mode &&
                    !search_exception_tables(regs->pc)) {
                        vma = NULL;	/* happy compiler */
                        goto bad_area_nosemaphore;
                }
                down_read(&mm->mmap_sem);
        }

        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
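        /*
         * Only apply the "below sp" sanity check when sp itself is a
         * plausible user-space address.
         */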
        if (regs->sp < PAGE_OFFSET) {
                /* Accessing the stack below sp is always a bug. */
                if (address < regs->sp)
                        goto bad_area;
        }
        if (expand_stack(vma, address))
                goto bad_area;

/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
        si_code = SEGV_ACCERR;
        if (fault_num == INT_ITLB_MISS) {
                if (!(vma->vm_flags & VM_EXEC))
                        goto bad_area;
        } else if (write) {
#ifdef TEST_VERIFY_AREA
                if (!is_page_fault && regs->cs == KERNEL_CS)
                        pr_err("WP fault at "REGFMT"\n", regs->eip);
#endif
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        } else {
                if (!is_page_fault || !(vma->vm_flags & VM_READ))
                        goto bad_area;
        }

survive:
        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
        fault = handle_mm_fault(mm, vma, address,
                                write ? FAULT_FLAG_WRITE : 0);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGSEGV)
                        goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
        }
        if (fault & VM_FAULT_MAJOR)
                tsk->maj_flt++;
        else
                tsk->min_flt++;

#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
        /*
         * If this was an asynchronous fault,
         * restart the appropriate engine.
         */
        switch (fault_num) {
#if CHIP_HAS_TILE_DMA()
        case INT_DMATLB_MISS:
        case INT_DMATLB_MISS_DWNCL:
        case INT_DMATLB_ACCESS:
        case INT_DMATLB_ACCESS_DWNCL:
                __insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
                break;
#endif
#if CHIP_HAS_SN_PROC()
        case INT_SNITLB_MISS:
        case INT_SNITLB_MISS_DWNCL:
                __insn_mtspr(SPR_SNCTL,
                             __insn_mfspr(SPR_SNCTL) &
                             ~SPR_SNCTL__FRZPROC_MASK);
                break;
#endif
        }
#endif

        up_read(&mm->mmap_sem);
        return 1;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
        up_read(&mm->mmap_sem);

bad_area_nosemaphore:
        /* User mode accesses just cause a SIGSEGV */
        if (!is_kernel_mode) {
                /*
                 * It's possible to have interrupts off here.
                 */
                local_irq_enable();

                force_sig_info_fault("segfault", SIGSEGV, si_code, address,
                                     fault_num, tsk, regs);
                return 0;
        }

no_context:
        /* Are we prepared to handle this kernel fault? */
        if (fixup_exception(regs))
                return 0;

        /*
         * Oops.  The kernel tried to access some bad page.  We'll have to
         * terminate things with extreme prejudice.
         */
        bust_spinlocks(1);

        /* FIXME: no lookup_address() yet */
#ifdef SUPPORT_LOOKUP_ADDRESS
        if (fault_num == INT_ITLB_MISS) {
                pte_t *pte = lookup_address(address);

                if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
                        pr_crit("kernel tried to execute"
                                " non-executable page - exploit attempt?"
                                " (uid: %d)\n", current->uid);
        }
#endif
        if (address < PAGE_SIZE)
                pr_alert("Unable to handle kernel NULL pointer dereference\n");
        else
                pr_alert("Unable to handle kernel paging request\n");
        pr_alert(" at virtual address "REGFMT", pc "REGFMT"\n",
                 address, regs->pc);

        show_regs(regs);

        if (unlikely(tsk->pid < 2)) {
                panic("Kernel page fault running %s!",
                      is_idle_task(tsk) ? "the idle task" : "init");
        }

        /*
         * More FIXME: we should probably copy the i386 here and
         * implement a generic die() routine.  Not today.
         */
#ifdef SUPPORT_DIE
        die("Oops", regs);
#endif
        bust_spinlocks(0);	/* done with oops output */

        do_group_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
        up_read(&mm->mmap_sem);
        if (is_global_init(tsk)) {
                yield();
                down_read(&mm->mmap_sem);
                goto survive;
        }
        pr_alert("VM: killing process %s\n", tsk->comm);
        if (!is_kernel_mode)
                do_group_exit(SIGKILL);
        goto no_context;

do_sigbus:
        up_read(&mm->mmap_sem);

        /* Kernel mode? Handle exceptions or die */
        if (is_kernel_mode)
                goto no_context;

        force_sig_info_fault("bus error", SIGBUS, BUS_ADRERR, address,
                             fault_num, tsk, regs);
        return 0;
}

#ifndef __tilegx__

/* We must release ICS before panicking or we won't get anywhere. */
#define ics_panic(fmt, ...) do { \
        __insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0); \
        panic(fmt, __VA_ARGS__); \
} while (0)

/*
 * When we take an ITLB or DTLB fault or access violation in the
 * supervisor while the critical section bit is set, the hypervisor is
 * reluctant to write new values into the EX_CONTEXT_K_x registers,
 * since the critical section bit being set may indicate that we have
 * not yet squirreled the SPR contents away and thus cannot safely
 * take a recursive interrupt.  Accordingly, the hypervisor passes us
 * the PC via SYSTEM_SAVE_K_2.
 *
 * Note that this routine is called before homecache_tlb_defer_enter(),
 * which means that we can properly unlock any atomics that might
 * be used there (good), but also means we must be very sensitive
 * to not touch any data structures that might be located in memory
 * that could migrate, as we could be entering the kernel on a dataplane
 * cpu that has been deferring kernel TLB updates.  This means, for
 * example, that we can't migrate init_mm or its pgd.
 */
struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
                                      unsigned long address,
                                      unsigned long info)
{
        unsigned long pc = info & ~1;
        int write = info & 1;
        pgd_t *pgd = get_current_pgd();

        /* Retval is 1 at first since we will handle the fault fully. */
        struct intvec_state state = {
                do_page_fault, fault_num, address, write, 1
        };

        /* Validate that we are plausibly in the right routine. */
        if ((pc & 0x7) != 0 || pc < PAGE_OFFSET ||
            (fault_num != INT_DTLB_MISS &&
             fault_num != INT_DTLB_ACCESS)) {
                unsigned long old_pc = regs->pc;
                regs->pc = pc;
                ics_panic("Bad ICS page fault args:"
                          " old PC %#lx, fault %d/%d at %#lx\n",
                          old_pc, fault_num, write, address);
        }

        /* We might be faulting on a vmalloc page, so check that first. */
        if (fault_num != INT_DTLB_ACCESS && vmalloc_fault(pgd, address) >= 0)
                return state;

        /*
         * If we faulted with ICS set in sys_cmpxchg, we are providing
         * a user syscall service that should generate a signal on
         * fault.  We didn't set up a kernel stack on initial entry to
         * sys_cmpxchg, but instead had one set up by the fault, which
         * (because sys_cmpxchg never releases ICS) came to us via the
         * SYSTEM_SAVE_K_2 mechanism, and thus EX_CONTEXT_K_[01] are
         * still referencing the original user code.  We release the
         * atomic lock and rewrite pt_regs so that it appears that we
         * came from user-space directly, and after we finish the
         * fault we'll go back to user space and re-issue the swint.
         * This way the backtrace information is correct if we need to
         * emit a stack dump at any point while handling this.
         *
         * Must match register use in sys_cmpxchg().
         */
        if (pc >= (unsigned long) sys_cmpxchg &&
            pc < (unsigned long) __sys_cmpxchg_end) {
#ifdef CONFIG_SMP
                /* Don't unlock before we could have locked. */
                if (pc >= (unsigned long)__sys_cmpxchg_grab_lock) {
                        int *lock_ptr = (int *)(regs->regs[ATOMIC_LOCK_REG]);
                        __atomic_fault_unlock(lock_ptr);
                }
#endif
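                /*
                 * Recover the user stack pointer, which sys_cmpxchg
                 * stashed in r27 (per the register-use convention
                 * noted above).
                 */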
                regs->sp = regs->regs[27];
        }

        /*
         * We can also fault in the atomic assembly, in which
         * case we use the exception table to do the first-level fixup.
         * We may re-fixup again in the real fault handler if it
         * turns out the faulting address is just bad, and not,
         * for example, migrating.
         */
        else if (pc >= (unsigned long) __start_atomic_asm_code &&
                 pc < (unsigned long) __end_atomic_asm_code) {
                const struct exception_table_entry *fixup;
#ifdef CONFIG_SMP
                /* Unlock the atomic lock. */
                int *lock_ptr = (int *)(regs->regs[ATOMIC_LOCK_REG]);
                __atomic_fault_unlock(lock_ptr);
#endif
                fixup = search_exception_tables(pc);
                if (!fixup)
                        ics_panic("ICS atomic fault not in table:"
                                  " PC %#lx, fault %d", pc, fault_num);
                regs->pc = fixup->fixup;
                regs->ex1 = PL_ICS_EX1(KERNEL_PL, 0);
        }

        /*
         * Now that we have released the atomic lock (if necessary),
         * it's safe to spin if the PTE that caused the fault was migrating.
         */
        if (fault_num == INT_DTLB_ACCESS)
                write = 1;
        if (handle_migrating_pte(pgd, fault_num, address, pc, 1, write))
                return state;

        /* Return zero so that we continue on with normal fault handling. */
        state.retval = 0;
        return state;
}
#endif /* !__tilegx__ */

/*
 * This routine handles page faults.  It determines the address and the
 * problem, and then passes it off to handle_page_fault() for normal
 * DTLB and ITLB issues, and for DMA or SN processor faults when we are
 * in user space.  For the latter, if we're in kernel mode, we just save
 * the interrupt away appropriately and return immediately.  We can't do
 * page faults for user code while in kernel mode.
 */
void do_page_fault(struct pt_regs *regs, int fault_num,
                   unsigned long address, unsigned long write)
{
        int is_page_fault;

        /* This case should have been handled by do_page_fault_ics(). */
        BUG_ON(write & ~1);

#if CHIP_HAS_TILE_DMA()
        /*
         * If it's a DMA fault, suspend the transfer while we're
         * handling the miss; we'll restart after it's handled.  If we
         * don't suspend, it's possible that this process could swap
         * out and back in, and restart the engine since the DMA is
         * still 'running'.
         */
        if (fault_num == INT_DMATLB_MISS ||
            fault_num == INT_DMATLB_ACCESS ||
            fault_num == INT_DMATLB_MISS_DWNCL ||
            fault_num == INT_DMATLB_ACCESS_DWNCL) {
                __insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__SUSPEND_MASK);
                while (__insn_mfspr(SPR_DMA_USER_STATUS) &
                       SPR_DMA_STATUS__BUSY_MASK)
                        ;
        }
#endif

        /* Validate fault num and decide if this is a first-time page fault. */
        switch (fault_num) {
        case INT_ITLB_MISS:
        case INT_DTLB_MISS:
#if CHIP_HAS_TILE_DMA()
        case INT_DMATLB_MISS:
        case INT_DMATLB_MISS_DWNCL:
#endif
#if CHIP_HAS_SN_PROC()
        case INT_SNITLB_MISS:
        case INT_SNITLB_MISS_DWNCL:
#endif
                is_page_fault = 1;
                break;

        case INT_DTLB_ACCESS:
#if CHIP_HAS_TILE_DMA()
        case INT_DMATLB_ACCESS:
        case INT_DMATLB_ACCESS_DWNCL:
#endif
                is_page_fault = 0;
                break;

        default:
                panic("Bad fault number %d in do_page_fault", fault_num);
        }
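
        /*
         * In kernel mode we can't fault in user pages for a DMA or SN
         * fault, so just record the fault in the thread's async_tlb
         * structure; do_async_page_fault() re-issues it when we are
         * about to return to user space.
         */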
#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
        if (EX1_PL(regs->ex1) != USER_PL) {
                struct async_tlb *async;
                switch (fault_num) {
#if CHIP_HAS_TILE_DMA()
                case INT_DMATLB_MISS:
                case INT_DMATLB_ACCESS:
                case INT_DMATLB_MISS_DWNCL:
                case INT_DMATLB_ACCESS_DWNCL:
                        async = &current->thread.dma_async_tlb;
                        break;
#endif
#if CHIP_HAS_SN_PROC()
                case INT_SNITLB_MISS:
                case INT_SNITLB_MISS_DWNCL:
                        async = &current->thread.sn_async_tlb;
                        break;
#endif
                default:
                        async = NULL;
                }
                if (async) {
                        /*
                         * No vmalloc check required, so we can allow
                         * interrupts immediately at this point.
                         */
                        local_irq_enable();

                        set_thread_flag(TIF_ASYNC_TLB);
                        if (async->fault_num != 0) {
                                panic("Second async fault %d;"
                                      " old fault was %d (%#lx/%ld)",
                                      fault_num, async->fault_num,
                                      address, write);
                        }
                        BUG_ON(fault_num == 0);
                        async->fault_num = fault_num;
                        async->is_fault = is_page_fault;
                        async->is_write = write;
                        async->address = address;
                        return;
                }
        }
#endif

        handle_page_fault(regs, fault_num, is_page_fault, address, write);
}

#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
/*
 * Check an async_tlb structure to see if a deferred fault is waiting,
 * and if so pass it to the page-fault code.
 */
static void handle_async_page_fault(struct pt_regs *regs,
                                    struct async_tlb *async)
{
        if (async->fault_num) {
                /*
                 * Clear async->fault_num before calling the page-fault
                 * handler so that if we re-interrupt before returning
                 * from the function we have somewhere to put the
                 * information from the new interrupt.
                 */
                int fault_num = async->fault_num;
                async->fault_num = 0;
                handle_page_fault(regs, fault_num, async->is_fault,
                                  async->address, async->is_write);
        }
}

/*
 * This routine effectively re-issues asynchronous page faults
 * when we are returning to user space.
 */
void do_async_page_fault(struct pt_regs *regs)
{
        /*
         * Clear the thread flag early.  If we take another interrupt
         * while processing here, the flag will be set again and this
         * routine will be re-invoked before we return to user space.
         */
        clear_thread_flag(TIF_ASYNC_TLB);

#if CHIP_HAS_TILE_DMA()
        handle_async_page_fault(regs, &current->thread.dma_async_tlb);
#endif
#if CHIP_HAS_SN_PROC()
        handle_async_page_fault(regs, &current->thread.sn_async_tlb);
#endif
}
#endif /* CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC() */
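
/*
 * Make sure every pgd in the system has its kernel pmds synced for the
 * vmalloc area, so that a later fault there can be fixed up locklessly.
 * On tilegx the top-level kernel pmd is static and shared, so this
 * reduces to a sanity check.
 */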
void vmalloc_sync_all(void)
{
#ifdef __tilegx__
        /* Currently all L1 kernel pmd's are static and shared. */
        BUG_ON(pgd_index(VMALLOC_END) != pgd_index(VMALLOC_START));
#else
        /*
         * Note that races in the updates of insync and start aren't
         * problematic: insync can only get set bits added, and updates to
         * start are only improving performance (without affecting correctness
         * if undone).
         */
        static DECLARE_BITMAP(insync, PTRS_PER_PGD);
        static unsigned long start = PAGE_OFFSET;
        unsigned long address;

        BUILD_BUG_ON(PAGE_OFFSET & ~PGDIR_MASK);
        for (address = start; address >= PAGE_OFFSET; address += PGDIR_SIZE) {
                if (!test_bit(pgd_index(address), insync)) {
                        unsigned long flags;
                        struct list_head *pos;

                        spin_lock_irqsave(&pgd_lock, flags);
                        list_for_each(pos, &pgd_list)
                                if (!vmalloc_sync_one(list_to_pgd(pos),
                                                      address)) {
                                        /* Must be at first entry in list. */
                                        BUG_ON(pos != pgd_list.next);
                                        break;
                                }
                        spin_unlock_irqrestore(&pgd_lock, flags);
                        if (pos != pgd_list.next)
                                set_bit(pgd_index(address), insync);
                }
                if (address == start && test_bit(pgd_index(address), insync))
                        start = address + PGDIR_SIZE;
        }
#endif
}