/*
 *  linux/arch/arm/kernel/signal.c
 *
 *  Copyright (C) 1995-2009 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/errno.h>
#include <linux/random.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/freezer.h>
#include <linux/uaccess.h>
#include <linux/tracehook.h>

#include <asm/elf.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/vfp.h>

#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
/*
 * For ARM syscalls, we encode the syscall number into the instruction.
 */
#define SWI_SYS_SIGRETURN	(0xef000000|(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE))
#define SWI_SYS_RT_SIGRETURN	(0xef000000|(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE))

/*
 * With EABI, the syscall number has to be loaded into r7.
 */
#define MOV_R7_NR_SIGRETURN	(0xe3a07000 | (__NR_sigreturn - __NR_SYSCALL_BASE))
#define MOV_R7_NR_RT_SIGRETURN	(0xe3a07000 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))

/*
 * For Thumb syscalls, we pass the syscall number via r7.  We therefore
 * need two 16-bit instructions.
 */
#define SWI_THUMB_SIGRETURN	(0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE))
#define SWI_THUMB_RT_SIGRETURN	(0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))
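
/*
 * Decoded, each trampoline is simply "load the syscall number into r7,
 * then trap": 0xe3a07000 is the ARM encoding of "mov r7, #imm",
 * 0xef000000 is "swi #imm24", and in Thumb state 0x2700 is
 * "movs r7, #imm8" while 0xdf00 is "svc 0" (the two Thumb halfwords are
 * packed into a single word below).
 */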
static const unsigned long sigreturn_codes[7] = {
	MOV_R7_NR_SIGRETURN,    SWI_SYS_SIGRETURN,    SWI_THUMB_SIGRETURN,
	MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
};
static unsigned long signal_return_offset;

/*
 * atomically swap in the new signal mask, and wait for a signal.
 */
asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask)
{
	sigset_t blocked;

	current->saved_sigmask = current->blocked;

	mask &= _BLOCKABLE;
	siginitset(&blocked, mask);
	set_current_blocked(&blocked);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}
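
/*
 * Old-style sigaction(2) entry point: copy the legacy struct old_sigaction
 * from/to userspace and hand the real work to the common do_sigaction()
 * helper.
 */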
asmlinkage int
sys_sigaction(int sig, const struct old_sigaction __user *act,
	      struct old_sigaction __user *oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}
#ifdef CONFIG_CRUNCH
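/*
 * The MaverickCrunch coprocessor dump is built in a 64-bit aligned kernel
 * buffer and then copied to/from the user signal frame in one go; on the
 * way back in, the magic and size words are checked before the state is
 * loaded back into the thread.
 */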
static int preserve_crunch_context(struct crunch_sigframe __user *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct crunch_sigframe *kframe;

	/* the crunch context must be 64 bit aligned */
	kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	kframe->magic = CRUNCH_MAGIC;
	kframe->size = CRUNCH_STORAGE_SIZE;
	crunch_task_copy(current_thread_info(), &kframe->storage);
	return __copy_to_user(frame, kframe, sizeof(*frame));
}

static int restore_crunch_context(struct crunch_sigframe __user *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct crunch_sigframe *kframe;

	/* the crunch context must be 64 bit aligned */
	kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	if (__copy_from_user(kframe, frame, sizeof(*frame)))
		return -1;
	if (kframe->magic != CRUNCH_MAGIC ||
	    kframe->size != CRUNCH_STORAGE_SIZE)
		return -1;
	crunch_task_restore(current_thread_info(), &kframe->storage);
	return 0;
}
#endif
#ifdef CONFIG_IWMMXT
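/*
 * Same staging scheme as the Crunch code above, but for the iWMMXt
 * register file.
 */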
static int preserve_iwmmxt_context(struct iwmmxt_sigframe *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct iwmmxt_sigframe *kframe;

	/* the iWMMXt context must be 64 bit aligned */
	kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	kframe->magic = IWMMXT_MAGIC;
	kframe->size = IWMMXT_STORAGE_SIZE;
	iwmmxt_task_copy(current_thread_info(), &kframe->storage);
	return __copy_to_user(frame, kframe, sizeof(*frame));
}

static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct iwmmxt_sigframe *kframe;

	/* the iWMMXt context must be 64 bit aligned */
	kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	if (__copy_from_user(kframe, frame, sizeof(*frame)))
		return -1;
	if (kframe->magic != IWMMXT_MAGIC ||
	    kframe->size != IWMMXT_STORAGE_SIZE)
		return -1;
	iwmmxt_task_restore(current_thread_info(), &kframe->storage);
	return 0;
}
#endif
#ifdef CONFIG_VFP
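/*
 * VFP state, by contrast, is written to and read from the user frame
 * directly by the vfp_* helpers; only the magic and size words are handled
 * here so that a corrupted or foreign frame is rejected before the restore.
 */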
static int preserve_vfp_context(struct vfp_sigframe __user *frame)
{
	const unsigned long magic = VFP_MAGIC;
	const unsigned long size = VFP_STORAGE_SIZE;
	int err = 0;

	__put_user_error(magic, &frame->magic, err);
	__put_user_error(size, &frame->size, err);

	if (err)
		return -EFAULT;

	return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
}

static int restore_vfp_context(struct vfp_sigframe __user *frame)
{
	unsigned long magic;
	unsigned long size;
	int err = 0;

	__get_user_error(magic, &frame->magic, err);
	__get_user_error(size, &frame->size, err);

	if (err)
		return -EFAULT;
	if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
		return -EINVAL;

	return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc);
}
#endif
/*
 * Do a signal return; undo the signal stack.  These are aligned to 64-bit.
 */
struct sigframe {
	struct ucontext uc;
	unsigned long retcode[2];
};

struct rt_sigframe {
	struct siginfo info;
	struct sigframe sig;
};
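
/*
 * Pull the saved signal mask and the complete register set back out of the
 * user frame, then re-validate everything: valid_user_regs() ensures the
 * restored CPSR cannot re-enter a privileged mode, and the coprocessor
 * contexts are only restored while no earlier step has failed.
 */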
static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
{
	struct aux_sigframe __user *aux;
	sigset_t set;
	int err;

	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
	if (err == 0) {
		sigdelsetmask(&set, ~_BLOCKABLE);
		set_current_blocked(&set);
	}

	__get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
	__get_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
	__get_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
	__get_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
	__get_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
	__get_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
	__get_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
	__get_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
	__get_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
	__get_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
	__get_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
	__get_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
	__get_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
	__get_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
	__get_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
	__get_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
	__get_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);

	err |= !valid_user_regs(regs);

	aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
#ifdef CONFIG_CRUNCH
	if (err == 0)
		err |= restore_crunch_context(&aux->crunch);
#endif
#ifdef CONFIG_IWMMXT
	if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
		err |= restore_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
	if (err == 0)
		err |= restore_vfp_context(&aux->vfp);
#endif

	return err;
}
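
/*
 * sigreturn(2), reached via the trampoline planted by setup_return().  The
 * stack pointer is the only input we get, so its alignment is checked
 * before anything at that address is believed.
 */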
asmlinkage int sys_sigreturn(struct pt_regs *regs)
{
	struct sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 64-bit boundary,
	 * then 'sp' should be word aligned here.  If it's
	 * not, then the user is trying to mess with us.
	 */
	if (regs->ARM_sp & 7)
		goto badframe;

	frame = (struct sigframe __user *)regs->ARM_sp;

	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
		goto badframe;

	if (restore_sigframe(regs, frame))
		goto badframe;

	return regs->ARM_r0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
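
/*
 * rt_sigreturn(2): same as above, plus the rt frame carries the siginfo and
 * the signal-stack settings, so the saved sigaltstack state is restored too.
 */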
asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 64-bit boundary,
	 * then 'sp' should be word aligned here.  If it's
	 * not, then the user is trying to mess with us.
	 */
	if (regs->ARM_sp & 7)
		goto badframe;

	frame = (struct rt_sigframe __user *)regs->ARM_sp;

	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
		goto badframe;

	if (restore_sigframe(regs, &frame->sig))
		goto badframe;

	if (do_sigaltstack(&frame->sig.uc.uc_stack, NULL, regs->ARM_sp) == -EFAULT)
		goto badframe;

	return regs->ARM_r0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
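
/*
 * Mirror image of restore_sigframe(): dump the interrupted register state,
 * the fault bookkeeping and the blocked-signal mask into the user frame,
 * followed by whichever coprocessor contexts this kernel was built with.
 */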
static int
setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
{
	struct aux_sigframe __user *aux;
	int err = 0;

	__put_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
	__put_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
	__put_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
	__put_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
	__put_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
	__put_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
	__put_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
	__put_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
	__put_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
	__put_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
	__put_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
	__put_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
	__put_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
	__put_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
	__put_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
	__put_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
	__put_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);

	__put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no, err);
	__put_user_error(current->thread.error_code, &sf->uc.uc_mcontext.error_code, err);
	__put_user_error(current->thread.address, &sf->uc.uc_mcontext.fault_address, err);
	__put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);

	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

	aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
#ifdef CONFIG_CRUNCH
	if (err == 0)
		err |= preserve_crunch_context(&aux->crunch);
#endif
#ifdef CONFIG_IWMMXT
	if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
		err |= preserve_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
	if (err == 0)
		err |= preserve_vfp_context(&aux->vfp);
#endif
	__put_user_error(0, &aux->end_magic, err);

	return err;
}
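
/*
 * Pick the stack the frame will live on (the alternate signal stack if
 * SA_ONSTACK asked for one and we are not already on it), carve out
 * 'framesize' bytes below the current pointer, and make sure the result
 * is both 8-byte aligned and writable before handing it back.
 */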
static inline void __user *
get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, int framesize)
{
	unsigned long sp = regs->ARM_sp;
	void __user *frame;

	/*
	 * This is the X/Open sanctioned signal stack switching.
	 */
	if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp))
		sp = current->sas_ss_sp + current->sas_ss_size;

	/*
	 * ATPCS B01 mandates 8-byte alignment
	 */
	frame = (void __user *)((sp - framesize) & ~7);

	/*
	 * Check that we can actually write to the signal frame.
	 */
	if (!access_ok(VERIFY_WRITE, frame, framesize))
		frame = NULL;

	return frame;
}
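
/*
 * Arrange for the handler to run and for its return to land in a sigreturn
 * trampoline: compute a CPSR appropriate for the handler's instruction set,
 * pick the return code (the caller-supplied SA_RESTORER, the randomised
 * sigpage copy of sigreturn_codes[], or a copy written onto the stack), and
 * rewrite the register set so that the exception return enters the handler.
 */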
static int
setup_return(struct pt_regs *regs, struct k_sigaction *ka,
	     unsigned long __user *rc, void __user *frame, int usig)
{
	unsigned long handler = (unsigned long)ka->sa.sa_handler;
	unsigned long retcode;
	int thumb = 0;
	unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT);

	cpsr |= PSR_ENDSTATE;

	/*
	 * Maybe we need to deliver a 32-bit signal to a 26-bit task.
	 */
	if (ka->sa.sa_flags & SA_THIRTYTWO)
		cpsr = (cpsr & ~MODE_MASK) | USR_MODE;

#ifdef CONFIG_ARM_THUMB
	if (elf_hwcap & HWCAP_THUMB) {
		/*
		 * The LSB of the handler determines if we're going to
		 * be using THUMB or ARM mode for this signal handler.
		 */
		thumb = handler & 1;

#if __LINUX_ARM_ARCH__ >= 6
		/*
		 * Clear the If-Then Thumb-2 execution state.  ARM spec
		 * requires this to be all 000s in ARM mode.  Snapdragon
		 * S4/Krait misbehaves on a Thumb=>ARM signal transition
		 * without this.
		 *
		 * We must do this whenever we are running on a Thumb-2
		 * capable CPU, which includes ARMv6T2.  However, we elect
		 * to do this whenever we're on an ARMv6 or later CPU for
		 * simplicity.
		 */
		cpsr &= ~PSR_IT_MASK;
#endif

		if (thumb) {
			cpsr |= PSR_T_BIT;
		} else
			cpsr &= ~PSR_T_BIT;
	}
#endif

	if (ka->sa.sa_flags & SA_RESTORER) {
		retcode = (unsigned long)ka->sa.sa_restorer;
	} else {
		unsigned int idx = thumb << 1;

		if (ka->sa.sa_flags & SA_SIGINFO)
			idx += 3;

		if (__put_user(sigreturn_codes[idx], rc) ||
		    __put_user(sigreturn_codes[idx+1], rc+1))
			return 1;

#ifdef CONFIG_MMU
		if (cpsr & MODE32_BIT) {
			struct mm_struct *mm = current->mm;
			/*
			 * 32-bit code can use the signal return page
			 * except when the MPU has protected the vectors
			 * page from PL0
			 */
			retcode = mm->context.sigpage + signal_return_offset +
				  (idx << 2) + thumb;
		} else
#endif
		{
			/*
			 * Ensure that the instruction cache sees
			 * the return code written onto the stack.
			 */
			flush_icache_range((unsigned long)rc,
					   (unsigned long)(rc + 2));

			retcode = ((unsigned long)rc) + thumb;
		}
	}

	regs->ARM_r0 = usig;
	regs->ARM_sp = (unsigned long)frame;
	regs->ARM_lr = retcode;
	regs->ARM_pc = handler;
	regs->ARM_cpsr = cpsr;

	return 0;
}
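
/*
 * Build the frame for an old-style (non-RT) handler: just the ucontext
 * plus room for the return trampoline.
 */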
static int
setup_frame(int usig, struct k_sigaction *ka, sigset_t *set, struct pt_regs *regs)
{
	struct sigframe __user *frame = get_sigframe(ka, regs, sizeof(*frame));
	int err = 0;

	if (!frame)
		return 1;

	/*
	 * Set uc.uc_flags to a value which sc.trap_no would never have.
	 */
	__put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err);

	err |= setup_sigframe(frame, regs, set);
	if (err == 0)
		err = setup_return(regs, ka, frame->retcode, frame, usig);

	return err;
}
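
/*
 * Build the frame for an SA_SIGINFO handler: as above, but the frame also
 * carries the siginfo and a populated uc_stack, and r1/r2 are pointed at
 * them so the handler sees the usual (signo, siginfo, ucontext) arguments.
 */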
static int
setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
	       sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame = get_sigframe(ka, regs, sizeof(*frame));
	stack_t stack;
	int err = 0;

	if (!frame)
		return 1;

	err |= copy_siginfo_to_user(&frame->info, info);

	__put_user_error(0, &frame->sig.uc.uc_flags, err);
	__put_user_error(NULL, &frame->sig.uc.uc_link, err);

	memset(&stack, 0, sizeof(stack));
	stack.ss_sp = (void __user *)current->sas_ss_sp;
	stack.ss_flags = sas_ss_flags(regs->ARM_sp);
	stack.ss_size = current->sas_ss_size;
	err |= __copy_to_user(&frame->sig.uc.uc_stack, &stack, sizeof(stack));

	err |= setup_sigframe(&frame->sig, regs, set);
	if (err == 0)
		err = setup_return(regs, ka, frame->sig.retcode, frame, usig);

	if (err == 0) {
		/*
		 * For realtime signals we must also set the second and third
		 * arguments for the signal handler.
		 *    -- Peter Maydell <pmaydell@chiark.greenend.org.uk> 2000-12-06
		 */
		regs->ARM_r1 = (unsigned long)&frame->info;
		regs->ARM_r2 = (unsigned long)&frame->sig.uc;
	}

	return err;
}
/*
 * OK, we're invoking a handler
 */
static int
handle_signal(unsigned long sig, struct k_sigaction *ka,
	      siginfo_t *info, sigset_t *oldset,
	      struct pt_regs *regs)
{
	struct thread_info *thread = current_thread_info();
	struct task_struct *tsk = current;
	int usig = sig;
	int ret;

	/*
	 * translate the signal
	 */
	if (usig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap)
		usig = thread->exec_domain->signal_invmap[usig];

	/*
	 * Set up the stack frame
	 */
	if (ka->sa.sa_flags & SA_SIGINFO)
		ret = setup_rt_frame(usig, ka, info, oldset, regs);
	else
		ret = setup_frame(usig, ka, oldset, regs);

	/*
	 * Check that the resulting registers are actually sane.
	 */
	ret |= !valid_user_regs(regs);

	if (ret != 0) {
		force_sigsegv(sig, tsk);
		return ret;
	}

	/*
	 * Block the signal if we were successful.
	 */
	block_sigmask(ka, sig);

	return 0;
}
/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle.  Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
static int do_signal(struct pt_regs *regs, int syscall)
{
	unsigned int retval = 0, continue_addr = 0, restart_addr = 0;
	struct k_sigaction ka;
	siginfo_t info;
	int signr;
	int restart = 0;

	/*
	 * If we were from a system call, check for system call restarting...
	 */
	if (syscall) {
		continue_addr = regs->ARM_pc;
		restart_addr = continue_addr - (thumb_mode(regs) ? 2 : 4);
		retval = regs->ARM_r0;

		/*
		 * Prepare for system call restart.  We do this here so that a
		 * debugger will see the already changed PSW.
		 */
		switch (retval) {
		case -ERESTART_RESTARTBLOCK:
			restart -= 2;
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
			restart++;
			regs->ARM_r0 = regs->ARM_ORIG_r0;
			regs->ARM_pc = restart_addr;
			break;
		}
	}

	/*
	 * Get the signal to deliver.  When running under ptrace, at this
	 * point the debugger may change all our registers ...
	 */
	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
	/*
	 * Depending on the signal settings we may need to revert the
	 * decision to restart the system call.  But skip this if a
	 * debugger has chosen to restart at a different PC.
	 */
	if (regs->ARM_pc != restart_addr)
		restart = 0;
	if (signr > 0) {
		sigset_t *oldset;

		if (unlikely(restart)) {
			if (retval == -ERESTARTNOHAND ||
			    retval == -ERESTART_RESTARTBLOCK
			    || (retval == -ERESTARTSYS
				&& !(ka.sa.sa_flags & SA_RESTART))) {
				regs->ARM_r0 = -EINTR;
				regs->ARM_pc = continue_addr;
			}
			clear_thread_flag(TIF_SYSCALL_RESTARTSYS);
		}

		if (test_thread_flag(TIF_RESTORE_SIGMASK))
			oldset = &current->saved_sigmask;
		else
			oldset = &current->blocked;
		if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
			/*
			 * A signal was successfully delivered; the saved
			 * sigmask will have been stored in the signal frame,
			 * and will be restored by sigreturn, so we can simply
			 * clear the TIF_RESTORE_SIGMASK flag.
			 */
			if (test_thread_flag(TIF_RESTORE_SIGMASK))
				clear_thread_flag(TIF_RESTORE_SIGMASK);
		}
		return 0;
	}

	if (syscall) {
		/*
		 * Handle restarting a different system call.  As above,
		 * if a debugger has chosen to restart at a different PC,
		 * ignore the restart.
		 */
		if (retval == -ERESTART_RESTARTBLOCK
		    && regs->ARM_pc == restart_addr)
			set_thread_flag(TIF_SYSCALL_RESTARTSYS);
	}

	/* If there's no signal to deliver, we just put the saved sigmask
	 * back.
	 */
	if (test_and_clear_thread_flag(TIF_RESTORE_SIGMASK))
		set_current_blocked(&current->saved_sigmask);

	if (unlikely(restart))
		regs->ARM_pc = continue_addr;
	return restart;
}
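
/*
 * Called on the return-to-user ("work pending") path, entered with
 * interrupts disabled (they are re-enabled around the actual work).  Keep
 * handling reschedules, pending signals and notify-resume work until the
 * thread's work flags are clear; a non-zero return asks the caller to
 * restart the interrupted system call without leaving the kernel.
 */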
asmlinkage int
do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
{
	do {
		if (likely(thread_flags & _TIF_NEED_RESCHED)) {
			schedule();
		} else {
			if (unlikely(!user_mode(regs)))
				return 0;
			local_irq_enable();
			if (thread_flags & _TIF_SIGPENDING) {
				int restart = do_signal(regs, syscall);
				if (unlikely(restart)) {
					/*
					 * Restart without handlers.
					 * Deal with it without leaving
					 * the kernel space.
					 */
					return restart;
				}
				syscall = 0;
			} else {
				clear_thread_flag(TIF_NOTIFY_RESUME);
				tracehook_notify_resume(regs);
				if (current->replacement_session_keyring)
					key_replace_session_keyring();
			}
		}
		local_irq_disable();
		thread_flags = current_thread_info()->flags;
	} while (thread_flags & _TIF_WORK_MASK);
	return 0;
}
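
/*
 * Allocate the page userspace will execute the sigreturn trampolines from.
 * The sigreturn_codes[] table is copied in at a randomised, word-aligned
 * offset, recorded in signal_return_offset for setup_return() to use; the
 * page is expected to be mapped into each process's address space elsewhere
 * and made visible as mm->context.sigpage.
 */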
struct page *get_signal_page(void)
{
	unsigned long ptr;
	unsigned offset;
	struct page *page;
	void *addr;

	page = alloc_pages(GFP_KERNEL, 0);
	if (!page)
		return NULL;

	addr = page_address(page);

	/* Give the signal return code some randomness */
	offset = 0x200 + (get_random_int() & 0x7fc);

	signal_return_offset = offset;

	/*
	 * Copy signal return handlers into the vector page, and
	 * set sigreturn to be a pointer to these.
	 */
	memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));

	ptr = (unsigned long)addr + offset;
	flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));

	return page;
}