ptrace_32.c
/*
 * SuperH process tracing
 *
 * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
 * Copyright (C) 2002 - 2009 Paul Mundt
 *
 * Audit support by Yuichi Nakamura <ynakam@hitachisoft.jp>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/io.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/tracehook.h>
#include <linux/elf.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
#include <asm/fpu.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/*
 * This routine will get a word off of the process kernel stack.
 */
static inline int get_stack_long(struct task_struct *task, int offset)
{
	unsigned char *stack;

	stack = (unsigned char *)task_pt_regs(task);
	stack += offset;
	return (*((int *)stack));
}

/*
 * This routine will put a word on the process kernel stack.
 */
static inline int put_stack_long(struct task_struct *task, int offset,
				 unsigned long data)
{
	unsigned char *stack;

	stack = (unsigned char *)task_pt_regs(task);
	stack += offset;
	*(unsigned long *) stack = data;

	return 0;
}

void ptrace_triggered(struct perf_event *bp, int nmi,
		      struct perf_sample_data *data, struct pt_regs *regs)
{
	struct perf_event_attr attr;

	/*
	 * Disable the breakpoint request here since ptrace has defined a
	 * one-shot behaviour for breakpoint exceptions.
	 */
	attr = bp->attr;
	attr.disabled = true;
	modify_user_hw_breakpoint(bp, &attr);
}
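/*
 * Install (or re-arm) the single-step hardware breakpoint for @tsk at
 * @addr.  The first request registers a new per-task breakpoint; later
 * requests only update its address and re-enable it, since
 * ptrace_triggered() disables it again after each hit.
 */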
static int set_single_step(struct task_struct *tsk, unsigned long addr)
{
	struct thread_struct *thread = &tsk->thread;
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = thread->ptrace_bps[0];
	if (!bp) {
		ptrace_breakpoint_init(&attr);

		attr.bp_addr = addr;
		attr.bp_len = HW_BREAKPOINT_LEN_2;
		attr.bp_type = HW_BREAKPOINT_R;

		bp = register_user_hw_breakpoint(&attr, ptrace_triggered, tsk);
		if (IS_ERR(bp))
			return PTR_ERR(bp);

		thread->ptrace_bps[0] = bp;
	} else {
		int err;

		attr = bp->attr;
		attr.bp_addr = addr;

		/* reenable breakpoint */
		attr.disabled = false;
		err = modify_user_hw_breakpoint(bp, &attr);
		if (unlikely(err))
			return err;
	}

	return 0;
}
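/*
 * Approximate user-space single-stepping with a one-shot hardware
 * breakpoint keyed to the child's current PC, read from its saved
 * register frame on the kernel stack.
 */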
void user_enable_single_step(struct task_struct *child)
{
	unsigned long pc = get_stack_long(child, offsetof(struct pt_regs, pc));

	set_tsk_thread_flag(child, TIF_SINGLESTEP);

	if (ptrace_get_breakpoints(child) < 0)
		return;

	set_single_step(child, pc);
	ptrace_put_breakpoints(child);
}

void user_disable_single_step(struct task_struct *child)
{
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	user_disable_single_step(child);
}
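/*
 * General register regset: R0..R15 followed by PC, PR, SR, GBR, MACH,
 * MACL and TRA, i.e. the in-memory layout of struct pt_regs.
 */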
static int genregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	const struct pt_regs *regs = task_pt_regs(target);
	int ret;

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  regs->regs,
				  0, 16 * sizeof(unsigned long));
	if (!ret)
		/* PC, PR, SR, GBR, MACH, MACL, TRA */
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &regs->pc,
					  offsetof(struct pt_regs, pc),
					  sizeof(struct pt_regs));
	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       sizeof(struct pt_regs), -1);

	return ret;
}

static int genregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 regs->regs,
				 0, 16 * sizeof(unsigned long));
	if (!ret && count > 0)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &regs->pc,
					 offsetof(struct pt_regs, pc),
					 sizeof(struct pt_regs));
	if (!ret)
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						sizeof(struct pt_regs), -1);

	return ret;
}
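/*
 * FPU regset: the state lives in thread.xstate->hardfpu on parts with
 * a hardware FPU and in ->softfpu otherwise.  init_fpu() makes sure
 * the per-task FPU state area exists before it is copied.
 */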
#ifdef CONFIG_SH_FPU
int fpregs_get(struct task_struct *target,
	       const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       void *kbuf, void __user *ubuf)
{
	int ret;

	ret = init_fpu(target);
	if (ret)
		return ret;

	if ((boot_cpu_data.flags & CPU_HAS_FPU))
		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					   &target->thread.xstate->hardfpu, 0, -1);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.xstate->softfpu, 0, -1);
}

static int fpregs_set(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      const void *kbuf, const void __user *ubuf)
{
	int ret;

	ret = init_fpu(target);
	if (ret)
		return ret;

	set_stopped_child_used_math(target);

	if ((boot_cpu_data.flags & CPU_HAS_FPU))
		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					  &target->thread.xstate->hardfpu, 0, -1);

	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.xstate->softfpu, 0, -1);
}

static int fpregs_active(struct task_struct *target,
			 const struct user_regset *regset)
{
	return tsk_used_math(target) ? regset->n : 0;
}
#endif
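/*
 * DSP regset: the additional DSP register file saved in
 * thread.dsp_status.dsp_regs.  It is only reported as active when the
 * task runs with SR.DSP set (see dspregs_active()).
 */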
#ifdef CONFIG_SH_DSP
static int dspregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	const struct pt_dspregs *regs =
		(struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
	int ret;

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, regs,
				  0, sizeof(struct pt_dspregs));
	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       sizeof(struct pt_dspregs), -1);

	return ret;
}

static int dspregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	struct pt_dspregs *regs =
		(struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs,
				 0, sizeof(struct pt_dspregs));
	if (!ret)
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						sizeof(struct pt_dspregs), -1);

	return ret;
}

static int dspregs_active(struct task_struct *target,
			  const struct user_regset *regset)
{
	struct pt_regs *regs = task_pt_regs(target);

	return regs->sr & SR_DSP ? regset->n : 0;
}
#endif
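/*
 * Name/offset table for the registers held in struct pt_regs,
 * terminated by REG_OFFSET_END; presumably consumed by the generic
 * register lookup helpers (e.g. regs_query_register_offset()).
 */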
const struct pt_regs_offset regoffset_table[] = {
	REGS_OFFSET_NAME(0),
	REGS_OFFSET_NAME(1),
	REGS_OFFSET_NAME(2),
	REGS_OFFSET_NAME(3),
	REGS_OFFSET_NAME(4),
	REGS_OFFSET_NAME(5),
	REGS_OFFSET_NAME(6),
	REGS_OFFSET_NAME(7),
	REGS_OFFSET_NAME(8),
	REGS_OFFSET_NAME(9),
	REGS_OFFSET_NAME(10),
	REGS_OFFSET_NAME(11),
	REGS_OFFSET_NAME(12),
	REGS_OFFSET_NAME(13),
	REGS_OFFSET_NAME(14),
	REGS_OFFSET_NAME(15),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(pr),
	REG_OFFSET_NAME(sr),
	REG_OFFSET_NAME(gbr),
	REG_OFFSET_NAME(mach),
	REG_OFFSET_NAME(macl),
	REG_OFFSET_NAME(tra),
	REG_OFFSET_END,
};

/*
 * These are our native regset flavours.
 */
enum sh_regset {
	REGSET_GENERAL,
#ifdef CONFIG_SH_FPU
	REGSET_FPU,
#endif
#ifdef CONFIG_SH_DSP
	REGSET_DSP,
#endif
};

static const struct user_regset sh_regsets[] = {
	/*
	 * Format is:
	 *	R0 --> R15
	 *	PC, PR, SR, GBR, MACH, MACL, TRA
	 */
	[REGSET_GENERAL] = {
		.core_note_type	= NT_PRSTATUS,
		.n		= ELF_NGREG,
		.size		= sizeof(long),
		.align		= sizeof(long),
		.get		= genregs_get,
		.set		= genregs_set,
	},

#ifdef CONFIG_SH_FPU
	[REGSET_FPU] = {
		.core_note_type	= NT_PRFPREG,
		.n		= sizeof(struct user_fpu_struct) / sizeof(long),
		.size		= sizeof(long),
		.align		= sizeof(long),
		.get		= fpregs_get,
		.set		= fpregs_set,
		.active		= fpregs_active,
	},
#endif

#ifdef CONFIG_SH_DSP
	[REGSET_DSP] = {
		.n		= sizeof(struct pt_dspregs) / sizeof(long),
		.size		= sizeof(long),
		.align		= sizeof(long),
		.get		= dspregs_get,
		.set		= dspregs_set,
		.active		= dspregs_active,
	},
#endif
};

static const struct user_regset_view user_sh_native_view = {
	.name		= "sh",
	.e_machine	= EM_SH,
	.regsets	= sh_regsets,
	.n		= ARRAY_SIZE(sh_regsets),
};

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_sh_native_view;
}
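/*
 * SH-specific ptrace requests.  PEEKUSR/POKEUSR offsets below
 * sizeof(struct pt_regs) index the saved register frame directly,
 * offsets inside the fpu member of struct user map onto the task's
 * FPU state, and anything unrecognised falls through to the generic
 * ptrace_request().
 */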
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	unsigned long __user *datap = (unsigned long __user *)data;
	int ret;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		if ((addr & 3) || addr < 0 ||
		    addr > sizeof(struct user) - 3)
			break;

		if (addr < sizeof(struct pt_regs))
			tmp = get_stack_long(child, addr);
		else if (addr >= offsetof(struct user, fpu) &&
			 addr < offsetof(struct user, u_fpvalid)) {
			if (!tsk_used_math(child)) {
				if (addr == offsetof(struct user, fpu.fpscr))
					tmp = FPSCR_INIT;
				else
					tmp = 0;
			} else {
				unsigned long index;
				ret = init_fpu(child);
				if (ret)
					break;
				index = addr - offsetof(struct user, fpu);
				tmp = ((unsigned long *)child->thread.xstate)
					[index >> 2];
			}
		} else if (addr == offsetof(struct user, u_fpvalid))
			tmp = !!tsk_used_math(child);
		else if (addr == PT_TEXT_ADDR)
			tmp = child->mm->start_code;
		else if (addr == PT_DATA_ADDR)
			tmp = child->mm->start_data;
		else if (addr == PT_TEXT_END_ADDR)
			tmp = child->mm->end_code;
		else if (addr == PT_TEXT_LEN)
			tmp = child->mm->end_code - child->mm->start_code;
		else
			tmp = 0;
		ret = put_user(tmp, datap);
		break;
	}

	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
		ret = -EIO;
		if ((addr & 3) || addr < 0 ||
		    addr > sizeof(struct user) - 3)
			break;

		if (addr < sizeof(struct pt_regs))
			ret = put_stack_long(child, addr, data);
		else if (addr >= offsetof(struct user, fpu) &&
			 addr < offsetof(struct user, u_fpvalid)) {
			unsigned long index;
			ret = init_fpu(child);
			if (ret)
				break;
			index = addr - offsetof(struct user, fpu);
			set_stopped_child_used_math(child);
			((unsigned long *)child->thread.xstate)
				[index >> 2] = data;
			ret = 0;
		} else if (addr == offsetof(struct user, u_fpvalid)) {
			conditional_stopped_child_used_math(data, child);
			ret = 0;
		}
		break;

	case PTRACE_GETREGS:
		return copy_regset_to_user(child, &user_sh_native_view,
					   REGSET_GENERAL,
					   0, sizeof(struct pt_regs),
					   datap);
	case PTRACE_SETREGS:
		return copy_regset_from_user(child, &user_sh_native_view,
					     REGSET_GENERAL,
					     0, sizeof(struct pt_regs),
					     datap);
#ifdef CONFIG_SH_FPU
	case PTRACE_GETFPREGS:
		return copy_regset_to_user(child, &user_sh_native_view,
					   REGSET_FPU,
					   0, sizeof(struct user_fpu_struct),
					   datap);
	case PTRACE_SETFPREGS:
		return copy_regset_from_user(child, &user_sh_native_view,
					     REGSET_FPU,
					     0, sizeof(struct user_fpu_struct),
					     datap);
#endif
#ifdef CONFIG_SH_DSP
	case PTRACE_GETDSPREGS:
		return copy_regset_to_user(child, &user_sh_native_view,
					   REGSET_DSP,
					   0, sizeof(struct pt_dspregs),
					   datap);
	case PTRACE_SETDSPREGS:
		return copy_regset_from_user(child, &user_sh_native_view,
					     REGSET_DSP,
					     0, sizeof(struct pt_dspregs),
					     datap);
#endif
	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
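/*
 * Build the AUDIT_ARCH value for this machine: EM_SH, with the
 * little-endian flag set when the kernel is built little-endian.
 */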
static inline int audit_arch(void)
{
	int arch = EM_SH;

#ifdef CONFIG_CPU_LITTLE_ENDIAN
	arch |= __AUDIT_ARCH_LE;
#endif

	return arch;
}
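/*
 * Syscall entry hook: run seccomp, let the tracer and the syscall
 * entry tracepoint see the call, and feed the audit subsystem.
 * Returns -1 if tracing decided the syscall should not happen,
 * otherwise the syscall number from regs->regs[0].
 */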
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	secure_computing(regs->regs[0]);

	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs))
		/*
		 * Tracing decided this syscall should not happen.
		 * We'll return a bogus call number to get an ENOSYS
		 * error, but leave the original number in regs->regs[0].
		 */
		ret = -1L;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->regs[0]);

	if (unlikely(current->audit_context))
		audit_syscall_entry(audit_arch(), regs->regs[3],
				    regs->regs[4], regs->regs[5],
				    regs->regs[6], regs->regs[7]);

	return ret ?: regs->regs[0];
}
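/*
 * Syscall exit hook: report the result to audit and the exit
 * tracepoint, then notify the tracer, flagging the stop as a
 * single-step stop when TIF_SINGLESTEP is set.
 */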
asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
{
	int step;

	if (unlikely(current->audit_context))
		audit_syscall_exit(AUDITSC_RESULT(regs->regs[0]),
				   regs->regs[0]);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->regs[0]);

	step = test_thread_flag(TIF_SINGLESTEP);
	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, step);
}