/* arch/sparc64/kernel/kprobes.c
 *
 * Copyright (C) 2004 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/slab.h>
#include <asm/signal.h>
#include <asm/cacheflush.h>
#include <asm/uaccess.h>

/* We do not have hardware single-stepping on sparc64.
 * So we implement software single-stepping with breakpoint
 * traps.  The top-level scheme is similar to that used
 * in the x86 kprobes implementation.
 *
 * In the kprobe->ainsn.insn[] array we store the original
 * instruction at index zero and a break instruction at
 * index one.
 *
 * When we hit a kprobe we:
 * - Run the pre-handler
 * - Remember "regs->tnpc" and the interrupt level stored in
 *   "regs->tstate" so we can restore them later
 * - Disable PIL interrupts
 * - Set regs->tpc to point to kprobe->ainsn.insn[0]
 * - Set regs->tnpc to point to kprobe->ainsn.insn[1]
 * - Mark that we are actively in a kprobe
 *
 * At this point we wait for the second breakpoint at
 * kprobe->ainsn.insn[1] to hit.  When it does we:
 * - Run the post-handler
 * - Set regs->tpc to the "remembered" regs->tnpc stored above,
 *   and restore the PIL interrupt level in "regs->tstate" as well
 * - Make any adjustments necessary to regs->tnpc in order
 *   to handle relative branches correctly.  See below.
 * - Mark that we are no longer actively in a kprobe.
 */

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
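
/* Validate and prepare a probe: sparc64 instructions are 32 bits and
 * word aligned, so reject misaligned addresses.  We copy the original
 * instruction into ainsn.insn[0] and place the second breakpoint at
 * ainsn.insn[1], flushing the I-cache for both slots so the
 * single-step area is coherent before it is ever executed.
 */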
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
        if ((unsigned long) p->addr & 0x3UL)
                return -EILSEQ;

        p->ainsn.insn[0] = *p->addr;
        flushi(&p->ainsn.insn[0]);

        p->ainsn.insn[1] = BREAKPOINT_INSTRUCTION_2;
        flushi(&p->ainsn.insn[1]);

        p->opcode = *p->addr;
        return 0;
}
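
/* Arming writes the breakpoint opcode over the probed instruction and
 * disarming writes the saved opcode back; flushi() keeps the
 * instruction cache in sync with the modified text in both directions.
 */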
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
        *p->addr = BREAKPOINT_INSTRUCTION;
        flushi(p->addr);
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
        *p->addr = p->opcode;
        flushi(p->addr);
}
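
/* A probe can fire while we are already processing another probe,
 * e.g. from inside a user handler.  These helpers stash and restore
 * the active kprobe state in the per-cpu control block so a reentrant
 * hit can be single-stepped without losing the outer probe's state.
 */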
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        kcb->prev_kprobe.kp = kprobe_running();
        kcb->prev_kprobe.status = kcb->kprobe_status;
        kcb->prev_kprobe.orig_tnpc = kcb->kprobe_orig_tnpc;
        kcb->prev_kprobe.orig_tstate_pil = kcb->kprobe_orig_tstate_pil;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
        kcb->kprobe_status = kcb->prev_kprobe.status;
        kcb->kprobe_orig_tnpc = kcb->prev_kprobe.orig_tnpc;
        kcb->kprobe_orig_tstate_pil = kcb->prev_kprobe.orig_tstate_pil;
}

static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
                                         struct kprobe_ctlblk *kcb)
{
        __get_cpu_var(current_kprobe) = p;
        kcb->kprobe_orig_tnpc = regs->tnpc;
        kcb->kprobe_orig_tstate_pil = (regs->tstate & TSTATE_PIL);
}
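
/* Redirect the trapping context into the single-step area.  Raising
 * TSTATE_PIL masks normal interrupts for the duration of the step,
 * as described in the comment at the top of this file.
 */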
static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs,
                                         struct kprobe_ctlblk *kcb)
{
        regs->tstate |= TSTATE_PIL;

        /* Single step inline if it is a breakpoint instruction. */
        if (p->opcode == BREAKPOINT_INSTRUCTION) {
                regs->tpc = (unsigned long) p->addr;
                regs->tnpc = kcb->kprobe_orig_tnpc;
        } else {
                regs->tpc = (unsigned long) &p->ainsn.insn[0];
                regs->tnpc = (unsigned long) &p->ainsn.insn[1];
        }
}
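
/* Entry point for the first breakpoint trap (DIE_DEBUG).  Returns 1
 * if the trap was consumed by kprobes, 0 to let the kernel handle it.
 */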
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
        struct kprobe *p;
        void *addr = (void *) regs->tpc;
        int ret = 0;
        struct kprobe_ctlblk *kcb;

        /*
         * We don't want to be preempted for the entire
         * duration of kprobe processing.
         */
        preempt_disable();
        kcb = get_kprobe_ctlblk();

        if (kprobe_running()) {
                p = get_kprobe(addr);
                if (p) {
                        if (kcb->kprobe_status == KPROBE_HIT_SS) {
                                regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
                                        kcb->kprobe_orig_tstate_pil);
                                goto no_kprobe;
                        }
                        /* We have reentered the kprobe_handler(), since
                         * another probe was hit while within the handler.
                         * We save the original kprobes variables here and
                         * just single step on the instruction of the new
                         * probe without calling any user handlers.
                         */
                        save_previous_kprobe(kcb);
                        set_current_kprobe(p, regs, kcb);
                        kprobes_inc_nmissed_count(p);
                        kcb->kprobe_status = KPROBE_REENTER;
                        prepare_singlestep(p, regs, kcb);
                        return 1;
                } else {
                        if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
                                /* The breakpoint instruction was removed by
                                 * another cpu right after we hit; no further
                                 * handling of this interrupt is appropriate.
                                 */
                                ret = 1;
                                goto no_kprobe;
                        }
                        p = __get_cpu_var(current_kprobe);
                        if (p->break_handler && p->break_handler(p, regs))
                                goto ss_probe;
                }
                goto no_kprobe;
        }

        p = get_kprobe(addr);
        if (!p) {
                if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
                        /*
                         * The breakpoint instruction was removed right
                         * after we hit it.  Another cpu has removed
                         * either a probepoint or a debugger breakpoint
                         * at this address.  In either case, no further
                         * handling of this interrupt is appropriate.
                         */
                        ret = 1;
                }
                /* Not one of ours: let the kernel handle it. */
                goto no_kprobe;
        }

        set_current_kprobe(p, regs, kcb);
        kcb->kprobe_status = KPROBE_HIT_ACTIVE;
        if (p->pre_handler && p->pre_handler(p, regs))
                return 1;

ss_probe:
        prepare_singlestep(p, regs, kcb);
        kcb->kprobe_status = KPROBE_HIT_SS;
        return 1;

no_kprobe:
        preempt_enable_no_resched();
        return ret;
}
/* If INSN is a relative control transfer instruction,
 * return the corrected branch destination value.
 *
 * regs->tpc and regs->tnpc still hold the values of the
 * program counters at the time of the trap due to the execution
 * of the BREAKPOINT_INSTRUCTION_2 at p->ainsn.insn[1].
 */
static unsigned long __kprobes relbranch_fixup(u32 insn, struct kprobe *p,
                                               struct pt_regs *regs)
{
        unsigned long real_pc = (unsigned long) p->addr;

        /* Branch not taken, no mods necessary. */
        if (regs->tnpc == regs->tpc + 0x4UL)
                return real_pc + 0x8UL;

        /* The three cases are call, branch w/prediction,
         * and traditional branch.
         */
        if ((insn & 0xc0000000) == 0x40000000 ||
            (insn & 0xc1c00000) == 0x00400000 ||
            (insn & 0xc1c00000) == 0x00800000) {
                unsigned long ainsn_addr;

                ainsn_addr = (unsigned long) &p->ainsn.insn[0];

                /* The instruction did all the work for us
                 * already, just apply the offset to the correct
                 * instruction location.
                 */
                return (real_pc + (regs->tnpc - ainsn_addr));
        }

        /* It is jmpl or some other absolute PC modification instruction,
         * leave NPC as-is.
         */
        return regs->tnpc;
}
/* If INSN is an instruction which writes its PC location
 * into a destination register, fix that up.
 */
static void __kprobes retpc_fixup(struct pt_regs *regs, u32 insn,
                                  unsigned long real_pc)
{
        unsigned long *slot = NULL;

        /* Simplest case is 'call', which always uses %o7 */
        if ((insn & 0xc0000000) == 0x40000000)
                slot = &regs->u_regs[UREG_I7];

        /* 'jmpl' encodes the register inside of the opcode */
        if ((insn & 0xc1f80000) == 0x81c00000) {
                unsigned long rd = ((insn >> 25) & 0x1f);

                if (rd <= 15) {
                        slot = &regs->u_regs[rd];
                } else {
                        /* Hard case, it goes onto the stack. */
                        flushw_all();

                        rd -= 16;
                        slot = (unsigned long *)
                                (regs->u_regs[UREG_FP] + STACK_BIAS);
                        slot += rd;
                }
        }
        if (slot != NULL)
                *slot = real_pc;
}
/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction which has been replaced by the breakpoint
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is &p->ainsn.insn[0].
 *
 * This function prepares to return from the post-single-step
 * breakpoint trap.
 */
static void __kprobes resume_execution(struct kprobe *p,
                struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
        u32 insn = p->ainsn.insn[0];

        regs->tnpc = relbranch_fixup(insn, p, regs);

        /* This assignment must occur after relbranch_fixup() */
        regs->tpc = kcb->kprobe_orig_tnpc;

        retpc_fixup(regs, insn, (unsigned long) p->addr);

        regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
                        kcb->kprobe_orig_tstate_pil);
}
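
/* Entry point for the second breakpoint trap (DIE_DEBUG_2), taken
 * after the copied instruction has executed.  Runs the post-handler,
 * repairs the program counters and PIL, and pops the reentrancy
 * state if needed.
 */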
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
        struct kprobe *cur = kprobe_running();
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        if (!cur)
                return 0;

        if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
                kcb->kprobe_status = KPROBE_HIT_SSDONE;
                cur->post_handler(cur, regs, 0);
        }

        resume_execution(cur, regs, kcb);

        /* Restore the original saved kprobes variables and continue. */
        if (kcb->kprobe_status == KPROBE_REENTER) {
                restore_previous_kprobe(kcb);
                goto out;
        }
        reset_current_kprobe();
out:
        preempt_enable_no_resched();

        return 1;
}
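
/* Fault while a probe is active: either the single-stepped copy
 * faulted (undo the probe state and replay the fault at the original
 * address), or a user handler faulted (give its fault_handler and the
 * exception tables a chance before falling back to do_page_fault()).
 */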
int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
        struct kprobe *cur = kprobe_running();
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        const struct exception_table_entry *entry;

        switch(kcb->kprobe_status) {
        case KPROBE_HIT_SS:
        case KPROBE_REENTER:
                /*
                 * We are here because the instruction being single
                 * stepped caused a page fault.  We reset the current
                 * kprobe so that the tpc points back to the probe
                 * address, and allow the page fault handler to
                 * continue as a normal page fault.
                 */
                regs->tpc = (unsigned long)cur->addr;
                regs->tnpc = kcb->kprobe_orig_tnpc;
                regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
                                kcb->kprobe_orig_tstate_pil);
                if (kcb->kprobe_status == KPROBE_REENTER)
                        restore_previous_kprobe(kcb);
                else
                        reset_current_kprobe();
                preempt_enable_no_resched();
                break;
        case KPROBE_HIT_ACTIVE:
        case KPROBE_HIT_SSDONE:
                /*
                 * We increment the nmissed count for accounting;
                 * we can also use the npre/npostfault counts for
                 * accounting these specific fault cases.
                 */
                kprobes_inc_nmissed_count(cur);

                /*
                 * We come here because instructions in the pre/post
                 * handler caused the page fault.  This could happen
                 * if the handler tries to access user space via
                 * copy_from_user(), get_user() etc.  Let the
                 * user-specified handler try to fix it first.
                 */
                if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
                        return 1;

                /*
                 * In case the user-specified fault handler returned
                 * zero, try to fix up.
                 */
                entry = search_exception_tables(regs->tpc);
                if (entry) {
                        regs->tpc = entry->fixup;
                        regs->tnpc = regs->tpc + 4;
                        return 1;
                }

                /*
                 * fixup_exception() could not handle it,
                 * let do_page_fault() fix it.
                 */
                break;
        default:
                break;
        }

        return 0;
}
/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
                                       unsigned long val, void *data)
{
        struct die_args *args = (struct die_args *)data;
        int ret = NOTIFY_DONE;

        if (args->regs && user_mode(args->regs))
                return ret;

        switch (val) {
        case DIE_DEBUG:
                if (kprobe_handler(args->regs))
                        ret = NOTIFY_STOP;
                break;
        case DIE_DEBUG_2:
                if (post_kprobe_handler(args->regs))
                        ret = NOTIFY_STOP;
                break;
        default:
                break;
        }
        return ret;
}
asmlinkage void __kprobes kprobe_trap(unsigned long trap_level,
                                      struct pt_regs *regs)
{
        BUG_ON(trap_level != 0x170 && trap_level != 0x171);

        if (user_mode(regs)) {
                local_irq_enable();
                bad_trap(regs, trap_level);
                return;
        }

        /* trap_level == 0x170 --> ta 0x70
         * trap_level == 0x171 --> ta 0x71
         */
        if (notify_die((trap_level == 0x170) ? DIE_DEBUG : DIE_DEBUG_2,
                       (trap_level == 0x170) ? "debug" : "debug_2",
                       regs, 0, trap_level, SIGTRAP) != NOTIFY_STOP)
                bad_trap(regs, trap_level);
}
/* Jprobes support. */
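/* setjmp_pre_handler() saves the full register state and then
 * redirects execution to the jprobe entry point; jprobe_return()
 * later traps back so that longjmp_break_handler() can restore the
 * saved registers as if the probed function had run normally.
 */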
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct jprobe *jp = container_of(p, struct jprobe, kp);
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        memcpy(&(kcb->jprobe_saved_regs), regs, sizeof(*regs));

        regs->tpc = (unsigned long) jp->entry;
        regs->tnpc = ((unsigned long) jp->entry) + 0x4UL;
        regs->tstate |= TSTATE_PIL;

        return 1;
}
void __kprobes jprobe_return(void)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        register unsigned long orig_fp asm("g1");

        orig_fp = kcb->jprobe_saved_regs.u_regs[UREG_FP];

        /* Pop register windows until we are back on the frame the
         * jprobe fired in, then trap into the kprobes breakpoint
         * handler.
         */
        __asm__ __volatile__("\n"
"1:     cmp             %%sp, %0\n\t"
        "blu,a,pt       %%xcc, 1b\n\t"
        " restore\n\t"
        ".globl         jprobe_return_trap_instruction\n"
"jprobe_return_trap_instruction:\n\t"
        "ta             0x70"
        : /* no outputs */
        : "r" (orig_fp));
}

extern void jprobe_return_trap_instruction(void);
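
/* Break handler paired with setjmp_pre_handler(): if the trap came
 * from jprobe_return_trap_instruction, restore the register state
 * that was saved when the jprobe fired.
 */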
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
        u32 *addr = (u32 *) regs->tpc;
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        if (addr == (u32 *) jprobe_return_trap_instruction) {
                memcpy(regs, &(kcb->jprobe_saved_regs), sizeof(*regs));
                preempt_enable_no_resched();
                return 1;
        }
        return 0;
}
/* The value stored in the return address register is actually 2
 * instructions before where the callee will return to.
 * Sequences usually look something like this:
 *
 *              call    some_function   <--- return register points here
 *               nop                    <--- call delay slot
 *              whatever                <--- where callee returns to
 *
 * To keep trampoline_probe_handler logic simpler, we normalize the
 * value kept in ri->ret_addr so we don't need to keep adjusting it
 * back and forth.
 */
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
                                      struct pt_regs *regs)
{
        ri->ret_addr = (kprobe_opcode_t *)(regs->u_regs[UREG_RETPC] + 8);

        /* Replace the return addr with trampoline addr */
        regs->u_regs[UREG_RETPC] =
                ((unsigned long)kretprobe_trampoline) - 8;
}
/*
 * Called when the probe at the kretprobe trampoline is hit.
 */
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct kretprobe_instance *ri = NULL;
        struct hlist_head *head, empty_rp;
        struct hlist_node *node, *tmp;
        unsigned long flags, orig_ret_address = 0;
        unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

        INIT_HLIST_HEAD(&empty_rp);
        kretprobe_hash_lock(current, &head, &flags);

        /*
         * It is possible to have multiple instances associated with a given
         * task either because multiple functions in the call path have
         * a return probe installed on them, and/or more than one return
         * probe was registered for a target function.
         *
         * We can handle this because:
         *     - instances are always inserted at the head of the list
         *     - when multiple return probes are registered for the same
         *       function, the first instance's ret_addr will point to the
         *       real return address, and all the rest will point to
         *       kretprobe_trampoline
         */
        hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;

                if (ri->rp && ri->rp->handler)
                        ri->rp->handler(ri, regs);

                orig_ret_address = (unsigned long)ri->ret_addr;
                recycle_rp_inst(ri, &empty_rp);

                if (orig_ret_address != trampoline_address)
                        /*
                         * This is the real return address.  Any other
                         * instances associated with this task are for
                         * other calls deeper on the call stack.
                         */
                        break;
        }

        kretprobe_assert(ri, orig_ret_address, trampoline_address);
        regs->tpc = orig_ret_address;
        regs->tnpc = orig_ret_address + 4;

        reset_current_kprobe();
        kretprobe_hash_unlock(current, &flags);
        preempt_enable_no_resched();

        hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
        }
        /*
         * By returning a non-zero value, we are telling
         * kprobe_handler() that we don't want the post_handler
         * to run (and have re-enabled preemption).
         */
        return 1;
}
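
/* The trampoline itself is just two nops; arch_init_kprobes() below
 * registers a kprobe on the first nop, so a function returning
 * through the trampoline lands in trampoline_probe_handler().
 */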
void kretprobe_trampoline_holder(void)
{
        asm volatile(".global kretprobe_trampoline\n"
                     "kretprobe_trampoline:\n"
                     "\tnop\n"
                     "\tnop\n");
}

static struct kprobe trampoline_p = {
        .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
        .pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
        return register_kprobe(&trampoline_p);
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
        if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
                return 1;

        return 0;
}