kprobes.c
/*
 *  Kernel Probes (KProbes)
 *  arch/mips/kernel/kprobes.c
 *
 *  Copyright 2006 Sony Corp.
 *  Copyright 2010 Cavium Networks
 *
 *  Some portions copied from the powerpc version.
 *
 *  Copyright (C) IBM Corporation, 2002, 2004
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
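/*
 * This file supplies the MIPS back end for the generic kprobes core: the
 * arch_*_kprobe() hooks below are invoked by register_kprobe() and
 * register_kretprobe().
 *
 * Minimal usage sketch from a client module (not part of this file; the
 * probed symbol and handler names are illustrative only):
 *
 *	static int my_pre_handler(struct kprobe *kp, struct pt_regs *regs)
 *	{
 *		pr_info("probe hit at %p, epc=%lx\n", kp->addr, regs->cp0_epc);
 *		return 0;
 *	}
 *
 *	static struct kprobe my_probe = {
 *		.symbol_name = "do_fork",
 *		.pre_handler = my_pre_handler,
 *	};
 *
 *	...
 *	ret = register_kprobe(&my_probe);
 */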
#include <linux/kprobes.h>
#include <linux/preempt.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/slab.h>

#include <asm/ptrace.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/inst.h>

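/*
 * Two flavours of break instruction are used: breakpoint_insn is planted
 * at the probed address by arch_arm_kprobe(), and breakpoint2_insn
 * terminates the out-of-line single-step slot so that control returns to
 * the kprobes code once the copied instruction has executed (see
 * prepare_singlestep() and post_kprobe_handler() below).
 */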
static const union mips_instruction breakpoint_insn = {
	.b_format = {
		.opcode = spec_op,
		.code = BRK_KPROBE_BP,
		.func = break_op
	}
};

static const union mips_instruction breakpoint2_insn = {
	.b_format = {
		.opcode = spec_op,
		.code = BRK_KPROBE_SSTEPBP,
		.func = break_op
	}
};

DEFINE_PER_CPU(struct kprobe *, current_kprobe);
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

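/*
 * Returns non-zero if the instruction is a jump or branch, i.e. it has a
 * branch delay slot (including the Octeon bbit* instructions when
 * CONFIG_CPU_CAVIUM_OCTEON is enabled).
 */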
static int __kprobes insn_has_delayslot(union mips_instruction insn)
{
	switch (insn.i_format.opcode) {

	/*
	 * This group contains:
	 * jr and jalr are in r_format format.
	 */
	case spec_op:
		switch (insn.r_format.func) {
		case jr_op:
		case jalr_op:
			break;
		default:
			goto insn_ok;
		}

	/*
	 * This group contains:
	 * bltz_op, bgez_op, bltzl_op, bgezl_op,
	 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
	 */
	case bcond_op:

	/*
	 * These are unconditional and in j_format.
	 */
	case jal_op:
	case j_op:

	/*
	 * These are conditional and in i_format.
	 */
	case beq_op:
	case beql_op:
	case bne_op:
	case bnel_op:
	case blez_op:
	case blezl_op:
	case bgtz_op:
	case bgtzl_op:

	/*
	 * These are the FPA/cp1 branch instructions.
	 */
	case cop1_op:

#ifdef CONFIG_CPU_CAVIUM_OCTEON
	case lwc2_op:	/* This is bbit0 on Octeon */
	case ldc2_op:	/* This is bbit032 on Octeon */
	case swc2_op:	/* This is bbit1 on Octeon */
	case sdc2_op:	/* This is bbit132 on Octeon */
#endif
		return 1;
	default:
		break;
	}
insn_ok:
	return 0;
}

/*
 * insn_has_ll_or_sc() checks whether an instruction is an ll or sc
 * instruction.  Putting a breakpoint on top of an atomic ll/sc pair is a
 * bad idea, so refuse to insert kprobes on such instructions.  We cannot
 * do much about a breakpoint placed in the middle of an ll/sc pair; it is
 * up to the user to avoid those places.
 */
static int __kprobes insn_has_ll_or_sc(union mips_instruction insn)
{
	int ret = 0;

	switch (insn.i_format.opcode) {
	case ll_op:
	case lld_op:
	case sc_op:
	case scd_op:
		ret = 1;
		break;
	default:
		break;
	}
	return ret;
}

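/*
 * Called when a probe is registered: reject unsupported instructions,
 * allocate an out-of-line slot, and fill it with the instruction to be
 * single-stepped (the delay-slot instruction when the probed instruction
 * is a branch) followed by a break that raises the post-single-step trap.
 */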
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	union mips_instruction insn;
	union mips_instruction prev_insn;
	int ret = 0;

	insn = p->addr[0];

	if (insn_has_ll_or_sc(insn)) {
		pr_notice("Kprobes for ll and sc instructions are not "
			  "supported\n");
		ret = -EINVAL;
		goto out;
	}

	if ((probe_kernel_read(&prev_insn, p->addr - 1,
				sizeof(mips_instruction)) == 0) &&
				insn_has_delayslot(prev_insn)) {
		pr_notice("Kprobes for branch delayslot are not supported\n");
		ret = -EINVAL;
		goto out;
	}

	/* insn: must be on special executable page on mips. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * In the kprobe->ainsn.insn[] array we store the original
	 * instruction at index zero and a break trap instruction at
	 * index one.
	 *
	 * On MIPS, if the instruction at the probed address is a branch,
	 * we need to execute the instruction in its Branch Delay slot (BD)
	 * at the time of the probe hit.  As MIPS also has no hardware
	 * single-stepping support, the BD instruction cannot be executed
	 * in-line; it is executed on the SSOL slot, followed by a normal
	 * breakpoint instruction in the next slot.
	 * So, read the instruction and save it for later execution.
	 */
	if (insn_has_delayslot(insn))
		memcpy(&p->ainsn.insn[0], p->addr + 1, sizeof(kprobe_opcode_t));
	else
		memcpy(&p->ainsn.insn[0], p->addr, sizeof(kprobe_opcode_t));

	p->ainsn.insn[1] = breakpoint2_insn;
	p->opcode = *p->addr;

out:
	return ret;
}

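/*
 * Arming and disarming simply swap the break instruction in and out at
 * the probed address; flush_insn_slot() pushes the change out of the
 * caches so the new instruction is actually fetched.
 */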
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = breakpoint_insn;
	flush_insn_slot(p);
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	*p->addr = p->opcode;
	flush_insn_slot(p);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	free_insn_slot(p->ainsn.insn, 0);
}

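/*
 * Kprobes can nest (a probe can be hit while another one is being
 * single-stepped).  The helpers below save and restore the per-CPU state
 * of the interrupted probe so the outer probe can be resumed once the
 * inner one has completed.
 */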
static void save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_SR = kcb->kprobe_old_SR;
	kcb->prev_kprobe.saved_SR = kcb->kprobe_saved_SR;
	kcb->prev_kprobe.saved_epc = kcb->kprobe_saved_epc;
}

static void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_SR = kcb->prev_kprobe.old_SR;
	kcb->kprobe_saved_SR = kcb->prev_kprobe.saved_SR;
	kcb->kprobe_saved_epc = kcb->prev_kprobe.saved_epc;
}

static void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
			       struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = p;
	kcb->kprobe_saved_SR = kcb->kprobe_old_SR = (regs->cp0_status & ST0_IE);
	kcb->kprobe_saved_epc = regs->cp0_epc;
}

/**
 * evaluate_branch_instruction -
 *
 * Evaluate the branch instruction at the probed address during probe hit.
 * The result of the evaluation is the updated epc.  The instruction in
 * the delay slot is actually single-stepped (using a normal breakpoint)
 * on the SSOL slot.
 *
 * The result is also saved in the kprobe control block for later use, in
 * case we need to execute the delay-slot instruction.  The latter will be
 * false for a NOP instruction in the delay slot and for branch-likely
 * instructions when the branch is taken; for those cases we set the
 * SKIP_DELAYSLOT flag in the kprobe control block.
 */
static int evaluate_branch_instruction(struct kprobe *p, struct pt_regs *regs,
					struct kprobe_ctlblk *kcb)
{
	union mips_instruction insn = p->opcode;
	long epc;
	int ret = 0;

	epc = regs->cp0_epc;
	if (epc & 3)
		goto unaligned;

	if (p->ainsn.insn->word == 0)
		kcb->flags |= SKIP_DELAYSLOT;
	else
		kcb->flags &= ~SKIP_DELAYSLOT;

	ret = __compute_return_epc_for_insn(regs, insn);
	if (ret < 0)
		return ret;

	if (ret == BRANCH_LIKELY_TAKEN)
		kcb->flags |= SKIP_DELAYSLOT;

	kcb->target_epc = regs->cp0_epc;

	return 0;

unaligned:
	pr_notice("%s: unaligned epc - sending SIGBUS.\n", current->comm);
	force_sig(SIGBUS, current);
	return -EFAULT;
}

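/*
 * Set the CPU up to single-step the out-of-line copy: interrupts are
 * masked via ST0_IE, the branch target (if any) is computed first, and
 * epc is pointed at p->ainsn.insn so the next instruction executed is the
 * copied one, followed by the trailing break.
 */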
static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs,
			       struct kprobe_ctlblk *kcb)
{
	int ret = 0;

	regs->cp0_status &= ~ST0_IE;

	/* single step inline if the instruction is a break */
	if (p->opcode.word == breakpoint_insn.word ||
	    p->opcode.word == breakpoint2_insn.word)
		regs->cp0_epc = (unsigned long)p->addr;
	else if (insn_has_delayslot(p->opcode)) {
		ret = evaluate_branch_instruction(p, regs, kcb);
		if (ret < 0) {
			pr_notice("Kprobes: Error in evaluating branch\n");
			return;
		}
	}
	regs->cp0_epc = (unsigned long)&p->ainsn.insn[0];
}

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction that was replaced by the breakpoint instruction.  To avoid
 * the SMP problems that can occur when we temporarily put back the
 * original opcode to single-step, we single-stepped a copy of the
 * instruction.  The address of this copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step breakpoint
 * trap.  In the case of branch instructions, the target epc is restored.
 */
static void __kprobes resume_execution(struct kprobe *p,
					struct pt_regs *regs,
					struct kprobe_ctlblk *kcb)
{
	if (insn_has_delayslot(p->opcode))
		regs->cp0_epc = kcb->target_epc;
	else {
		unsigned long orig_epc = kcb->kprobe_saved_epc;
		regs->cp0_epc = orig_epc + 4;
	}
}

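/*
 * Main break-exception entry point.  Looks up the kprobe registered for
 * the faulting address, handles reentrancy (a probe hit while another is
 * being single-stepped), runs the pre_handler and then arranges for the
 * probed instruction to be single-stepped out of line.
 */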
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	kprobe_opcode_t *addr;
	struct kprobe_ctlblk *kcb;

	addr = (kprobe_opcode_t *) regs->cp0_epc;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
			    p->ainsn.insn->word == breakpoint_insn.word) {
				regs->cp0_status &= ~ST0_IE;
				regs->cp0_status |= kcb->kprobe_saved_SR;
				goto no_kprobe;
			}
			/*
			 * We have reentered the kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * We here save the original kprobes variables and
			 * just single step on the instruction of the new
			 * probe without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kprobes_inc_nmissed_count(p);
			prepare_singlestep(p, regs, kcb);
			kcb->kprobe_status = KPROBE_REENTER;
			if (kcb->flags & SKIP_DELAYSLOT) {
				resume_execution(p, regs, kcb);
				restore_previous_kprobe(kcb);
				preempt_enable_no_resched();
			}
			return 1;
		} else {
			if (addr->word != breakpoint_insn.word) {
				/*
				 * The breakpoint instruction was removed by
				 * another cpu right after we hit, no further
				 * handling of this interrupt is appropriate
				 */
				ret = 1;
				goto no_kprobe;
			}
			p = __get_cpu_var(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs))
				goto ss_probe;
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (addr->word != breakpoint_insn.word) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it.  Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address.  In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}

	set_current_kprobe(p, regs, kcb);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	if (p->pre_handler && p->pre_handler(p, regs)) {
		/* handler has already set things up, so skip ss setup */
		return 1;
	}

ss_probe:
	prepare_singlestep(p, regs, kcb);
	if (kcb->flags & SKIP_DELAYSLOT) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		if (p->post_handler)
			p->post_handler(p, regs, 0);
		resume_execution(p, regs, kcb);
		preempt_enable_no_resched();
	} else
		kcb->kprobe_status = KPROBE_HIT_SS;

	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}

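/*
 * Called on the trap raised by the break instruction that follows the
 * out-of-line copy (BRK_KPROBE_SSTEPBP): run the post_handler, restore
 * epc and the saved interrupt state, and unwind any nested probe.
 */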
static inline int post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs, kcb);

	regs->cp0_status |= kcb->kprobe_saved_SR;

	/* Restore back the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	return 1;
}

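/*
 * Called from the page-fault notifier when a fault occurs while a probe
 * is active.  If the fault happened during the out-of-line single step,
 * back out of it and let the normal fault handling take over.
 */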
static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
		return 1;

	if (kcb->kprobe_status & KPROBE_HIT_SS) {
		resume_execution(cur, regs, kcb);
		regs->cp0_status |= kcb->kprobe_old_SR;

		reset_current_kprobe();
		preempt_enable_no_resched();
	}
	return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_BREAK:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEPBP:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;

	case DIE_PAGE_FAULT:
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();

		if (kprobe_running()
		    && kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		preempt_enable();
		break;
	default:
		break;
	}
	return ret;
}

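/*
 * Jprobes support: setjmp_pre_handler() saves the register file and the
 * top of the stack, then redirects execution to the jprobe entry point,
 * which runs with the same arguments as the probed function.
 * jprobe_return() traps back into the kprobes code and
 * longjmp_break_handler() restores the saved state.
 */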
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	kcb->jprobe_saved_regs = *regs;
	kcb->jprobe_saved_sp = regs->regs[29];

	memcpy(kcb->jprobes_stack, (void *)kcb->jprobe_saved_sp,
	       MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));

	regs->cp0_epc = (unsigned long)(jp->entry);

	return 1;
}

/* Defined in the inline asm below. */
void jprobe_return_end(void);

void __kprobes jprobe_return(void)
{
	/* Assembler quirk necessitates this '0,code' business. */
	asm volatile(
		"break 0,%0\n\t"
		".globl jprobe_return_end\n"
		"jprobe_return_end:\n"
		: : "n" (BRK_KPROBE_BP) : "memory");
}

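/*
 * break_handler for the jprobe: if the trap came from the jprobe_return()
 * stub above, restore the register state and stack contents saved by
 * setjmp_pre_handler() so normal kprobe processing of the probed function
 * can resume.
 */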
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (regs->cp0_epc >= (unsigned long)jprobe_return &&
	    regs->cp0_epc <= (unsigned long)jprobe_return_end) {
		*regs = kcb->jprobe_saved_regs;
		memcpy((void *)kcb->jprobe_saved_sp, kcb->jprobes_stack,
		       MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));
		preempt_enable_no_resched();

		return 1;
	}
	return 0;
}

/*
 * Function return probe trampoline:
 *	- init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe causes the
 *	  handlers to fire
 */
static void __used kretprobe_trampoline_holder(void)
{
	asm volatile(
		".set push\n\t"
		/* Keep the assembler from reordering and placing JR here. */
		".set noreorder\n\t"
		"nop\n\t"
		".global kretprobe_trampoline\n"
		"kretprobe_trampoline:\n\t"
		"nop\n\t"
		".set pop"
		: : : "memory");
}

void kretprobe_trampoline(void);

void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *) regs->regs[31];

	/* Replace the return addr with trampoline addr */
	regs->regs[31] = (unsigned long)kretprobe_trampoline;
}

/*
 * Called when the probe at kretprobe trampoline is hit
 */
static int __kprobes trampoline_probe_handler(struct kprobe *p,
						struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);
	instruction_pointer(regs) = orig_ret_address;

	reset_current_kprobe();
	kretprobe_hash_unlock(current, &flags);
	preempt_enable_no_resched();

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}

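/*
 * Lets the generic kprobes code recognise the kretprobe trampoline probe
 * as belonging to the architecture rather than to a user, so the core can
 * treat it specially.
 */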
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr == (kprobe_opcode_t *)kretprobe_trampoline)
		return 1;

	return 0;
}

static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *)kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}