/*
 * arch/arm64/kernel/probes/kprobes.c
 *
 * Kprobes support for ARM64
 *
 * Copyright (C) 2013 Linaro Limited.
 * Author: Sandeepa Prabhu <sandeepa.prabhu@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 */
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/extable.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/stringify.h>
#include <asm/traps.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/system_misc.h>
#include <asm/insn.h>
#include <asm/uaccess.h>
#include <asm/irq.h>
#include <asm/sections.h>

#include "decode-insn.h"

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

static void __kprobes
post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
	/* prepare insn slot */
	p->ainsn.insn[0] = cpu_to_le32(p->opcode);

	flush_icache_range((uintptr_t) (p->ainsn.insn),
			   (uintptr_t) (p->ainsn.insn) +
			   MAX_INSN_SIZE * sizeof(kprobe_opcode_t));

	/*
	 * The return address needs restoring after stepping the
	 * instruction out of line (XOL).
	 */
	p->ainsn.restore = (unsigned long) p->addr +
		sizeof(kprobe_opcode_t);
}
static void __kprobes arch_prepare_simulate(struct kprobe *p)
{
	/* This instruction is not executed out of line; no need to adjust the PC. */
	p->ainsn.restore = 0;
}
static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (p->ainsn.handler)
		p->ainsn.handler((u32)p->opcode, (long)p->addr, regs);

	/* single step simulated, now go for post processing */
	post_kprobe_handler(kcb, regs);
}
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	unsigned long probe_addr = (unsigned long)p->addr;
	extern char __start_rodata[];
	extern char __end_rodata[];

	if (probe_addr & 0x3)
		return -EINVAL;

	/* copy instruction */
	p->opcode = le32_to_cpu(*p->addr);

	if (in_exception_text(probe_addr))
		return -EINVAL;
	if (probe_addr >= (unsigned long) __start_rodata &&
	    probe_addr <= (unsigned long) __end_rodata)
		return -EINVAL;

	/* decode instruction */
	switch (arm_kprobe_decode_insn(p->addr, &p->ainsn)) {
	case INSN_REJECTED:	/* insn not supported */
		return -EINVAL;

	case INSN_GOOD_NO_SLOT:	/* insn needs simulation */
		p->ainsn.insn = NULL;
		break;

	case INSN_GOOD:		/* instruction uses slot */
		p->ainsn.insn = get_insn_slot();
		if (!p->ainsn.insn)
			return -ENOMEM;
		break;
	}

	/* prepare the instruction */
	if (p->ainsn.insn)
		arch_prepare_ss_slot(p);
	else
		arch_prepare_simulate(p);

	return 0;
}
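
/*
 * For context, a minimal sketch (not part of this file) of how the code
 * above is exercised: a module registers a kprobe through the generic
 * kprobes API, which calls arch_prepare_kprobe() to copy and decode the
 * probed instruction. The target symbol "_do_fork" and the handler
 * bodies are illustrative assumptions for this era of the kernel.
 */
#include <linux/kprobes.h>
#include <linux/module.h>

static int example_pre(struct kprobe *p, struct pt_regs *regs)
{
	/* runs before the probed instruction executes */
	pr_info("pre: probe hit at %p\n", p->addr);
	return 0;	/* 0: continue with single-step/simulation */
}

static void example_post(struct kprobe *p, struct pt_regs *regs,
			 unsigned long flags)
{
	/* runs after the probed instruction was stepped or simulated */
	pr_info("post: pc now %lx\n", instruction_pointer(regs));
}

static struct kprobe example_kp = {
	.symbol_name	= "_do_fork",	/* assumed target symbol */
	.pre_handler	= example_pre,
	.post_handler	= example_post,
};

static int __init example_init(void)
{
	return register_kprobe(&example_kp);
}

static void __exit example_exit(void)
{
	unregister_kprobe(&example_kp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");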
static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
{
	void *addrs[1];
	u32 insns[1];

	addrs[0] = (void *)addr;
	insns[0] = (u32)opcode;

	return aarch64_insn_patch_text(addrs, insns, 1);
}

/* arm kprobe: install breakpoint in text */
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	patch_text(p->addr, BRK64_OPCODE_KPROBES);
}

/* disarm kprobe: remove breakpoint from text */
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	patch_text(p->addr, p->opcode);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn, 0);
		p->ainsn.insn = NULL;
	}
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}

static void __kprobes set_current_kprobe(struct kprobe *p)
{
	__this_cpu_write(current_kprobe, p);
}
/*
 * When PSTATE.D is set (masked), software step exceptions cannot be
 * generated.
 * SPSR's D bit shows the value of PSTATE.D immediately before the
 * exception was taken. PSTATE.D is set on entry into any exception
 * mode, but software clears it for any normal (non-debug-exception)
 * mode during exception entry. Therefore, when we enter the kprobe
 * breakpoint handler from a normal mode, the SPSR.D bit is already
 * cleared; it is still set when we enter from a debug exception mode.
 * Since we always need to generate a single-step exception after a
 * kprobe breakpoint exception, we must clear the bit unconditionally
 * once we are sure the current breakpoint exception is for a kprobe.
 */
static void __kprobes
spsr_set_debug_flag(struct pt_regs *regs, int mask)
{
	unsigned long spsr = regs->pstate;

	if (mask)
		spsr |= PSR_D_BIT;
	else
		spsr &= ~PSR_D_BIT;

	regs->pstate = spsr;
}
/*
 * Interrupts need to be disabled before single-step mode is set, and not
 * re-enabled until after single-step mode ends.
 * Without disabling interrupts on the local CPU, there is a chance that
 * an interrupt arrives between the exception return and the start of the
 * out-of-line single step, which would result in wrongly single-stepping
 * into the interrupt handler.
 */
static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
						 struct pt_regs *regs)
{
	kcb->saved_irqflag = regs->pstate;
	regs->pstate |= PSR_I_BIT;
}

static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
						    struct pt_regs *regs)
{
	if (kcb->saved_irqflag & PSR_I_BIT)
		regs->pstate |= PSR_I_BIT;
	else
		regs->pstate &= ~PSR_I_BIT;
}

static void __kprobes
set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr)
{
	kcb->ss_ctx.ss_pending = true;
	kcb->ss_ctx.match_addr = addr + sizeof(kprobe_opcode_t);
}

static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb)
{
	kcb->ss_ctx.ss_pending = false;
	kcb->ss_ctx.match_addr = 0;
}

static void __kprobes setup_singlestep(struct kprobe *p,
				       struct pt_regs *regs,
				       struct kprobe_ctlblk *kcb, int reenter)
{
	unsigned long slot;

	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p);
		kcb->kprobe_status = KPROBE_REENTER;
	} else {
		kcb->kprobe_status = KPROBE_HIT_SS;
	}

	if (p->ainsn.insn) {
		/* prepare for single stepping */
		slot = (unsigned long)p->ainsn.insn;

		set_ss_context(kcb, slot);	/* mark pending ss */

		spsr_set_debug_flag(regs, 0);

		/* IRQs and single stepping do not mix well. */
		kprobes_save_local_irqflag(kcb, regs);
		kernel_enable_single_step(regs);
		instruction_pointer_set(regs, slot);
	} else {
		/* insn simulation */
		arch_simulate_insn(p, regs);
	}
}

static int __kprobes reenter_kprobe(struct kprobe *p,
				    struct pt_regs *regs,
				    struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		break;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		pr_warn("Unrecoverable kprobe detected at %p.\n", p->addr);
		dump_kprobe(p);
		BUG();
		break;
	default:
		WARN_ON(1);
		return 0;
	}

	return 1;
}
static void __kprobes
post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();

	if (!cur)
		return;

	/* restore the return address if this was a non-branching insn */
	if (cur->ainsn.restore != 0)
		instruction_pointer_set(regs, cur->ainsn.restore);

	/* restore the original saved kprobe variables and continue */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		return;
	}
	/* call post handler */
	kcb->kprobe_status = KPROBE_HIT_SSDONE;
	if (cur->post_handler) {
		/*
		 * The post_handler can itself hit a breakpoint and single
		 * step again, so the D-flag is enabled for the recursive
		 * exception.
		 */
		cur->post_handler(cur, regs, 0);
	}

	reset_current_kprobe();
}
int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe so that the ip points back to the probe address,
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		instruction_pointer_set(regs, (unsigned long) cur->addr);
		if (!instruction_pointer(regs))
			BUG();

		kernel_disable_single_step();

		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();

		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * the npre/npostfault counts could also be used to
		 * account for these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because an instruction in the pre/post
		 * handler caused the page fault. This could happen if
		 * the handler tries to access user space, e.g. via
		 * copy_from_user() or get_user(). Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, fsr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if (fixup_exception(regs))
			return 1;
	}
	return 0;
}
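
/*
 * A minimal sketch (not part of this file) of the user-specified fault
 * handler mentioned above: if a pre/post handler faults, the kprobes core
 * gives the kprobe's own fault_handler first chance to recover. Returning
 * 1 claims the fault; returning 0 falls through to fixup_exception().
 * The target symbol and handler body are illustrative assumptions.
 */
#include <linux/kprobes.h>

static int example_fault(struct kprobe *p, struct pt_regs *regs,
			 int trapnr)
{
	pr_info("fault %d while in a handler for probe at %p\n",
		trapnr, p->addr);
	return 0;	/* not handled here; let fixup_exception() try */
}

static struct kprobe example_fault_kp = {
	.symbol_name	= "vfs_read",	/* assumed target symbol */
	.fault_handler	= example_fault,
};
/* registered via register_kprobe(&example_fault_kp), as in the earlier sketch */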
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	return NOTIFY_DONE;
}
static void __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p, *cur_kprobe;
	struct kprobe_ctlblk *kcb;
	unsigned long addr = instruction_pointer(regs);

	kcb = get_kprobe_ctlblk();
	cur_kprobe = kprobe_running();

	p = get_kprobe((kprobe_opcode_t *) addr);

	if (p) {
		if (cur_kprobe) {
			if (reenter_kprobe(p, regs, kcb))
				return;
		} else {
			/* Probe hit */
			set_current_kprobe(p);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing. If we have a
			 * pre-handler and it returned non-zero, it prepped
			 * for calling the break_handler below on re-entry,
			 * so get out doing nothing more here.
			 *
			 * The pre_handler can itself hit a breakpoint and
			 * single step before returning, so the PSTATE
			 * D-flag is kept enabled until the pre_handler
			 * returns.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs)) {
				setup_singlestep(p, regs, kcb, 0);
				return;
			}
		}
	} else if ((le32_to_cpu(*(kprobe_opcode_t *) addr) ==
	    BRK64_OPCODE_KPROBES) && cur_kprobe) {
		/* We probably hit a jprobe.  Call its break handler. */
		if (cur_kprobe->break_handler &&
		    cur_kprobe->break_handler(cur_kprobe, regs)) {
			setup_singlestep(cur_kprobe, regs, kcb, 0);
			return;
		}
	}
	/*
	 * The breakpoint instruction was removed right
	 * after we hit it. Another CPU has removed
	 * either a probepoint or a debugger breakpoint
	 * at this address. In either case, no further
	 * handling of this interrupt is appropriate.
	 * Return to the original instruction and continue.
	 */
}
static int __kprobes
kprobe_ss_hit(struct kprobe_ctlblk *kcb, unsigned long addr)
{
	if ((kcb->ss_ctx.ss_pending)
	    && (kcb->ss_ctx.match_addr == addr)) {
		clear_ss_context(kcb);	/* clear pending ss */
		return DBG_HOOK_HANDLED;
	}
	/* not ours, kprobes should ignore it */
	return DBG_HOOK_ERROR;
}

int __kprobes
kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	int retval;

	/* return error if this is not our step */
	retval = kprobe_ss_hit(kcb, instruction_pointer(regs));

	if (retval == DBG_HOOK_HANDLED) {
		kprobes_restore_local_irqflag(kcb, regs);
		kernel_disable_single_step();

		post_kprobe_handler(kcb, regs);
	}

	return retval;
}

int __kprobes
kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr)
{
	kprobe_handler(regs);
	return DBG_HOOK_HANDLED;
}

int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	kcb->jprobe_saved_regs = *regs;

	/*
	 * Since we can't be sure where in the stack frame "stacked"
	 * pass-by-value arguments are stored we just don't try to
	 * duplicate any of the stack. Do not use jprobes on functions that
	 * use more than 64 bytes (after padding each to an 8 byte boundary)
	 * of arguments, or pass individual arguments larger than 16 bytes.
	 */
	instruction_pointer_set(regs, (unsigned long) jp->entry);
	preempt_disable();
	pause_graph_tracing();
	return 1;
}
void __kprobes jprobe_return(void)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	/*
	 * The jprobe handler returns by entering a break exception,
	 * encoded the same as a kprobe, but with the following conditions:
	 * - a special PC to identify it from the other kprobes, and
	 * - the stack address restored to the originally saved pt_regs.
	 */
	asm volatile("			mov sp, %0	\n"
		     "jprobe_return_break:	brk %1	\n"
		     :
		     : "r" (kcb->jprobe_saved_regs.sp),
		       "I" (BRK64_ESR_KPROBES)
		     : "memory");

	unreachable();
}
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	long stack_addr = kcb->jprobe_saved_regs.sp;
	long orig_sp = kernel_stack_pointer(regs);
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	extern const char jprobe_return_break[];

	if (instruction_pointer(regs) != (u64) jprobe_return_break)
		return 0;

	if (orig_sp != stack_addr) {
		struct pt_regs *saved_regs =
			(struct pt_regs *)kcb->jprobe_saved_regs.sp;
		pr_err("current sp %lx does not match saved sp %lx\n",
		       orig_sp, stack_addr);
		pr_err("Saved registers for jprobe %p\n", jp);
		show_regs(saved_regs);
		pr_err("Current registers\n");
		show_regs(regs);
		BUG();
	}
	unpause_graph_tracing();
	*regs = kcb->jprobe_saved_regs;
	preempt_enable_no_resched();
	return 1;
}
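
/*
 * A minimal sketch (not part of this file) of the jprobe machinery the
 * three functions above implement: the entry function mirrors the probed
 * function's signature, sees its arguments, and must end with
 * jprobe_return(). The target symbol "_do_fork" and its argument list are
 * illustrative assumptions for this era of the kernel.
 */
#include <linux/kprobes.h>

static long example_jprobe_entry(unsigned long clone_flags,
				 unsigned long stack_start,
				 unsigned long stack_size,
				 int __user *parent_tidptr,
				 int __user *child_tidptr,
				 unsigned long tls)
{
	pr_info("_do_fork called, clone_flags=0x%lx\n", clone_flags);
	jprobe_return();	/* mandatory; never returns normally */
	return 0;		/* unreachable, keeps the compiler happy */
}

static struct jprobe example_jp = {
	.kp.symbol_name	= "_do_fork",	/* assumed target symbol */
	.entry		= example_jprobe_entry,
};
/* registered via register_jprobe(&example_jp) from module init */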
bool arch_within_kprobe_blacklist(unsigned long addr)
{
	if ((addr >= (unsigned long)__kprobes_text_start &&
	    addr < (unsigned long)__kprobes_text_end) ||
	    (addr >= (unsigned long)__entry_text_start &&
	    addr < (unsigned long)__entry_text_end) ||
	    (addr >= (unsigned long)__idmap_text_start &&
	    addr < (unsigned long)__idmap_text_end) ||
	    !!search_exception_tables(addr))
		return true;

	if (!is_kernel_in_hyp_mode()) {
		if ((addr >= (unsigned long)__hyp_text_start &&
		    addr < (unsigned long)__hyp_text_end) ||
		    (addr >= (unsigned long)__hyp_idmap_text_start &&
		    addr < (unsigned long)__hyp_idmap_text_end))
			return true;
	}

	return false;
}
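
/*
 * Nothing calls the blacklist check directly: the generic
 * register_kprobe() path consults it and, as far as I know, rejects a
 * blacklisted address with -EINVAL. A hedged sketch (the entry-text
 * symbol "el1_sync" is an illustrative assumption):
 */
#include <linux/kprobes.h>

static struct kprobe blacklisted_kp = {
	.symbol_name = "el1_sync",	/* assumed blacklisted symbol */
};

static int __init blacklist_demo_init(void)
{
	int ret = register_kprobe(&blacklisted_kp);

	/* expect a failure such as -EINVAL for a blacklisted address */
	pr_info("register_kprobe returned %d\n", ret);
	return ret;
}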
void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address =
		(unsigned long)&kretprobe_trampoline;
	kprobe_opcode_t *correct_ret_addr = NULL;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always pushed into the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the (chronologically) first instance's ret_addr
	 *       will be the real return address, and all the rest will
	 *       point to kretprobe_trampoline.
	 */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	correct_ret_addr = ri->ret_addr;
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;
		if (ri->rp && ri->rp->handler) {
			__this_cpu_write(current_kprobe, &ri->rp->kp);
			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
			ri->ret_addr = correct_ret_addr;
			ri->rp->handler(ri, regs);
			__this_cpu_write(current_kprobe, NULL);
		}

		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_hash_unlock(current, &flags);

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}

	return (void *)orig_ret_address;
}

void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->regs[30];

	/* replace return addr (x30) with trampoline */
	regs->regs[30] = (long)&kretprobe_trampoline;
}
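
/*
 * A minimal sketch (not part of this file) of the kretprobe flow above:
 * registering a return probe makes the core call arch_prepare_kretprobe()
 * at function entry to hijack x30, and trampoline_probe_handler() later
 * runs the handler below with the real return context. The target symbol
 * and handler body are illustrative assumptions.
 */
#include <linux/kprobes.h>

static int example_ret_handler(struct kretprobe_instance *ri,
			       struct pt_regs *regs)
{
	/* on arm64 the return value is in x0 */
	pr_info("probed function returned %ld\n", regs_return_value(regs));
	return 0;
}

static struct kretprobe example_rp = {
	.kp.symbol_name	= "_do_fork",	/* assumed target symbol */
	.handler	= example_ret_handler,
	.maxactive	= 16,	/* concurrent instances to pre-allocate */
};
/* registered via register_kretprobe(&example_rp) from module init */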
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}

int __init arch_init_kprobes(void)
{
	return 0;
}