kprobes.c 16 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657
  1. /* MN10300 Kernel probes implementation
  2. *
  3. * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
  4. * Written by Mark Salter (msalter@redhat.com)
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public Licence as published by
  8. * the Free Software Foundation; either version 2 of the Licence, or
  9. * (at your option) any later version.
  10. *
  11. * This program is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public Licence for more details.
  15. *
  16. * You should have received a copy of the GNU General Public Licence
  17. * along with this program; if not, write to the Free Software
  18. * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  19. */
  20. #include <linux/kprobes.h>
  21. #include <linux/ptrace.h>
  22. #include <linux/spinlock.h>
  23. #include <linux/preempt.h>
  24. #include <linux/kdebug.h>
  25. #include <asm/cacheflush.h>
/* No blacklisted return-probe locations on this arch; terminator only. */
struct kretprobe_blackpoint kretprobe_blacklist[] = { { NULL, NULL } };
const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);

/* kprobe_status settings */
#define KPROBE_HIT_ACTIVE	0x00000001	/* pre_handler phase */
#define KPROBE_HIT_SS		0x00000002	/* single-stepping the copied insn */

/* State for the one probe currently being serviced.  These are plain
 * globals (not per-cpu), so this implementation handles one probe hit
 * at a time. */
static struct kprobe *cur_kprobe;		/* probe being serviced */
static unsigned long cur_kprobe_orig_pc;	/* PC where the probe fired */
static unsigned long cur_kprobe_next_pc;	/* PC to resume at afterwards */
static int cur_kprobe_ss_flags;			/* SINGLESTEP_* bits */
static unsigned long kprobe_status;		/* KPROBE_HIT_* state */
/* Out-of-line buffer the probed instruction is copied into and stepped
 * from; +2 leaves room for the trailing breakpoint byte. */
static kprobe_opcode_t cur_kprobe_ss_buf[MAX_INSN_SIZE + 2];
static unsigned long cur_kprobe_bp_addr;	/* addr of the temporary SS breakpoint */

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;

/* singlestep flag bits */
#define SINGLESTEP_BRANCH 1	/* insn is a branch/call/return */
#define SINGLESTEP_PCREL 2	/* computed next PC is PC-relative */
/* Fetch instruction operand bytes one at a time: displacement fields in
 * the instruction stream are not guaranteed to be aligned, so wider
 * loads could fault or misbehave. */
#define READ_BYTE(p, valp) \
	do { *(u8 *)(valp) = *(u8 *)(p); } while (0)

/* Byte-wise read of a (possibly misaligned) 16-bit little-endian value. */
#define READ_WORD16(p, valp) \
	do { \
		READ_BYTE((p), (valp)); \
		READ_BYTE((u8 *)(p) + 1, (u8 *)(valp) + 1); \
	} while (0)

/* Byte-wise read of a (possibly misaligned) 32-bit little-endian value. */
#define READ_WORD32(p, valp) \
	do { \
		READ_BYTE((p), (valp)); \
		READ_BYTE((u8 *)(p) + 1, (u8 *)(valp) + 1); \
		READ_BYTE((u8 *)(p) + 2, (u8 *)(valp) + 2); \
		READ_BYTE((u8 *)(p) + 3, (u8 *)(valp) + 3); \
	} while (0)
/* Total instruction length, in bytes, indexed by the first opcode byte.
 * A zero entry means the size cannot be determined from the first byte
 * alone (branches, returns and extended-opcode prefixes); those cases
 * are decoded individually in find_nextpc(). */
static const u8 mn10300_insn_sizes[256] =
{
	/* 1  2  3  4  5  6  7  8  9  a  b  c  d  e  f */
	1, 3, 3, 3, 1, 3, 3, 3, 1, 3, 3, 3, 1, 3, 3, 3,	/* 0 */
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,	/* 1 */
	2, 2, 2, 2, 3, 3, 3, 3, 2, 2, 2, 2, 3, 3, 3, 3,	/* 2 */
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 1, 1,	/* 3 */
	1, 1, 2, 2, 1, 1, 2, 2, 1, 1, 2, 2, 1, 1, 2, 2,	/* 4 */
	1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2,	/* 5 */
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,	/* 6 */
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,	/* 7 */
	2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2,	/* 8 */
	2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2,	/* 9 */
	2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2,	/* a */
	2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2,	/* b */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 2,	/* c */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* d */
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,	/* e */
	0, 2, 2, 2, 2, 2, 2, 4, 0, 3, 0, 4, 0, 6, 7, 1	/* f */
};
/* One bit per Bxx condition code.  The bit position matches the low
 * nibble of the Bxx opcode, so "opc & 0xf" selects a condition bit. */
#define LT (1 << 0)
#define GT (1 << 1)
#define GE (1 << 2)
#define LE (1 << 3)
#define CS (1 << 4)
#define HI (1 << 5)
#define CC (1 << 6)
#define LS (1 << 7)
#define EQ (1 << 8)
#define NE (1 << 9)
#define RA (1 << 10)
#define VC (1 << 11)
#define VS (1 << 12)
#define NC (1 << 13)
#define NS (1 << 14)

/* Which conditions are satisfied for each combination of the V, C, N
 * and Z flags (the low nibble of EPSW).  Index with "epsw & 0xf", then
 * test the condition bit for the branch opcode. */
static const u16 cond_table[] = {
	/*	  V  C  N  Z */
	/* 0  0  0  0 */ (NE | NC | CC | VC | GE | GT | HI),
	/* 0  0  0  1 */ (EQ | NC | CC | VC | GE | LE | LS),
	/* 0  0  1  0 */ (NE | NS | CC | VC | LT | LE | HI),
	/* 0  0  1  1 */ (EQ | NS | CC | VC | LT | LE | LS),
	/* 0  1  0  0 */ (NE | NC | CS | VC | GE | GT | LS),
	/* 0  1  0  1 */ (EQ | NC | CS | VC | GE | LE | LS),
	/* 0  1  1  0 */ (NE | NS | CS | VC | LT | LE | LS),
	/* 0  1  1  1 */ (EQ | NS | CS | VC | LT | LE | LS),
	/* 1  0  0  0 */ (NE | NC | CC | VS | LT | LE | HI),
	/* 1  0  0  1 */ (EQ | NC | CC | VS | LT | LE | LS),
	/* 1  0  1  0 */ (NE | NS | CC | VS | GE | GT | HI),
	/* 1  0  1  1 */ (EQ | NS | CC | VS | GE | LE | LS),
	/* 1  1  0  0 */ (NE | NC | CS | VS | LT | LE | LS),
	/* 1  1  0  1 */ (EQ | NC | CS | VS | LT | LE | LS),
	/* 1  1  1  0 */ (NE | NS | CS | VS | GE | GT | LS),
	/* 1  1  1  1 */ (EQ | NS | CS | VS | GE | LE | LS),
};
/*
 * Calculate what the PC will be after executing next instruction
 *
 * Decodes the instruction at regs->pc and returns the address the CPU
 * will reach after it executes.  On return, *flags holds:
 *  - SINGLESTEP_PCREL  if the computed target is relative to the PC
 *    (so the caller must translate it back from the out-of-line buffer
 *    to the original location);
 *  - SINGLESTEP_BRANCH if the instruction is a branch/call/return that
 *    needs the special setup in singlestep_branch_setup().
 */
static unsigned find_nextpc(struct pt_regs *regs, int *flags)
{
	unsigned size;
	s8 x8;
	s16 x16;
	s32 x32;
	u8 opc, *pc, *sp, *next;

	next = 0;
	*flags = SINGLESTEP_PCREL;

	pc = (u8 *) regs->pc;
	/* the interrupted code's stack starts immediately above the saved
	 * register frame */
	sp = (u8 *) (regs + 1);

	opc = *pc;
	size = mn10300_insn_sizes[opc];
	if (size > 0) {
		/* fixed-length, non-branching instruction: fall straight
		 * through to the next one */
		next = pc + size;
	} else {
		switch (opc) {
			/* Bxx (d8,PC) */
		case 0xc0 ... 0xca:
			/* if the condition fails, fall through (insn is 2
			 * bytes); otherwise take the signed 8-bit
			 * displacement */
			x8 = 2;
			if (cond_table[regs->epsw & 0xf] & (1 << (opc & 0xf)))
				x8 = (s8)pc[1];
			next = pc + x8;
			*flags |= SINGLESTEP_BRANCH;
			break;

			/* JMP (d16,PC) or CALL (d16,PC) */
		case 0xcc:
		case 0xcd:
			READ_WORD16(pc + 1, &x16);
			next = pc + x16;
			*flags |= SINGLESTEP_BRANCH;
			break;

			/* JMP (d32,PC) or CALL (d32,PC) */
		case 0xdc:
		case 0xdd:
			READ_WORD32(pc + 1, &x32);
			next = pc + x32;
			*flags |= SINGLESTEP_BRANCH;
			break;

			/* RETF - return address is in MDR */
		case 0xde:
			next = (u8 *)regs->mdr;
			*flags &= ~SINGLESTEP_PCREL;
			*flags |= SINGLESTEP_BRANCH;
			break;

			/* RET - return address is on the stack, above the
			 * register-restore area whose size is in pc[2] */
		case 0xdf:
			sp += pc[2];
			READ_WORD32(sp, &x32);
			next = (u8 *)x32;
			*flags &= ~SINGLESTEP_PCREL;
			*flags |= SINGLESTEP_BRANCH;
			break;

			/* 0xf0: two-byte extended opcodes */
		case 0xf0:
			next = pc + 2;
			opc = pc[1];
			if (opc >= 0xf0 && opc <= 0xf7) {
				/* JMP (An) / CALLS (An) - target is in an
				 * address register */
				switch (opc & 3) {
				case 0:
					next = (u8 *)regs->a0;
					break;
				case 1:
					next = (u8 *)regs->a1;
					break;
				case 2:
					next = (u8 *)regs->a2;
					break;
				case 3:
					next = (u8 *)regs->a3;
					break;
				}
				*flags &= ~SINGLESTEP_PCREL;
				*flags |= SINGLESTEP_BRANCH;
			} else if (opc == 0xfc) {
				/* RETS - return address at top of stack */
				READ_WORD32(sp, &x32);
				next = (u8 *)x32;
				*flags &= ~SINGLESTEP_PCREL;
				*flags |= SINGLESTEP_BRANCH;
			} else if (opc == 0xfd) {
				/* RTI - saved PC is the second word of the
				 * exception frame */
				READ_WORD32(sp + 4, &x32);
				next = (u8 *)x32;
				*flags &= ~SINGLESTEP_PCREL;
				*flags |= SINGLESTEP_BRANCH;
			}
			break;

			/* potential 3-byte conditional branches */
		case 0xf8:
			next = pc + 3;
			opc = pc[1];
			/* 0xe8..0xeb map onto condition bits 11..14
			 * (VC/VS/NC/NS) */
			if (opc >= 0xe8 && opc <= 0xeb &&
			    (cond_table[regs->epsw & 0xf] &
			     (1 << ((opc & 0xf) + 3)))
			    ) {
				READ_BYTE(pc+2, &x8);
				next = pc + x8;
				*flags |= SINGLESTEP_BRANCH;
			}
			break;

		case 0xfa:
			if (pc[1] == 0xff) {
				/* CALLS (d16,PC) */
				READ_WORD16(pc + 2, &x16);
				next = pc + x16;
			} else
				next = pc + 4;
			*flags |= SINGLESTEP_BRANCH;
			break;

		case 0xfc:
			x32 = 6;
			if (pc[1] == 0xff) {
				/* CALLS (d32,PC) */
				READ_WORD32(pc + 2, &x32);
			}
			next = pc + x32;
			*flags |= SINGLESTEP_BRANCH;
			break;

			/* LXX (d8,PC) */
			/* SETLB - loads the next four bytes into the LIR reg */
		case 0xd0 ... 0xda:
		case 0xdb:
			/* these interact with the loop-instruction register
			 * and cannot be stepped out of line */
			panic("Can't singlestep Lxx/SETLB\n");
			break;
		}
	}

	return (unsigned)next;
}
/*
 * set up out of place singlestep of some branching instructions
 *
 * Called with regs->pc already pointing at the copy of the probed
 * instruction in cur_kprobe_ss_buf.  For call/return instructions this
 * rewrites the copied instruction (or the saved return address / MDR /
 * stack slot) so that executing the copy traps back into the kprobe
 * machinery instead of really transferring control.  Returns the
 * address at which the temporary breakpoint byte should be planted.
 */
static unsigned __kprobes singlestep_branch_setup(struct pt_regs *regs)
{
	u8 opc, *pc, *sp, *next;

	next = NULL;
	/* regs->pc points into cur_kprobe_ss_buf here, so the writes
	 * below patch the copied instruction, not the kernel text */
	pc = (u8 *) regs->pc;
	sp = (u8 *) (regs + 1);

	switch (pc[0]) {
	case 0xc0 ... 0xca:	/* Bxx (d8,PC) */
	case 0xcc:		/* JMP (d16,PC) */
	case 0xdc:		/* JMP (d32,PC) */
	case 0xf8:		/* Bxx (d8,PC)  3-byte version */
		/* don't really need to do anything except cause trap  */
		next = pc;
		break;

	case 0xcd:		/* CALL (d16,PC) */
		/* rewrite displacement to 5 so the call lands just past
		 * itself, where the breakpoint will sit */
		pc[1] = 5;
		pc[2] = 0;
		next = pc + 5;
		break;

	case 0xdd:		/* CALL (d32,PC) */
		/* likewise, displacement of 7 == insn length */
		pc[1] = 7;
		pc[2] = 0;
		pc[3] = 0;
		pc[4] = 0;
		next = pc + 7;
		break;

	case 0xde:		/* RETF */
		/* point the saved return address (MDR) back at the
		 * breakpoint we are about to plant */
		next = pc + 3;
		regs->mdr = (unsigned) next;
		break;

	case 0xdf:		/* RET */
		/* overwrite the on-stack return address */
		sp += pc[2];
		next = pc + 3;
		*(unsigned *)sp = (unsigned) next;
		break;

	case 0xf0:
		next = pc + 2;
		opc = pc[1];
		if (opc >= 0xf0 && opc <= 0xf3) {
			/* CALLS (An) */
			/* use CALLS (d16,PC) to avoid mucking with An */
			pc[0] = 0xfa;
			pc[1] = 0xff;
			pc[2] = 4;
			pc[3] = 0;
			next = pc + 4;
		} else if (opc >= 0xf4 && opc <= 0xf7) {
			/* JMP (An) */
			next = pc;
		} else if (opc == 0xfc) {
			/* RETS - patch the on-stack return address */
			next = pc + 2;
			*(unsigned *) sp = (unsigned) next;
		} else if (opc == 0xfd) {
			/* RTI - patch the saved PC in the exception frame */
			next = pc + 2;
			*(unsigned *)(sp + 4) = (unsigned) next;
		}
		break;

	case 0xfa:		/* CALLS (d16,PC) */
		/* displacement of 4 == insn length */
		pc[2] = 4;
		pc[3] = 0;
		next = pc + 4;
		break;

	case 0xfc:		/* CALLS (d32,PC) */
		/* displacement of 6 == insn length */
		pc[2] = 6;
		pc[3] = 0;
		pc[4] = 0;
		pc[5] = 0;
		next = pc + 6;
		break;

	case 0xd0 ... 0xda:	/* LXX (d8,PC) */
	case 0xdb:		/* SETLB */
		panic("Can't singlestep Lxx/SETLB\n");
	}

	return (unsigned) next;
}
/* Nothing to validate or pre-compute on this arch; always accept. */
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	return 0;
}
/* Save a copy of the probed instruction for out-of-line single-stepping. */
void __kprobes arch_copy_kprobe(struct kprobe *p)
{
	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE);
}
/* Plant the breakpoint byte over the probed instruction and make it
 * visible to instruction fetch. */
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = BREAKPOINT_INSTRUCTION;
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}
  336. void __kprobes arch_disarm_kprobe(struct kprobe *p)
  337. {
  338. #ifndef CONFIG_MN10300_CACHE_SNOOP
  339. mn10300_dcache_flush();
  340. mn10300_icache_inv();
  341. #endif
  342. }
/* No per-probe resources are allocated on this arch, so nothing to free. */
void arch_remove_kprobe(struct kprobe *p)
{
}
/* Temporarily pull a probe we recursed into: put the original opcode
 * back, rewind the PC to re-execute it, and (when the cache doesn't
 * snoop) force the modified byte out to the instruction stream. */
static inline
void __kprobes disarm_kprobe(struct kprobe *p, struct pt_regs *regs)
{
	*p->addr = p->opcode;
	regs->pc = (unsigned long) p->addr;
#ifndef CONFIG_MN10300_CACHE_SNOOP
	mn10300_dcache_flush();
	mn10300_icache_inv();
#endif
}
/* Arrange to single-step a copy of the probed instruction out of line:
 * copy it into cur_kprobe_ss_buf, redirect the PC there, compute where
 * execution should resume afterwards, and plant a breakpoint just past
 * (or at the target of) the copied instruction. */
static inline
void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long nextpc;

	cur_kprobe_orig_pc = regs->pc;
	memcpy(cur_kprobe_ss_buf, &p->ainsn.insn[0], MAX_INSN_SIZE);
	regs->pc = (unsigned long) cur_kprobe_ss_buf;

	nextpc = find_nextpc(regs, &cur_kprobe_ss_flags);
	if (cur_kprobe_ss_flags & SINGLESTEP_PCREL)
		/* translate the buffer-relative target back to an address
		 * relative to the original probe point */
		cur_kprobe_next_pc = cur_kprobe_orig_pc + (nextpc - regs->pc);
	else
		cur_kprobe_next_pc = nextpc;

	/* branching instructions need special handling */
	if (cur_kprobe_ss_flags & SINGLESTEP_BRANCH)
		nextpc = singlestep_branch_setup(regs);

	cur_kprobe_bp_addr = nextpc;

	/* breakpoint after the stepped copy traps us back into
	 * post_kprobe_handler() via kprobe_exceptions_notify() */
	*(u8 *) nextpc = BREAKPOINT_INSTRUCTION;
	mn10300_dcache_flush_range2((unsigned) cur_kprobe_ss_buf,
				    sizeof(cur_kprobe_ss_buf));
	mn10300_icache_inv();
}
/* Breakpoint-exception entry point: decide whether the breakpoint at
 * regs->pc belongs to a kprobe, run its pre_handler, and set up the
 * out-of-line single-step.  Returns 1 if the exception was consumed,
 * 0 to let the kernel handle it. */
static inline int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	unsigned int *addr = (unsigned int *) regs->pc;

	/* We're in an interrupt, but this is clear and BUG()-safe. */
	preempt_disable();

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		/* We *are* holding lock here, so this is safe.
		   Disarm the probe we just hit, and ignore it. */
		p = get_kprobe(addr);
		if (p) {
			disarm_kprobe(p, regs);
			ret = 1;
		} else {
			/* not a probe breakpoint - give the active probe's
			 * break_handler (jprobes) a chance */
			p = cur_kprobe;
			if (p->break_handler && p->break_handler(p, regs))
				goto ss_probe;
		}
		/* If it's not ours, can't be delete race, (we hold lock). */
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (*addr != BREAKPOINT_INSTRUCTION) {
			/* The breakpoint instruction was removed right after
			 * we hit it.  Another cpu has removed either a
			 * probepoint or a debugger breakpoint at this address.
			 * In either case, no further handling of this
			 * interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}

	kprobe_status = KPROBE_HIT_ACTIVE;
	cur_kprobe = p;
	if (p->pre_handler(p, regs)) {
		/* handler has already set things up, so skip ss setup */
		return 1;
	}

ss_probe:
	prepare_singlestep(p, regs);
	kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	/* balance the preempt_disable() above; the SS paths stay disabled
	 * until post_kprobe_handler() */
	preempt_enable_no_resched();
	return ret;
}
/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "breakpoint"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 */
static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	/* we may need to fixup regs/stack after singlestepping a call insn */
	if (cur_kprobe_ss_flags & SINGLESTEP_BRANCH) {
		/* recompute the side effects (MDR, stacked return address)
		 * relative to the ORIGINAL probe address, since the stepped
		 * copy produced values pointing into the SS buffer */
		regs->pc = cur_kprobe_orig_pc;
		switch (p->ainsn.insn[0]) {
		case 0xcd:	/* CALL (d16,PC) */
			*(unsigned *) regs->sp = regs->mdr = regs->pc + 5;
			break;

		case 0xdd:	/* CALL (d32,PC) */
			/* fixup mdr and return address on stack */
			*(unsigned *) regs->sp = regs->mdr = regs->pc + 7;
			break;

		case 0xf0:
			if (p->ainsn.insn[1] >= 0xf0 &&
			    p->ainsn.insn[1] <= 0xf3) {
				/* CALLS (An) */
				/* fixup MDR and return address on stack */
				regs->mdr = regs->pc + 2;
				*(unsigned *) regs->sp = regs->mdr;
			}
			break;

		case 0xfa:	/* CALLS (d16,PC) */
			/* fixup MDR and return address on stack */
			*(unsigned *) regs->sp = regs->mdr = regs->pc + 4;
			break;

		case 0xfc:	/* CALLS (d32,PC) */
			/* fixup MDR and return address on stack */
			*(unsigned *) regs->sp = regs->mdr = regs->pc + 6;
			break;
		}
	}

	/* resume at the address find_nextpc() computed */
	regs->pc = cur_kprobe_next_pc;
	cur_kprobe_bp_addr = 0;
}
  471. static inline int __kprobes post_kprobe_handler(struct pt_regs *regs)
  472. {
  473. if (!kprobe_running())
  474. return 0;
  475. if (cur_kprobe->post_handler)
  476. cur_kprobe->post_handler(cur_kprobe, regs, 0);
  477. resume_execution(cur_kprobe, regs);
  478. reset_current_kprobe();
  479. preempt_enable_no_resched();
  480. return 1;
  481. }
  482. /* Interrupts disabled, kprobe_lock held. */
  483. static inline
  484. int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
  485. {
  486. if (cur_kprobe->fault_handler &&
  487. cur_kprobe->fault_handler(cur_kprobe, regs, trapnr))
  488. return 1;
  489. if (kprobe_status & KPROBE_HIT_SS) {
  490. resume_execution(cur_kprobe, regs);
  491. reset_current_kprobe();
  492. preempt_enable_no_resched();
  493. }
  494. return 0;
  495. }
  496. /*
  497. * Wrapper routine to for handling exceptions.
  498. */
  499. int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
  500. unsigned long val, void *data)
  501. {
  502. struct die_args *args = data;
  503. switch (val) {
  504. case DIE_BREAKPOINT:
  505. if (cur_kprobe_bp_addr != args->regs->pc) {
  506. if (kprobe_handler(args->regs))
  507. return NOTIFY_STOP;
  508. } else {
  509. if (post_kprobe_handler(args->regs))
  510. return NOTIFY_STOP;
  511. }
  512. break;
  513. case DIE_GPF:
  514. if (kprobe_running() &&
  515. kprobe_fault_handler(args->regs, args->trapnr))
  516. return NOTIFY_STOP;
  517. break;
  518. default:
  519. break;
  520. }
  521. return NOTIFY_DONE;
  522. }
/* Jprobes support. */
/* Register file and top-of-stack snapshot taken by setjmp_pre_handler()
 * and restored by longjmp_break_handler().  Single-instance, like the
 * cur_kprobe_* state above. */
static struct pt_regs jprobe_saved_regs;
static struct pt_regs *jprobe_saved_regs_location;
static kprobe_opcode_t jprobe_saved_stack[MAX_STACK_SIZE];
  527. int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
  528. {
  529. struct jprobe *jp = container_of(p, struct jprobe, kp);
  530. jprobe_saved_regs_location = regs;
  531. memcpy(&jprobe_saved_regs, regs, sizeof(struct pt_regs));
  532. /* Save a whole stack frame, this gets arguments
  533. * pushed onto the stack after using up all the
  534. * arg registers.
  535. */
  536. memcpy(&jprobe_saved_stack, regs + 1, sizeof(jprobe_saved_stack));
  537. /* setup return addr to the jprobe handler routine */
  538. regs->pc = (unsigned long) jp->entry;
  539. return 1;
  540. }
/* Called from the jprobe handler to return control to the probed
 * function: rewind SP to the saved pt_regs location and execute the
 * 0xff breakpoint byte at jprobe_return_bp_addr, which traps into
 * longjmp_break_handler() below. */
void __kprobes jprobe_return(void)
{
	void *orig_sp = jprobe_saved_regs_location + 1;

	preempt_enable_no_resched();
	asm volatile("	mov	%0,sp\n"
		     ".globl jprobe_return_bp_addr\n"
		     "jprobe_return_bp_addr:\n\t"
		     "	.byte	0xff\n"
		     : : "d" (orig_sp));
}
  551. extern void jprobe_return_bp_addr(void);
  552. int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
  553. {
  554. u8 *addr = (u8 *) regs->pc;
  555. if (addr == (u8 *) jprobe_return_bp_addr) {
  556. if (jprobe_saved_regs_location != regs) {
  557. printk(KERN_ERR"JPROBE:"
  558. " Current regs (%p) does not match saved regs"
  559. " (%p).\n",
  560. regs, jprobe_saved_regs_location);
  561. BUG();
  562. }
  563. /* Restore old register state.
  564. */
  565. memcpy(regs, &jprobe_saved_regs, sizeof(struct pt_regs));
  566. memcpy(regs + 1, &jprobe_saved_stack,
  567. sizeof(jprobe_saved_stack));
  568. return 1;
  569. }
  570. return 0;
  571. }
/* No arch-specific kprobes initialisation required. */
int __init arch_init_kprobes(void)
{
	return 0;
}