/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks goes to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>

#include <trace/syscall.h>

#include <asm/cacheflush.h>
#include <asm/kprobes.h>
#include <asm/ftrace.h>
#include <asm/nops.h>

#ifdef CONFIG_DYNAMIC_FTRACE

int ftrace_arch_code_modify_prepare(void)
{
	set_kernel_text_rw();
	set_all_modules_text_rw();
	return 0;
}

int ftrace_arch_code_modify_post_process(void)
{
	set_all_modules_text_ro();
	set_kernel_text_ro();
	return 0;
}
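
/*
 * Layout of the 5-byte mcount call site: a 0xe8 (call rel32) opcode
 * followed by a 32-bit displacement relative to the next instruction.
 */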
union ftrace_code_union {
	char code[MCOUNT_INSN_SIZE];
	struct {
		unsigned char e8;
		int offset;
	} __attribute__((packed));
};

static int ftrace_calc_offset(long ip, long addr)
{
	return (int)(addr - ip);
}

static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	static union ftrace_code_union calc;

	calc.e8 = 0xe8;
	calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);

	/*
	 * No locking needed, this must be called via kstop_machine
	 * which in essence is like running on a uniprocessor machine.
	 */
	return calc.code;
}

static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}

static unsigned long text_ip_addr(unsigned long ip)
{
	/*
	 * On x86_64, kernel text mappings are mapped read-only, so we use
	 * the kernel identity mapping instead of the kernel text mapping
	 * to modify the kernel text.
	 *
	 * For 32bit kernels, these mappings are the same and we can use
	 * the kernel identity mapping to modify code.
	 */
	if (within(ip, (unsigned long)_text, (unsigned long)_etext))
		ip = (unsigned long)__va(__pa_symbol(ip));

	return ip;
}

static const unsigned char *ftrace_nop_replace(void)
{
	return ideal_nops[NOP_ATOMIC5];
}

static int
ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
			  unsigned const char *new_code)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];

	ftrace_expected = old_code;

	/*
	 * Note:
	 * We are paranoid about modifying text, because if a bug were to
	 * happen, it could cause us to read or write to someplace that
	 * could cause harm. Carefully read and modify the code with
	 * probe_kernel_*(), and make sure what we read is what we expected
	 * it to be before modifying it.
	 */

	/* read the text we want to modify */
	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;

	ip = text_ip_addr(ip);

	/* replace the text with the new text */
	if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
		return -EPERM;

	sync_core();

	return 0;
}

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned const char *new, *old;
	unsigned long ip = rec->ip;

	old = ftrace_call_replace(ip, addr);
	new = ftrace_nop_replace();

	/*
	 * On boot up, and when modules are loaded, the MCOUNT_ADDR
	 * is converted to a nop, and will never become MCOUNT_ADDR
	 * again. This code is either running before SMP (on boot up)
	 * or before the code will ever be executed (module load).
	 * We do not want to use the breakpoint version in this case,
	 * just modify the code directly.
	 */
	if (addr == MCOUNT_ADDR)
		return ftrace_modify_code_direct(rec->ip, old, new);

	ftrace_expected = NULL;

	/* Normal cases use add_brk_on_nop */
	WARN_ONCE(1, "invalid use of ftrace_make_nop");
	return -EINVAL;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned const char *new, *old;
	unsigned long ip = rec->ip;

	old = ftrace_nop_replace();
	new = ftrace_call_replace(ip, addr);

	/* Should only be called when a module is loaded */
	return ftrace_modify_code_direct(rec->ip, old, new);
}

/*
 * The modifying_ftrace_code is used to tell the breakpoint
 * handler to call ftrace_int3_handler(). If it fails to
 * call this handler for a breakpoint added by ftrace, then
 * the kernel may crash.
 *
 * As atomic writes on x86 do not need a barrier, we do not
 * need to add smp_mb()s for this to work. It is also considered
 * that we can not read the modifying_ftrace_code before
 * executing the breakpoint. That would be quite remarkable if
 * it could do that. Here's the flow that is required:
 *
 *   CPU-0                          CPU-1
 *
 *   atomic_inc(mfc);
 *   write int3s
 *                                  <trap-int3> // implicit (r)mb
 *                                  if (atomic_read(mfc))
 *                                          call ftrace_int3_handler()
 *
 * Then when we are finished:
 *
 *   atomic_dec(mfc);
 *
 * If we hit a breakpoint that was not set by ftrace, it does not
 * matter if ftrace_int3_handler() is called or not. It will
 * simply be ignored. But it is crucial that an ftrace nop/caller
 * breakpoint is handled. No other user should ever place a
 * breakpoint on an ftrace nop/caller location. It must only
 * be done by this code.
 */
atomic_t modifying_ftrace_code __read_mostly;

static int
ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
		   unsigned const char *new_code);

/*
 * Should never be called:
 *  As it is only called by __ftrace_replace_code() which is called by
 *  ftrace_replace_code() that x86 overrides, and by ftrace_update_code()
 *  which is called to turn mcount into nops or nops into function calls
 *  but not to convert a function from not using regs to one that uses
 *  regs, which ftrace_modify_call() is for.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	WARN_ON(1);
	ftrace_expected = NULL;
	return -EINVAL;
}

static unsigned long ftrace_update_func;

static int update_ftrace_func(unsigned long ip, void *new)
{
	unsigned char old[MCOUNT_INSN_SIZE];
	int ret;

	memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);

	ftrace_update_func = ip;
	/* Make sure the breakpoints see the ftrace_update_func update */
	smp_wmb();

	/* See comment above by declaration of modifying_ftrace_code */
	atomic_inc(&modifying_ftrace_code);

	ret = ftrace_modify_code(ip, old, new);

	atomic_dec(&modifying_ftrace_code);

	return ret;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	unsigned char *new;
	int ret;

	new = ftrace_call_replace(ip, (unsigned long)func);
	ret = update_ftrace_func(ip, new);

	/* Also update the regs callback function */
	if (!ret) {
		ip = (unsigned long)(&ftrace_regs_call);
		new = ftrace_call_replace(ip, (unsigned long)func);
		ret = update_ftrace_func(ip, new);
	}

	return ret;
}
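
/*
 * Is this the site most recently handed to update_ftrace_func()?
 * The int3 handler uses this to recognize a breakpoint sitting on
 * ftrace_call/ftrace_regs_call (or another site patched through
 * update_ftrace_func()) rather than on an mcount location.
 */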
static int is_ftrace_caller(unsigned long ip)
{
	if (ip == ftrace_update_func)
		return 1;

	return 0;
}

/*
 * A breakpoint was added to the code address we are about to
 * modify, and this is the handler that will just skip over it.
 * We are either changing a nop into a trace call, or a trace
 * call to a nop. While the change is taking place, we treat
 * it just like it was a nop.
 */
int ftrace_int3_handler(struct pt_regs *regs)
{
	unsigned long ip;

	if (WARN_ON_ONCE(!regs))
		return 0;

	ip = regs->ip - 1;
	if (!ftrace_location(ip) && !is_ftrace_caller(ip))
		return 0;

	regs->ip += MCOUNT_INSN_SIZE - 1;

	return 1;
}
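
/*
 * Write size bytes at ip through the kernel identity mapping. No
 * verification is done here; callers must already have checked that
 * the bytes being replaced are what they expect.
 */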
static int ftrace_write(unsigned long ip, const char *val, int size)
{
	ip = text_ip_addr(ip);

	if (probe_kernel_write((void *)ip, val, size))
		return -EPERM;

	return 0;
}

static int add_break(unsigned long ip, const char *old)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];
	unsigned char brk = BREAKPOINT_INSTRUCTION;

	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	ftrace_expected = old;

	/* Make sure it is what we expect it to be */
	if (memcmp(replaced, old, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;

	return ftrace_write(ip, &brk, 1);
}

static int add_brk_on_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned const char *old;
	unsigned long ip = rec->ip;

	old = ftrace_call_replace(ip, addr);

	return add_break(rec->ip, old);
}

static int add_brk_on_nop(struct dyn_ftrace *rec)
{
	unsigned const char *old;

	old = ftrace_nop_replace();

	return add_break(rec->ip, old);
}
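
/*
 * First step of the update: for every record that is going to change,
 * verify the bytes currently at the call site and cover the first byte
 * with an int3 breakpoint, so no CPU can execute a half-written
 * instruction.
 */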
static int add_breakpoints(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr;
	int ret;

	ftrace_addr = ftrace_get_addr_curr(rec);

	ret = ftrace_test_record(rec, enable);

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;

	case FTRACE_UPDATE_MAKE_CALL:
		/* converting nop to call */
		return add_brk_on_nop(rec);

	case FTRACE_UPDATE_MODIFY_CALL:
	case FTRACE_UPDATE_MAKE_NOP:
		/* converting a call to a nop */
		return add_brk_on_call(rec, ftrace_addr);
	}
	return 0;
}

/*
 * On error, we need to remove breakpoints. This needs to
 * be done carefully. If the address does not currently have a
 * breakpoint, we know we are done. Otherwise, we look at the
 * remaining 4 bytes of the instruction. If it matches a nop
 * we replace the breakpoint with the nop. Otherwise we replace
 * it with the call instruction.
 */
static int remove_breakpoint(struct dyn_ftrace *rec)
{
	unsigned char ins[MCOUNT_INSN_SIZE];
	unsigned char brk = BREAKPOINT_INSTRUCTION;
	const unsigned char *nop;
	unsigned long ftrace_addr;
	unsigned long ip = rec->ip;

	/* If we fail the read, just give up */
	if (probe_kernel_read(ins, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* If this does not have a breakpoint, we are done */
	if (ins[0] != brk)
		return 0;

	nop = ftrace_nop_replace();

	/*
	 * If the last 4 bytes of the instruction do not match
	 * a nop, then we assume that this is a call to ftrace_addr.
	 */
	if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0) {
		/*
		 * For extra paranoia, we check if the breakpoint is on
		 * a call that would actually jump to the ftrace_addr.
		 * If not, don't touch the breakpoint, we may just create
		 * a disaster.
		 */
		ftrace_addr = ftrace_get_addr_new(rec);
		nop = ftrace_call_replace(ip, ftrace_addr);

		if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) == 0)
			goto update;

		/* Check both ftrace_addr and ftrace_old_addr */
		ftrace_addr = ftrace_get_addr_curr(rec);
		nop = ftrace_call_replace(ip, ftrace_addr);

		ftrace_expected = nop;

		if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0)
			return -EINVAL;
	}

 update:
	return ftrace_write(ip, nop, 1);
}
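
/*
 * Second step: with the int3 still covering the first byte, write the
 * remaining MCOUNT_INSN_SIZE - 1 bytes of the new instruction behind it.
 */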
static int add_update_code(unsigned long ip, unsigned const char *new)
{
	/* skip breakpoint */
	ip++;
	new++;
	return ftrace_write(ip, new, MCOUNT_INSN_SIZE - 1);
}

static int add_update_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned const char *new;

	new = ftrace_call_replace(ip, addr);
	return add_update_code(ip, new);
}

static int add_update_nop(struct dyn_ftrace *rec)
{
	unsigned long ip = rec->ip;
	unsigned const char *new;

	new = ftrace_nop_replace();
	return add_update_code(ip, new);
}

static int add_update(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr;
	int ret;

	ret = ftrace_test_record(rec, enable);

	ftrace_addr = ftrace_get_addr_new(rec);

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;

	case FTRACE_UPDATE_MODIFY_CALL:
	case FTRACE_UPDATE_MAKE_CALL:
		/* converting nop to call */
		return add_update_call(rec, ftrace_addr);

	case FTRACE_UPDATE_MAKE_NOP:
		/* converting a call to a nop */
		return add_update_nop(rec);
	}

	return 0;
}
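
/*
 * Third step: replace the int3 byte with the first byte of the new
 * instruction, completing the conversion to a call or a nop.
 */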
static int finish_update_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned const char *new;

	new = ftrace_call_replace(ip, addr);

	return ftrace_write(ip, new, 1);
}

static int finish_update_nop(struct dyn_ftrace *rec)
{
	unsigned long ip = rec->ip;
	unsigned const char *new;

	new = ftrace_nop_replace();

	return ftrace_write(ip, new, 1);
}

static int finish_update(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr;
	int ret;

	ret = ftrace_update_record(rec, enable);

	ftrace_addr = ftrace_get_addr_new(rec);

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;

	case FTRACE_UPDATE_MODIFY_CALL:
	case FTRACE_UPDATE_MAKE_CALL:
		/* converting nop to call */
		return finish_update_call(rec, ftrace_addr);

	case FTRACE_UPDATE_MAKE_NOP:
		/* converting a call to a nop */
		return finish_update_nop(rec);
	}

	return 0;
}
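
/*
 * run_sync() executes a serializing sync_core() on every CPU so that all
 * cores re-fetch the modified instruction bytes before the next step of
 * the update proceeds.
 */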
static void do_sync_core(void *data)
{
	sync_core();
}

static void run_sync(void)
{
	int enable_irqs = irqs_disabled();

	/* We may be called with interrupts disabled (on bootup). */
	if (enable_irqs)
		local_irq_enable();
	on_each_cpu(do_sync_core, NULL, 1);
	if (enable_irqs)
		local_irq_disable();
}

void ftrace_replace_code(int enable)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;
	const char *report = "adding breakpoints";
	int count = 0;
	int ret;

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		ret = add_breakpoints(rec, enable);
		if (ret)
			goto remove_breakpoints;
		count++;
	}

	run_sync();

	report = "updating code";
	count = 0;

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		ret = add_update(rec, enable);
		if (ret)
			goto remove_breakpoints;
		count++;
	}

	run_sync();

	report = "removing breakpoints";
	count = 0;

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		ret = finish_update(rec, enable);
		if (ret)
			goto remove_breakpoints;
		count++;
	}

	run_sync();

	return;

 remove_breakpoints:
	pr_warn("Failed on %s (%d):\n", report, count);
	ftrace_bug(ret, rec);
	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);
		/*
		 * Breakpoints are handled only when this function is in
		 * progress. The system could not work with them.
		 */
		if (remove_breakpoint(rec))
			BUG();
	}
	run_sync();
}

static int
ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
		   unsigned const char *new_code)
{
	int ret;

	ret = add_break(ip, old_code);
	if (ret)
		goto out;

	run_sync();

	ret = add_update_code(ip, new_code);
	if (ret)
		goto fail_update;

	run_sync();

	ret = ftrace_write(ip, new_code, 1);
	/*
	 * The breakpoint is handled only when this function is in progress.
	 * The system could not work if we could not remove it.
	 */
	BUG_ON(ret);
 out:
	run_sync();
	return ret;

 fail_update:
	/* Also here the system could not work with the breakpoint */
	if (ftrace_write(ip, old_code, 1))
		BUG();
	goto out;
}

void arch_ftrace_update_code(int command)
{
	/* See comment above by declaration of modifying_ftrace_code */
	atomic_inc(&modifying_ftrace_code);

	ftrace_modify_all_code(command);

	atomic_dec(&modifying_ftrace_code);
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}

#if defined(CONFIG_X86_64) || defined(CONFIG_FUNCTION_GRAPH_TRACER)
static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
{
	static union ftrace_code_union calc;

	/* Jmp not a call (ignore the .e8) */
	calc.e8 = 0xe9;
	calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);

	/*
	 * ftrace external locks synchronize the access to the static variable.
	 */
	return calc.code;
}
#endif

/* Currently only x86_64 supports dynamic trampolines */
#ifdef CONFIG_X86_64

#ifdef CONFIG_MODULES
#include <linux/moduleloader.h>
/* Module allocation simplifies allocating memory for code */
static inline void *alloc_tramp(unsigned long size)
{
	return module_alloc(size);
}
static inline void tramp_free(void *tramp)
{
	module_memfree(tramp);
}
#else
/* Trampolines can only be created if modules are supported */
static inline void *alloc_tramp(unsigned long size)
{
	return NULL;
}
static inline void tramp_free(void *tramp) { }
#endif

/* Defined as markers to the end of the ftrace default trampolines */
extern void ftrace_regs_caller_end(void);
extern void ftrace_epilogue(void);
extern void ftrace_caller_op_ptr(void);
extern void ftrace_regs_caller_op_ptr(void);

/* movq function_trace_op(%rip), %rdx */
/* 0x48 0x8b 0x15 <offset-to-ftrace_trace_op (4 bytes)> */
#define OP_REF_SIZE	7

/*
 * The ftrace_ops is passed to the function callback. Since the
 * trampoline only services a single ftrace_ops, we can pass in
 * that ops directly.
 *
 * The ftrace_op_code_union is used to create a pointer to the
 * ftrace_ops that will be passed to the callback function.
 */
union ftrace_op_code_union {
	char code[OP_REF_SIZE];
	struct {
		char op[3];
		int offset;
	} __attribute__((packed));
};

static unsigned long
create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
{
	unsigned const char *jmp;
	unsigned long start_offset;
	unsigned long end_offset;
	unsigned long op_offset;
	unsigned long offset;
	unsigned long size;
	unsigned long ip;
	unsigned long *ptr;
	void *trampoline;
	/* 48 8b 15 <offset> is movq <offset>(%rip), %rdx */
	unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 };
	union ftrace_op_code_union op_ptr;
	int ret;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
		start_offset = (unsigned long)ftrace_regs_caller;
		end_offset = (unsigned long)ftrace_regs_caller_end;
		op_offset = (unsigned long)ftrace_regs_caller_op_ptr;
	} else {
		start_offset = (unsigned long)ftrace_caller;
		end_offset = (unsigned long)ftrace_epilogue;
		op_offset = (unsigned long)ftrace_caller_op_ptr;
	}

	size = end_offset - start_offset;

	/*
	 * Allocate enough size to store the ftrace_caller code,
	 * the jmp to ftrace_epilogue, as well as the address of
	 * the ftrace_ops this trampoline is used for.
	 */
	trampoline = alloc_tramp(size + MCOUNT_INSN_SIZE + sizeof(void *));
	if (!trampoline)
		return 0;

	*tramp_size = size + MCOUNT_INSN_SIZE + sizeof(void *);

	/* Copy ftrace_caller onto the trampoline memory */
	ret = probe_kernel_read(trampoline, (void *)start_offset, size);
	if (WARN_ON(ret < 0)) {
		tramp_free(trampoline);
		return 0;
	}

	ip = (unsigned long)trampoline + size;

	/* The trampoline ends with a jmp to ftrace_epilogue */
	jmp = ftrace_jmp_replace(ip, (unsigned long)ftrace_epilogue);
	memcpy(trampoline + size, jmp, MCOUNT_INSN_SIZE);

	/*
	 * The address of the ftrace_ops that is used for this trampoline
	 * is stored at the end of the trampoline. This will be used to
	 * load the third parameter for the callback. Basically, that
	 * location at the end of the trampoline takes the place of
	 * the global function_trace_op variable.
	 */
	ptr = (unsigned long *)(trampoline + size + MCOUNT_INSN_SIZE);
	*ptr = (unsigned long)ops;

	op_offset -= start_offset;
	memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE);

	/* Are we pointing to the reference? */
	if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0)) {
		tramp_free(trampoline);
		return 0;
	}

	/* Load the contents of ptr into the callback parameter */
	offset = (unsigned long)ptr;
	offset -= (unsigned long)trampoline + op_offset + OP_REF_SIZE;

	op_ptr.offset = offset;

	/* put in the new offset to the ftrace_ops */
	memcpy(trampoline + op_offset, &op_ptr, OP_REF_SIZE);

	/* ALLOC_TRAMP flags lets us know we created it */
	ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;

	return (unsigned long)trampoline;
}
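
/*
 * Offset of the ftrace_call/ftrace_regs_call site from the start of the
 * corresponding caller. The same offset locates the call site inside a
 * trampoline copied from that caller.
 */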
static unsigned long calc_trampoline_call_offset(bool save_regs)
{
	unsigned long start_offset;
	unsigned long call_offset;

	if (save_regs) {
		start_offset = (unsigned long)ftrace_regs_caller;
		call_offset = (unsigned long)ftrace_regs_call;
	} else {
		start_offset = (unsigned long)ftrace_caller;
		call_offset = (unsigned long)ftrace_call;
	}

	return call_offset - start_offset;
}
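
/*
 * Point the call inside ops->trampoline at the callback currently
 * selected for this ops, creating the trampoline on first use.
 */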
void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
{
	ftrace_func_t func;
	unsigned char *new;
	unsigned long offset;
	unsigned long ip;
	unsigned int size;
	int ret;

	if (ops->trampoline) {
		/*
		 * The ftrace_ops caller may set up its own trampoline.
		 * In such a case, this code must not modify it.
		 */
		if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
			return;
	} else {
		ops->trampoline = create_trampoline(ops, &size);
		if (!ops->trampoline)
			return;
		ops->trampoline_size = size;
	}

	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
	ip = ops->trampoline + offset;

	func = ftrace_ops_get_func(ops);

	/* Do a safe modify in case the trampoline is executing */
	new = ftrace_call_replace(ip, (unsigned long)func);
	ret = update_ftrace_func(ip, new);

	/* The update should never fail */
	WARN_ON(ret);
}

/* Return the address of the function the trampoline calls */
static void *addr_from_call(void *ptr)
{
	union ftrace_code_union calc;
	int ret;

	ret = probe_kernel_read(&calc, ptr, MCOUNT_INSN_SIZE);
	if (WARN_ON_ONCE(ret < 0))
		return NULL;

	/* Make sure this is a call */
	if (WARN_ON_ONCE(calc.e8 != 0xe8)) {
		pr_warn("Expected e8, got %x\n", calc.e8);
		return NULL;
	}

	return ptr + MCOUNT_INSN_SIZE + calc.offset;
}

void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
			   unsigned long frame_pointer);

/*
 * If the ops->trampoline was not allocated, then it probably
 * has a static trampoline func, or is the ftrace caller itself.
 */
static void *static_tramp_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	unsigned long offset;
	bool save_regs = rec->flags & FTRACE_FL_REGS_EN;
	void *ptr;

	if (ops && ops->trampoline) {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
		/*
		 * We only know about function graph tracer setting as static
		 * trampoline.
		 */
		if (ops->trampoline == FTRACE_GRAPH_ADDR)
			return (void *)prepare_ftrace_return;
#endif
		return NULL;
	}

	offset = calc_trampoline_call_offset(save_regs);

	if (save_regs)
		ptr = (void *)FTRACE_REGS_ADDR + offset;
	else
		ptr = (void *)FTRACE_ADDR + offset;

	return addr_from_call(ptr);
}

void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	unsigned long offset;

	/* If we didn't allocate this trampoline, consider it static */
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return static_tramp_func(ops, rec);

	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
	return addr_from_call((void *)ops->trampoline + offset);
}

void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return;

	tramp_free((void *)ops->trampoline);
	ops->trampoline = 0;
}

#endif /* CONFIG_X86_64 */

#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
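
/*
 * Patch the 5-byte site at ip with a jmp to func, using the same
 * breakpoint-protected update_ftrace_func() path that call sites use.
 */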
static int ftrace_mod_jmp(unsigned long ip, void *func)
{
	unsigned char *new;

	new = ftrace_jmp_replace(ip, (unsigned long)func);

	return update_ftrace_func(ip, new);
}

int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);

	return ftrace_mod_jmp(ip, &ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);

	return ftrace_mod_jmp(ip, &ftrace_stub);
}

#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
			   unsigned long frame_pointer)
{
	unsigned long old;
	int faulted;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)
				&return_to_handler;

	/*
	 * When resuming from suspend-to-ram, this function can be indirectly
	 * called from early CPU startup code while the CPU is in real mode,
	 * which would fail miserably. Make sure the stack pointer is a
	 * virtual address.
	 *
	 * This check isn't as accurate as virt_addr_valid(), but it should be
	 * good enough for this purpose, and it's fast.
	 */
	if (unlikely((long)__builtin_frame_address(0) >= 0))
		return;

	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Protect against a fault, even if it shouldn't
	 * happen. This tool is too intrusive to
	 * ignore such a protection.
	 */
	asm volatile(
		"1: " _ASM_MOV " (%[parent]), %[old]\n"
		"2: " _ASM_MOV " %[return_hooker], (%[parent])\n"
		"   movl $0, %[faulted]\n"
		"3:\n"

		".section .fixup, \"ax\"\n"
		"4: movl $1, %[faulted]\n"
		"   jmp 3b\n"
		".previous\n"

		_ASM_EXTABLE(1b, 4b)
		_ASM_EXTABLE(2b, 4b)

		: [old] "=&r" (old), [faulted] "=r" (faulted)
		: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
		: "memory"
	);

	if (unlikely(faulted)) {
		ftrace_graph_stop();
		WARN_ON(1);
		return;
	}

	trace.func = self_addr;
	trace.depth = current->curr_ret_stack + 1;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		*parent = old;
		return;
	}

	if (ftrace_push_return_trace(old, self_addr, &trace.depth,
				     frame_pointer, parent) == -EBUSY) {
		*parent = old;
		return;
	}
}

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */