trace_event_perf.c

/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/security.h>
#include "trace.h"

static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];
/*
 * Force it to be aligned to unsigned long to avoid misaligned access
 * surprises
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
        perf_trace_t;
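/*
 * Worked example (a hedged sketch, not part of the build): assuming
 * PERF_MAX_TRACE_SIZE is 2048, as in kernels of this vintage, and
 * 8-byte longs, the typedef above works out to
 *
 *      typedef unsigned long perf_trace_t[256];        // 2048 / 8
 *
 * i.e. a 2048-byte buffer whose element type guarantees natural
 * unsigned-long alignment for the per-CPU allocations below.
 */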
/* Count the events in use (per event id, not per instance) */
static int total_ref_count;
static int perf_trace_event_perm(struct trace_event_call *tp_event,
                                 struct perf_event *p_event)
{
        int ret;

        if (tp_event->perf_perm) {
                ret = tp_event->perf_perm(tp_event, p_event);
                if (ret)
                        return ret;
        }

        /*
         * We checked and allowed the creation of the parent,
         * so allow children without checking.
         */
        if (p_event->parent)
                return 0;

        /*
         * It's ok to check the current process (owner) permissions here,
         * because the code below is called only via the perf_event_open
         * syscall.
         */

        /* The ftrace function trace is allowed only for root. */
        if (ftrace_event_is_function(tp_event)) {
                ret = perf_allow_tracepoint(&p_event->attr);
                if (ret)
                        return ret;

                if (!is_sampling_event(p_event))
                        return 0;

                /*
                 * We don't allow user-space callchains for the function
                 * trace event, due to issues with page faults while
                 * tracing the page fault handler and its overall
                 * trickiness.
                 */
                if (!p_event->attr.exclude_callchain_user)
                        return -EINVAL;

                /*
                 * Same reason to disable user stack dumps as for
                 * user-space callchains above.
                 */
                if (p_event->attr.sample_type & PERF_SAMPLE_STACK_USER)
                        return -EINVAL;
        }

        /* No tracing, just counting, so no obvious leak */
        if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
                return 0;

        /* Some events are ok to be traced by non-root users... */
        if (p_event->attach_state == PERF_ATTACH_TASK) {
                if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
                        return 0;
        }

        /*
         * ...otherwise raw tracepoint data can be a severe data leak,
         * so only allow root to have these.
         */
        ret = perf_allow_tracepoint(&p_event->attr);
        if (ret)
                return ret;

        return 0;
}
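/*
 * Example (hypothetical user-space sketch, not part of this file): a
 * sampling perf_event_attr that satisfies the function-trace checks
 * above -- user-space callchains and user stack dumps must both be off:
 *
 *      struct perf_event_attr attr = {
 *              .type                    = PERF_TYPE_TRACEPOINT,
 *              .size                    = sizeof(attr),
 *              .sample_period           = 1,
 *              .sample_type             = PERF_SAMPLE_RAW,  // no PERF_SAMPLE_STACK_USER
 *              .exclude_callchain_user  = 1,
 *      };
 */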
static int perf_trace_event_reg(struct trace_event_call *tp_event,
                                struct perf_event *p_event)
{
        struct hlist_head __percpu *list;
        int ret = -ENOMEM;
        int cpu;

        p_event->tp_event = tp_event;
        if (tp_event->perf_refcount++ > 0)
                return 0;

        list = alloc_percpu(struct hlist_head);
        if (!list)
                goto fail;

        for_each_possible_cpu(cpu)
                INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

        tp_event->perf_events = list;

        if (!total_ref_count) {
                char __percpu *buf;
                int i;

                for (i = 0; i < PERF_NR_CONTEXTS; i++) {
                        buf = (char __percpu *)alloc_percpu(perf_trace_t);
                        if (!buf)
                                goto fail;

                        perf_trace_buf[i] = buf;
                }
        }

        ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
        if (ret)
                goto fail;

        total_ref_count++;
        return 0;

fail:
        if (!total_ref_count) {
                int i;

                for (i = 0; i < PERF_NR_CONTEXTS; i++) {
                        free_percpu(perf_trace_buf[i]);
                        perf_trace_buf[i] = NULL;
                }
        }

        if (!--tp_event->perf_refcount) {
                free_percpu(tp_event->perf_events);
                tp_event->perf_events = NULL;
        }

        return ret;
}
static void perf_trace_event_unreg(struct perf_event *p_event)
{
        struct trace_event_call *tp_event = p_event->tp_event;
        int i;

        if (--tp_event->perf_refcount > 0)
                goto out;

        tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);

        /*
         * Ensure our callback won't be called anymore. The buffers
         * will be freed after that.
         */
        tracepoint_synchronize_unregister();

        free_percpu(tp_event->perf_events);
        tp_event->perf_events = NULL;

        if (!--total_ref_count) {
                for (i = 0; i < PERF_NR_CONTEXTS; i++) {
                        free_percpu(perf_trace_buf[i]);
                        perf_trace_buf[i] = NULL;
                }
        }
out:
        module_put(tp_event->mod);
}
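/*
 * Illustration (hedged sketch): with two perf events A and B attached to
 * the same trace event, the reg/unreg pair above nests like this:
 *
 *      perf_trace_event_reg(A)         // refcount 0 -> 1: allocs the hlists,
 *                                      // maybe the global perf_trace_buf[]
 *      perf_trace_event_reg(B)         // refcount 1 -> 2: no allocation
 *      perf_trace_event_unreg(B)       // refcount 2 -> 1: nothing freed
 *      perf_trace_event_unreg(A)       // refcount 1 -> 0: unregister + free
 */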
static int perf_trace_event_open(struct perf_event *p_event)
{
        struct trace_event_call *tp_event = p_event->tp_event;

        return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
}

static void perf_trace_event_close(struct perf_event *p_event)
{
        struct trace_event_call *tp_event = p_event->tp_event;

        tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
}
static int perf_trace_event_init(struct trace_event_call *tp_event,
                                 struct perf_event *p_event)
{
        int ret;

        ret = perf_trace_event_perm(tp_event, p_event);
        if (ret)
                return ret;

        ret = perf_trace_event_reg(tp_event, p_event);
        if (ret)
                return ret;

        ret = perf_trace_event_open(p_event);
        if (ret) {
                perf_trace_event_unreg(p_event);
                return ret;
        }

        return 0;
}
int perf_trace_init(struct perf_event *p_event)
{
        struct trace_event_call *tp_event;
        u64 event_id = p_event->attr.config;
        int ret = -EINVAL;

        mutex_lock(&event_mutex);
        list_for_each_entry(tp_event, &ftrace_events, list) {
                if (tp_event->event.type == event_id &&
                    tp_event->class && tp_event->class->reg &&
                    try_module_get(tp_event->mod)) {
                        ret = perf_trace_event_init(tp_event, p_event);
                        if (ret)
                                module_put(tp_event->mod);
                        break;
                }
        }
        mutex_unlock(&event_mutex);

        return ret;
}
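/*
 * Example (hypothetical user-space sketch, not part of this file): the
 * attr.config matched against tp_event->event.type above is the numeric
 * id that tracefs exports per event, e.g.:
 *
 *      // id = contents of /sys/kernel/tracing/events/sched/sched_switch/id
 *      struct perf_event_attr attr = {
 *              .type           = PERF_TYPE_TRACEPOINT,
 *              .size           = sizeof(attr),
 *              .config         = id,
 *              .sample_type    = PERF_SAMPLE_RAW,
 *              .sample_period  = 1,
 *      };
 *      int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 */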
void perf_trace_destroy(struct perf_event *p_event)
{
        mutex_lock(&event_mutex);
        perf_trace_event_close(p_event);
        perf_trace_event_unreg(p_event);
        mutex_unlock(&event_mutex);
}
int perf_trace_add(struct perf_event *p_event, int flags)
{
        struct trace_event_call *tp_event = p_event->tp_event;
        struct hlist_head __percpu *pcpu_list;
        struct hlist_head *list;

        pcpu_list = tp_event->perf_events;
        if (WARN_ON_ONCE(!pcpu_list))
                return -EINVAL;

        if (!(flags & PERF_EF_START))
                p_event->hw.state = PERF_HES_STOPPED;

        list = this_cpu_ptr(pcpu_list);
        hlist_add_head_rcu(&p_event->hlist_entry, list);

        return tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event);
}

void perf_trace_del(struct perf_event *p_event, int flags)
{
        struct trace_event_call *tp_event = p_event->tp_event;

        hlist_del_rcu(&p_event->hlist_entry);
        tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event);
}
void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp)
{
        char *raw_data;
        int rctx;

        BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

        if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
                      "perf buffer not large enough"))
                return NULL;

        *rctxp = rctx = perf_swevent_get_recursion_context();
        if (rctx < 0)
                return NULL;

        if (regs)
                *regs = this_cpu_ptr(&__perf_regs[rctx]);

        raw_data = this_cpu_ptr(perf_trace_buf[rctx]);

        /* Zero the dead bytes from the alignment so we don't leak stack to user space */
        memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));

        return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_alloc);
NOKPROBE_SYMBOL(perf_trace_buf_alloc);
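/*
 * Usage sketch (hedged; see perf_ftrace_function_call() below for a real
 * in-tree caller): the rctx returned through *rctxp pins one of the
 * PERF_NR_CONTEXTS recursion contexts, which is released when the record
 * is submitted:
 *
 *      entry = perf_trace_buf_alloc(size, &regs, &rctx);
 *      if (!entry)
 *              return;
 *      // ... fill in the record ...
 *      perf_trace_buf_submit(entry, size, rctx, type, 1, regs,
 *                            head, NULL, event);
 */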
void perf_trace_buf_update(void *record, u16 type)
{
        struct trace_entry *entry = record;
        int pc = preempt_count();
        unsigned long flags;

        local_save_flags(flags);
        tracing_generic_entry_update(entry, flags, pc);
        entry->type = type;
}
NOKPROBE_SYMBOL(perf_trace_buf_update);
#ifdef CONFIG_FUNCTION_TRACER
static void
perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *ops, struct pt_regs *pt_regs)
{
        struct perf_event *event;
        struct ftrace_entry *entry;
        struct hlist_head *head;
        struct pt_regs regs;
        int rctx;

        head = this_cpu_ptr(event_function.perf_events);
        if (hlist_empty(head))
                return;

#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
                    sizeof(u64)) - sizeof(u32))

        BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);

        memset(&regs, 0, sizeof(regs));
        perf_fetch_caller_regs(&regs);

        entry = perf_trace_buf_alloc(ENTRY_SIZE, NULL, &rctx);
        if (!entry)
                return;

        entry->ip = ip;
        entry->parent_ip = parent_ip;
        event = container_of(ops, struct perf_event, ftrace_ops);
        perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, TRACE_FN,
                              1, &regs, head, NULL, event);

#undef ENTRY_SIZE
}
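/*
 * Worked example for ENTRY_SIZE (hedged; the exact numbers depend on the
 * architecture): struct ftrace_entry is a trace_entry header (8 bytes)
 * plus ip and parent_ip (8 bytes each on 64-bit), i.e. 24 bytes, so
 *
 *      ALIGN(24 + 4, 8) - 4 = 32 - 4 = 28
 *
 * The extra sizeof(u32) models the size field that the perf raw-sample
 * output path prepends, so the field plus the record stays u64-aligned.
 */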
static int perf_ftrace_function_register(struct perf_event *event)
{
        struct ftrace_ops *ops = &event->ftrace_ops;

        ops->flags |= FTRACE_OPS_FL_PER_CPU | FTRACE_OPS_FL_RCU;
        ops->func = perf_ftrace_function_call;
        return register_ftrace_function(ops);
}

static int perf_ftrace_function_unregister(struct perf_event *event)
{
        struct ftrace_ops *ops = &event->ftrace_ops;
        int ret = unregister_ftrace_function(ops);

        ftrace_free_filter(ops);
        return ret;
}

static void perf_ftrace_function_enable(struct perf_event *event)
{
        ftrace_function_local_enable(&event->ftrace_ops);
}

static void perf_ftrace_function_disable(struct perf_event *event)
{
        ftrace_function_local_disable(&event->ftrace_ops);
}

int perf_ftrace_event_register(struct trace_event_call *call,
                               enum trace_reg type, void *data)
{
        switch (type) {
        case TRACE_REG_REGISTER:
        case TRACE_REG_UNREGISTER:
                break;
        case TRACE_REG_PERF_REGISTER:
        case TRACE_REG_PERF_UNREGISTER:
                return 0;
        case TRACE_REG_PERF_OPEN:
                return perf_ftrace_function_register(data);
        case TRACE_REG_PERF_CLOSE:
                return perf_ftrace_function_unregister(data);
        case TRACE_REG_PERF_ADD:
                perf_ftrace_function_enable(data);
                return 0;
        case TRACE_REG_PERF_DEL:
                perf_ftrace_function_disable(data);
                return 0;
        }

        return -EINVAL;
}
#endif /* CONFIG_FUNCTION_TRACER */