trace_events_trigger.c 41 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639
  1. /*
  2. * trace_events_trigger - trace event triggers
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License as published by
  6. * the Free Software Foundation; either version 2 of the License, or
  7. * (at your option) any later version.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write to the Free Software
  16. * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  17. *
  18. * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
  19. */
  20. #include <linux/module.h>
  21. #include <linux/ctype.h>
  22. #include <linux/mutex.h>
  23. #include <linux/slab.h>
  24. #include <linux/rculist.h>
  25. #include "trace.h"
  26. static LIST_HEAD(trigger_commands);
  27. static DEFINE_MUTEX(trigger_cmd_mutex);
/*
 * trigger_data_free - free an event_trigger_data object
 * @data: The trigger data to free
 *
 * Detaches any attached filter first (via the command's set_filter with
 * a NULL filter string), then waits for all currently-running
 * preempt-disabled sections to finish before freeing, since triggers
 * are invoked from tracepoint handlers under rcu_read_lock_sched().
 */
void trigger_data_free(struct event_trigger_data *data)
{
	if (data->cmd_ops->set_filter)
		data->cmd_ops->set_filter(NULL, data, NULL);

	synchronize_sched(); /* make sure current triggers exit before free */

	kfree(data);
}
  35. /**
  36. * event_triggers_call - Call triggers associated with a trace event
  37. * @file: The trace_event_file associated with the event
  38. * @rec: The trace entry for the event, NULL for unconditional invocation
  39. *
  40. * For each trigger associated with an event, invoke the trigger
  41. * function registered with the associated trigger command. If rec is
  42. * non-NULL, it means that the trigger requires further processing and
  43. * shouldn't be unconditionally invoked. If rec is non-NULL and the
  44. * trigger has a filter associated with it, rec will checked against
  45. * the filter and if the record matches the trigger will be invoked.
  46. * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
  47. * in any case until the current event is written, the trigger
  48. * function isn't invoked but the bit associated with the deferred
  49. * trigger is set in the return value.
  50. *
  51. * Returns an enum event_trigger_type value containing a set bit for
  52. * any trigger that should be deferred, ETT_NONE if nothing to defer.
  53. *
  54. * Called from tracepoint handlers (with rcu_read_lock_sched() held).
  55. *
  56. * Return: an enum event_trigger_type value containing a set bit for
  57. * any trigger that should be deferred, ETT_NONE if nothing to defer.
  58. */
  59. enum event_trigger_type
  60. event_triggers_call(struct trace_event_file *file, void *rec)
  61. {
  62. struct event_trigger_data *data;
  63. enum event_trigger_type tt = ETT_NONE;
  64. struct event_filter *filter;
  65. if (list_empty(&file->triggers))
  66. return tt;
  67. list_for_each_entry_rcu(data, &file->triggers, list) {
  68. if (data->paused)
  69. continue;
  70. if (!rec) {
  71. data->ops->func(data, rec);
  72. continue;
  73. }
  74. filter = rcu_dereference_sched(data->filter);
  75. if (filter && !filter_match_preds(filter, rec))
  76. continue;
  77. if (event_command_post_trigger(data->cmd_ops)) {
  78. tt |= data->cmd_ops->trigger_type;
  79. continue;
  80. }
  81. data->ops->func(data, rec);
  82. }
  83. return tt;
  84. }
  85. EXPORT_SYMBOL_GPL(event_triggers_call);
  86. /**
  87. * event_triggers_post_call - Call 'post_triggers' for a trace event
  88. * @file: The trace_event_file associated with the event
  89. * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
  90. * @rec: The trace entry for the event
  91. *
  92. * For each trigger associated with an event, invoke the trigger
  93. * function registered with the associated trigger command, if the
  94. * corresponding bit is set in the tt enum passed into this function.
  95. * See @event_triggers_call for details on how those bits are set.
  96. *
  97. * Called from tracepoint handlers (with rcu_read_lock_sched() held).
  98. */
  99. void
  100. event_triggers_post_call(struct trace_event_file *file,
  101. enum event_trigger_type tt,
  102. void *rec)
  103. {
  104. struct event_trigger_data *data;
  105. list_for_each_entry_rcu(data, &file->triggers, list) {
  106. if (data->paused)
  107. continue;
  108. if (data->cmd_ops->trigger_type & tt)
  109. data->ops->func(data, rec);
  110. }
  111. }
  112. EXPORT_SYMBOL_GPL(event_triggers_post_call);
  113. #define SHOW_AVAILABLE_TRIGGERS (void *)(1UL)
  114. static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
  115. {
  116. struct trace_event_file *event_file = event_file_data(m->private);
  117. if (t == SHOW_AVAILABLE_TRIGGERS) {
  118. (*pos)++;
  119. return NULL;
  120. }
  121. return seq_list_next(t, &event_file->triggers, pos);
  122. }
  123. static void *trigger_start(struct seq_file *m, loff_t *pos)
  124. {
  125. struct trace_event_file *event_file;
  126. /* ->stop() is called even if ->start() fails */
  127. mutex_lock(&event_mutex);
  128. event_file = event_file_data(m->private);
  129. if (unlikely(!event_file))
  130. return ERR_PTR(-ENODEV);
  131. if (list_empty(&event_file->triggers))
  132. return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;
  133. return seq_list_start(&event_file->triggers, *pos);
  134. }
/* seq_file ->stop: drop the event_mutex taken in trigger_start() */
static void trigger_stop(struct seq_file *m, void *t)
{
	mutex_unlock(&event_mutex);
}
  139. static int trigger_show(struct seq_file *m, void *v)
  140. {
  141. struct event_trigger_data *data;
  142. struct event_command *p;
  143. if (v == SHOW_AVAILABLE_TRIGGERS) {
  144. seq_puts(m, "# Available triggers:\n");
  145. seq_putc(m, '#');
  146. mutex_lock(&trigger_cmd_mutex);
  147. list_for_each_entry_reverse(p, &trigger_commands, list)
  148. seq_printf(m, " %s", p->name);
  149. seq_putc(m, '\n');
  150. mutex_unlock(&trigger_cmd_mutex);
  151. return 0;
  152. }
  153. data = list_entry(v, struct event_trigger_data, list);
  154. data->ops->print(m, data->ops, data);
  155. return 0;
  156. }
/* seq_file operations backing the per-event 'trigger' file reads */
static const struct seq_operations event_triggers_seq_ops = {
	.start = trigger_start,
	.next = trigger_next,
	.stop = trigger_stop,
	.show = trigger_show,
};
  163. static int event_trigger_regex_open(struct inode *inode, struct file *file)
  164. {
  165. int ret = 0;
  166. mutex_lock(&event_mutex);
  167. if (unlikely(!event_file_data(file))) {
  168. mutex_unlock(&event_mutex);
  169. return -ENODEV;
  170. }
  171. if ((file->f_mode & FMODE_WRITE) &&
  172. (file->f_flags & O_TRUNC)) {
  173. struct trace_event_file *event_file;
  174. struct event_command *p;
  175. event_file = event_file_data(file);
  176. list_for_each_entry(p, &trigger_commands, list) {
  177. if (p->unreg_all)
  178. p->unreg_all(event_file);
  179. }
  180. }
  181. if (file->f_mode & FMODE_READ) {
  182. ret = seq_open(file, &event_triggers_seq_ops);
  183. if (!ret) {
  184. struct seq_file *m = file->private_data;
  185. m->private = file;
  186. }
  187. }
  188. mutex_unlock(&event_mutex);
  189. return ret;
  190. }
  191. static int trigger_process_regex(struct trace_event_file *file, char *buff)
  192. {
  193. char *command, *next;
  194. struct event_command *p;
  195. int ret = -EINVAL;
  196. next = buff = skip_spaces(buff);
  197. command = strsep(&next, ": \t");
  198. if (next) {
  199. next = skip_spaces(next);
  200. if (!*next)
  201. next = NULL;
  202. }
  203. command = (command[0] != '!') ? command : command + 1;
  204. mutex_lock(&trigger_cmd_mutex);
  205. list_for_each_entry(p, &trigger_commands, list) {
  206. if (strcmp(p->name, command) == 0) {
  207. ret = p->func(p, file, buff, command, next);
  208. goto out_unlock;
  209. }
  210. }
  211. out_unlock:
  212. mutex_unlock(&trigger_cmd_mutex);
  213. return ret;
  214. }
  215. static ssize_t event_trigger_regex_write(struct file *file,
  216. const char __user *ubuf,
  217. size_t cnt, loff_t *ppos)
  218. {
  219. struct trace_event_file *event_file;
  220. ssize_t ret;
  221. char *buf;
  222. if (!cnt)
  223. return 0;
  224. if (cnt >= PAGE_SIZE)
  225. return -EINVAL;
  226. buf = memdup_user_nul(ubuf, cnt);
  227. if (IS_ERR(buf))
  228. return PTR_ERR(buf);
  229. strim(buf);
  230. mutex_lock(&event_mutex);
  231. event_file = event_file_data(file);
  232. if (unlikely(!event_file)) {
  233. mutex_unlock(&event_mutex);
  234. kfree(buf);
  235. return -ENODEV;
  236. }
  237. ret = trigger_process_regex(event_file, buf);
  238. mutex_unlock(&event_mutex);
  239. kfree(buf);
  240. if (ret < 0)
  241. goto out;
  242. *ppos += cnt;
  243. ret = cnt;
  244. out:
  245. return ret;
  246. }
/* Release handler: tear down the seq_file iterator set up for reads. */
static int event_trigger_regex_release(struct inode *inode, struct file *file)
{
	mutex_lock(&event_mutex);

	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);

	mutex_unlock(&event_mutex);

	return 0;
}
/* fops ->write thunk for the 'trigger' file */
static ssize_t
event_trigger_write(struct file *filp, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return event_trigger_regex_write(filp, ubuf, cnt, ppos);
}
/* fops ->open thunk for the 'trigger' file */
static int
event_trigger_open(struct inode *inode, struct file *filp)
{
	return event_trigger_regex_open(inode, filp);
}
/* fops ->release thunk for the 'trigger' file */
static int
event_trigger_release(struct inode *inode, struct file *file)
{
	return event_trigger_regex_release(inode, file);
}
/* File operations for each event's 'trigger' control file */
const struct file_operations event_trigger_fops = {
	.open = event_trigger_open,
	.read = seq_read,
	.write = event_trigger_write,
	.llseek = tracing_lseek,
	.release = event_trigger_release,
};
  278. /*
  279. * Currently we only register event commands from __init, so mark this
  280. * __init too.
  281. */
  282. __init int register_event_command(struct event_command *cmd)
  283. {
  284. struct event_command *p;
  285. int ret = 0;
  286. mutex_lock(&trigger_cmd_mutex);
  287. list_for_each_entry(p, &trigger_commands, list) {
  288. if (strcmp(cmd->name, p->name) == 0) {
  289. ret = -EBUSY;
  290. goto out_unlock;
  291. }
  292. }
  293. list_add(&cmd->list, &trigger_commands);
  294. out_unlock:
  295. mutex_unlock(&trigger_cmd_mutex);
  296. return ret;
  297. }
  298. /*
  299. * Currently we only unregister event commands from __init, so mark
  300. * this __init too.
  301. */
  302. __init int unregister_event_command(struct event_command *cmd)
  303. {
  304. struct event_command *p, *n;
  305. int ret = -ENODEV;
  306. mutex_lock(&trigger_cmd_mutex);
  307. list_for_each_entry_safe(p, n, &trigger_commands, list) {
  308. if (strcmp(cmd->name, p->name) == 0) {
  309. ret = 0;
  310. list_del_init(&p->list);
  311. goto out_unlock;
  312. }
  313. }
  314. out_unlock:
  315. mutex_unlock(&trigger_cmd_mutex);
  316. return ret;
  317. }
  318. /**
  319. * event_trigger_print - Generic event_trigger_ops @print implementation
  320. * @name: The name of the event trigger
  321. * @m: The seq_file being printed to
  322. * @data: Trigger-specific data
  323. * @filter_str: filter_str to print, if present
  324. *
  325. * Common implementation for event triggers to print themselves.
  326. *
  327. * Usually wrapped by a function that simply sets the @name of the
  328. * trigger command and then invokes this.
  329. *
  330. * Return: 0 on success, errno otherwise
  331. */
  332. static int
  333. event_trigger_print(const char *name, struct seq_file *m,
  334. void *data, char *filter_str)
  335. {
  336. long count = (long)data;
  337. seq_puts(m, name);
  338. if (count == -1)
  339. seq_puts(m, ":unlimited");
  340. else
  341. seq_printf(m, ":count=%ld", count);
  342. if (filter_str)
  343. seq_printf(m, " if %s\n", filter_str);
  344. else
  345. seq_putc(m, '\n');
  346. return 0;
  347. }
/**
 * event_trigger_init - Generic event_trigger_ops @init implementation
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger initialization.  Simply takes
 * a reference on @data; event_trigger_free() drops it and frees the
 * data once the count reaches zero.
 *
 * Usually used directly as the @init method in event trigger
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
int event_trigger_init(struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	data->ref++;
	return 0;
}
  366. /**
  367. * event_trigger_free - Generic event_trigger_ops @free implementation
  368. * @ops: The trigger ops associated with the trigger
  369. * @data: Trigger-specific data
  370. *
  371. * Common implementation of event trigger de-initialization.
  372. *
  373. * Usually used directly as the @free method in event trigger
  374. * implementations.
  375. */
  376. static void
  377. event_trigger_free(struct event_trigger_ops *ops,
  378. struct event_trigger_data *data)
  379. {
  380. if (WARN_ON_ONCE(data->ref <= 0))
  381. return;
  382. data->ref--;
  383. if (!data->ref)
  384. trigger_data_free(data);
  385. }
  386. int trace_event_trigger_enable_disable(struct trace_event_file *file,
  387. int trigger_enable)
  388. {
  389. int ret = 0;
  390. if (trigger_enable) {
  391. if (atomic_inc_return(&file->tm_ref) > 1)
  392. return ret;
  393. set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
  394. ret = trace_event_enable_disable(file, 1, 1);
  395. } else {
  396. if (atomic_dec_return(&file->tm_ref) > 0)
  397. return ret;
  398. clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
  399. ret = trace_event_enable_disable(file, 0, 1);
  400. }
  401. return ret;
  402. }
  403. /**
  404. * clear_event_triggers - Clear all triggers associated with a trace array
  405. * @tr: The trace array to clear
  406. *
  407. * For each trigger, the triggering event has its tm_ref decremented
  408. * via trace_event_trigger_enable_disable(), and any associated event
  409. * (in the case of enable/disable_event triggers) will have its sm_ref
  410. * decremented via free()->trace_event_enable_disable(). That
  411. * combination effectively reverses the soft-mode/trigger state added
  412. * by trigger registration.
  413. *
  414. * Must be called with event_mutex held.
  415. */
  416. void
  417. clear_event_triggers(struct trace_array *tr)
  418. {
  419. struct trace_event_file *file;
  420. list_for_each_entry(file, &tr->events, list) {
  421. struct event_trigger_data *data, *n;
  422. list_for_each_entry_safe(data, n, &file->triggers, list) {
  423. trace_event_trigger_enable_disable(file, 0);
  424. list_del_rcu(&data->list);
  425. if (data->ops->free)
  426. data->ops->free(data->ops, data);
  427. }
  428. }
  429. }
  430. /**
  431. * update_cond_flag - Set or reset the TRIGGER_COND bit
  432. * @file: The trace_event_file associated with the event
  433. *
  434. * If an event has triggers and any of those triggers has a filter or
  435. * a post_trigger, trigger invocation needs to be deferred until after
  436. * the current event has logged its data, and the event should have
  437. * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
  438. * cleared.
  439. */
  440. void update_cond_flag(struct trace_event_file *file)
  441. {
  442. struct event_trigger_data *data;
  443. bool set_cond = false;
  444. list_for_each_entry_rcu(data, &file->triggers, list) {
  445. if (data->filter || event_command_post_trigger(data->cmd_ops) ||
  446. event_command_needs_rec(data->cmd_ops)) {
  447. set_cond = true;
  448. break;
  449. }
  450. }
  451. if (set_cond)
  452. set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
  453. else
  454. clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
  455. }
/**
 * register_trigger - Generic event_command @reg implementation
 * @glob: The raw string used to register the trigger
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data to associate with the trigger
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event trigger registration.
 *
 * Usually used directly as the @reg method in event command
 * implementations.
 *
 * Return: the number of triggers registered (0 or 1) on success,
 * negative errno otherwise.  Note the caller (see
 * event_trigger_callback()) treats 0 registered triggers as a failure.
 */
static int register_trigger(char *glob, struct event_trigger_ops *ops,
			    struct event_trigger_data *data,
			    struct trace_event_file *file)
{
	struct event_trigger_data *test;
	int ret = 0;

	/* only one trigger of each type may be attached to an event */
	list_for_each_entry_rcu(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
			ret = -EEXIST;
			goto out;
		}
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);
	/* ret becomes 1: one trigger successfully registered */
	ret++;

	update_cond_flag(file);
	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		/* roll back the registration; ret drops back to 0 */
		list_del_rcu(&data->list);
		update_cond_flag(file);
		ret--;
	}
out:
	return ret;
}
  498. /**
  499. * unregister_trigger - Generic event_command @unreg implementation
  500. * @glob: The raw string used to register the trigger
  501. * @ops: The trigger ops associated with the trigger
  502. * @test: Trigger-specific data used to find the trigger to remove
  503. * @file: The trace_event_file associated with the event
  504. *
  505. * Common implementation for event trigger unregistration.
  506. *
  507. * Usually used directly as the @unreg method in event command
  508. * implementations.
  509. */
  510. void unregister_trigger(char *glob, struct event_trigger_ops *ops,
  511. struct event_trigger_data *test,
  512. struct trace_event_file *file)
  513. {
  514. struct event_trigger_data *data;
  515. bool unregistered = false;
  516. list_for_each_entry_rcu(data, &file->triggers, list) {
  517. if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
  518. unregistered = true;
  519. list_del_rcu(&data->list);
  520. trace_event_trigger_enable_disable(file, 0);
  521. update_cond_flag(file);
  522. break;
  523. }
  524. }
  525. if (unregistered && data->ops->free)
  526. data->ops->free(data->ops, data);
  527. }
/**
 * event_trigger_callback - Generic event_command @func implementation
 * @cmd_ops: The command ops, used for trigger registration
 * @file: The trace_event_file associated with the event
 * @glob: The raw string used to register the trigger ('!' prefix = removal)
 * @cmd: The cmd portion of the string used to register the trigger
 * @param: The params portion of the string used to register the trigger
 *
 * Common implementation for event command parsing and trigger
 * instantiation.
 *
 * Usually used directly as the @func method in event command
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_callback(struct event_command *cmd_ops,
		       struct trace_event_file *file,
		       char *glob, char *cmd, char *param)
{
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;
	char *trigger = NULL;
	char *number;
	int ret;

	/* separate the trigger from the filter (t:n [if filter]) */
	if (param && isdigit(param[0])) {
		trigger = strsep(&param, " \t");
		if (param) {
			param = skip_spaces(param);
			if (!*param)
				param = NULL;
		}
	}

	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out;

	/* -1 means unlimited; overwritten below if a ":n" count was given */
	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;
	trigger_data->private_data = file;
	INIT_LIST_HEAD(&trigger_data->list);
	INIT_LIST_HEAD(&trigger_data->named_list);

	if (glob[0] == '!') {
		/* removal: unregister the existing trigger and we're done */
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		kfree(trigger_data);
		ret = 0;
		goto out;
	}

	if (trigger) {
		number = strsep(&trigger, ":");

		ret = -EINVAL;
		if (!strlen(number))
			goto out_free;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
		if (ret)
			goto out_free;
	}

	if (!param) /* if param is non-empty, it's supposed to be a filter */
		goto out_reg;

	if (!cmd_ops->set_filter)
		goto out_reg;

	ret = cmd_ops->set_filter(param, trigger_data, file);
	if (ret < 0)
		goto out_free;

out_reg:
	/* Up the trigger_data count to make sure reg doesn't free it on failure */
	event_trigger_init(trigger_ops, trigger_data);
	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of functions enabled,
	 * but if it didn't find any functions it returns zero.
	 * Consider no functions a failure too.
	 */
	if (!ret) {
		cmd_ops->unreg(glob, trigger_ops, trigger_data, file);
		ret = -ENOENT;
	} else if (ret > 0)
		ret = 0;

	/* Down the counter of trigger_data or free it if not used anymore */
	event_trigger_free(trigger_ops, trigger_data);
out:
	return ret;

out_free:
	/* undo any filter set above, then release the unregistered data */
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);
	kfree(trigger_data);
	goto out;
}
/**
 * set_trigger_filter - Generic event_command @set_filter implementation
 * @filter_str: The filter string for the trigger, NULL to remove filter
 * @trigger_data: Trigger-specific data
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event command filter parsing and filter
 * instantiation.  The filter string is expected in the form
 * "if <expr>"; the old filter, if any, is swapped out under RCU and
 * freed after readers are done with it.
 *
 * Usually used directly as the @set_filter method in event command
 * implementations.
 *
 * Also used to remove a filter (if filter_str = NULL).
 *
 * Return: 0 on success, errno otherwise
 */
int set_trigger_filter(char *filter_str,
		       struct event_trigger_data *trigger_data,
		       struct trace_event_file *file)
{
	struct event_trigger_data *data = trigger_data;
	struct event_filter *filter = NULL, *tmp;
	int ret = -EINVAL;
	char *s;

	if (!filter_str) /* clear the current filter */
		goto assign;

	s = strsep(&filter_str, " \t");

	/* the filter clause must start with the literal keyword "if" */
	if (!strlen(s) || strcmp(s, "if") != 0)
		goto out;

	if (!filter_str)
		goto out;

	/* The filter is for the 'trigger' event, not the triggered event */
	ret = create_event_filter(file->event_call, filter_str, false, &filter);
	/*
	 * If create_event_filter() fails, filter still needs to be freed.
	 * Which the calling code will do with data->filter.
	 */
assign:
	tmp = rcu_access_pointer(data->filter);

	rcu_assign_pointer(data->filter, filter);

	if (tmp) {
		/* Make sure the call is done with the filter */
		synchronize_sched();
		free_event_filter(tmp);
	}

	kfree(data->filter_str);
	data->filter_str = NULL;

	if (filter_str) {
		/* keep a copy of the raw expression for trigger printing */
		data->filter_str = kstrdup(filter_str, GFP_KERNEL);
		if (!data->filter_str) {
			free_event_filter(rcu_access_pointer(data->filter));
			data->filter = NULL;
			ret = -ENOMEM;
		}
	}
out:
	return ret;
}
  682. static LIST_HEAD(named_triggers);
  683. /**
  684. * find_named_trigger - Find the common named trigger associated with @name
  685. * @name: The name of the set of named triggers to find the common data for
  686. *
  687. * Named triggers are sets of triggers that share a common set of
  688. * trigger data. The first named trigger registered with a given name
  689. * owns the common trigger data that the others subsequently
  690. * registered with the same name will reference. This function
  691. * returns the common trigger data associated with that first
  692. * registered instance.
  693. *
  694. * Return: the common trigger data for the given named trigger on
  695. * success, NULL otherwise.
  696. */
  697. struct event_trigger_data *find_named_trigger(const char *name)
  698. {
  699. struct event_trigger_data *data;
  700. if (!name)
  701. return NULL;
  702. list_for_each_entry(data, &named_triggers, named_list) {
  703. if (data->named_data)
  704. continue;
  705. if (strcmp(data->name, name) == 0)
  706. return data;
  707. }
  708. return NULL;
  709. }
  710. /**
  711. * is_named_trigger - determine if a given trigger is a named trigger
  712. * @test: The trigger data to test
  713. *
  714. * Return: true if 'test' is a named trigger, false otherwise.
  715. */
  716. bool is_named_trigger(struct event_trigger_data *test)
  717. {
  718. struct event_trigger_data *data;
  719. list_for_each_entry(data, &named_triggers, named_list) {
  720. if (test == data)
  721. return true;
  722. }
  723. return false;
  724. }
/**
 * save_named_trigger - save the trigger in the named trigger list
 * @name: The name of the named trigger set
 * @data: The trigger data to save
 *
 * Duplicates @name into @data->name and links @data onto the global
 * named_triggers list.
 *
 * Return: 0 if successful, negative error otherwise.
 */
int save_named_trigger(const char *name, struct event_trigger_data *data)
{
	data->name = kstrdup(name, GFP_KERNEL);
	if (!data->name)
		return -ENOMEM;

	list_add(&data->named_list, &named_triggers);

	return 0;
}
/**
 * del_named_trigger - delete a trigger from the named trigger list
 * @data: The trigger data to delete
 *
 * Frees the name allocated by save_named_trigger() and unlinks @data
 * from the named_triggers list.
 */
void del_named_trigger(struct event_trigger_data *data)
{
	kfree(data->name);
	data->name = NULL;

	list_del(&data->named_list);
}
  750. static void __pause_named_trigger(struct event_trigger_data *data, bool pause)
  751. {
  752. struct event_trigger_data *test;
  753. list_for_each_entry(test, &named_triggers, named_list) {
  754. if (strcmp(test->name, data->name) == 0) {
  755. if (pause) {
  756. test->paused_tmp = test->paused;
  757. test->paused = true;
  758. } else {
  759. test->paused = test->paused_tmp;
  760. }
  761. }
  762. }
  763. }
/**
 * pause_named_trigger - Pause all named triggers with the same name
 * @data: The trigger data of a named trigger to pause
 *
 * Pauses a named trigger along with all other triggers having the
 * same name. Because named triggers share a common set of data,
 * pausing only one is meaningless, so pausing one named trigger needs
 * to pause all triggers with the same name.
 */
void pause_named_trigger(struct event_trigger_data *data)
{
	/* Saves each matching trigger's prior paused state before pausing */
	__pause_named_trigger(data, true);
}
/**
 * unpause_named_trigger - Un-pause all named triggers with the same name
 * @data: The trigger data of a named trigger to unpause
 *
 * Un-pauses a named trigger along with all other triggers having the
 * same name. Because named triggers share a common set of data,
 * unpausing only one is meaningless, so unpausing one named trigger
 * needs to unpause all triggers with the same name.
 */
void unpause_named_trigger(struct event_trigger_data *data)
{
	/* Restores each matching trigger's paused state saved at pause time */
	__pause_named_trigger(data, false);
}
/**
 * set_named_trigger_data - Associate common named trigger data
 * @data: The trigger data to associate with the common named data
 * @named_data: The first-registered trigger data owning the common data
 *
 * Named triggers are sets of triggers that share a common set of
 * trigger data. The first named trigger registered with a given name
 * owns the common trigger data that the others subsequently
 * registered with the same name will reference. This function
 * associates the common trigger data from the first trigger with the
 * given trigger.
 */
void set_named_trigger_data(struct event_trigger_data *data,
			    struct event_trigger_data *named_data)
{
	data->named_data = named_data;
}
  806. static void
  807. traceon_trigger(struct event_trigger_data *data, void *rec)
  808. {
  809. if (tracing_is_on())
  810. return;
  811. tracing_on();
  812. }
  813. static void
  814. traceon_count_trigger(struct event_trigger_data *data, void *rec)
  815. {
  816. if (tracing_is_on())
  817. return;
  818. if (!data->count)
  819. return;
  820. if (data->count != -1)
  821. (data->count)--;
  822. tracing_on();
  823. }
  824. static void
  825. traceoff_trigger(struct event_trigger_data *data, void *rec)
  826. {
  827. if (!tracing_is_on())
  828. return;
  829. tracing_off();
  830. }
  831. static void
  832. traceoff_count_trigger(struct event_trigger_data *data, void *rec)
  833. {
  834. if (!tracing_is_on())
  835. return;
  836. if (!data->count)
  837. return;
  838. if (data->count != -1)
  839. (data->count)--;
  840. tracing_off();
  841. }
/* Show the traceon trigger: name, remaining count and optional filter */
static int
traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		      struct event_trigger_data *data)
{
	return event_trigger_print("traceon", m, (void *)data->count,
				   data->filter_str);
}
/* Show the traceoff trigger: name, remaining count and optional filter */
static int
traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	return event_trigger_print("traceoff", m, (void *)data->count,
				   data->filter_str);
}
/* "traceon" with no :count — fires every time */
static struct event_trigger_ops traceon_trigger_ops = {
	.func = traceon_trigger,
	.print = traceon_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

/* "traceon" with a :count limit */
static struct event_trigger_ops traceon_count_trigger_ops = {
	.func = traceon_count_trigger,
	.print = traceon_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

/* "traceoff" with no :count — fires every time */
static struct event_trigger_ops traceoff_trigger_ops = {
	.func = traceoff_trigger,
	.print = traceoff_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

/* "traceoff" with a :count limit */
static struct event_trigger_ops traceoff_count_trigger_ops = {
	.func = traceoff_count_trigger,
	.print = traceoff_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};
  880. static struct event_trigger_ops *
  881. onoff_get_trigger_ops(char *cmd, char *param)
  882. {
  883. struct event_trigger_ops *ops;
  884. /* we register both traceon and traceoff to this callback */
  885. if (strcmp(cmd, "traceon") == 0)
  886. ops = param ? &traceon_count_trigger_ops :
  887. &traceon_trigger_ops;
  888. else
  889. ops = param ? &traceoff_count_trigger_ops :
  890. &traceoff_trigger_ops;
  891. return ops;
  892. }
/* The "traceon" trigger command */
static struct event_command trigger_traceon_cmd = {
	.name = "traceon",
	.trigger_type = ETT_TRACE_ONOFF,
	.func = event_trigger_callback,
	.reg = register_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = onoff_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

/*
 * The "traceoff" trigger command. EVENT_CMD_FL_POST_TRIGGER marks it
 * to run after the event is recorded — presumably so the triggering
 * event itself still lands in the buffer (NOTE(review): confirm
 * against the flag's definition).
 */
static struct event_command trigger_traceoff_cmd = {
	.name = "traceoff",
	.trigger_type = ETT_TRACE_ONOFF,
	.flags = EVENT_CMD_FL_POST_TRIGGER,
	.func = event_trigger_callback,
	.reg = register_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = onoff_get_trigger_ops,
	.set_filter = set_trigger_filter,
};
  912. #ifdef CONFIG_TRACER_SNAPSHOT
  913. static void
  914. snapshot_trigger(struct event_trigger_data *data, void *rec)
  915. {
  916. struct trace_event_file *file = data->private_data;
  917. if (file)
  918. tracing_snapshot_instance(file->tr);
  919. else
  920. tracing_snapshot();
  921. }
  922. static void
  923. snapshot_count_trigger(struct event_trigger_data *data, void *rec)
  924. {
  925. if (!data->count)
  926. return;
  927. if (data->count != -1)
  928. (data->count)--;
  929. snapshot_trigger(data, rec);
  930. }
  931. static int
  932. register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
  933. struct event_trigger_data *data,
  934. struct trace_event_file *file)
  935. {
  936. if (tracing_alloc_snapshot_instance(file->tr) != 0)
  937. return 0;
  938. return register_trigger(glob, ops, data, file);
  939. }
/* Show the snapshot trigger: name, remaining count and optional filter */
static int
snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	return event_trigger_print("snapshot", m, (void *)data->count,
				   data->filter_str);
}
/* "snapshot" with no :count — fires every time */
static struct event_trigger_ops snapshot_trigger_ops = {
	.func = snapshot_trigger,
	.print = snapshot_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

/* "snapshot" with a :count limit */
static struct event_trigger_ops snapshot_count_trigger_ops = {
	.func = snapshot_count_trigger,
	.print = snapshot_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};
  959. static struct event_trigger_ops *
  960. snapshot_get_trigger_ops(char *cmd, char *param)
  961. {
  962. return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
  963. }
/* The "snapshot" trigger command */
static struct event_command trigger_snapshot_cmd = {
	.name = "snapshot",
	.trigger_type = ETT_SNAPSHOT,
	.func = event_trigger_callback,
	.reg = register_snapshot_trigger,	/* allocates the snapshot buffer first */
	.unreg = unregister_trigger,
	.get_trigger_ops = snapshot_get_trigger_ops,
	.set_filter = set_trigger_filter,
};
  973. static __init int register_trigger_snapshot_cmd(void)
  974. {
  975. int ret;
  976. ret = register_event_command(&trigger_snapshot_cmd);
  977. WARN_ON(ret < 0);
  978. return ret;
  979. }
  980. #else
  981. static __init int register_trigger_snapshot_cmd(void) { return 0; }
  982. #endif /* CONFIG_TRACER_SNAPSHOT */
  983. #ifdef CONFIG_STACKTRACE
/*
 * Skip 3:
 *   stacktrace_trigger()
 *   event_triggers_post_call()
 *   trace_event_raw_event_xxx()
 */
#define STACK_SKIP 3

/* "stacktrace" action: dump the stack, hiding the trigger plumbing frames */
static void
stacktrace_trigger(struct event_trigger_data *data, void *rec)
{
	trace_dump_stack(STACK_SKIP);
}
  996. static void
  997. stacktrace_count_trigger(struct event_trigger_data *data, void *rec)
  998. {
  999. if (!data->count)
  1000. return;
  1001. if (data->count != -1)
  1002. (data->count)--;
  1003. stacktrace_trigger(data, rec);
  1004. }
/* Show the stacktrace trigger: name, remaining count and optional filter */
static int
stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
			 struct event_trigger_data *data)
{
	return event_trigger_print("stacktrace", m, (void *)data->count,
				   data->filter_str);
}
/* "stacktrace" with no :count — fires every time */
static struct event_trigger_ops stacktrace_trigger_ops = {
	.func = stacktrace_trigger,
	.print = stacktrace_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

/* "stacktrace" with a :count limit */
static struct event_trigger_ops stacktrace_count_trigger_ops = {
	.func = stacktrace_count_trigger,
	.print = stacktrace_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};
  1024. static struct event_trigger_ops *
  1025. stacktrace_get_trigger_ops(char *cmd, char *param)
  1026. {
  1027. return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
  1028. }
/*
 * The "stacktrace" trigger command. EVENT_CMD_FL_POST_TRIGGER marks it
 * to run after the event is recorded (NOTE(review): confirm against
 * the flag's definition).
 */
static struct event_command trigger_stacktrace_cmd = {
	.name = "stacktrace",
	.trigger_type = ETT_STACKTRACE,
	.flags = EVENT_CMD_FL_POST_TRIGGER,
	.func = event_trigger_callback,
	.reg = register_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = stacktrace_get_trigger_ops,
	.set_filter = set_trigger_filter,
};
  1039. static __init int register_trigger_stacktrace_cmd(void)
  1040. {
  1041. int ret;
  1042. ret = register_event_command(&trigger_stacktrace_cmd);
  1043. WARN_ON(ret < 0);
  1044. return ret;
  1045. }
  1046. #else
  1047. static __init int register_trigger_stacktrace_cmd(void) { return 0; }
  1048. #endif /* CONFIG_STACKTRACE */
/* Remove both on/off commands; used if registering the pair half-fails */
static __init void unregister_trigger_traceon_traceoff_cmds(void)
{
	unregister_event_command(&trigger_traceon_cmd);
	unregister_event_command(&trigger_traceoff_cmd);
}
  1054. static void
  1055. event_enable_trigger(struct event_trigger_data *data, void *rec)
  1056. {
  1057. struct enable_trigger_data *enable_data = data->private_data;
  1058. if (enable_data->enable)
  1059. clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
  1060. else
  1061. set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
  1062. }
/*
 * Count-limited enable/disable_event: a count of 0 means exhausted,
 * -1 means unlimited. The count is only consumed when the target
 * event actually changes state.
 */
static void
event_enable_count_trigger(struct event_trigger_data *data, void *rec)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (!data->count)
		return;

	/* Skip if the event is in a state we want to switch to */
	if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
		return;

	if (data->count != -1)
		(data->count)--;

	event_enable_trigger(data, rec);
}
/**
 * event_enable_trigger_print - "print" callback for enable/disable_event
 * @m: The seq_file to write into
 * @ops: The trigger ops (unused)
 * @data: Trigger data holding the enable_trigger_data to display
 *
 * Emits one line of the form:
 *   <cmd>:<system>:<event>[:count=N | :unlimited][ if <filter>]
 * where <cmd> is the enable/disable event or hist string depending on
 * the enable_trigger_data flags.
 *
 * Return: always 0.
 */
int event_enable_trigger_print(struct seq_file *m,
			       struct event_trigger_ops *ops,
			       struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	seq_printf(m, "%s:%s:%s",
		   enable_data->hist ?
		   (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) :
		   (enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR),
		   enable_data->file->event_call->class->system,
		   trace_event_name(enable_data->file->event_call));

	/* count of -1 means the trigger fires without limit */
	if (data->count == -1)
		seq_puts(m, ":unlimited");
	else
		seq_printf(m, ":count=%ld", data->count);

	if (data->filter_str)
		seq_printf(m, " if %s\n", data->filter_str);
	else
		seq_putc(m, '\n');

	return 0;
}
/**
 * event_enable_trigger_free - "free" callback for enable/disable_event
 * @ops: The trigger ops (unused)
 * @data: The trigger data whose reference is being dropped
 *
 * Drops one reference on @data. When the last reference goes away,
 * the target event's soft mode is dropped, the module reference taken
 * at registration is released, and the trigger data plus its
 * enable_trigger_data are freed.
 */
void event_enable_trigger_free(struct event_trigger_ops *ops,
			       struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref) {
		/* Remove the SOFT_MODE flag */
		trace_event_enable_disable(enable_data->file, 0, 1);
		module_put(enable_data->file->event_call->mod);
		trigger_data_free(data);
		kfree(enable_data);
	}
}
/* enable_event with no :count */
static struct event_trigger_ops event_enable_trigger_ops = {
	.func = event_enable_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

/* enable_event with a :count limit */
static struct event_trigger_ops event_enable_count_trigger_ops = {
	.func = event_enable_count_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

/*
 * disable_event shares the same callbacks as enable_event; the
 * direction is carried in enable_trigger_data->enable.
 */
static struct event_trigger_ops event_disable_trigger_ops = {
	.func = event_enable_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

/* disable_event with a :count limit */
static struct event_trigger_ops event_disable_count_trigger_ops = {
	.func = event_enable_count_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};
/**
 * event_enable_trigger_func - event_command "func" for enable/disable_event
 * @cmd_ops: The event_command for this trigger command
 * @file: The event file the trigger is being attached to
 * @glob: The raw trigger string (a leading '!' means removal)
 * @cmd: The command name (enable/disable event or hist variant)
 * @param: "<system>:<event>[:count] [if <filter>]"
 *
 * Parses @param into the target event, optional count and optional
 * filter, then registers (or, for a '!' glob, unregisters) a trigger
 * that soft-enables or soft-disables the target event.
 *
 * Return: 0 on success, negative error code on failure.
 */
int event_enable_trigger_func(struct event_command *cmd_ops,
			      struct trace_event_file *file,
			      char *glob, char *cmd, char *param)
{
	struct trace_event_file *event_enable_file;
	struct enable_trigger_data *enable_data;
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;
	struct trace_array *tr = file->tr;
	const char *system;
	const char *event;
	bool hist = false;
	char *trigger;
	char *number;
	bool enable;
	int ret;

	if (!param)
		return -EINVAL;

	/* separate the trigger from the filter (s:e:n [if filter]) */
	trigger = strsep(&param, " \t");
	if (!trigger)
		return -EINVAL;
	if (param) {
		param = skip_spaces(param);
		if (!*param)
			param = NULL;
	}

	/* Split the s:e:n trigger spec into system, event and count */
	system = strsep(&trigger, ":");
	if (!trigger)
		return -EINVAL;

	event = strsep(&trigger, ":");

	ret = -EINVAL;
	event_enable_file = find_event_file(tr, system, event);
	if (!event_enable_file)
		goto out;

	/* The command string decides the direction (and hist variant) */
#ifdef CONFIG_HIST_TRIGGERS
	hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
		(strcmp(cmd, DISABLE_HIST_STR) == 0));

	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
#else
	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
#endif
	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out;

	enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
	if (!enable_data) {
		kfree(trigger_data);
		goto out;
	}

	trigger_data->count = -1;	/* default: unlimited firings */
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;
	INIT_LIST_HEAD(&trigger_data->list);
	RCU_INIT_POINTER(trigger_data->filter, NULL);

	enable_data->hist = hist;
	enable_data->enable = enable;
	enable_data->file = event_enable_file;
	trigger_data->private_data = enable_data;

	/* A '!' prefix removes the trigger instead of adding it */
	if (glob[0] == '!') {
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		kfree(trigger_data);
		kfree(enable_data);
		ret = 0;
		goto out;
	}

	/* Up the trigger_data count to make sure nothing frees it on failure */
	event_trigger_init(trigger_ops, trigger_data);

	if (trigger) {
		number = strsep(&trigger, ":");

		ret = -EINVAL;
		if (!strlen(number))
			goto out_free;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
		if (ret)
			goto out_free;
	}

	if (!param) /* if param is non-empty, it's supposed to be a filter */
		goto out_reg;

	if (!cmd_ops->set_filter)
		goto out_reg;

	ret = cmd_ops->set_filter(param, trigger_data, file);
	if (ret < 0)
		goto out_free;

 out_reg:
	/* Don't let event modules unload while probe registered */
	ret = try_module_get(event_enable_file->event_call->mod);
	if (!ret) {
		ret = -EBUSY;
		goto out_free;
	}

	ret = trace_event_enable_disable(event_enable_file, 1, 1);
	if (ret < 0)
		goto out_put;
	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of functions enabled,
	 * but if it didn't find any functions it returns zero.
	 * Consider no functions a failure too.
	 */
	if (!ret) {
		ret = -ENOENT;
		goto out_disable;
	} else if (ret < 0)
		goto out_disable;
	/* Just return zero, not the number of enabled functions */
	ret = 0;
	/* Drop the extra reference taken above; reg() holds its own */
	event_trigger_free(trigger_ops, trigger_data);
 out:
	return ret;

 out_disable:
	trace_event_enable_disable(event_enable_file, 0, 1);
 out_put:
	module_put(event_enable_file->event_call->mod);
 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);
	/* Drops the init reference; frees trigger_data when it hits zero */
	event_trigger_free(trigger_ops, trigger_data);
	kfree(enable_data);
	goto out;
}
  1264. int event_enable_register_trigger(char *glob,
  1265. struct event_trigger_ops *ops,
  1266. struct event_trigger_data *data,
  1267. struct trace_event_file *file)
  1268. {
  1269. struct enable_trigger_data *enable_data = data->private_data;
  1270. struct enable_trigger_data *test_enable_data;
  1271. struct event_trigger_data *test;
  1272. int ret = 0;
  1273. list_for_each_entry_rcu(test, &file->triggers, list) {
  1274. test_enable_data = test->private_data;
  1275. if (test_enable_data &&
  1276. (test->cmd_ops->trigger_type ==
  1277. data->cmd_ops->trigger_type) &&
  1278. (test_enable_data->file == enable_data->file)) {
  1279. ret = -EEXIST;
  1280. goto out;
  1281. }
  1282. }
  1283. if (data->ops->init) {
  1284. ret = data->ops->init(data->ops, data);
  1285. if (ret < 0)
  1286. goto out;
  1287. }
  1288. list_add_rcu(&data->list, &file->triggers);
  1289. ret++;
  1290. update_cond_flag(file);
  1291. if (trace_event_trigger_enable_disable(file, 1) < 0) {
  1292. list_del_rcu(&data->list);
  1293. update_cond_flag(file);
  1294. ret--;
  1295. }
  1296. out:
  1297. return ret;
  1298. }
  1299. void event_enable_unregister_trigger(char *glob,
  1300. struct event_trigger_ops *ops,
  1301. struct event_trigger_data *test,
  1302. struct trace_event_file *file)
  1303. {
  1304. struct enable_trigger_data *test_enable_data = test->private_data;
  1305. struct enable_trigger_data *enable_data;
  1306. struct event_trigger_data *data;
  1307. bool unregistered = false;
  1308. list_for_each_entry_rcu(data, &file->triggers, list) {
  1309. enable_data = data->private_data;
  1310. if (enable_data &&
  1311. (data->cmd_ops->trigger_type ==
  1312. test->cmd_ops->trigger_type) &&
  1313. (enable_data->file == test_enable_data->file)) {
  1314. unregistered = true;
  1315. list_del_rcu(&data->list);
  1316. trace_event_trigger_enable_disable(file, 0);
  1317. update_cond_flag(file);
  1318. break;
  1319. }
  1320. }
  1321. if (unregistered && data->ops->free)
  1322. data->ops->free(data->ops, data);
  1323. }
  1324. static struct event_trigger_ops *
  1325. event_enable_get_trigger_ops(char *cmd, char *param)
  1326. {
  1327. struct event_trigger_ops *ops;
  1328. bool enable;
  1329. #ifdef CONFIG_HIST_TRIGGERS
  1330. enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
  1331. (strcmp(cmd, ENABLE_HIST_STR) == 0));
  1332. #else
  1333. enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
  1334. #endif
  1335. if (enable)
  1336. ops = param ? &event_enable_count_trigger_ops :
  1337. &event_enable_trigger_ops;
  1338. else
  1339. ops = param ? &event_disable_count_trigger_ops :
  1340. &event_disable_trigger_ops;
  1341. return ops;
  1342. }
/* The "enable_event" trigger command */
static struct event_command trigger_enable_cmd = {
	.name = ENABLE_EVENT_STR,
	.trigger_type = ETT_EVENT_ENABLE,
	.func = event_enable_trigger_func,
	.reg = event_enable_register_trigger,
	.unreg = event_enable_unregister_trigger,
	.get_trigger_ops = event_enable_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

/* The "disable_event" trigger command */
static struct event_command trigger_disable_cmd = {
	.name = DISABLE_EVENT_STR,
	.trigger_type = ETT_EVENT_ENABLE,
	.func = event_enable_trigger_func,
	.reg = event_enable_register_trigger,
	.unreg = event_enable_unregister_trigger,
	.get_trigger_ops = event_enable_get_trigger_ops,
	.set_filter = set_trigger_filter,
};
/* Remove both commands; used if registering the pair half-fails */
static __init void unregister_trigger_enable_disable_cmds(void)
{
	unregister_event_command(&trigger_enable_cmd);
	unregister_event_command(&trigger_disable_cmd);
}
  1366. static __init int register_trigger_enable_disable_cmds(void)
  1367. {
  1368. int ret;
  1369. ret = register_event_command(&trigger_enable_cmd);
  1370. if (WARN_ON(ret < 0))
  1371. return ret;
  1372. ret = register_event_command(&trigger_disable_cmd);
  1373. if (WARN_ON(ret < 0))
  1374. unregister_trigger_enable_disable_cmds();
  1375. return ret;
  1376. }
  1377. static __init int register_trigger_traceon_traceoff_cmds(void)
  1378. {
  1379. int ret;
  1380. ret = register_event_command(&trigger_traceon_cmd);
  1381. if (WARN_ON(ret < 0))
  1382. return ret;
  1383. ret = register_event_command(&trigger_traceoff_cmd);
  1384. if (WARN_ON(ret < 0))
  1385. unregister_trigger_traceon_traceoff_cmds();
  1386. return ret;
  1387. }
/**
 * register_trigger_cmds - register all built-in trigger commands at boot
 *
 * Registers traceon/traceoff, snapshot, stacktrace, the event
 * enable/disable commands and the hist commands. Failures WARN inside
 * the individual helpers; this function always returns 0.
 */
__init int register_trigger_cmds(void)
{
	register_trigger_traceon_traceoff_cmds();
	register_trigger_snapshot_cmd();
	register_trigger_stacktrace_cmd();
	register_trigger_enable_disable_cmds();
	register_trigger_hist_enable_disable_cmds();
	register_trigger_hist_cmd();

	return 0;
}