trace_events.c

  1. /*
  2. * event tracer
  3. *
  4. * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
  5. *
  6. * - Added format output of fields of the trace point.
  7. * This was based on work by Tom Zanussi <tzanussi@gmail.com>.
  8. *
  9. */
  10. #define pr_fmt(fmt) fmt
  11. #include <linux/workqueue.h>
  12. #include <linux/spinlock.h>
  13. #include <linux/kthread.h>
  14. #include <linux/tracefs.h>
  15. #include <linux/uaccess.h>
  16. #include <linux/module.h>
  17. #include <linux/ctype.h>
  18. #include <linux/sort.h>
  19. #include <linux/slab.h>
  20. #include <linux/delay.h>
  21. #include <trace/events/sched.h>
  22. #include <asm/setup.h>
  23. #include "trace_output.h"
  24. #undef TRACE_SYSTEM
  25. #define TRACE_SYSTEM "TRACE_SYSTEM"
  26. DEFINE_MUTEX(event_mutex);
  27. LIST_HEAD(ftrace_events);
  28. static LIST_HEAD(ftrace_generic_fields);
  29. static LIST_HEAD(ftrace_common_fields);
  30. #define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)
  31. static struct kmem_cache *field_cachep;
  32. static struct kmem_cache *file_cachep;
  33. static inline int system_refcount(struct event_subsystem *system)
  34. {
  35. return system->ref_count;
  36. }
  37. static int system_refcount_inc(struct event_subsystem *system)
  38. {
  39. return system->ref_count++;
  40. }
  41. static int system_refcount_dec(struct event_subsystem *system)
  42. {
  43. return --system->ref_count;
  44. }
  45. /* Double loops, do not use break, only goto's work */
  46. #define do_for_each_event_file(tr, file) \
  47. list_for_each_entry(tr, &ftrace_trace_arrays, list) { \
  48. list_for_each_entry(file, &tr->events, list)
  49. #define do_for_each_event_file_safe(tr, file) \
  50. list_for_each_entry(tr, &ftrace_trace_arrays, list) { \
  51. struct trace_event_file *___n; \
  52. list_for_each_entry_safe(file, ___n, &tr->events, list)
  53. #define while_for_each_event_file() \
  54. }
  55. static struct list_head *
  56. trace_get_fields(struct trace_event_call *event_call)
  57. {
  58. if (!event_call->class->get_fields)
  59. return &event_call->class->fields;
  60. return event_call->class->get_fields(event_call);
  61. }
  62. static struct ftrace_event_field *
  63. __find_event_field(struct list_head *head, char *name)
  64. {
  65. struct ftrace_event_field *field;
  66. list_for_each_entry(field, head, link) {
  67. if (!strcmp(field->name, name))
  68. return field;
  69. }
  70. return NULL;
  71. }
  72. struct ftrace_event_field *
  73. trace_find_event_field(struct trace_event_call *call, char *name)
  74. {
  75. struct ftrace_event_field *field;
  76. struct list_head *head;
  77. head = trace_get_fields(call);
  78. field = __find_event_field(head, name);
  79. if (field)
  80. return field;
  81. field = __find_event_field(&ftrace_generic_fields, name);
  82. if (field)
  83. return field;
  84. return __find_event_field(&ftrace_common_fields, name);
  85. }
  86. static int __trace_define_field(struct list_head *head, const char *type,
  87. const char *name, int offset, int size,
  88. int is_signed, int filter_type)
  89. {
  90. struct ftrace_event_field *field;
  91. field = kmem_cache_alloc(field_cachep, GFP_TRACE);
  92. if (!field)
  93. return -ENOMEM;
  94. field->name = name;
  95. field->type = type;
  96. if (filter_type == FILTER_OTHER)
  97. field->filter_type = filter_assign_type(type);
  98. else
  99. field->filter_type = filter_type;
  100. field->offset = offset;
  101. field->size = size;
  102. field->is_signed = is_signed;
  103. list_add(&field->link, head);
  104. return 0;
  105. }
  106. int trace_define_field(struct trace_event_call *call, const char *type,
  107. const char *name, int offset, int size, int is_signed,
  108. int filter_type)
  109. {
  110. struct list_head *head;
  111. if (WARN_ON(!call->class))
  112. return 0;
  113. head = trace_get_fields(call);
  114. return __trace_define_field(head, type, name, offset, size,
  115. is_signed, filter_type);
  116. }
  117. EXPORT_SYMBOL_GPL(trace_define_field);
  118. #define __generic_field(type, item, filter_type) \
  119. ret = __trace_define_field(&ftrace_generic_fields, #type, \
  120. #item, 0, 0, is_signed_type(type), \
  121. filter_type); \
  122. if (ret) \
  123. return ret;
  124. #define __common_field(type, item) \
  125. ret = __trace_define_field(&ftrace_common_fields, #type, \
  126. "common_" #item, \
  127. offsetof(typeof(ent), item), \
  128. sizeof(ent.item), \
  129. is_signed_type(type), FILTER_OTHER); \
  130. if (ret) \
  131. return ret;
  132. static int trace_define_generic_fields(void)
  133. {
  134. int ret;
  135. __generic_field(int, CPU, FILTER_CPU);
  136. __generic_field(int, cpu, FILTER_CPU);
  137. __generic_field(char *, COMM, FILTER_COMM);
  138. __generic_field(char *, comm, FILTER_COMM);
  139. return ret;
  140. }
  141. static int trace_define_common_fields(void)
  142. {
  143. int ret;
  144. struct trace_entry ent;
  145. __common_field(unsigned short, type);
  146. __common_field(unsigned char, flags);
  147. __common_field(unsigned char, preempt_count);
  148. __common_field(int, pid);
  149. return ret;
  150. }
  151. static void trace_destroy_fields(struct trace_event_call *call)
  152. {
  153. struct ftrace_event_field *field, *next;
  154. struct list_head *head;
  155. head = trace_get_fields(call);
  156. list_for_each_entry_safe(field, next, head, link) {
  157. list_del(&field->link);
  158. kmem_cache_free(field_cachep, field);
  159. }
  160. }
  161. /*
  162. * run-time version of trace_event_get_offsets_<call>() that returns the last
  163. * accessible offset of trace fields excluding __dynamic_array bytes
  164. */
  165. int trace_event_get_offsets(struct trace_event_call *call)
  166. {
  167. struct ftrace_event_field *tail;
  168. struct list_head *head;
  169. head = trace_get_fields(call);
  170. /*
  171. * head->next points to the last field with the largest offset,
  172. * since it was added last by trace_define_field()
  173. */
  174. tail = list_first_entry(head, struct ftrace_event_field, link);
  175. return tail->offset + tail->size;
  176. }
  177. int trace_event_raw_init(struct trace_event_call *call)
  178. {
  179. int id;
  180. id = register_trace_event(&call->event);
  181. if (!id)
  182. return -ENODEV;
  183. return 0;
  184. }
  185. EXPORT_SYMBOL_GPL(trace_event_raw_init);
  186. bool trace_event_ignore_this_pid(struct trace_event_file *trace_file)
  187. {
  188. struct trace_array *tr = trace_file->tr;
  189. struct trace_array_cpu *data;
  190. struct trace_pid_list *pid_list;
  191. pid_list = rcu_dereference_sched(tr->filtered_pids);
  192. if (!pid_list)
  193. return false;
  194. data = this_cpu_ptr(tr->trace_buffer.data);
  195. return data->ignore_pid;
  196. }
  197. EXPORT_SYMBOL_GPL(trace_event_ignore_this_pid);
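/*
 * Reserve space on the trace ring buffer for one event from trace_file.
 * Returns a pointer to the entry to fill in, or NULL if the event is
 * filtered out by the per-instance PID filter or the reservation fails.
 * The irq flags and preempt count are saved in fbuffer for the commit.
 */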
  198. void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
  199. struct trace_event_file *trace_file,
  200. unsigned long len)
  201. {
  202. struct trace_event_call *event_call = trace_file->event_call;
  203. if ((trace_file->flags & EVENT_FILE_FL_PID_FILTER) &&
  204. trace_event_ignore_this_pid(trace_file))
  205. return NULL;
  206. local_save_flags(fbuffer->flags);
  207. fbuffer->pc = preempt_count();
  208. /*
  209. * If CONFIG_PREEMPT is enabled, then the tracepoint itself disables
  210. * preemption (adding one to the preempt_count). Since we are
  211. * interested in the preempt_count at the time the tracepoint was
  212. * hit, we need to subtract one to offset the increment.
  213. */
  214. if (IS_ENABLED(CONFIG_PREEMPT))
  215. fbuffer->pc--;
  216. fbuffer->trace_file = trace_file;
  217. fbuffer->event =
  218. trace_event_buffer_lock_reserve(&fbuffer->buffer, trace_file,
  219. event_call->event.type, len,
  220. fbuffer->flags, fbuffer->pc);
  221. if (!fbuffer->event)
  222. return NULL;
  223. fbuffer->entry = ring_buffer_event_data(fbuffer->event);
  224. return fbuffer->entry;
  225. }
  226. EXPORT_SYMBOL_GPL(trace_event_buffer_reserve);
  227. static DEFINE_SPINLOCK(tracepoint_iter_lock);
  228. static void output_printk(struct trace_event_buffer *fbuffer)
  229. {
  230. struct trace_event_call *event_call;
  231. struct trace_event *event;
  232. unsigned long flags;
  233. struct trace_iterator *iter = tracepoint_print_iter;
  234. if (!iter)
  235. return;
  236. event_call = fbuffer->trace_file->event_call;
  237. if (!event_call || !event_call->event.funcs ||
  238. !event_call->event.funcs->trace)
  239. return;
  240. event = &fbuffer->trace_file->event_call->event;
  241. spin_lock_irqsave(&tracepoint_iter_lock, flags);
  242. trace_seq_init(&iter->seq);
  243. iter->ent = fbuffer->entry;
  244. event_call->event.funcs->trace(iter, 0, event);
  245. trace_seq_putc(&iter->seq, 0);
  246. printk("%s", iter->seq.buffer);
  247. spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
  248. }
  249. void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
  250. {
  251. if (tracepoint_printk)
  252. output_printk(fbuffer);
  253. event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
  254. fbuffer->event, fbuffer->entry,
  255. fbuffer->flags, fbuffer->pc);
  256. }
  257. EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
  258. int trace_event_reg(struct trace_event_call *call,
  259. enum trace_reg type, void *data)
  260. {
  261. struct trace_event_file *file = data;
  262. WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT));
  263. switch (type) {
  264. case TRACE_REG_REGISTER:
  265. return tracepoint_probe_register(call->tp,
  266. call->class->probe,
  267. file);
  268. case TRACE_REG_UNREGISTER:
  269. tracepoint_probe_unregister(call->tp,
  270. call->class->probe,
  271. file);
  272. return 0;
  273. #ifdef CONFIG_PERF_EVENTS
  274. case TRACE_REG_PERF_REGISTER:
  275. return tracepoint_probe_register(call->tp,
  276. call->class->perf_probe,
  277. call);
  278. case TRACE_REG_PERF_UNREGISTER:
  279. tracepoint_probe_unregister(call->tp,
  280. call->class->perf_probe,
  281. call);
  282. return 0;
  283. case TRACE_REG_PERF_OPEN:
  284. case TRACE_REG_PERF_CLOSE:
  285. case TRACE_REG_PERF_ADD:
  286. case TRACE_REG_PERF_DEL:
  287. return 0;
  288. #endif
  289. }
  290. return 0;
  291. }
  292. EXPORT_SYMBOL_GPL(trace_event_reg);
  293. void trace_event_enable_cmd_record(bool enable)
  294. {
  295. struct trace_event_file *file;
  296. struct trace_array *tr;
  297. mutex_lock(&event_mutex);
  298. do_for_each_event_file(tr, file) {
  299. if (!(file->flags & EVENT_FILE_FL_ENABLED))
  300. continue;
  301. if (enable) {
  302. tracing_start_cmdline_record();
  303. set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
  304. } else {
  305. tracing_stop_cmdline_record();
  306. clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
  307. }
  308. } while_for_each_event_file();
  309. mutex_unlock(&event_mutex);
  310. }
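/*
 * Common enable/disable path. @enable is 0 or 1. @soft_disable puts the
 * event into "soft" mode: the tracepoint stays registered but the event
 * is marked SOFT_DISABLED so nothing is recorded until it is soft
 * enabled again. The sm_ref counter tracks how many users requested
 * soft mode.
 */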
  311. static int __ftrace_event_enable_disable(struct trace_event_file *file,
  312. int enable, int soft_disable)
  313. {
  314. struct trace_event_call *call = file->event_call;
  315. struct trace_array *tr = file->tr;
  316. unsigned long file_flags = file->flags;
  317. int ret = 0;
  318. int disable;
  319. switch (enable) {
  320. case 0:
  321. /*
  322. * When soft_disable is set and enable is cleared, the sm_ref
  323. * reference counter is decremented. If it reaches 0, we want
  324. * to clear the SOFT_DISABLED flag but leave the event in the
  325. * state that it was. That is, if the event was enabled and
  326. * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
  327. * is set we do not want the event to be enabled before we
  328. * clear the bit.
  329. *
  330. * When soft_disable is not set but the SOFT_MODE flag is,
  331. * we do nothing. Do not disable the tracepoint, otherwise
  332. * "soft enable"s (clearing the SOFT_DISABLED bit) wont work.
  333. */
  334. if (soft_disable) {
  335. if (atomic_dec_return(&file->sm_ref) > 0)
  336. break;
  337. disable = file->flags & EVENT_FILE_FL_SOFT_DISABLED;
  338. clear_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
  339. } else
  340. disable = !(file->flags & EVENT_FILE_FL_SOFT_MODE);
  341. if (disable && (file->flags & EVENT_FILE_FL_ENABLED)) {
  342. clear_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);
  343. if (file->flags & EVENT_FILE_FL_RECORDED_CMD) {
  344. tracing_stop_cmdline_record();
  345. clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
  346. }
  347. call->class->reg(call, TRACE_REG_UNREGISTER, file);
  348. }
  349. /* If in SOFT_MODE, just set the SOFT_DISABLE_BIT, else clear it */
  350. if (file->flags & EVENT_FILE_FL_SOFT_MODE)
  351. set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
  352. else
  353. clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
  354. break;
  355. case 1:
  356. /*
  357. * When soft_disable is set and enable is set, we want to
  358. * register the tracepoint for the event, but leave the event
  359. * as is. That means, if the event was already enabled, we do
  360. * nothing (but set SOFT_MODE). If the event is disabled, we
  361. * set SOFT_DISABLED before enabling the event tracepoint, so
  362. * it still seems to be disabled.
  363. */
  364. if (!soft_disable)
  365. clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
  366. else {
  367. if (atomic_inc_return(&file->sm_ref) > 1)
  368. break;
  369. set_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
  370. }
  371. if (!(file->flags & EVENT_FILE_FL_ENABLED)) {
  372. /* Keep the event disabled, when going to SOFT_MODE. */
  373. if (soft_disable)
  374. set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
  375. if (tr->trace_flags & TRACE_ITER_RECORD_CMD) {
  376. tracing_start_cmdline_record();
  377. set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
  378. }
  379. ret = call->class->reg(call, TRACE_REG_REGISTER, file);
  380. if (ret) {
  381. tracing_stop_cmdline_record();
  382. pr_info("event trace: Could not enable event "
  383. "%s\n", trace_event_name(call));
  384. break;
  385. }
  386. set_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);
  387. /* WAS_ENABLED gets set but never cleared. */
  388. call->flags |= TRACE_EVENT_FL_WAS_ENABLED;
  389. }
  390. break;
  391. }
  392. /* Enable or disable use of trace_buffered_event */
  393. if ((file_flags & EVENT_FILE_FL_SOFT_DISABLED) !=
  394. (file->flags & EVENT_FILE_FL_SOFT_DISABLED)) {
  395. if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
  396. trace_buffered_event_enable();
  397. else
  398. trace_buffered_event_disable();
  399. }
  400. return ret;
  401. }
  402. int trace_event_enable_disable(struct trace_event_file *file,
  403. int enable, int soft_disable)
  404. {
  405. return __ftrace_event_enable_disable(file, enable, soft_disable);
  406. }
  407. static int ftrace_event_enable_disable(struct trace_event_file *file,
  408. int enable)
  409. {
  410. return __ftrace_event_enable_disable(file, enable, 0);
  411. }
  412. static void ftrace_clear_events(struct trace_array *tr)
  413. {
  414. struct trace_event_file *file;
  415. mutex_lock(&event_mutex);
  416. list_for_each_entry(file, &tr->events, list) {
  417. ftrace_event_enable_disable(file, 0);
  418. }
  419. mutex_unlock(&event_mutex);
  420. }
  421. static void
  422. event_filter_pid_sched_process_exit(void *data, struct task_struct *task)
  423. {
  424. struct trace_pid_list *pid_list;
  425. struct trace_array *tr = data;
  426. pid_list = rcu_dereference_sched(tr->filtered_pids);
  427. trace_filter_add_remove_task(pid_list, NULL, task);
  428. }
  429. static void
  430. event_filter_pid_sched_process_fork(void *data,
  431. struct task_struct *self,
  432. struct task_struct *task)
  433. {
  434. struct trace_pid_list *pid_list;
  435. struct trace_array *tr = data;
  436. pid_list = rcu_dereference_sched(tr->filtered_pids);
  437. trace_filter_add_remove_task(pid_list, self, task);
  438. }
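/*
 * When enabled, hook sched_process_fork/exit so the pid filter follows
 * forks (children of filtered tasks are added) and drops exiting tasks.
 */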
  439. void trace_event_follow_fork(struct trace_array *tr, bool enable)
  440. {
  441. if (enable) {
  442. register_trace_prio_sched_process_fork(event_filter_pid_sched_process_fork,
  443. tr, INT_MIN);
  444. register_trace_prio_sched_process_exit(event_filter_pid_sched_process_exit,
  445. tr, INT_MAX);
  446. } else {
  447. unregister_trace_sched_process_fork(event_filter_pid_sched_process_fork,
  448. tr);
  449. unregister_trace_sched_process_exit(event_filter_pid_sched_process_exit,
  450. tr);
  451. }
  452. }
  453. static void
  454. event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
  455. struct task_struct *prev, struct task_struct *next)
  456. {
  457. struct trace_array *tr = data;
  458. struct trace_pid_list *pid_list;
  459. pid_list = rcu_dereference_sched(tr->filtered_pids);
  460. this_cpu_write(tr->trace_buffer.data->ignore_pid,
  461. trace_ignore_this_task(pid_list, prev) &&
  462. trace_ignore_this_task(pid_list, next));
  463. }
  464. static void
  465. event_filter_pid_sched_switch_probe_post(void *data, bool preempt,
  466. struct task_struct *prev, struct task_struct *next)
  467. {
  468. struct trace_array *tr = data;
  469. struct trace_pid_list *pid_list;
  470. pid_list = rcu_dereference_sched(tr->filtered_pids);
  471. this_cpu_write(tr->trace_buffer.data->ignore_pid,
  472. trace_ignore_this_task(pid_list, next));
  473. }
  474. static void
  475. event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task)
  476. {
  477. struct trace_array *tr = data;
  478. struct trace_pid_list *pid_list;
  479. /* Nothing to do if we are already tracing */
  480. if (!this_cpu_read(tr->trace_buffer.data->ignore_pid))
  481. return;
  482. pid_list = rcu_dereference_sched(tr->filtered_pids);
  483. this_cpu_write(tr->trace_buffer.data->ignore_pid,
  484. trace_ignore_this_task(pid_list, task));
  485. }
  486. static void
  487. event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task)
  488. {
  489. struct trace_array *tr = data;
  490. struct trace_pid_list *pid_list;
  491. /* Nothing to do if we are not tracing */
  492. if (this_cpu_read(tr->trace_buffer.data->ignore_pid))
  493. return;
  494. pid_list = rcu_dereference_sched(tr->filtered_pids);
  495. /* Set tracing if current is enabled */
  496. this_cpu_write(tr->trace_buffer.data->ignore_pid,
  497. trace_ignore_this_task(pid_list, current));
  498. }
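/*
 * Tear down pid filtering for this trace_array: unregister the sched
 * probes, clear the PID_FILTER bit on every event file, reset the
 * per-cpu ignore_pid flags, and free the old pid list once all users
 * are done with it.
 */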
  499. static void __ftrace_clear_event_pids(struct trace_array *tr)
  500. {
  501. struct trace_pid_list *pid_list;
  502. struct trace_event_file *file;
  503. int cpu;
  504. pid_list = rcu_dereference_protected(tr->filtered_pids,
  505. lockdep_is_held(&event_mutex));
  506. if (!pid_list)
  507. return;
  508. unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_pre, tr);
  509. unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_post, tr);
  510. unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, tr);
  511. unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, tr);
  512. unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre, tr);
  513. unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post, tr);
  514. unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_pre, tr);
  515. unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_post, tr);
  516. list_for_each_entry(file, &tr->events, list) {
  517. clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
  518. }
  519. for_each_possible_cpu(cpu)
  520. per_cpu_ptr(tr->trace_buffer.data, cpu)->ignore_pid = false;
  521. rcu_assign_pointer(tr->filtered_pids, NULL);
  522. /* Wait till all users are no longer using pid filtering */
  523. synchronize_sched();
  524. trace_free_pid_list(pid_list);
  525. }
  526. static void ftrace_clear_event_pids(struct trace_array *tr)
  527. {
  528. mutex_lock(&event_mutex);
  529. __ftrace_clear_event_pids(tr);
  530. mutex_unlock(&event_mutex);
  531. }
  532. static void __put_system(struct event_subsystem *system)
  533. {
  534. struct event_filter *filter = system->filter;
  535. WARN_ON_ONCE(system_refcount(system) == 0);
  536. if (system_refcount_dec(system))
  537. return;
  538. list_del(&system->list);
  539. if (filter) {
  540. kfree(filter->filter_string);
  541. kfree(filter);
  542. }
  543. kfree_const(system->name);
  544. kfree(system);
  545. }
  546. static void __get_system(struct event_subsystem *system)
  547. {
  548. WARN_ON_ONCE(system_refcount(system) == 0);
  549. system_refcount_inc(system);
  550. }
  551. static void __get_system_dir(struct trace_subsystem_dir *dir)
  552. {
  553. WARN_ON_ONCE(dir->ref_count == 0);
  554. dir->ref_count++;
  555. __get_system(dir->subsystem);
  556. }
  557. static void __put_system_dir(struct trace_subsystem_dir *dir)
  558. {
  559. WARN_ON_ONCE(dir->ref_count == 0);
  560. /* If the subsystem is about to be freed, the dir must be too */
  561. WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1);
  562. __put_system(dir->subsystem);
  563. if (!--dir->ref_count)
  564. kfree(dir);
  565. }
  566. static void put_system(struct trace_subsystem_dir *dir)
  567. {
  568. mutex_lock(&event_mutex);
  569. __put_system_dir(dir);
  570. mutex_unlock(&event_mutex);
  571. }
  572. static void remove_subsystem(struct trace_subsystem_dir *dir)
  573. {
  574. if (!dir)
  575. return;
  576. if (!--dir->nr_events) {
  577. tracefs_remove_recursive(dir->entry);
  578. list_del(&dir->list);
  579. __put_system_dir(dir);
  580. }
  581. }
  582. static void remove_event_file_dir(struct trace_event_file *file)
  583. {
  584. struct dentry *dir = file->dir;
  585. struct dentry *child;
  586. if (dir) {
  587. spin_lock(&dir->d_lock); /* probably unneeded */
  588. list_for_each_entry(child, &dir->d_subdirs, d_child) {
  589. if (d_really_is_positive(child)) /* probably unneeded */
  590. d_inode(child)->i_private = NULL;
  591. }
  592. spin_unlock(&dir->d_lock);
  593. tracefs_remove_recursive(dir);
  594. }
  595. list_del(&file->list);
  596. remove_subsystem(file->system);
  597. free_event_filter(file->filter);
  598. kmem_cache_free(file_cachep, file);
  599. }
  600. /*
  601. * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
  602. */
  603. static int
  604. __ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
  605. const char *sub, const char *event, int set)
  606. {
  607. struct trace_event_file *file;
  608. struct trace_event_call *call;
  609. const char *name;
  610. int ret = -EINVAL;
  611. list_for_each_entry(file, &tr->events, list) {
  612. call = file->event_call;
  613. name = trace_event_name(call);
  614. if (!name || !call->class || !call->class->reg)
  615. continue;
  616. if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
  617. continue;
  618. if (match &&
  619. strcmp(match, name) != 0 &&
  620. strcmp(match, call->class->system) != 0)
  621. continue;
  622. if (sub && strcmp(sub, call->class->system) != 0)
  623. continue;
  624. if (event && strcmp(event, name) != 0)
  625. continue;
  626. ftrace_event_enable_disable(file, set);
  627. ret = 0;
  628. }
  629. return ret;
  630. }
  631. static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
  632. const char *sub, const char *event, int set)
  633. {
  634. int ret;
  635. mutex_lock(&event_mutex);
  636. ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set);
  637. mutex_unlock(&event_mutex);
  638. return ret;
  639. }
  640. static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
  641. {
  642. char *event = NULL, *sub = NULL, *match;
  643. int ret;
  644. /*
  645. * The buf format can be <subsystem>:<event-name>
  646. * *:<event-name> means any event by that name.
  647. * :<event-name> is the same.
  648. *
  649. * <subsystem>:* means all events in that subsystem
  650. * <subsystem>: means the same.
  651. *
  652. * <name> (no ':') means all events in a subsystem with
  653. * the name <name> or any event that matches <name>
  654. */
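/*
 * Examples:
 *   "sched:sched_switch"  - just the sched_switch event
 *   "sched:" or "sched:*" - every event in the sched subsystem
 *   "sched_switch"        - any event or subsystem with that name
 */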
  655. match = strsep(&buf, ":");
  656. if (buf) {
  657. sub = match;
  658. event = buf;
  659. match = NULL;
  660. if (!strlen(sub) || strcmp(sub, "*") == 0)
  661. sub = NULL;
  662. if (!strlen(event) || strcmp(event, "*") == 0)
  663. event = NULL;
  664. }
  665. ret = __ftrace_set_clr_event(tr, match, sub, event, set);
  666. /* Put back the colon to allow this to be called again */
  667. if (buf)
  668. *(buf - 1) = ':';
  669. return ret;
  670. }
  671. /**
  672. * trace_set_clr_event - enable or disable an event
  673. * @system: system name to match (NULL for any system)
  674. * @event: event name to match (NULL for all events, within system)
  675. * @set: 1 to enable, 0 to disable
  676. *
  677. * This is a way for other parts of the kernel to enable or disable
  678. * event recording.
  679. *
  680. * Returns 0 on success, -EINVAL if the parameters do not match any
  681. * registered events.
  682. */
  683. int trace_set_clr_event(const char *system, const char *event, int set)
  684. {
  685. struct trace_array *tr = top_trace_array();
  686. if (!tr)
  687. return -ENODEV;
  688. return __ftrace_set_clr_event(tr, NULL, system, event, set);
  689. }
  690. EXPORT_SYMBOL_GPL(trace_set_clr_event);
  691. /* 128 should be much more than enough */
  692. #define EVENT_BUF_SIZE 127
  693. static ssize_t
  694. ftrace_event_write(struct file *file, const char __user *ubuf,
  695. size_t cnt, loff_t *ppos)
  696. {
  697. struct trace_parser parser;
  698. struct seq_file *m = file->private_data;
  699. struct trace_array *tr = m->private;
  700. ssize_t read, ret;
  701. if (!cnt)
  702. return 0;
  703. ret = tracing_update_buffers();
  704. if (ret < 0)
  705. return ret;
  706. if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
  707. return -ENOMEM;
  708. read = trace_get_user(&parser, ubuf, cnt, ppos);
  709. if (read >= 0 && trace_parser_loaded((&parser))) {
  710. int set = 1;
  711. if (*parser.buffer == '!')
  712. set = 0;
  713. parser.buffer[parser.idx] = 0;
  714. ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
  715. if (ret)
  716. goto out_put;
  717. }
  718. ret = read;
  719. out_put:
  720. trace_parser_put(&parser);
  721. return ret;
  722. }
  723. static void *
  724. t_next(struct seq_file *m, void *v, loff_t *pos)
  725. {
  726. struct trace_event_file *file = v;
  727. struct trace_event_call *call;
  728. struct trace_array *tr = m->private;
  729. (*pos)++;
  730. list_for_each_entry_continue(file, &tr->events, list) {
  731. call = file->event_call;
  732. /*
  733. * The ftrace subsystem is for showing formats only.
  734. * They can not be enabled or disabled via the event files.
  735. */
  736. if (call->class && call->class->reg &&
  737. !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
  738. return file;
  739. }
  740. return NULL;
  741. }
  742. static void *t_start(struct seq_file *m, loff_t *pos)
  743. {
  744. struct trace_event_file *file;
  745. struct trace_array *tr = m->private;
  746. loff_t l;
  747. mutex_lock(&event_mutex);
  748. file = list_entry(&tr->events, struct trace_event_file, list);
  749. for (l = 0; l <= *pos; ) {
  750. file = t_next(m, file, &l);
  751. if (!file)
  752. break;
  753. }
  754. return file;
  755. }
  756. static void *
  757. s_next(struct seq_file *m, void *v, loff_t *pos)
  758. {
  759. struct trace_event_file *file = v;
  760. struct trace_array *tr = m->private;
  761. (*pos)++;
  762. list_for_each_entry_continue(file, &tr->events, list) {
  763. if (file->flags & EVENT_FILE_FL_ENABLED)
  764. return file;
  765. }
  766. return NULL;
  767. }
  768. static void *s_start(struct seq_file *m, loff_t *pos)
  769. {
  770. struct trace_event_file *file;
  771. struct trace_array *tr = m->private;
  772. loff_t l;
  773. mutex_lock(&event_mutex);
  774. file = list_entry(&tr->events, struct trace_event_file, list);
  775. for (l = 0; l <= *pos; ) {
  776. file = s_next(m, file, &l);
  777. if (!file)
  778. break;
  779. }
  780. return file;
  781. }
  782. static int t_show(struct seq_file *m, void *v)
  783. {
  784. struct trace_event_file *file = v;
  785. struct trace_event_call *call = file->event_call;
  786. if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
  787. seq_printf(m, "%s:", call->class->system);
  788. seq_printf(m, "%s\n", trace_event_name(call));
  789. return 0;
  790. }
  791. static void t_stop(struct seq_file *m, void *p)
  792. {
  793. mutex_unlock(&event_mutex);
  794. }
  795. static void *
  796. p_next(struct seq_file *m, void *v, loff_t *pos)
  797. {
  798. struct trace_array *tr = m->private;
  799. struct trace_pid_list *pid_list = rcu_dereference_sched(tr->filtered_pids);
  800. return trace_pid_next(pid_list, v, pos);
  801. }
  802. static void *p_start(struct seq_file *m, loff_t *pos)
  803. __acquires(RCU)
  804. {
  805. struct trace_pid_list *pid_list;
  806. struct trace_array *tr = m->private;
  807. /*
  808. * Grab the mutex, to keep calls to p_next() having the same
  809. * tr->filtered_pids as p_start() has.
  810. * If we just passed the tr->filtered_pids around, then RCU would
  811. * have been enough, but doing that makes things more complex.
  812. */
  813. mutex_lock(&event_mutex);
  814. rcu_read_lock_sched();
  815. pid_list = rcu_dereference_sched(tr->filtered_pids);
  816. if (!pid_list)
  817. return NULL;
  818. return trace_pid_start(pid_list, pos);
  819. }
  820. static void p_stop(struct seq_file *m, void *p)
  821. __releases(RCU)
  822. {
  823. rcu_read_unlock_sched();
  824. mutex_unlock(&event_mutex);
  825. }
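/*
 * "enable" file: reads back "0" or "1" for the event state, with a
 * trailing "*" when the event is in soft mode or soft disabled.
 */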
  826. static ssize_t
  827. event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
  828. loff_t *ppos)
  829. {
  830. struct trace_event_file *file;
  831. unsigned long flags;
  832. char buf[4] = "0";
  833. mutex_lock(&event_mutex);
  834. file = event_file_data(filp);
  835. if (likely(file))
  836. flags = file->flags;
  837. mutex_unlock(&event_mutex);
  838. if (!file)
  839. return -ENODEV;
  840. if (flags & EVENT_FILE_FL_ENABLED &&
  841. !(flags & EVENT_FILE_FL_SOFT_DISABLED))
  842. strcpy(buf, "1");
  843. if (flags & EVENT_FILE_FL_SOFT_DISABLED ||
  844. flags & EVENT_FILE_FL_SOFT_MODE)
  845. strcat(buf, "*");
  846. strcat(buf, "\n");
  847. return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
  848. }
  849. static ssize_t
  850. event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
  851. loff_t *ppos)
  852. {
  853. struct trace_event_file *file;
  854. unsigned long val;
  855. int ret;
  856. ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
  857. if (ret)
  858. return ret;
  859. ret = tracing_update_buffers();
  860. if (ret < 0)
  861. return ret;
  862. switch (val) {
  863. case 0:
  864. case 1:
  865. ret = -ENODEV;
  866. mutex_lock(&event_mutex);
  867. file = event_file_data(filp);
  868. if (likely(file))
  869. ret = ftrace_event_enable_disable(file, val);
  870. mutex_unlock(&event_mutex);
  871. break;
  872. default:
  873. return -EINVAL;
  874. }
  875. *ppos += cnt;
  876. return ret ? ret : cnt;
  877. }
  878. static ssize_t
  879. system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
  880. loff_t *ppos)
  881. {
  882. const char set_to_char[4] = { '?', '0', '1', 'X' };
  883. struct trace_subsystem_dir *dir = filp->private_data;
  884. struct event_subsystem *system = dir->subsystem;
  885. struct trace_event_call *call;
  886. struct trace_event_file *file;
  887. struct trace_array *tr = dir->tr;
  888. char buf[2];
  889. int set = 0;
  890. int ret;
  891. mutex_lock(&event_mutex);
  892. list_for_each_entry(file, &tr->events, list) {
  893. call = file->event_call;
  894. if (!trace_event_name(call) || !call->class || !call->class->reg)
  895. continue;
  896. if (system && strcmp(call->class->system, system->name) != 0)
  897. continue;
  898. /*
  899. * We need to find out if all the events are set
  900. * or if all events are cleared, or if we have
  901. * a mixture.
  902. */
  903. set |= (1 << !!(file->flags & EVENT_FILE_FL_ENABLED));
  904. /*
  905. * If we have a mixture, no need to look further.
  906. */
  907. if (set == 3)
  908. break;
  909. }
  910. mutex_unlock(&event_mutex);
  911. buf[0] = set_to_char[set];
  912. buf[1] = '\n';
  913. ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
  914. return ret;
  915. }
  916. static ssize_t
  917. system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
  918. loff_t *ppos)
  919. {
  920. struct trace_subsystem_dir *dir = filp->private_data;
  921. struct event_subsystem *system = dir->subsystem;
  922. const char *name = NULL;
  923. unsigned long val;
  924. ssize_t ret;
  925. ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
  926. if (ret)
  927. return ret;
  928. ret = tracing_update_buffers();
  929. if (ret < 0)
  930. return ret;
  931. if (val != 0 && val != 1)
  932. return -EINVAL;
  933. /*
  934. * Opening of "enable" adds a ref count to system,
  935. * so the name is safe to use.
  936. */
  937. if (system)
  938. name = system->name;
  939. ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
  940. if (ret)
  941. goto out;
  942. ret = cnt;
  943. out:
  944. *ppos += cnt;
  945. return ret;
  946. }
  947. enum {
  948. FORMAT_HEADER = 1,
  949. FORMAT_FIELD_SEPERATOR = 2,
  950. FORMAT_PRINTFMT = 3,
  951. };
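/*
 * Iterator for the per-event "format" file. The sequence is:
 * FORMAT_HEADER (name, ID), the common fields, FORMAT_FIELD_SEPERATOR
 * (a blank line), the event's own fields, then FORMAT_PRINTFMT for the
 * print fmt line.
 */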
  952. static void *f_next(struct seq_file *m, void *v, loff_t *pos)
  953. {
  954. struct trace_event_call *call = event_file_data(m->private);
  955. struct list_head *common_head = &ftrace_common_fields;
  956. struct list_head *head = trace_get_fields(call);
  957. struct list_head *node = v;
  958. (*pos)++;
  959. switch ((unsigned long)v) {
  960. case FORMAT_HEADER:
  961. node = common_head;
  962. break;
  963. case FORMAT_FIELD_SEPERATOR:
  964. node = head;
  965. break;
  966. case FORMAT_PRINTFMT:
  967. /* all done */
  968. return NULL;
  969. }
  970. node = node->prev;
  971. if (node == common_head)
  972. return (void *)FORMAT_FIELD_SEPERATOR;
  973. else if (node == head)
  974. return (void *)FORMAT_PRINTFMT;
  975. else
  976. return node;
  977. }
  978. static int f_show(struct seq_file *m, void *v)
  979. {
  980. struct trace_event_call *call = event_file_data(m->private);
  981. struct ftrace_event_field *field;
  982. const char *array_descriptor;
  983. switch ((unsigned long)v) {
  984. case FORMAT_HEADER:
  985. seq_printf(m, "name: %s\n", trace_event_name(call));
  986. seq_printf(m, "ID: %d\n", call->event.type);
  987. seq_puts(m, "format:\n");
  988. return 0;
  989. case FORMAT_FIELD_SEPERATOR:
  990. seq_putc(m, '\n');
  991. return 0;
  992. case FORMAT_PRINTFMT:
  993. seq_printf(m, "\nprint fmt: %s\n",
  994. call->print_fmt);
  995. return 0;
  996. }
  997. field = list_entry(v, struct ftrace_event_field, link);
  998. /*
  999. * Smartly shows the array type (except dynamic array).
  1000. * Normal:
  1001. * field:TYPE VAR
  1002. * If TYPE := TYPE[LEN], it is shown:
  1003. * field:TYPE VAR[LEN]
  1004. */
  1005. array_descriptor = strchr(field->type, '[');
  1006. if (!strncmp(field->type, "__data_loc", 10))
  1007. array_descriptor = NULL;
  1008. if (!array_descriptor)
  1009. seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
  1010. field->type, field->name, field->offset,
  1011. field->size, !!field->is_signed);
  1012. else
  1013. seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
  1014. (int)(array_descriptor - field->type),
  1015. field->type, field->name,
  1016. array_descriptor, field->offset,
  1017. field->size, !!field->is_signed);
  1018. return 0;
  1019. }
  1020. static void *f_start(struct seq_file *m, loff_t *pos)
  1021. {
  1022. void *p = (void *)FORMAT_HEADER;
  1023. loff_t l = 0;
  1024. /* ->stop() is called even if ->start() fails */
  1025. mutex_lock(&event_mutex);
  1026. if (!event_file_data(m->private))
  1027. return ERR_PTR(-ENODEV);
  1028. while (l < *pos && p)
  1029. p = f_next(m, p, &l);
  1030. return p;
  1031. }
  1032. static void f_stop(struct seq_file *m, void *p)
  1033. {
  1034. mutex_unlock(&event_mutex);
  1035. }
  1036. static const struct seq_operations trace_format_seq_ops = {
  1037. .start = f_start,
  1038. .next = f_next,
  1039. .stop = f_stop,
  1040. .show = f_show,
  1041. };
  1042. static int trace_format_open(struct inode *inode, struct file *file)
  1043. {
  1044. struct seq_file *m;
  1045. int ret;
  1046. ret = seq_open(file, &trace_format_seq_ops);
  1047. if (ret < 0)
  1048. return ret;
  1049. m = file->private_data;
  1050. m->private = file;
  1051. return 0;
  1052. }
  1053. static ssize_t
  1054. event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
  1055. {
  1056. int id = (long)event_file_data(filp);
  1057. char buf[32];
  1058. int len;
  1059. if (*ppos)
  1060. return 0;
  1061. if (unlikely(!id))
  1062. return -ENODEV;
  1063. len = sprintf(buf, "%d\n", id);
  1064. return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
  1065. }
  1066. static ssize_t
  1067. event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
  1068. loff_t *ppos)
  1069. {
  1070. struct trace_event_file *file;
  1071. struct trace_seq *s;
  1072. int r = -ENODEV;
  1073. if (*ppos)
  1074. return 0;
  1075. s = kmalloc(sizeof(*s), GFP_KERNEL);
  1076. if (!s)
  1077. return -ENOMEM;
  1078. trace_seq_init(s);
  1079. mutex_lock(&event_mutex);
  1080. file = event_file_data(filp);
  1081. if (file)
  1082. print_event_filter(file, s);
  1083. mutex_unlock(&event_mutex);
  1084. if (file)
  1085. r = simple_read_from_buffer(ubuf, cnt, ppos,
  1086. s->buffer, trace_seq_used(s));
  1087. kfree(s);
  1088. return r;
  1089. }
  1090. static ssize_t
  1091. event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
  1092. loff_t *ppos)
  1093. {
  1094. struct trace_event_file *file;
  1095. char *buf;
  1096. int err = -ENODEV;
  1097. if (cnt >= PAGE_SIZE)
  1098. return -EINVAL;
  1099. buf = memdup_user_nul(ubuf, cnt);
  1100. if (IS_ERR(buf))
  1101. return PTR_ERR(buf);
  1102. mutex_lock(&event_mutex);
  1103. file = event_file_data(filp);
  1104. if (file)
  1105. err = apply_event_filter(file, buf);
  1106. mutex_unlock(&event_mutex);
  1107. kfree(buf);
  1108. if (err < 0)
  1109. return err;
  1110. *ppos += cnt;
  1111. return cnt;
  1112. }
  1113. static LIST_HEAD(event_subsystems);
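/*
 * Open a per-subsystem control file. Verify the trace_subsystem_dir
 * still exists in one of the trace arrays and, if it still has events,
 * take references on both the dir and its trace_array.
 */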
  1114. static int subsystem_open(struct inode *inode, struct file *filp)
  1115. {
  1116. struct event_subsystem *system = NULL;
  1117. struct trace_subsystem_dir *dir = NULL; /* Initialize for gcc */
  1118. struct trace_array *tr;
  1119. int ret;
  1120. if (tracing_is_disabled())
  1121. return -ENODEV;
  1122. /* Make sure the system still exists */
  1123. mutex_lock(&trace_types_lock);
  1124. mutex_lock(&event_mutex);
  1125. list_for_each_entry(tr, &ftrace_trace_arrays, list) {
  1126. list_for_each_entry(dir, &tr->systems, list) {
  1127. if (dir == inode->i_private) {
  1128. /* Don't open systems with no events */
  1129. if (dir->nr_events) {
  1130. __get_system_dir(dir);
  1131. system = dir->subsystem;
  1132. }
  1133. goto exit_loop;
  1134. }
  1135. }
  1136. }
  1137. exit_loop:
  1138. mutex_unlock(&event_mutex);
  1139. mutex_unlock(&trace_types_lock);
  1140. if (!system)
  1141. return -ENODEV;
  1142. /* Some versions of gcc think dir can be uninitialized here */
  1143. WARN_ON(!dir);
  1144. /* Still need to increment the ref count of the system */
  1145. if (trace_array_get(tr) < 0) {
  1146. put_system(dir);
  1147. return -ENODEV;
  1148. }
  1149. ret = tracing_open_generic(inode, filp);
  1150. if (ret < 0) {
  1151. trace_array_put(tr);
  1152. put_system(dir);
  1153. }
  1154. return ret;
  1155. }
  1156. static int system_tr_open(struct inode *inode, struct file *filp)
  1157. {
  1158. struct trace_subsystem_dir *dir;
  1159. struct trace_array *tr = inode->i_private;
  1160. int ret;
  1161. if (tracing_is_disabled())
  1162. return -ENODEV;
  1163. if (trace_array_get(tr) < 0)
  1164. return -ENODEV;
  1165. /* Make a temporary dir that has no system but points to tr */
  1166. dir = kzalloc(sizeof(*dir), GFP_KERNEL);
  1167. if (!dir) {
  1168. trace_array_put(tr);
  1169. return -ENOMEM;
  1170. }
  1171. dir->tr = tr;
  1172. ret = tracing_open_generic(inode, filp);
  1173. if (ret < 0) {
  1174. trace_array_put(tr);
  1175. kfree(dir);
  1176. return ret;
  1177. }
  1178. filp->private_data = dir;
  1179. return 0;
  1180. }
  1181. static int subsystem_release(struct inode *inode, struct file *file)
  1182. {
  1183. struct trace_subsystem_dir *dir = file->private_data;
  1184. trace_array_put(dir->tr);
  1185. /*
  1186. * If dir->subsystem is NULL, then this is a temporary
  1187. * descriptor that was made for a trace_array to enable
  1188. * all subsystems.
  1189. */
  1190. if (dir->subsystem)
  1191. put_system(dir);
  1192. else
  1193. kfree(dir);
  1194. return 0;
  1195. }
  1196. static ssize_t
  1197. subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
  1198. loff_t *ppos)
  1199. {
  1200. struct trace_subsystem_dir *dir = filp->private_data;
  1201. struct event_subsystem *system = dir->subsystem;
  1202. struct trace_seq *s;
  1203. int r;
  1204. if (*ppos)
  1205. return 0;
  1206. s = kmalloc(sizeof(*s), GFP_KERNEL);
  1207. if (!s)
  1208. return -ENOMEM;
  1209. trace_seq_init(s);
  1210. print_subsystem_event_filter(system, s);
  1211. r = simple_read_from_buffer(ubuf, cnt, ppos,
  1212. s->buffer, trace_seq_used(s));
  1213. kfree(s);
  1214. return r;
  1215. }
  1216. static ssize_t
  1217. subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
  1218. loff_t *ppos)
  1219. {
  1220. struct trace_subsystem_dir *dir = filp->private_data;
  1221. char *buf;
  1222. int err;
  1223. if (cnt >= PAGE_SIZE)
  1224. return -EINVAL;
  1225. buf = memdup_user_nul(ubuf, cnt);
  1226. if (IS_ERR(buf))
  1227. return PTR_ERR(buf);
  1228. err = apply_subsystem_event_filter(dir, buf);
  1229. kfree(buf);
  1230. if (err < 0)
  1231. return err;
  1232. *ppos += cnt;
  1233. return cnt;
  1234. }
  1235. static ssize_t
  1236. show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
  1237. {
  1238. int (*func)(struct trace_seq *s) = filp->private_data;
  1239. struct trace_seq *s;
  1240. int r;
  1241. if (*ppos)
  1242. return 0;
  1243. s = kmalloc(sizeof(*s), GFP_KERNEL);
  1244. if (!s)
  1245. return -ENOMEM;
  1246. trace_seq_init(s);
  1247. func(s);
  1248. r = simple_read_from_buffer(ubuf, cnt, ppos,
  1249. s->buffer, trace_seq_used(s));
  1250. kfree(s);
  1251. return r;
  1252. }
  1253. static void ignore_task_cpu(void *data)
  1254. {
  1255. struct trace_array *tr = data;
  1256. struct trace_pid_list *pid_list;
  1257. /*
  1258. * This function is called by on_each_cpu() while the
  1259. * event_mutex is held.
  1260. */
  1261. pid_list = rcu_dereference_protected(tr->filtered_pids,
  1262. mutex_is_locked(&event_mutex));
  1263. this_cpu_write(tr->trace_buffer.data->ignore_pid,
  1264. trace_ignore_this_task(pid_list, current));
  1265. }
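/*
 * Write handler for "set_event_pid". Parses the pid list from user
 * space, publishes it with rcu_assign_pointer(), flags every event file
 * for pid filtering and, on first use, registers the sched probes that
 * keep the per-cpu ignore_pid flag up to date. Finally runs
 * ignore_task_cpu() on every CPU to account for tasks already running.
 */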
  1266. static ssize_t
  1267. ftrace_event_pid_write(struct file *filp, const char __user *ubuf,
  1268. size_t cnt, loff_t *ppos)
  1269. {
  1270. struct seq_file *m = filp->private_data;
  1271. struct trace_array *tr = m->private;
  1272. struct trace_pid_list *filtered_pids = NULL;
  1273. struct trace_pid_list *pid_list;
  1274. struct trace_event_file *file;
  1275. ssize_t ret;
  1276. if (!cnt)
  1277. return 0;
  1278. ret = tracing_update_buffers();
  1279. if (ret < 0)
  1280. return ret;
  1281. mutex_lock(&event_mutex);
  1282. filtered_pids = rcu_dereference_protected(tr->filtered_pids,
  1283. lockdep_is_held(&event_mutex));
  1284. ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
  1285. if (ret < 0)
  1286. goto out;
  1287. rcu_assign_pointer(tr->filtered_pids, pid_list);
  1288. list_for_each_entry(file, &tr->events, list) {
  1289. set_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
  1290. }
  1291. if (filtered_pids) {
  1292. synchronize_sched();
  1293. trace_free_pid_list(filtered_pids);
  1294. } else if (pid_list) {
  1295. /*
  1296. * Register a probe that is called before all other probes
  1297. * to set ignore_pid if next or prev do not match.
1298. * Register a probe that is called after all other probes
  1299. * to only keep ignore_pid set if next pid matches.
  1300. */
  1301. register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_pre,
  1302. tr, INT_MAX);
  1303. register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_post,
  1304. tr, 0);
  1305. register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre,
  1306. tr, INT_MAX);
  1307. register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post,
  1308. tr, 0);
  1309. register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre,
  1310. tr, INT_MAX);
  1311. register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post,
  1312. tr, 0);
  1313. register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_pre,
  1314. tr, INT_MAX);
  1315. register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_post,
  1316. tr, 0);
  1317. }
  1318. /*
  1319. * Ignoring of pids is done at task switch. But we have to
  1320. * check for those tasks that are currently running.
  1321. * Always do this in case a pid was appended or removed.
  1322. */
  1323. on_each_cpu(ignore_task_cpu, tr, 1);
  1324. out:
  1325. mutex_unlock(&event_mutex);
  1326. if (ret > 0)
  1327. *ppos += ret;
  1328. return ret;
  1329. }
  1330. static int ftrace_event_avail_open(struct inode *inode, struct file *file);
  1331. static int ftrace_event_set_open(struct inode *inode, struct file *file);
  1332. static int ftrace_event_set_pid_open(struct inode *inode, struct file *file);
  1333. static int ftrace_event_release(struct inode *inode, struct file *file);
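/*
 * The seq_operations below back "available_events" (t_*), "set_event"
 * (s_*) and "set_event_pid" (p_*); the file_operations that follow wire
 * the tracefs entries created in event_create_dir(), event_subsystem_dir()
 * and create_event_toplevel_files() to the handlers above.
 */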
  1334. static const struct seq_operations show_event_seq_ops = {
  1335. .start = t_start,
  1336. .next = t_next,
  1337. .show = t_show,
  1338. .stop = t_stop,
  1339. };
  1340. static const struct seq_operations show_set_event_seq_ops = {
  1341. .start = s_start,
  1342. .next = s_next,
  1343. .show = t_show,
  1344. .stop = t_stop,
  1345. };
  1346. static const struct seq_operations show_set_pid_seq_ops = {
  1347. .start = p_start,
  1348. .next = p_next,
  1349. .show = trace_pid_show,
  1350. .stop = p_stop,
  1351. };
  1352. static const struct file_operations ftrace_avail_fops = {
  1353. .open = ftrace_event_avail_open,
  1354. .read = seq_read,
  1355. .llseek = seq_lseek,
  1356. .release = seq_release,
  1357. };
  1358. static const struct file_operations ftrace_set_event_fops = {
  1359. .open = ftrace_event_set_open,
  1360. .read = seq_read,
  1361. .write = ftrace_event_write,
  1362. .llseek = seq_lseek,
  1363. .release = ftrace_event_release,
  1364. };
  1365. static const struct file_operations ftrace_set_event_pid_fops = {
  1366. .open = ftrace_event_set_pid_open,
  1367. .read = seq_read,
  1368. .write = ftrace_event_pid_write,
  1369. .llseek = seq_lseek,
  1370. .release = ftrace_event_release,
  1371. };
  1372. static const struct file_operations ftrace_enable_fops = {
  1373. .open = tracing_open_generic,
  1374. .read = event_enable_read,
  1375. .write = event_enable_write,
  1376. .llseek = default_llseek,
  1377. };
  1378. static const struct file_operations ftrace_event_format_fops = {
  1379. .open = trace_format_open,
  1380. .read = seq_read,
  1381. .llseek = seq_lseek,
  1382. .release = seq_release,
  1383. };
  1384. static const struct file_operations ftrace_event_id_fops = {
  1385. .read = event_id_read,
  1386. .llseek = default_llseek,
  1387. };
  1388. static const struct file_operations ftrace_event_filter_fops = {
  1389. .open = tracing_open_generic,
  1390. .read = event_filter_read,
  1391. .write = event_filter_write,
  1392. .llseek = default_llseek,
  1393. };
  1394. static const struct file_operations ftrace_subsystem_filter_fops = {
  1395. .open = subsystem_open,
  1396. .read = subsystem_filter_read,
  1397. .write = subsystem_filter_write,
  1398. .llseek = default_llseek,
  1399. .release = subsystem_release,
  1400. };
  1401. static const struct file_operations ftrace_system_enable_fops = {
  1402. .open = subsystem_open,
  1403. .read = system_enable_read,
  1404. .write = system_enable_write,
  1405. .llseek = default_llseek,
  1406. .release = subsystem_release,
  1407. };
  1408. static const struct file_operations ftrace_tr_enable_fops = {
  1409. .open = system_tr_open,
  1410. .read = system_enable_read,
  1411. .write = system_enable_write,
  1412. .llseek = default_llseek,
  1413. .release = subsystem_release,
  1414. };
  1415. static const struct file_operations ftrace_show_header_fops = {
  1416. .open = tracing_open_generic,
  1417. .read = show_header,
  1418. .llseek = default_llseek,
  1419. };
  1420. static int
  1421. ftrace_event_open(struct inode *inode, struct file *file,
  1422. const struct seq_operations *seq_ops)
  1423. {
  1424. struct seq_file *m;
  1425. int ret;
  1426. ret = seq_open(file, seq_ops);
  1427. if (ret < 0)
  1428. return ret;
  1429. m = file->private_data;
  1430. /* copy tr over to seq ops */
  1431. m->private = inode->i_private;
  1432. return ret;
  1433. }
  1434. static int ftrace_event_release(struct inode *inode, struct file *file)
  1435. {
  1436. struct trace_array *tr = inode->i_private;
  1437. trace_array_put(tr);
  1438. return seq_release(inode, file);
  1439. }
  1440. static int
  1441. ftrace_event_avail_open(struct inode *inode, struct file *file)
  1442. {
  1443. const struct seq_operations *seq_ops = &show_event_seq_ops;
  1444. return ftrace_event_open(inode, file, seq_ops);
  1445. }
  1446. static int
  1447. ftrace_event_set_open(struct inode *inode, struct file *file)
  1448. {
  1449. const struct seq_operations *seq_ops = &show_set_event_seq_ops;
  1450. struct trace_array *tr = inode->i_private;
  1451. int ret;
  1452. if (trace_array_get(tr) < 0)
  1453. return -ENODEV;
  1454. if ((file->f_mode & FMODE_WRITE) &&
  1455. (file->f_flags & O_TRUNC))
  1456. ftrace_clear_events(tr);
  1457. ret = ftrace_event_open(inode, file, seq_ops);
  1458. if (ret < 0)
  1459. trace_array_put(tr);
  1460. return ret;
  1461. }
  1462. static int
  1463. ftrace_event_set_pid_open(struct inode *inode, struct file *file)
  1464. {
  1465. const struct seq_operations *seq_ops = &show_set_pid_seq_ops;
  1466. struct trace_array *tr = inode->i_private;
  1467. int ret;
  1468. if (trace_array_get(tr) < 0)
  1469. return -ENODEV;
  1470. if ((file->f_mode & FMODE_WRITE) &&
  1471. (file->f_flags & O_TRUNC))
  1472. ftrace_clear_event_pids(tr);
  1473. ret = ftrace_event_open(inode, file, seq_ops);
  1474. if (ret < 0)
  1475. trace_array_put(tr);
  1476. return ret;
  1477. }
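/*
 * Allocate and register a new event_subsystem: the name is duplicated with
 * kstrdup_const() (no copy is made for string literals), an empty filter
 * is allocated, the reference count starts at one, and the subsystem is
 * added to the global event_subsystems list.
 */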
  1478. static struct event_subsystem *
  1479. create_new_subsystem(const char *name)
  1480. {
  1481. struct event_subsystem *system;
  1482. /* need to create new entry */
  1483. system = kmalloc(sizeof(*system), GFP_KERNEL);
  1484. if (!system)
  1485. return NULL;
  1486. system->ref_count = 1;
  1487. /* Only allocate if dynamic (kprobes and modules) */
  1488. system->name = kstrdup_const(name, GFP_KERNEL);
  1489. if (!system->name)
  1490. goto out_free;
  1491. system->filter = NULL;
  1492. system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
  1493. if (!system->filter)
  1494. goto out_free;
  1495. list_add(&system->list, &event_subsystems);
  1496. return system;
  1497. out_free:
  1498. kfree_const(system->name);
  1499. kfree(system);
  1500. return NULL;
  1501. }
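/*
 * Find or create the tracefs directory for an event's subsystem inside a
 * trace_array. If the trace_array already has a trace_subsystem_dir for
 * this name, only nr_events is bumped; otherwise the subsystem is looked
 * up (or created) and a new directory with "filter" and "enable" files is
 * made under @parent. Returns the dentry the event directory should be
 * placed in, or NULL on failure.
 */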
  1502. static struct dentry *
  1503. event_subsystem_dir(struct trace_array *tr, const char *name,
  1504. struct trace_event_file *file, struct dentry *parent)
  1505. {
  1506. struct trace_subsystem_dir *dir;
  1507. struct event_subsystem *system;
  1508. struct dentry *entry;
  1509. /* First see if we did not already create this dir */
  1510. list_for_each_entry(dir, &tr->systems, list) {
  1511. system = dir->subsystem;
  1512. if (strcmp(system->name, name) == 0) {
  1513. dir->nr_events++;
  1514. file->system = dir;
  1515. return dir->entry;
  1516. }
  1517. }
  1518. /* Now see if the system itself exists. */
  1519. list_for_each_entry(system, &event_subsystems, list) {
  1520. if (strcmp(system->name, name) == 0)
  1521. break;
  1522. }
  1523. /* Reset system variable when not found */
  1524. if (&system->list == &event_subsystems)
  1525. system = NULL;
  1526. dir = kmalloc(sizeof(*dir), GFP_KERNEL);
  1527. if (!dir)
  1528. goto out_fail;
  1529. if (!system) {
  1530. system = create_new_subsystem(name);
  1531. if (!system)
  1532. goto out_free;
  1533. } else
  1534. __get_system(system);
  1535. dir->entry = tracefs_create_dir(name, parent);
  1536. if (!dir->entry) {
  1537. pr_warn("Failed to create system directory %s\n", name);
  1538. __put_system(system);
  1539. goto out_free;
  1540. }
  1541. dir->tr = tr;
  1542. dir->ref_count = 1;
  1543. dir->nr_events = 1;
  1544. dir->subsystem = system;
  1545. file->system = dir;
  1546. entry = tracefs_create_file("filter", 0644, dir->entry, dir,
  1547. &ftrace_subsystem_filter_fops);
  1548. if (!entry) {
  1549. kfree(system->filter);
  1550. system->filter = NULL;
  1551. pr_warn("Could not create tracefs '%s/filter' entry\n", name);
  1552. }
  1553. trace_create_file("enable", 0644, dir->entry, dir,
  1554. &ftrace_system_enable_fops);
  1555. list_add(&dir->list, &tr->systems);
  1556. return dir->entry;
  1557. out_free:
  1558. kfree(dir);
  1559. out_fail:
  1560. /* Only print this message if failed on memory allocation */
  1561. if (!dir || !system)
  1562. pr_warn("No memory to create event subsystem %s\n", name);
  1563. return NULL;
  1564. }
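/*
 * Create the tracefs directory for a single event file under the
 * subsystem directory returned by event_subsystem_dir(), populating it
 * with "enable", "id" (perf only), "filter", "trigger", "hist" (if
 * configured) and "format". Events flagged TRACE_EVENT_FL_IGNORE_ENABLE
 * get neither "enable" nor "trigger".
 */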
  1565. static int
  1566. event_create_dir(struct dentry *parent, struct trace_event_file *file)
  1567. {
  1568. struct trace_event_call *call = file->event_call;
  1569. struct trace_array *tr = file->tr;
  1570. struct list_head *head;
  1571. struct dentry *d_events;
  1572. const char *name;
  1573. int ret;
  1574. /*
  1575. * If the trace point header did not define TRACE_SYSTEM
  1576. * then the system would be called "TRACE_SYSTEM".
  1577. */
  1578. if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
  1579. d_events = event_subsystem_dir(tr, call->class->system, file, parent);
  1580. if (!d_events)
  1581. return -ENOMEM;
  1582. } else
  1583. d_events = parent;
  1584. name = trace_event_name(call);
  1585. file->dir = tracefs_create_dir(name, d_events);
  1586. if (!file->dir) {
  1587. pr_warn("Could not create tracefs '%s' directory\n", name);
  1588. return -1;
  1589. }
  1590. if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
  1591. trace_create_file("enable", 0644, file->dir, file,
  1592. &ftrace_enable_fops);
  1593. #ifdef CONFIG_PERF_EVENTS
  1594. if (call->event.type && call->class->reg)
  1595. trace_create_file("id", 0444, file->dir,
  1596. (void *)(long)call->event.type,
  1597. &ftrace_event_id_fops);
  1598. #endif
  1599. /*
  1600. * Other events may have the same class. Only update
  1601. * the fields if they are not already defined.
  1602. */
  1603. head = trace_get_fields(call);
  1604. if (list_empty(head)) {
  1605. ret = call->class->define_fields(call);
  1606. if (ret < 0) {
  1607. pr_warn("Could not initialize trace point events/%s\n",
  1608. name);
  1609. return -1;
  1610. }
  1611. }
  1612. trace_create_file("filter", 0644, file->dir, file,
  1613. &ftrace_event_filter_fops);
  1614. /*
  1615. * Only event directories that can be enabled should have
  1616. * triggers.
  1617. */
  1618. if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
  1619. trace_create_file("trigger", 0644, file->dir, file,
  1620. &event_trigger_fops);
  1621. #ifdef CONFIG_HIST_TRIGGERS
  1622. trace_create_file("hist", 0444, file->dir, file,
  1623. &event_hist_fops);
  1624. #endif
  1625. trace_create_file("format", 0444, file->dir, call,
  1626. &ftrace_event_format_fops);
  1627. return 0;
  1628. }
  1629. static void remove_event_from_tracers(struct trace_event_call *call)
  1630. {
  1631. struct trace_event_file *file;
  1632. struct trace_array *tr;
  1633. do_for_each_event_file_safe(tr, file) {
  1634. if (file->event_call != call)
  1635. continue;
  1636. remove_event_file_dir(file);
  1637. /*
  1638. * The do_for_each_event_file_safe() is
  1639. * a double loop. After finding the call for this
  1640. * trace_array, we use break to jump to the next
  1641. * trace_array.
  1642. */
  1643. break;
  1644. } while_for_each_event_file();
  1645. }
  1646. static void event_remove(struct trace_event_call *call)
  1647. {
  1648. struct trace_array *tr;
  1649. struct trace_event_file *file;
  1650. do_for_each_event_file(tr, file) {
  1651. if (file->event_call != call)
  1652. continue;
  1653. ftrace_event_enable_disable(file, 0);
  1654. /*
  1655. * The do_for_each_event_file() is
  1656. * a double loop. After finding the call for this
  1657. * trace_array, we use break to jump to the next
  1658. * trace_array.
  1659. */
  1660. break;
  1661. } while_for_each_event_file();
  1662. if (call->event.funcs)
  1663. __unregister_trace_event(&call->event);
  1664. remove_event_from_tracers(call);
  1665. list_del(&call->list);
  1666. }
  1667. static int event_init(struct trace_event_call *call)
  1668. {
  1669. int ret = 0;
  1670. const char *name;
  1671. name = trace_event_name(call);
  1672. if (WARN_ON(!name))
  1673. return -EINVAL;
  1674. if (call->class->raw_init) {
  1675. ret = call->class->raw_init(call);
  1676. if (ret < 0 && ret != -ENOSYS)
  1677. pr_warn("Could not initialize trace events/%s\n", name);
  1678. }
  1679. return ret;
  1680. }
  1681. static int
  1682. __register_event(struct trace_event_call *call, struct module *mod)
  1683. {
  1684. int ret;
  1685. ret = event_init(call);
  1686. if (ret < 0)
  1687. return ret;
  1688. list_add(&call->list, &ftrace_events);
  1689. call->mod = mod;
  1690. return 0;
  1691. }
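/*
 * enum_replace()/update_event_printk() rewrite an event's print_fmt in
 * place, substituting the numeric value for each occurrence of an enum
 * symbol outside of string literals, so that userspace tools reading the
 * "format" file do not need the kernel's enum definitions. The replacement
 * only succeeds when the printed value fits in the space taken by the
 * symbol name.
 */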
  1692. static char *enum_replace(char *ptr, struct trace_enum_map *map, int len)
  1693. {
  1694. int rlen;
  1695. int elen;
  1696. /* Find the length of the enum value as a string */
  1697. elen = snprintf(ptr, 0, "%ld", map->enum_value);
  1698. /* Make sure there's enough room to replace the string with the value */
  1699. if (len < elen)
  1700. return NULL;
  1701. snprintf(ptr, elen + 1, "%ld", map->enum_value);
  1702. /* Get the rest of the string of ptr */
  1703. rlen = strlen(ptr + len);
  1704. memmove(ptr + elen, ptr + len, rlen);
  1705. /* Make sure we end the new string */
  1706. ptr[elen + rlen] = 0;
  1707. return ptr + elen;
  1708. }
  1709. static void update_event_printk(struct trace_event_call *call,
  1710. struct trace_enum_map *map)
  1711. {
  1712. char *ptr;
  1713. int quote = 0;
  1714. int len = strlen(map->enum_string);
  1715. for (ptr = call->print_fmt; *ptr; ptr++) {
  1716. if (*ptr == '\\') {
  1717. ptr++;
  1718. /* paranoid */
  1719. if (!*ptr)
  1720. break;
  1721. continue;
  1722. }
  1723. if (*ptr == '"') {
  1724. quote ^= 1;
  1725. continue;
  1726. }
  1727. if (quote)
  1728. continue;
  1729. if (isdigit(*ptr)) {
  1730. /* skip numbers */
  1731. do {
  1732. ptr++;
  1733. /* Check for alpha chars like ULL */
  1734. } while (isalnum(*ptr));
  1735. if (!*ptr)
  1736. break;
  1737. /*
  1738. * A number must have some kind of delimiter after
  1739. * it, and we can ignore that too.
  1740. */
  1741. continue;
  1742. }
  1743. if (isalpha(*ptr) || *ptr == '_') {
  1744. if (strncmp(map->enum_string, ptr, len) == 0 &&
  1745. !isalnum(ptr[len]) && ptr[len] != '_') {
  1746. ptr = enum_replace(ptr, map, len);
  1747. /* Hmm, enum string smaller than value */
  1748. if (WARN_ON_ONCE(!ptr))
  1749. return;
  1750. /*
  1751. * No need to decrement here, as enum_replace()
1752. * returns the pointer to the character just past
1753. * the enum, and two enums cannot be placed
  1754. * back to back without something in between.
  1755. * We can skip that something in between.
  1756. */
  1757. continue;
  1758. }
  1759. skip_more:
  1760. do {
  1761. ptr++;
  1762. } while (isalnum(*ptr) || *ptr == '_');
  1763. if (!*ptr)
  1764. break;
  1765. /*
  1766. * If what comes after this variable is a '.' or
  1767. * '->' then we can continue to ignore that string.
  1768. */
  1769. if (*ptr == '.' || (ptr[0] == '-' && ptr[1] == '>')) {
  1770. ptr += *ptr == '.' ? 1 : 2;
  1771. if (!*ptr)
  1772. break;
  1773. goto skip_more;
  1774. }
  1775. /*
  1776. * Once again, we can skip the delimiter that came
  1777. * after the string.
  1778. */
  1779. continue;
  1780. }
  1781. }
  1782. }
  1783. void trace_event_enum_update(struct trace_enum_map **map, int len)
  1784. {
  1785. struct trace_event_call *call, *p;
  1786. const char *last_system = NULL;
  1787. bool first = false;
  1788. int last_i;
  1789. int i;
  1790. down_write(&trace_event_sem);
  1791. list_for_each_entry_safe(call, p, &ftrace_events, list) {
  1792. /* events are usually grouped together with systems */
  1793. if (!last_system || call->class->system != last_system) {
  1794. first = true;
  1795. last_i = 0;
  1796. last_system = call->class->system;
  1797. }
  1798. /*
1799. * Since calls are grouped by systems, the likelihood that the
1800. * next call in the iteration belongs to the same system as the
1801. * previous call is high. As an optimization, we skip searching
  1802. * for a map[] that matches the call's system if the last call
  1803. * was from the same system. That's what last_i is for. If the
  1804. * call has the same system as the previous call, then last_i
  1805. * will be the index of the first map[] that has a matching
  1806. * system.
  1807. */
  1808. for (i = last_i; i < len; i++) {
  1809. if (call->class->system == map[i]->system) {
  1810. /* Save the first system if need be */
  1811. if (first) {
  1812. last_i = i;
  1813. first = false;
  1814. }
  1815. update_event_printk(call, map[i]);
  1816. }
  1817. }
  1818. }
  1819. up_write(&trace_event_sem);
  1820. }
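/*
 * Allocate a trace_event_file from file_cachep, bind it to the given call
 * and trace_array, initialize its soft-mode/trigger-mode refcounts and
 * trigger list, and link it into tr->events.
 */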
  1821. static struct trace_event_file *
  1822. trace_create_new_event(struct trace_event_call *call,
  1823. struct trace_array *tr)
  1824. {
  1825. struct trace_event_file *file;
  1826. file = kmem_cache_alloc(file_cachep, GFP_TRACE);
  1827. if (!file)
  1828. return NULL;
  1829. file->event_call = call;
  1830. file->tr = tr;
  1831. atomic_set(&file->sm_ref, 0);
  1832. atomic_set(&file->tm_ref, 0);
  1833. INIT_LIST_HEAD(&file->triggers);
  1834. list_add(&file->list, &tr->events);
  1835. return file;
  1836. }
  1837. /* Add an event to a trace directory */
  1838. static int
  1839. __trace_add_new_event(struct trace_event_call *call, struct trace_array *tr)
  1840. {
  1841. struct trace_event_file *file;
  1842. file = trace_create_new_event(call, tr);
  1843. if (!file)
  1844. return -ENOMEM;
  1845. return event_create_dir(tr->event_dir, file);
  1846. }
  1847. /*
1848. * Just create a descriptor for early init. A descriptor is required
  1849. * for enabling events at boot. We want to enable events before
  1850. * the filesystem is initialized.
  1851. */
  1852. static __init int
  1853. __trace_early_add_new_event(struct trace_event_call *call,
  1854. struct trace_array *tr)
  1855. {
  1856. struct trace_event_file *file;
  1857. file = trace_create_new_event(call, tr);
  1858. if (!file)
  1859. return -ENOMEM;
  1860. return 0;
  1861. }
  1862. struct ftrace_module_file_ops;
  1863. static void __add_event_to_tracers(struct trace_event_call *call);
  1864. /* Add an additional event_call dynamically */
  1865. int trace_add_event_call(struct trace_event_call *call)
  1866. {
  1867. int ret;
  1868. mutex_lock(&trace_types_lock);
  1869. mutex_lock(&event_mutex);
  1870. ret = __register_event(call, NULL);
  1871. if (ret >= 0)
  1872. __add_event_to_tracers(call);
  1873. mutex_unlock(&event_mutex);
  1874. mutex_unlock(&trace_types_lock);
  1875. return ret;
  1876. }
  1877. /*
  1878. * Must be called under locking of trace_types_lock, event_mutex and
  1879. * trace_event_sem.
  1880. */
  1881. static void __trace_remove_event_call(struct trace_event_call *call)
  1882. {
  1883. event_remove(call);
  1884. trace_destroy_fields(call);
  1885. free_event_filter(call->filter);
  1886. call->filter = NULL;
  1887. }
  1888. static int probe_remove_event_call(struct trace_event_call *call)
  1889. {
  1890. struct trace_array *tr;
  1891. struct trace_event_file *file;
  1892. #ifdef CONFIG_PERF_EVENTS
  1893. if (call->perf_refcount)
  1894. return -EBUSY;
  1895. #endif
  1896. do_for_each_event_file(tr, file) {
  1897. if (file->event_call != call)
  1898. continue;
  1899. /*
1900. * We can't rely on the ftrace_event_enable_disable(enable => 0)
1901. * that we are about to do; EVENT_FILE_FL_SOFT_MODE can suppress
1902. * TRACE_REG_UNREGISTER.
  1903. */
  1904. if (file->flags & EVENT_FILE_FL_ENABLED)
  1905. return -EBUSY;
  1906. /*
1907. * The do_for_each_event_file() is
  1908. * a double loop. After finding the call for this
  1909. * trace_array, we use break to jump to the next
  1910. * trace_array.
  1911. */
  1912. break;
  1913. } while_for_each_event_file();
  1914. __trace_remove_event_call(call);
  1915. return 0;
  1916. }
  1917. /* Remove an event_call */
  1918. int trace_remove_event_call(struct trace_event_call *call)
  1919. {
  1920. int ret;
  1921. mutex_lock(&trace_types_lock);
  1922. mutex_lock(&event_mutex);
  1923. down_write(&trace_event_sem);
  1924. ret = probe_remove_event_call(call);
  1925. up_write(&trace_event_sem);
  1926. mutex_unlock(&event_mutex);
  1927. mutex_unlock(&trace_types_lock);
  1928. return ret;
  1929. }
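/*
 * Walk an array of trace_event_call pointers bounded by @start and @end.
 * Used both for the built-in events between __start_ftrace_events and
 * __stop_ftrace_events and for a module's mod->trace_events array.
 */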
  1930. #define for_each_event(event, start, end) \
  1931. for (event = start; \
  1932. (unsigned long)event < (unsigned long)end; \
  1933. event++)
  1934. #ifdef CONFIG_MODULES
  1935. static void trace_module_add_events(struct module *mod)
  1936. {
  1937. struct trace_event_call **call, **start, **end;
  1938. if (!mod->num_trace_events)
  1939. return;
  1940. /* Don't add infrastructure for mods without tracepoints */
  1941. if (trace_module_has_bad_taint(mod)) {
  1942. pr_err("%s: module has bad taint, not creating trace events\n",
  1943. mod->name);
  1944. return;
  1945. }
  1946. start = mod->trace_events;
  1947. end = mod->trace_events + mod->num_trace_events;
  1948. for_each_event(call, start, end) {
  1949. __register_event(*call, mod);
  1950. __add_event_to_tracers(*call);
  1951. }
  1952. }
  1953. static void trace_module_remove_events(struct module *mod)
  1954. {
  1955. struct trace_event_call *call, *p;
  1956. bool clear_trace = false;
  1957. down_write(&trace_event_sem);
  1958. list_for_each_entry_safe(call, p, &ftrace_events, list) {
  1959. if (call->mod == mod) {
  1960. if (call->flags & TRACE_EVENT_FL_WAS_ENABLED)
  1961. clear_trace = true;
  1962. __trace_remove_event_call(call);
  1963. }
  1964. }
  1965. up_write(&trace_event_sem);
  1966. /*
  1967. * It is safest to reset the ring buffer if the module being unloaded
  1968. * registered any events that were used. The only worry is if
  1969. * a new module gets loaded, and takes on the same id as the events
  1970. * of this module. When printing out the buffer, traced events left
  1971. * over from this module may be passed to the new module events and
  1972. * unexpected results may occur.
  1973. */
  1974. if (clear_trace)
  1975. tracing_reset_all_online_cpus();
  1976. }
  1977. static int trace_module_notify(struct notifier_block *self,
  1978. unsigned long val, void *data)
  1979. {
  1980. struct module *mod = data;
  1981. mutex_lock(&trace_types_lock);
  1982. mutex_lock(&event_mutex);
  1983. switch (val) {
  1984. case MODULE_STATE_COMING:
  1985. trace_module_add_events(mod);
  1986. break;
  1987. case MODULE_STATE_GOING:
  1988. trace_module_remove_events(mod);
  1989. break;
  1990. }
  1991. mutex_unlock(&event_mutex);
  1992. mutex_unlock(&trace_types_lock);
  1993. return 0;
  1994. }
  1995. static struct notifier_block trace_module_nb = {
  1996. .notifier_call = trace_module_notify,
  1997. .priority = 1, /* higher than trace.c module notify */
  1998. };
  1999. #endif /* CONFIG_MODULES */
  2000. /* Create a new event directory structure for a trace directory. */
  2001. static void
  2002. __trace_add_event_dirs(struct trace_array *tr)
  2003. {
  2004. struct trace_event_call *call;
  2005. int ret;
  2006. list_for_each_entry(call, &ftrace_events, list) {
  2007. ret = __trace_add_new_event(call, tr);
  2008. if (ret < 0)
  2009. pr_warn("Could not create directory for event %s\n",
  2010. trace_event_name(call));
  2011. }
  2012. }
  2013. struct trace_event_file *
  2014. find_event_file(struct trace_array *tr, const char *system, const char *event)
  2015. {
  2016. struct trace_event_file *file;
  2017. struct trace_event_call *call;
  2018. const char *name;
  2019. list_for_each_entry(file, &tr->events, list) {
  2020. call = file->event_call;
  2021. name = trace_event_name(call);
  2022. if (!name || !call->class || !call->class->reg)
  2023. continue;
  2024. if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
  2025. continue;
  2026. if (strcmp(event, name) == 0 &&
  2027. strcmp(system, call->class->system) == 0)
  2028. return file;
  2029. }
  2030. return NULL;
  2031. }
  2032. #ifdef CONFIG_DYNAMIC_FTRACE
  2033. /* Avoid typos */
  2034. #define ENABLE_EVENT_STR "enable_event"
  2035. #define DISABLE_EVENT_STR "disable_event"
  2036. struct event_probe_data {
  2037. struct trace_event_file *file;
  2038. unsigned long count;
  2039. int ref;
  2040. bool enable;
  2041. };
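/*
 * Callbacks backing the "enable_event"/"disable_event" commands that can
 * be written to set_ftrace_filter, e.g. something like:
 *
 *   echo 'try_to_wake_up:enable_event:sched:sched_switch:2' > set_ftrace_filter
 *
 * When the traced function is hit, the target event is soft-enabled or
 * soft-disabled via EVENT_FILE_FL_SOFT_DISABLED; the _count variants stop
 * after "count" hits (-1 means unlimited).
 */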
  2042. static void
  2043. event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data)
  2044. {
  2045. struct event_probe_data **pdata = (struct event_probe_data **)_data;
  2046. struct event_probe_data *data = *pdata;
  2047. if (!data)
  2048. return;
  2049. if (data->enable)
  2050. clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
  2051. else
  2052. set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
  2053. }
  2054. static void
  2055. event_enable_count_probe(unsigned long ip, unsigned long parent_ip, void **_data)
  2056. {
  2057. struct event_probe_data **pdata = (struct event_probe_data **)_data;
  2058. struct event_probe_data *data = *pdata;
  2059. if (!data)
  2060. return;
  2061. if (!data->count)
  2062. return;
  2063. /* Skip if the event is in a state we want to switch to */
  2064. if (data->enable == !(data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
  2065. return;
  2066. if (data->count != -1)
  2067. (data->count)--;
  2068. event_enable_probe(ip, parent_ip, _data);
  2069. }
  2070. static int
  2071. event_enable_print(struct seq_file *m, unsigned long ip,
  2072. struct ftrace_probe_ops *ops, void *_data)
  2073. {
  2074. struct event_probe_data *data = _data;
  2075. seq_printf(m, "%ps:", (void *)ip);
  2076. seq_printf(m, "%s:%s:%s",
  2077. data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
  2078. data->file->event_call->class->system,
  2079. trace_event_name(data->file->event_call));
  2080. if (data->count == -1)
  2081. seq_puts(m, ":unlimited\n");
  2082. else
  2083. seq_printf(m, ":count=%ld\n", data->count);
  2084. return 0;
  2085. }
  2086. static int
  2087. event_enable_init(struct ftrace_probe_ops *ops, unsigned long ip,
  2088. void **_data)
  2089. {
  2090. struct event_probe_data **pdata = (struct event_probe_data **)_data;
  2091. struct event_probe_data *data = *pdata;
  2092. data->ref++;
  2093. return 0;
  2094. }
  2095. static void
  2096. event_enable_free(struct ftrace_probe_ops *ops, unsigned long ip,
  2097. void **_data)
  2098. {
  2099. struct event_probe_data **pdata = (struct event_probe_data **)_data;
  2100. struct event_probe_data *data = *pdata;
  2101. if (WARN_ON_ONCE(data->ref <= 0))
  2102. return;
  2103. data->ref--;
  2104. if (!data->ref) {
  2105. /* Remove the SOFT_MODE flag */
  2106. __ftrace_event_enable_disable(data->file, 0, 1);
  2107. module_put(data->file->event_call->mod);
  2108. kfree(data);
  2109. }
  2110. *pdata = NULL;
  2111. }
  2112. static struct ftrace_probe_ops event_enable_probe_ops = {
  2113. .func = event_enable_probe,
  2114. .print = event_enable_print,
  2115. .init = event_enable_init,
  2116. .free = event_enable_free,
  2117. };
  2118. static struct ftrace_probe_ops event_enable_count_probe_ops = {
  2119. .func = event_enable_count_probe,
  2120. .print = event_enable_print,
  2121. .init = event_enable_init,
  2122. .free = event_enable_free,
  2123. };
  2124. static struct ftrace_probe_ops event_disable_probe_ops = {
  2125. .func = event_enable_probe,
  2126. .print = event_enable_print,
  2127. .init = event_enable_init,
  2128. .free = event_enable_free,
  2129. };
  2130. static struct ftrace_probe_ops event_disable_count_probe_ops = {
  2131. .func = event_enable_count_probe,
  2132. .print = event_enable_print,
  2133. .init = event_enable_init,
  2134. .free = event_enable_free,
  2135. };
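/*
 * Parse the command parameter written to set_ftrace_filter, in the form
 * "<system>:<event>[:count]". Looks up the event file, pins the owning
 * module, puts the event into soft mode and registers the chosen function
 * probe; a leading '!' on the glob unregisters it instead.
 */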
  2136. static int
  2137. event_enable_func(struct ftrace_hash *hash,
  2138. char *glob, char *cmd, char *param, int enabled)
  2139. {
  2140. struct trace_array *tr = top_trace_array();
  2141. struct trace_event_file *file;
  2142. struct ftrace_probe_ops *ops;
  2143. struct event_probe_data *data;
  2144. const char *system;
  2145. const char *event;
  2146. char *number;
  2147. bool enable;
  2148. int ret;
  2149. if (!tr)
  2150. return -ENODEV;
  2151. /* hash funcs only work with set_ftrace_filter */
  2152. if (!enabled || !param)
  2153. return -EINVAL;
  2154. system = strsep(&param, ":");
  2155. if (!param)
  2156. return -EINVAL;
  2157. event = strsep(&param, ":");
  2158. mutex_lock(&event_mutex);
  2159. ret = -EINVAL;
  2160. file = find_event_file(tr, system, event);
  2161. if (!file)
  2162. goto out;
  2163. enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
  2164. if (enable)
  2165. ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops;
  2166. else
  2167. ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;
  2168. if (glob[0] == '!') {
  2169. unregister_ftrace_function_probe_func(glob+1, ops);
  2170. ret = 0;
  2171. goto out;
  2172. }
  2173. ret = -ENOMEM;
  2174. data = kzalloc(sizeof(*data), GFP_KERNEL);
  2175. if (!data)
  2176. goto out;
  2177. data->enable = enable;
  2178. data->count = -1;
  2179. data->file = file;
  2180. if (!param)
  2181. goto out_reg;
  2182. number = strsep(&param, ":");
  2183. ret = -EINVAL;
  2184. if (!strlen(number))
  2185. goto out_free;
  2186. /*
  2187. * We use the callback data field (which is a pointer)
  2188. * as our counter.
  2189. */
  2190. ret = kstrtoul(number, 0, &data->count);
  2191. if (ret)
  2192. goto out_free;
  2193. out_reg:
  2194. /* Don't let event modules unload while probe registered */
  2195. ret = try_module_get(file->event_call->mod);
  2196. if (!ret) {
  2197. ret = -EBUSY;
  2198. goto out_free;
  2199. }
  2200. ret = __ftrace_event_enable_disable(file, 1, 1);
  2201. if (ret < 0)
  2202. goto out_put;
  2203. ret = register_ftrace_function_probe(glob, ops, data);
  2204. /*
2205. * On success, the above returns the number of functions enabled,
  2206. * but if it didn't find any functions it returns zero.
  2207. * Consider no functions a failure too.
  2208. */
  2209. if (!ret) {
  2210. ret = -ENOENT;
  2211. goto out_disable;
  2212. } else if (ret < 0)
  2213. goto out_disable;
  2214. /* Just return zero, not the number of enabled functions */
  2215. ret = 0;
  2216. out:
  2217. mutex_unlock(&event_mutex);
  2218. return ret;
  2219. out_disable:
  2220. __ftrace_event_enable_disable(file, 0, 1);
  2221. out_put:
  2222. module_put(file->event_call->mod);
  2223. out_free:
  2224. kfree(data);
  2225. goto out;
  2226. }
  2227. static struct ftrace_func_command event_enable_cmd = {
  2228. .name = ENABLE_EVENT_STR,
  2229. .func = event_enable_func,
  2230. };
  2231. static struct ftrace_func_command event_disable_cmd = {
  2232. .name = DISABLE_EVENT_STR,
  2233. .func = event_enable_func,
  2234. };
  2235. static __init int register_event_cmds(void)
  2236. {
  2237. int ret;
  2238. ret = register_ftrace_command(&event_enable_cmd);
  2239. if (WARN_ON(ret < 0))
  2240. return ret;
  2241. ret = register_ftrace_command(&event_disable_cmd);
  2242. if (WARN_ON(ret < 0))
  2243. unregister_ftrace_command(&event_enable_cmd);
  2244. return ret;
  2245. }
  2246. #else
  2247. static inline int register_event_cmds(void) { return 0; }
  2248. #endif /* CONFIG_DYNAMIC_FTRACE */
  2249. /*
  2250. * The top level array has already had its trace_event_file
  2251. * descriptors created in order to allow for early events to
  2252. * be recorded. This function is called after the tracefs has been
  2253. * initialized, and we now have to create the files associated
2254. * with the events.
  2255. */
  2256. static __init void
  2257. __trace_early_add_event_dirs(struct trace_array *tr)
  2258. {
  2259. struct trace_event_file *file;
  2260. int ret;
  2261. list_for_each_entry(file, &tr->events, list) {
  2262. ret = event_create_dir(tr->event_dir, file);
  2263. if (ret < 0)
  2264. pr_warn("Could not create directory for event %s\n",
  2265. trace_event_name(file->event_call));
  2266. }
  2267. }
  2268. /*
2269. * For early boot up, the top trace array needs to have
  2270. * a list of events that can be enabled. This must be done before
  2271. * the filesystem is set up in order to allow events to be traced
  2272. * early.
  2273. */
  2274. static __init void
  2275. __trace_early_add_events(struct trace_array *tr)
  2276. {
  2277. struct trace_event_call *call;
  2278. int ret;
  2279. list_for_each_entry(call, &ftrace_events, list) {
  2280. /* Early boot up should not have any modules loaded */
  2281. if (WARN_ON_ONCE(call->mod))
  2282. continue;
  2283. ret = __trace_early_add_new_event(call, tr);
  2284. if (ret < 0)
  2285. pr_warn("Could not create early event %s\n",
  2286. trace_event_name(call));
  2287. }
  2288. }
  2289. /* Remove the event directory structure for a trace directory. */
  2290. static void
  2291. __trace_remove_event_dirs(struct trace_array *tr)
  2292. {
  2293. struct trace_event_file *file, *next;
  2294. list_for_each_entry_safe(file, next, &tr->events, list)
  2295. remove_event_file_dir(file);
  2296. }
  2297. static void __add_event_to_tracers(struct trace_event_call *call)
  2298. {
  2299. struct trace_array *tr;
  2300. list_for_each_entry(tr, &ftrace_trace_arrays, list)
  2301. __trace_add_new_event(call, tr);
  2302. }
  2303. extern struct trace_event_call *__start_ftrace_events[];
  2304. extern struct trace_event_call *__stop_ftrace_events[];
  2305. static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
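/*
 * Handle the "trace_event=" boot parameter: stash the comma-separated
 * event list for early_enable_events(), mark the ring buffer as needing
 * full expansion and disable the startup self tests, e.g.
 *
 *   trace_event=sched:sched_switch,irq_handler_entry
 */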
  2306. static __init int setup_trace_event(char *str)
  2307. {
  2308. strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
  2309. ring_buffer_expanded = true;
  2310. tracing_selftest_disabled = true;
  2311. return 1;
  2312. }
  2313. __setup("trace_event=", setup_trace_event);
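/*
 * Create the top-level event files for a trace instance: "set_event",
 * "set_event_pid", the "events" directory with the ring buffer
 * "header_page"/"header_event" descriptions, and the global "enable" file.
 */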
  2314. /* Expects to have event_mutex held when called */
  2315. static int
  2316. create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
  2317. {
  2318. struct dentry *d_events;
  2319. struct dentry *entry;
  2320. entry = tracefs_create_file("set_event", 0644, parent,
  2321. tr, &ftrace_set_event_fops);
  2322. if (!entry) {
  2323. pr_warn("Could not create tracefs 'set_event' entry\n");
  2324. return -ENOMEM;
  2325. }
  2326. d_events = tracefs_create_dir("events", parent);
  2327. if (!d_events) {
  2328. pr_warn("Could not create tracefs 'events' directory\n");
  2329. return -ENOMEM;
  2330. }
  2331. entry = tracefs_create_file("set_event_pid", 0644, parent,
  2332. tr, &ftrace_set_event_pid_fops);
  2333. /* ring buffer internal formats */
  2334. trace_create_file("header_page", 0444, d_events,
  2335. ring_buffer_print_page_header,
  2336. &ftrace_show_header_fops);
  2337. trace_create_file("header_event", 0444, d_events,
  2338. ring_buffer_print_entry_header,
  2339. &ftrace_show_header_fops);
  2340. trace_create_file("enable", 0644, d_events,
  2341. tr, &ftrace_tr_enable_fops);
  2342. tr->event_dir = d_events;
  2343. return 0;
  2344. }
  2345. /**
2346. * event_trace_add_tracer - add an instance of a trace_array to events
  2347. * @parent: The parent dentry to place the files/directories for events in
  2348. * @tr: The trace array associated with these events
  2349. *
  2350. * When a new instance is created, it needs to set up its events
  2351. * directory, as well as other files associated with events. It also
2352. * creates the event hierarchy in the @parent/events directory.
  2353. *
  2354. * Returns 0 on success.
  2355. */
  2356. int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
  2357. {
  2358. int ret;
  2359. mutex_lock(&event_mutex);
  2360. ret = create_event_toplevel_files(parent, tr);
  2361. if (ret)
  2362. goto out_unlock;
  2363. down_write(&trace_event_sem);
  2364. __trace_add_event_dirs(tr);
  2365. up_write(&trace_event_sem);
  2366. out_unlock:
  2367. mutex_unlock(&event_mutex);
  2368. return ret;
  2369. }
  2370. /*
  2371. * The top trace array already had its file descriptors created.
  2372. * Now the files themselves need to be created.
  2373. */
  2374. static __init int
  2375. early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
  2376. {
  2377. int ret;
  2378. mutex_lock(&event_mutex);
  2379. ret = create_event_toplevel_files(parent, tr);
  2380. if (ret)
  2381. goto out_unlock;
  2382. down_write(&trace_event_sem);
  2383. __trace_early_add_event_dirs(tr);
  2384. up_write(&trace_event_sem);
  2385. out_unlock:
  2386. mutex_unlock(&event_mutex);
  2387. return ret;
  2388. }
  2389. int event_trace_del_tracer(struct trace_array *tr)
  2390. {
  2391. mutex_lock(&event_mutex);
  2392. /* Disable any event triggers and associated soft-disabled events */
  2393. clear_event_triggers(tr);
  2394. /* Clear the pid list */
  2395. __ftrace_clear_event_pids(tr);
  2396. /* Disable any running events */
  2397. __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
2398. /* Access to events is within rcu_read_lock_sched() */
  2399. synchronize_sched();
  2400. down_write(&trace_event_sem);
  2401. __trace_remove_event_dirs(tr);
  2402. tracefs_remove_recursive(tr->event_dir);
  2403. up_write(&trace_event_sem);
  2404. tr->event_dir = NULL;
  2405. mutex_unlock(&event_mutex);
  2406. return 0;
  2407. }
  2408. static __init int event_trace_memsetup(void)
  2409. {
  2410. field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
  2411. file_cachep = KMEM_CACHE(trace_event_file, SLAB_PANIC);
  2412. return 0;
  2413. }
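/*
 * Walk the comma-separated list saved from the trace_event= boot parameter
 * and enable each event. strsep() consumes the separators, so the commas
 * are put back afterwards to let this run a second time from
 * event_trace_enable_again().
 */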
  2414. static __init void
  2415. early_enable_events(struct trace_array *tr, bool disable_first)
  2416. {
  2417. char *buf = bootup_event_buf;
  2418. char *token;
  2419. int ret;
  2420. while (true) {
  2421. token = strsep(&buf, ",");
  2422. if (!token)
  2423. break;
  2424. if (*token) {
  2425. /* Restarting syscalls requires that we stop them first */
  2426. if (disable_first)
  2427. ftrace_set_clr_event(tr, token, 0);
  2428. ret = ftrace_set_clr_event(tr, token, 1);
  2429. if (ret)
  2430. pr_warn("Failed to enable trace event: %s\n", token);
  2431. }
  2432. /* Put back the comma to allow this to be called again */
  2433. if (buf)
  2434. *(buf - 1) = ',';
  2435. }
  2436. }
  2437. static __init int event_trace_enable(void)
  2438. {
  2439. struct trace_array *tr = top_trace_array();
  2440. struct trace_event_call **iter, *call;
  2441. int ret;
  2442. if (!tr)
  2443. return -ENODEV;
  2444. for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
  2445. call = *iter;
  2446. ret = event_init(call);
  2447. if (!ret)
  2448. list_add(&call->list, &ftrace_events);
  2449. }
  2450. /*
  2451. * We need the top trace array to have a working set of trace
  2452. * points at early init, before the debug files and directories
  2453. * are created. Create the file entries now, and attach them
  2454. * to the actual file dentries later.
  2455. */
  2456. __trace_early_add_events(tr);
  2457. early_enable_events(tr, false);
  2458. trace_printk_start_comm();
  2459. register_event_cmds();
  2460. register_trigger_cmds();
  2461. return 0;
  2462. }
  2463. /*
  2464. * event_trace_enable() is called from trace_event_init() first to
  2465. * initialize events and perhaps start any events that are on the
  2466. * command line. Unfortunately, there are some events that will not
  2467. * start this early, like the system call tracepoints that need
2468. * to set the TIF_SYSCALL_TRACEPOINT flag of pid 1. Because event_trace_enable()
2469. * is called before pid 1 starts, that flag is never set, so the syscall
2470. * tracepoints are never reached even though the events themselves are
2471. * enabled (and do nothing).
  2472. */
  2473. static __init int event_trace_enable_again(void)
  2474. {
  2475. struct trace_array *tr;
  2476. tr = top_trace_array();
  2477. if (!tr)
  2478. return -ENODEV;
  2479. early_enable_events(tr, true);
  2480. return 0;
  2481. }
  2482. early_initcall(event_trace_enable_again);
  2483. static __init int event_trace_init(void)
  2484. {
  2485. struct trace_array *tr;
  2486. struct dentry *d_tracer;
  2487. struct dentry *entry;
  2488. int ret;
  2489. tr = top_trace_array();
  2490. if (!tr)
  2491. return -ENODEV;
  2492. d_tracer = tracing_init_dentry();
  2493. if (IS_ERR(d_tracer))
  2494. return 0;
  2495. entry = tracefs_create_file("available_events", 0444, d_tracer,
  2496. tr, &ftrace_avail_fops);
  2497. if (!entry)
  2498. pr_warn("Could not create tracefs 'available_events' entry\n");
  2499. if (trace_define_generic_fields())
2500. pr_warn("tracing: Failed to allocate generic fields");
  2501. if (trace_define_common_fields())
  2502. pr_warn("tracing: Failed to allocate common fields");
  2503. ret = early_event_add_tracer(d_tracer, tr);
  2504. if (ret)
  2505. return ret;
  2506. #ifdef CONFIG_MODULES
  2507. ret = register_module_notifier(&trace_module_nb);
  2508. if (ret)
  2509. pr_warn("Failed to register trace events module notifier\n");
  2510. #endif
  2511. return 0;
  2512. }
  2513. void __init trace_event_init(void)
  2514. {
  2515. event_trace_memsetup();
  2516. init_ftrace_syscalls();
  2517. event_trace_enable();
  2518. }
  2519. fs_initcall(event_trace_init);
  2520. #ifdef CONFIG_FTRACE_STARTUP_TEST
  2521. static DEFINE_SPINLOCK(test_spinlock);
  2522. static DEFINE_SPINLOCK(test_spinlock_irq);
  2523. static DEFINE_MUTEX(test_mutex);
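/*
 * The startup self test enables each event (then each subsystem, then
 * everything) and runs event_test_stuff(), which spawns a kthread that
 * takes spinlocks and a mutex, sleeps and allocates memory, so that a
 * spread of tracepoints (scheduler, timers, kmem, ...) has a chance to
 * fire while the events are enabled.
 */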
  2524. static __init void test_work(struct work_struct *dummy)
  2525. {
  2526. spin_lock(&test_spinlock);
  2527. spin_lock_irq(&test_spinlock_irq);
  2528. udelay(1);
  2529. spin_unlock_irq(&test_spinlock_irq);
  2530. spin_unlock(&test_spinlock);
  2531. mutex_lock(&test_mutex);
  2532. msleep(1);
  2533. mutex_unlock(&test_mutex);
  2534. }
  2535. static __init int event_test_thread(void *unused)
  2536. {
  2537. void *test_malloc;
  2538. test_malloc = kmalloc(1234, GFP_KERNEL);
  2539. if (!test_malloc)
  2540. pr_info("failed to kmalloc\n");
  2541. schedule_on_each_cpu(test_work);
  2542. kfree(test_malloc);
  2543. set_current_state(TASK_INTERRUPTIBLE);
  2544. while (!kthread_should_stop()) {
  2545. schedule();
  2546. set_current_state(TASK_INTERRUPTIBLE);
  2547. }
  2548. __set_current_state(TASK_RUNNING);
  2549. return 0;
  2550. }
  2551. /*
  2552. * Do various things that may trigger events.
  2553. */
  2554. static __init void event_test_stuff(void)
  2555. {
  2556. struct task_struct *test_thread;
  2557. test_thread = kthread_run(event_test_thread, NULL, "test-events");
  2558. msleep(1);
  2559. kthread_stop(test_thread);
  2560. }
  2561. /*
  2562. * For every trace event defined, we will test each trace point separately,
  2563. * and then by groups, and finally all trace points.
  2564. */
  2565. static __init void event_trace_self_tests(void)
  2566. {
  2567. struct trace_subsystem_dir *dir;
  2568. struct trace_event_file *file;
  2569. struct trace_event_call *call;
  2570. struct event_subsystem *system;
  2571. struct trace_array *tr;
  2572. int ret;
  2573. tr = top_trace_array();
  2574. if (!tr)
  2575. return;
  2576. pr_info("Running tests on trace events:\n");
  2577. list_for_each_entry(file, &tr->events, list) {
  2578. call = file->event_call;
  2579. /* Only test those that have a probe */
  2580. if (!call->class || !call->class->probe)
  2581. continue;
  2582. /*
  2583. * Testing syscall events here is pretty useless, but
  2584. * we still do it if configured. But this is time consuming.
  2585. * What we really need is a user thread to perform the
  2586. * syscalls as we test.
  2587. */
  2588. #ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
  2589. if (call->class->system &&
  2590. strcmp(call->class->system, "syscalls") == 0)
  2591. continue;
  2592. #endif
  2593. pr_info("Testing event %s: ", trace_event_name(call));
  2594. /*
  2595. * If an event is already enabled, someone is using
  2596. * it and the self test should not be on.
  2597. */
  2598. if (file->flags & EVENT_FILE_FL_ENABLED) {
  2599. pr_warn("Enabled event during self test!\n");
  2600. WARN_ON_ONCE(1);
  2601. continue;
  2602. }
  2603. ftrace_event_enable_disable(file, 1);
  2604. event_test_stuff();
  2605. ftrace_event_enable_disable(file, 0);
  2606. pr_cont("OK\n");
  2607. }
  2608. /* Now test at the sub system level */
  2609. pr_info("Running tests on trace event systems:\n");
  2610. list_for_each_entry(dir, &tr->systems, list) {
  2611. system = dir->subsystem;
  2612. /* the ftrace system is special, skip it */
  2613. if (strcmp(system->name, "ftrace") == 0)
  2614. continue;
  2615. pr_info("Testing event system %s: ", system->name);
  2616. ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
  2617. if (WARN_ON_ONCE(ret)) {
  2618. pr_warn("error enabling system %s\n",
  2619. system->name);
  2620. continue;
  2621. }
  2622. event_test_stuff();
  2623. ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
  2624. if (WARN_ON_ONCE(ret)) {
  2625. pr_warn("error disabling system %s\n",
  2626. system->name);
  2627. continue;
  2628. }
  2629. pr_cont("OK\n");
  2630. }
  2631. /* Test with all events enabled */
  2632. pr_info("Running tests on all trace events:\n");
  2633. pr_info("Testing all events: ");
  2634. ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
  2635. if (WARN_ON_ONCE(ret)) {
  2636. pr_warn("error enabling all events\n");
  2637. return;
  2638. }
  2639. event_test_stuff();
  2640. /* reset sysname */
  2641. ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
  2642. if (WARN_ON_ONCE(ret)) {
  2643. pr_warn("error disabling all events\n");
  2644. return;
  2645. }
  2646. pr_cont("OK\n");
  2647. }
  2648. #ifdef CONFIG_FUNCTION_TRACER
  2649. static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
  2650. static struct trace_event_file event_trace_file __initdata;
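/*
 * ftrace callback used to rerun the self tests with the function tracer
 * active: for every traced function it writes a TRACE_FN entry through the
 * event path (trace_event_buffer_lock_reserve()/event_trigger_unlock_commit()),
 * guarded by a per-cpu recursion counter.
 */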
  2651. static void __init
  2652. function_test_events_call(unsigned long ip, unsigned long parent_ip,
  2653. struct ftrace_ops *op, struct pt_regs *pt_regs)
  2654. {
  2655. struct ring_buffer_event *event;
  2656. struct ring_buffer *buffer;
  2657. struct ftrace_entry *entry;
  2658. unsigned long flags;
  2659. long disabled;
  2660. int cpu;
  2661. int pc;
  2662. pc = preempt_count();
  2663. preempt_disable_notrace();
  2664. cpu = raw_smp_processor_id();
  2665. disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
  2666. if (disabled != 1)
  2667. goto out;
  2668. local_save_flags(flags);
  2669. event = trace_event_buffer_lock_reserve(&buffer, &event_trace_file,
  2670. TRACE_FN, sizeof(*entry),
  2671. flags, pc);
  2672. if (!event)
  2673. goto out;
  2674. entry = ring_buffer_event_data(event);
  2675. entry->ip = ip;
  2676. entry->parent_ip = parent_ip;
  2677. event_trigger_unlock_commit(&event_trace_file, buffer, event,
  2678. entry, flags, pc);
  2679. out:
  2680. atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
  2681. preempt_enable_notrace();
  2682. }
  2683. static struct ftrace_ops trace_ops __initdata =
  2684. {
  2685. .func = function_test_events_call,
  2686. .flags = FTRACE_OPS_FL_RECURSION_SAFE,
  2687. };
  2688. static __init void event_trace_self_test_with_function(void)
  2689. {
  2690. int ret;
  2691. event_trace_file.tr = top_trace_array();
  2692. if (WARN_ON(!event_trace_file.tr))
  2693. return;
  2694. ret = register_ftrace_function(&trace_ops);
  2695. if (WARN_ON(ret < 0)) {
  2696. pr_info("Failed to enable function tracer for event tests\n");
  2697. return;
  2698. }
  2699. pr_info("Running tests again, along with the function tracer\n");
  2700. event_trace_self_tests();
  2701. unregister_ftrace_function(&trace_ops);
  2702. }
  2703. #else
  2704. static __init void event_trace_self_test_with_function(void)
  2705. {
  2706. }
  2707. #endif
  2708. static __init int event_trace_self_tests_init(void)
  2709. {
  2710. if (!tracing_selftest_disabled) {
  2711. event_trace_self_tests();
  2712. event_trace_self_test_with_function();
  2713. }
  2714. return 0;
  2715. }
  2716. late_initcall(event_trace_self_tests_init);
  2717. #endif