/*
 * linux/arch/arm/kernel/etm.c
 *
 * Driver for ARM's Embedded Trace Macrocell and Embedded Trace Buffer.
 *
 * Copyright (C) 2009 Nokia Corporation.
 * Alexander Shishkin
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/sysrq.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/amba/bus.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <asm/hardware/coresight.h>
#include <asm/sections.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexander Shishkin");
/*
 * ETM tracer state
 */
struct tracectx {
        unsigned int etb_bufsz;
        void __iomem *etb_regs;
        void __iomem **etm_regs;
        int etm_regs_count;
        unsigned long flags;
        int ncmppairs;
        int etm_portsz;
        int etm_contextid_size;
        u32 etb_fc;
        unsigned long range_start;
        unsigned long range_end;
        unsigned long data_range_start;
        unsigned long data_range_end;
        bool dump_initial_etb;
        struct device *dev;
        struct clk *emu_clk;
        struct mutex mutex;
};

static struct tracectx tracer = {
        .range_start = (unsigned long)_stext,
        .range_end = (unsigned long)_etext,
};
static inline bool trace_isrunning(struct tracectx *t)
{
        return !!(t->flags & TRACER_RUNNING);
}
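
/*
 * Program address range comparator pair n (counted from 1) with the start
 * and end addresses of an instruction-execute or data-load/store range and
 * include it in (or exclude it from) the trace.  The access type written
 * here ignores context ID and security state and disables data value
 * comparison.
 */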
static int etm_setup_address_range(struct tracectx *t, int id, int n,
                unsigned long start, unsigned long end, int exclude, int data)
{
        u32 flags = ETMAAT_ARM | ETMAAT_IGNCONTEXTID | ETMAAT_IGNSECURITY |
                    ETMAAT_NOVALCMP;

        if (n < 1 || n > t->ncmppairs)
                return -EINVAL;

        /* comparators and ranges are numbered starting with 1 as opposed
         * to bits in a word */
        n--;

        if (data)
                flags |= ETMAAT_DLOADSTORE;
        else
                flags |= ETMAAT_IEXEC;

        /* first comparator for the range */
        etm_writel(t, id, flags, ETMR_COMP_ACC_TYPE(n * 2));
        etm_writel(t, id, start, ETMR_COMP_VAL(n * 2));

        /* second comparator is right next to it */
        etm_writel(t, id, flags, ETMR_COMP_ACC_TYPE(n * 2 + 1));
        etm_writel(t, id, end, ETMR_COMP_VAL(n * 2 + 1));

        if (data) {
                flags = exclude ? ETMVDC3_EXCLONLY : 0;
                if (exclude)
                        n += 8;
                etm_writel(t, id, flags | BIT(n), ETMR_VIEWDATACTRL3);
        } else {
                flags = exclude ? ETMTE_INCLEXCL : 0;
                etm_writel(t, id, flags | (1 << n), ETMR_TRACEENCTRL);
        }

        return 0;
}
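
/*
 * Configure and start one ETM: request programming mode (set the progbit
 * and wait for it to read back), program the control register and the
 * instruction/data address ranges, then clear the progbit so the ETM
 * starts producing trace.
 */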
static int trace_start_etm(struct tracectx *t, int id)
{
        u32 v;
        unsigned long timeout = TRACER_TIMEOUT;

        v = ETMCTRL_OPTS | ETMCTRL_PROGRAM | ETMCTRL_PORTSIZE(t->etm_portsz);
        v |= ETMCTRL_CONTEXTIDSIZE(t->etm_contextid_size);

        if (t->flags & TRACER_CYCLE_ACC)
                v |= ETMCTRL_CYCLEACCURATE;

        if (t->flags & TRACER_BRANCHOUTPUT)
                v |= ETMCTRL_BRANCH_OUTPUT;

        if (t->flags & TRACER_TRACE_DATA)
                v |= ETMCTRL_DATA_DO_ADDR;

        if (t->flags & TRACER_TIMESTAMP)
                v |= ETMCTRL_TIMESTAMP_EN;

        if (t->flags & TRACER_RETURN_STACK)
                v |= ETMCTRL_RETURN_STACK_EN;

        etm_unlock(t, id);

        etm_writel(t, id, v, ETMR_CTRL);

        while (!(etm_readl(t, id, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
                ;
        if (!timeout) {
                dev_dbg(t->dev, "Waiting for progbit to assert timed out\n");
                etm_lock(t, id);
                return -EFAULT;
        }

        if (t->range_start || t->range_end)
                etm_setup_address_range(t, id, 1,
                                t->range_start, t->range_end, 0, 0);
        else
                etm_writel(t, id, ETMTE_INCLEXCL, ETMR_TRACEENCTRL);

        etm_writel(t, id, 0, ETMR_TRACEENCTRL2);
        etm_writel(t, id, 0, ETMR_TRACESSCTRL);
        etm_writel(t, id, 0x6f, ETMR_TRACEENEVT);

        etm_writel(t, id, 0, ETMR_VIEWDATACTRL1);
        etm_writel(t, id, 0, ETMR_VIEWDATACTRL2);

        if (t->data_range_start || t->data_range_end)
                etm_setup_address_range(t, id, 2, t->data_range_start,
                                t->data_range_end, 0, 1);
        else
                etm_writel(t, id, ETMVDC3_EXCLONLY, ETMR_VIEWDATACTRL3);

        etm_writel(t, id, 0x6f, ETMR_VIEWDATAEVT);

        v &= ~ETMCTRL_PROGRAM;
        v |= ETMCTRL_PORTSEL;

        etm_writel(t, id, v, ETMR_CTRL);

        timeout = TRACER_TIMEOUT;
        while (etm_readl(t, id, ETMR_CTRL) & ETMCTRL_PROGRAM && --timeout)
                ;
        if (!timeout) {
                dev_dbg(t->dev, "Waiting for progbit to deassert timed out\n");
                etm_lock(t, id);
                return -EFAULT;
        }

        etm_lock(t, id);
        return 0;
}
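
/*
 * Start tracing: reset the ETB write pointer, program its formatter and
 * enable capture, then start every registered ETM.  Callers hold
 * tracer.mutex.
 */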
static int trace_start(struct tracectx *t)
{
        int ret;
        int id;
        u32 etb_fc = t->etb_fc;

        etb_unlock(t);

        t->dump_initial_etb = false;
        etb_writel(t, 0, ETBR_WRITEADDR);
        etb_writel(t, etb_fc, ETBR_FORMATTERCTRL);
        etb_writel(t, 1, ETBR_CTRL);

        etb_lock(t);

        /* configure etm(s) */
        for (id = 0; id < t->etm_regs_count; id++) {
                ret = trace_start_etm(t, id);
                if (ret)
                        return ret;
        }

        t->flags |= TRACER_RUNNING;

        return 0;
}
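
/*
 * Stop one ETM by writing a control value with the progbit set and waiting
 * for the ETM to acknowledge it in ETMR_CTRL.
 */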
static int trace_stop_etm(struct tracectx *t, int id)
{
        unsigned long timeout = TRACER_TIMEOUT;

        etm_unlock(t, id);

        etm_writel(t, id, 0x440, ETMR_CTRL);
        while (!(etm_readl(t, id, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
                ;
        if (!timeout) {
                dev_err(t->dev,
                        "etm%d: Waiting for progbit to assert timed out\n",
                        id);
                etm_lock(t, id);
                return -EFAULT;
        }

        etm_lock(t, id);
        return 0;
}
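
/*
 * Power down one ETM: wait until the status register reports the progbit,
 * then write the final control value (0x441), which also leaves the ETM
 * power-down bit set.
 */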
static int trace_power_down_etm(struct tracectx *t, int id)
{
        unsigned long timeout = TRACER_TIMEOUT;

        etm_unlock(t, id);
        while (!(etm_readl(t, id, ETMR_STATUS) & ETMST_PROGBIT) && --timeout)
                ;
        if (!timeout) {
                dev_err(t->dev, "etm%d: Waiting for status progbit to assert timed out\n",
                        id);
                etm_lock(t, id);
                return -EFAULT;
        }

        etm_writel(t, id, 0x441, ETMR_CTRL);

        etm_lock(t, id);
        return 0;
}
static int trace_stop(struct tracectx *t)
{
        int id;
        unsigned long timeout = TRACER_TIMEOUT;
        u32 etb_fc = t->etb_fc;

        for (id = 0; id < t->etm_regs_count; id++)
                trace_stop_etm(t, id);

        for (id = 0; id < t->etm_regs_count; id++)
                trace_power_down_etm(t, id);

        etb_unlock(t);
        if (etb_fc) {
                etb_fc |= ETBFF_STOPFL;
                etb_writel(t, t->etb_fc, ETBR_FORMATTERCTRL);
        }
        etb_writel(t, etb_fc | ETBFF_MANUAL_FLUSH, ETBR_FORMATTERCTRL);

        timeout = TRACER_TIMEOUT;
        while (etb_readl(t, ETBR_FORMATTERCTRL) &
                        ETBFF_MANUAL_FLUSH && --timeout)
                ;
        if (!timeout) {
                dev_dbg(t->dev, "Waiting for formatter flush to commence "
                                "timed out\n");
                etb_lock(t);
                return -EFAULT;
        }

        etb_writel(t, 0, ETBR_CTRL);

        etb_lock(t);

        t->flags &= ~TRACER_RUNNING;

        return 0;
}
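
/*
 * Number of valid words in the ETB: the full buffer depth once the status
 * register reports that the RAM has wrapped (bit 0), otherwise the current
 * write pointer.
 */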
static int etb_getdatalen(struct tracectx *t)
{
        u32 v;
        int wp;

        v = etb_readl(t, ETBR_STATUS);

        if (v & 1)
                return t->etb_bufsz;

        wp = etb_readl(t, ETBR_WRITEADDR);
        return wp;
}
/* sysrq+v will always stop the running trace and leave it at that */
static void etm_dump(void)
{
        struct tracectx *t = &tracer;
        u32 first = 0;
        int length;

        if (!t->etb_regs) {
                printk(KERN_INFO "No tracing hardware found\n");
                return;
        }

        if (trace_isrunning(t))
                trace_stop(t);

        etb_unlock(t);

        length = etb_getdatalen(t);

        if (length == t->etb_bufsz)
                first = etb_readl(t, ETBR_WRITEADDR);

        etb_writel(t, first, ETBR_READADDR);

        printk(KERN_INFO "Trace buffer contents length: %d\n", length);
        printk(KERN_INFO "--- ETB buffer begin ---\n");
        for (; length; length--)
                printk("%08x", cpu_to_be32(etb_readl(t, ETBR_READMEM)));
        printk(KERN_INFO "\n--- ETB buffer end ---\n");

        etb_lock(t);
}
static void sysrq_etm_dump(int key)
{
        if (!mutex_trylock(&tracer.mutex)) {
                printk(KERN_INFO "Tracing hardware busy\n");
                return;
        }
        dev_dbg(tracer.dev, "Dumping ETB buffer\n");
        etm_dump();
        mutex_unlock(&tracer.mutex);
}

static struct sysrq_key_op sysrq_etm_op = {
        .handler = sysrq_etm_dump,
        .help_msg = "ETM buffer dump",
        .action_msg = "etm",
};
static int etb_open(struct inode *inode, struct file *file)
{
        if (!tracer.etb_regs)
                return -ENODEV;

        file->private_data = &tracer;

        return nonseekable_open(inode, file);
}
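
/*
 * Read trace data out of the ETB.  Data is only returned while tracing is
 * stopped; the ETB is read word by word starting from the oldest data
 * (the write pointer once the buffer has wrapped), staged in a temporary
 * vmalloc() buffer and copied to user space, with *ppos tracking the byte
 * offset across successive reads.
 */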
static ssize_t etb_read(struct file *file, char __user *data,
                size_t len, loff_t *ppos)
{
        int total, i;
        long length;
        struct tracectx *t = file->private_data;
        u32 first = 0;
        u32 *buf;
        int wpos;
        int skip;
        long wlength;
        loff_t pos = *ppos;

        mutex_lock(&t->mutex);

        if (trace_isrunning(t)) {
                length = 0;
                goto out;
        }

        etb_unlock(t);

        total = etb_getdatalen(t);
        if (total == 0 && t->dump_initial_etb)
                total = t->etb_bufsz;
        if (total == t->etb_bufsz)
                first = etb_readl(t, ETBR_WRITEADDR);

        if (pos > total * 4) {
                skip = 0;
                wpos = total;
        } else {
                skip = (int)pos % 4;
                wpos = (int)pos / 4;
        }
        total -= wpos;
        first = (first + wpos) % t->etb_bufsz;

        etb_writel(t, first, ETBR_READADDR);

        wlength = min(total, DIV_ROUND_UP(skip + (int)len, 4));
        length = min(total * 4 - skip, (int)len);

        buf = vmalloc(wlength * 4);

        dev_dbg(t->dev, "ETB read %ld bytes to %lld from %ld words at %d\n",
                length, pos, wlength, first);
        dev_dbg(t->dev, "ETB buffer length: %d\n", total + wpos);
        dev_dbg(t->dev, "ETB status reg: %x\n", etb_readl(t, ETBR_STATUS));

        for (i = 0; i < wlength; i++)
                buf[i] = etb_readl(t, ETBR_READMEM);

        etb_lock(t);

        length -= copy_to_user(data, (u8 *)buf + skip, length);
        vfree(buf);
        *ppos = pos + length;

out:
        mutex_unlock(&t->mutex);

        return length;
}
static int etb_release(struct inode *inode, struct file *file)
{
        /* there's nothing to do here, actually */
        return 0;
}

static const struct file_operations etb_fops = {
        .owner = THIS_MODULE,
        .read = etb_read,
        .open = etb_open,
        .release = etb_release,
        .llseek = no_llseek,
};

static struct miscdevice etb_miscdev = {
        .name = "tracebuf",
        .minor = 0,
        .fops = &etb_fops,
};
static int __devinit etb_probe(struct amba_device *dev, const struct amba_id *id)
{
        struct tracectx *t = &tracer;
        int ret = 0;

        ret = amba_request_regions(dev, NULL);
        if (ret)
                goto out;

        mutex_lock(&t->mutex);
        t->etb_regs = ioremap_nocache(dev->res.start, resource_size(&dev->res));
        if (!t->etb_regs) {
                ret = -ENOMEM;
                goto out_release;
        }

        t->dev = &dev->dev;
        t->dump_initial_etb = true;
        amba_set_drvdata(dev, t);

        etb_unlock(t);
        t->etb_bufsz = etb_readl(t, ETBR_DEPTH);
        dev_dbg(&dev->dev, "Size: %x\n", t->etb_bufsz);

        /* make sure trace capture is disabled */
        etb_writel(t, 0, ETBR_CTRL);
        etb_writel(t, 0x1000, ETBR_FORMATTERCTRL);
        etb_lock(t);
        mutex_unlock(&t->mutex);

        etb_miscdev.parent = &dev->dev;

        ret = misc_register(&etb_miscdev);
        if (ret)
                goto out_unmap;

        /* Get optional clock. Currently used to select clock source on omap3 */
        t->emu_clk = clk_get(&dev->dev, "emu_src_ck");
        if (IS_ERR(t->emu_clk))
                dev_dbg(&dev->dev, "Failed to obtain emu_src_ck.\n");
        else
                clk_enable(t->emu_clk);

        dev_dbg(&dev->dev, "ETB AMBA driver initialized.\n");

out:
        return ret;

out_unmap:
        mutex_lock(&t->mutex);
        amba_set_drvdata(dev, NULL);
        iounmap(t->etb_regs);
        t->etb_regs = NULL;

out_release:
        mutex_unlock(&t->mutex);
        amba_release_regions(dev);

        return ret;
}
static int etb_remove(struct amba_device *dev)
{
        struct tracectx *t = amba_get_drvdata(dev);

        amba_set_drvdata(dev, NULL);

        iounmap(t->etb_regs);
        t->etb_regs = NULL;

        if (!IS_ERR(t->emu_clk)) {
                clk_disable(t->emu_clk);
                clk_put(t->emu_clk);
        }

        amba_release_regions(dev);

        return 0;
}

static struct amba_id etb_ids[] = {
        {
                .id = 0x0003b907,
                .mask = 0x0007ffff,
        },
        { 0, 0 },
};

static struct amba_driver etb_driver = {
        .drv = {
                .name = "etb",
                .owner = THIS_MODULE,
        },
        .probe = etb_probe,
        .remove = etb_remove,
        .id_table = etb_ids,
};
/* use a sysfs file "trace_running" to start/stop tracing */
static ssize_t trace_running_show(struct kobject *kobj,
                                  struct kobj_attribute *attr,
                                  char *buf)
{
        return sprintf(buf, "%x\n", trace_isrunning(&tracer));
}

static ssize_t trace_running_store(struct kobject *kobj,
                                   struct kobj_attribute *attr,
                                   const char *buf, size_t n)
{
        unsigned int value;
        int ret;

        if (sscanf(buf, "%u", &value) != 1)
                return -EINVAL;

        mutex_lock(&tracer.mutex);
        if (!tracer.etb_regs)
                ret = -ENODEV;
        else
                ret = value ? trace_start(&tracer) : trace_stop(&tracer);
        mutex_unlock(&tracer.mutex);

        return ret ? : n;
}
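
/*
 * Typical use from user space, assuming the attribute files below are
 * created on the ETM's AMBA device (the exact /sys path is platform
 * dependent) and that udev creates the ETB misc device as /dev/tracebuf:
 *
 *   echo 1 > trace_running          start tracing
 *   echo 0 > trace_running          stop tracing
 *   cat /dev/tracebuf > trace.bin   read the captured trace out of the ETB
 */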
static struct kobj_attribute trace_running_attr =
        __ATTR(trace_running, 0644, trace_running_show, trace_running_store);

static ssize_t trace_info_show(struct kobject *kobj,
                               struct kobj_attribute *attr,
                               char *buf)
{
        u32 etb_wa, etb_ra, etb_st, etb_fc, etm_ctrl, etm_st;
        int datalen;
        int id;
        int ret;

        mutex_lock(&tracer.mutex);
        if (tracer.etb_regs) {
                etb_unlock(&tracer);
                datalen = etb_getdatalen(&tracer);
                etb_wa = etb_readl(&tracer, ETBR_WRITEADDR);
                etb_ra = etb_readl(&tracer, ETBR_READADDR);
                etb_st = etb_readl(&tracer, ETBR_STATUS);
                etb_fc = etb_readl(&tracer, ETBR_FORMATTERCTRL);
                etb_lock(&tracer);
        } else {
                etb_wa = etb_ra = etb_st = etb_fc = ~0;
                datalen = -1;
        }

        ret = sprintf(buf, "Trace buffer len: %d\nComparator pairs: %d\n"
                        "ETBR_WRITEADDR:\t%08x\n"
                        "ETBR_READADDR:\t%08x\n"
                        "ETBR_STATUS:\t%08x\n"
                        "ETBR_FORMATTERCTRL:\t%08x\n",
                        datalen,
                        tracer.ncmppairs,
                        etb_wa,
                        etb_ra,
                        etb_st,
                        etb_fc
                        );

        for (id = 0; id < tracer.etm_regs_count; id++) {
                etm_unlock(&tracer, id);
                etm_ctrl = etm_readl(&tracer, id, ETMR_CTRL);
                etm_st = etm_readl(&tracer, id, ETMR_STATUS);
                etm_lock(&tracer, id);
                ret += sprintf(buf + ret, "ETMR_CTRL:\t%08x\n"
                                "ETMR_STATUS:\t%08x\n",
                                etm_ctrl,
                                etm_st
                                );
        }
        mutex_unlock(&tracer.mutex);

        return ret;
}

static struct kobj_attribute trace_info_attr =
        __ATTR(trace_info, 0444, trace_info_show, NULL);
static ssize_t trace_mode_show(struct kobject *kobj,
                               struct kobj_attribute *attr,
                               char *buf)
{
        return sprintf(buf, "%d %d\n",
                        !!(tracer.flags & TRACER_CYCLE_ACC),
                        tracer.etm_portsz);
}

static ssize_t trace_mode_store(struct kobject *kobj,
                                struct kobj_attribute *attr,
                                const char *buf, size_t n)
{
        unsigned int cycacc, portsz;

        if (sscanf(buf, "%u %u", &cycacc, &portsz) != 2)
                return -EINVAL;

        mutex_lock(&tracer.mutex);
        if (cycacc)
                tracer.flags |= TRACER_CYCLE_ACC;
        else
                tracer.flags &= ~TRACER_CYCLE_ACC;

        tracer.etm_portsz = portsz & 0x0f;
        mutex_unlock(&tracer.mutex);

        return n;
}

static struct kobj_attribute trace_mode_attr =
        __ATTR(trace_mode, 0644, trace_mode_show, trace_mode_store);
static ssize_t trace_contextid_size_show(struct kobject *kobj,
                                         struct kobj_attribute *attr,
                                         char *buf)
{
        /* 0: No context id tracing, 1: One byte, 2: Two bytes, 3: Four bytes */
        return sprintf(buf, "%d\n", (1 << tracer.etm_contextid_size) >> 1);
}

static ssize_t trace_contextid_size_store(struct kobject *kobj,
                                          struct kobj_attribute *attr,
                                          const char *buf, size_t n)
{
        unsigned int contextid_size;

        if (sscanf(buf, "%u", &contextid_size) != 1)
                return -EINVAL;

        if (contextid_size == 3 || contextid_size > 4)
                return -EINVAL;

        mutex_lock(&tracer.mutex);
        tracer.etm_contextid_size = fls(contextid_size);
        mutex_unlock(&tracer.mutex);

        return n;
}

static struct kobj_attribute trace_contextid_size_attr =
        __ATTR(trace_contextid_size, 0644,
                trace_contextid_size_show, trace_contextid_size_store);
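
/*
 * trace_contextid_size round-trips between the byte count written by user
 * space and the field value used by ETMCTRL_CONTEXTIDSIZE() in
 * trace_start_etm(): the store uses fls(), the show uses (1 << v) >> 1.
 *
 *   bytes written    stored value    shown again as
 *        0                 0                0
 *        1                 1                1
 *        2                 2                2
 *        4                 3                4
 */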
static ssize_t trace_branch_output_show(struct kobject *kobj,
                                        struct kobj_attribute *attr,
                                        char *buf)
{
        return sprintf(buf, "%d\n", !!(tracer.flags & TRACER_BRANCHOUTPUT));
}

static ssize_t trace_branch_output_store(struct kobject *kobj,
                                         struct kobj_attribute *attr,
                                         const char *buf, size_t n)
{
        unsigned int branch_output;

        if (sscanf(buf, "%u", &branch_output) != 1)
                return -EINVAL;

        mutex_lock(&tracer.mutex);
        if (branch_output) {
                tracer.flags |= TRACER_BRANCHOUTPUT;
                /* Branch broadcasting is incompatible with the return stack */
                tracer.flags &= ~TRACER_RETURN_STACK;
        } else {
                tracer.flags &= ~TRACER_BRANCHOUTPUT;
        }
        mutex_unlock(&tracer.mutex);

        return n;
}

static struct kobj_attribute trace_branch_output_attr =
        __ATTR(trace_branch_output, 0644,
                trace_branch_output_show, trace_branch_output_store);

static ssize_t trace_return_stack_show(struct kobject *kobj,
                                       struct kobj_attribute *attr,
                                       char *buf)
{
        return sprintf(buf, "%d\n", !!(tracer.flags & TRACER_RETURN_STACK));
}

static ssize_t trace_return_stack_store(struct kobject *kobj,
                                        struct kobj_attribute *attr,
                                        const char *buf, size_t n)
{
        unsigned int return_stack;

        if (sscanf(buf, "%u", &return_stack) != 1)
                return -EINVAL;

        mutex_lock(&tracer.mutex);
        if (return_stack) {
                tracer.flags |= TRACER_RETURN_STACK;
                /* Return stack is incompatible with branch broadcasting */
                tracer.flags &= ~TRACER_BRANCHOUTPUT;
        } else {
                tracer.flags &= ~TRACER_RETURN_STACK;
        }
        mutex_unlock(&tracer.mutex);

        return n;
}

static struct kobj_attribute trace_return_stack_attr =
        __ATTR(trace_return_stack, 0644,
                trace_return_stack_show, trace_return_stack_store);
static ssize_t trace_timestamp_show(struct kobject *kobj,
                                    struct kobj_attribute *attr,
                                    char *buf)
{
        return sprintf(buf, "%d\n", !!(tracer.flags & TRACER_TIMESTAMP));
}

static ssize_t trace_timestamp_store(struct kobject *kobj,
                                     struct kobj_attribute *attr,
                                     const char *buf, size_t n)
{
        unsigned int timestamp;

        if (sscanf(buf, "%u", &timestamp) != 1)
                return -EINVAL;

        mutex_lock(&tracer.mutex);
        if (timestamp)
                tracer.flags |= TRACER_TIMESTAMP;
        else
                tracer.flags &= ~TRACER_TIMESTAMP;
        mutex_unlock(&tracer.mutex);

        return n;
}

static struct kobj_attribute trace_timestamp_attr =
        __ATTR(trace_timestamp, 0644,
                trace_timestamp_show, trace_timestamp_store);

static ssize_t trace_range_show(struct kobject *kobj,
                                struct kobj_attribute *attr,
                                char *buf)
{
        return sprintf(buf, "%08lx %08lx\n",
                        tracer.range_start, tracer.range_end);
}

static ssize_t trace_range_store(struct kobject *kobj,
                                 struct kobj_attribute *attr,
                                 const char *buf, size_t n)
{
        unsigned long range_start, range_end;

        if (sscanf(buf, "%lx %lx", &range_start, &range_end) != 2)
                return -EINVAL;

        mutex_lock(&tracer.mutex);
        tracer.range_start = range_start;
        tracer.range_end = range_end;
        mutex_unlock(&tracer.mutex);

        return n;
}

static struct kobj_attribute trace_range_attr =
        __ATTR(trace_range, 0644, trace_range_show, trace_range_store);
static ssize_t trace_data_range_show(struct kobject *kobj,
                                     struct kobj_attribute *attr,
                                     char *buf)
{
        unsigned long range_start;
        u64 range_end;

        mutex_lock(&tracer.mutex);
        range_start = tracer.data_range_start;
        range_end = tracer.data_range_end;
        if (!range_end && (tracer.flags & TRACER_TRACE_DATA))
                range_end = 0x100000000ULL;
        mutex_unlock(&tracer.mutex);

        return sprintf(buf, "%08lx %08llx\n", range_start, range_end);
}

static ssize_t trace_data_range_store(struct kobject *kobj,
                                      struct kobj_attribute *attr,
                                      const char *buf, size_t n)
{
        unsigned long range_start;
        u64 range_end;

        if (sscanf(buf, "%lx %llx", &range_start, &range_end) != 2)
                return -EINVAL;

        mutex_lock(&tracer.mutex);
        tracer.data_range_start = range_start;
        tracer.data_range_end = (unsigned long)range_end;
        if (range_end)
                tracer.flags |= TRACER_TRACE_DATA;
        else
                tracer.flags &= ~TRACER_TRACE_DATA;
        mutex_unlock(&tracer.mutex);

        return n;
}

static struct kobj_attribute trace_data_range_attr =
        __ATTR(trace_data_range, 0644,
                trace_data_range_show, trace_data_range_store);
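
/*
 * Probe one ETM: grow the per-ETM register array, map the device, read the
 * configuration code register (number of comparator pairs, presence of the
 * ID register), assign the new ETM a unique trace ID and create the sysfs
 * control files on its device.  Once more than one trace source is
 * registered, the ETB formatter is enabled.
 */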
static int __devinit etm_probe(struct amba_device *dev, const struct amba_id *id)
{
        struct tracectx *t = &tracer;
        int ret = 0;
        void __iomem **new_regs;
        int new_count;
        u32 etmccr;
        u32 etmidr;
        u32 etmccer = 0;
        u8 etm_version = 0;

        mutex_lock(&t->mutex);
        new_count = t->etm_regs_count + 1;
        new_regs = krealloc(t->etm_regs,
                        sizeof(t->etm_regs[0]) * new_count, GFP_KERNEL);

        if (!new_regs) {
                dev_dbg(&dev->dev, "Failed to allocate ETM register array\n");
                ret = -ENOMEM;
                goto out;
        }
        t->etm_regs = new_regs;

        ret = amba_request_regions(dev, NULL);
        if (ret)
                goto out;

        t->etm_regs[t->etm_regs_count] =
                ioremap_nocache(dev->res.start, resource_size(&dev->res));
        if (!t->etm_regs[t->etm_regs_count]) {
                ret = -ENOMEM;
                goto out_release;
        }

        amba_set_drvdata(dev, t->etm_regs[t->etm_regs_count]);

        t->flags = TRACER_CYCLE_ACC | TRACER_TRACE_DATA | TRACER_BRANCHOUTPUT;
        t->etm_portsz = 1;
        t->etm_contextid_size = 3;

        etm_unlock(t, t->etm_regs_count);
        (void)etm_readl(t, t->etm_regs_count, ETMMR_PDSR);
        /* dummy first read */
        (void)etm_readl(&tracer, t->etm_regs_count, ETMMR_OSSRR);

        etmccr = etm_readl(t, t->etm_regs_count, ETMR_CONFCODE);
        t->ncmppairs = etmccr & 0xf;
        if (etmccr & ETMCCR_ETMIDR_PRESENT) {
                etmidr = etm_readl(t, t->etm_regs_count, ETMR_ID);
                etm_version = ETMIDR_VERSION(etmidr);
                if (etm_version >= ETMIDR_VERSION_3_1)
                        etmccer = etm_readl(t, t->etm_regs_count, ETMR_CCE);
        }
        etm_writel(t, t->etm_regs_count, 0x441, ETMR_CTRL);
        etm_writel(t, t->etm_regs_count, new_count, ETMR_TRACEIDR);
        etm_lock(t, t->etm_regs_count);

        ret = sysfs_create_file(&dev->dev.kobj,
                        &trace_running_attr.attr);
        if (ret)
                goto out_unmap;

        /* failing to create any of these two is not fatal */
        ret = sysfs_create_file(&dev->dev.kobj, &trace_info_attr.attr);
        if (ret)
                dev_dbg(&dev->dev, "Failed to create trace_info in sysfs\n");

        ret = sysfs_create_file(&dev->dev.kobj, &trace_mode_attr.attr);
        if (ret)
                dev_dbg(&dev->dev, "Failed to create trace_mode in sysfs\n");

        ret = sysfs_create_file(&dev->dev.kobj,
                        &trace_contextid_size_attr.attr);
        if (ret)
                dev_dbg(&dev->dev,
                        "Failed to create trace_contextid_size in sysfs\n");

        ret = sysfs_create_file(&dev->dev.kobj,
                        &trace_branch_output_attr.attr);
        if (ret)
                dev_dbg(&dev->dev,
                        "Failed to create trace_branch_output in sysfs\n");

        if (etmccer & ETMCCER_RETURN_STACK_IMPLEMENTED) {
                ret = sysfs_create_file(&dev->dev.kobj,
                                &trace_return_stack_attr.attr);
                if (ret)
                        dev_dbg(&dev->dev,
                                "Failed to create trace_return_stack in sysfs\n");
        }

        if (etmccer & ETMCCER_TIMESTAMPING_IMPLEMENTED) {
                ret = sysfs_create_file(&dev->dev.kobj,
                                &trace_timestamp_attr.attr);
                if (ret)
                        dev_dbg(&dev->dev,
                                "Failed to create trace_timestamp in sysfs\n");
        }

        ret = sysfs_create_file(&dev->dev.kobj, &trace_range_attr.attr);
        if (ret)
                dev_dbg(&dev->dev, "Failed to create trace_range in sysfs\n");

        if (etm_version < ETMIDR_VERSION_PFT_1_0) {
                ret = sysfs_create_file(&dev->dev.kobj,
                                &trace_data_range_attr.attr);
                if (ret)
                        dev_dbg(&dev->dev,
                                "Failed to create trace_data_range in sysfs\n");
        } else {
                tracer.flags &= ~TRACER_TRACE_DATA;
        }

        dev_dbg(&dev->dev, "ETM AMBA driver initialized.\n");

        /* Enable formatter if there are multiple trace sources */
        if (new_count > 1)
                t->etb_fc = ETBFF_ENFCONT | ETBFF_ENFTC;

        t->etm_regs_count = new_count;

out:
        mutex_unlock(&t->mutex);
        return ret;

out_unmap:
        amba_set_drvdata(dev, NULL);
        iounmap(t->etm_regs[t->etm_regs_count]);

out_release:
        amba_release_regions(dev);
        mutex_unlock(&t->mutex);
        return ret;
}
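
/*
 * Remove one ETM: drop its sysfs files, take its mapping out of the
 * tracer's register array (compacting the remaining entries), unmap it and
 * release its AMBA regions.
 */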
static int etm_remove(struct amba_device *dev)
{
        int i;
        struct tracectx *t = &tracer;
        void __iomem *etm_regs = amba_get_drvdata(dev);

        sysfs_remove_file(&dev->dev.kobj, &trace_running_attr.attr);
        sysfs_remove_file(&dev->dev.kobj, &trace_info_attr.attr);
        sysfs_remove_file(&dev->dev.kobj, &trace_mode_attr.attr);
        sysfs_remove_file(&dev->dev.kobj, &trace_range_attr.attr);
        sysfs_remove_file(&dev->dev.kobj, &trace_data_range_attr.attr);

        amba_set_drvdata(dev, NULL);

        mutex_lock(&t->mutex);
        for (i = 0; i < t->etm_regs_count; i++)
                if (t->etm_regs[i] == etm_regs)
                        break;
        for (; i < t->etm_regs_count - 1; i++)
                t->etm_regs[i] = t->etm_regs[i + 1];
        t->etm_regs_count--;
        if (!t->etm_regs_count) {
                kfree(t->etm_regs);
                t->etm_regs = NULL;
        }
        mutex_unlock(&t->mutex);

        iounmap(etm_regs);
        amba_release_regions(dev);

        return 0;
}
static struct amba_id etm_ids[] = {
        {
                .id = 0x0003b921,
                .mask = 0x0007ffff,
        },
        {
                .id = 0x0003b950,
                .mask = 0x0007ffff,
        },
        { 0, 0 },
};

static struct amba_driver etm_driver = {
        .drv = {
                .name = "etm",
                .owner = THIS_MODULE,
        },
        .probe = etm_probe,
        .remove = etm_remove,
        .id_table = etm_ids,
};
static int __init etm_init(void)
{
        int retval;

        mutex_init(&tracer.mutex);

        retval = amba_driver_register(&etb_driver);
        if (retval) {
                printk(KERN_ERR "Failed to register etb\n");
                return retval;
        }

        retval = amba_driver_register(&etm_driver);
        if (retval) {
                amba_driver_unregister(&etb_driver);
                printk(KERN_ERR "Failed to probe etm\n");
                return retval;
        }

        /* not being able to install this handler is not fatal */
        (void)register_sysrq_key('v', &sysrq_etm_op);

        return 0;
}

device_initcall(etm_init);