msm_iommu_perfmon.c

/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/iommu.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <mach/iommu.h>
#include <mach/iommu_perfmon.h>
static LIST_HEAD(iommu_list);
static struct dentry *msm_iommu_root_debugfs_dir;
static const char *NO_EVENT_CLASS_NAME = "none";
static const unsigned int MAX_EVEN_CLASS_NAME_LEN = 36;

struct event_class {
	unsigned int event_number;
	const char *desc;
};
static struct event_class pmu_event_classes[] = {
	{ 0x00, "cycle_count" },
	{ 0x01, "cycle_count64" },
	{ 0x08, "tlb_refill" },
	{ 0x09, "tlb_refill_read" },
	{ 0x0A, "tlb_refill_write" },
	{ 0x10, "access" },
	{ 0x11, "access_read" },
	{ 0x12, "access_write" },
	{ 0x80, "full_misses" },
	{ 0x81, "partial_miss_1lbfb_hit" },
	{ 0x82, "partial_miss_2lbfb_hit" },
	{ 0x83, "full_hit" },
	{ 0x90, "pred_req_full_miss" },
	{ 0x91, "pred_req_partial_miss_1lbfb_hit" },
	{ 0x92, "pred_req_partial_miss_2lbfb_hit" },
	{ 0xb0, "tot_num_miss_axi_htw_read_req" },
	{ 0xb1, "tot_num_pred_axi_htw_read_req" },
};
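/*
 * The event_number values above are what iommu_pm_set_event_type() hands
 * to hw_ops->set_event_class(); the desc strings are the names exposed to
 * userspace through the current_event_class and available_event_classes
 * debugfs files below. Only classes listed in a pmon's
 * event_cls_supported[] array are actually selectable.
 */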
static unsigned int iommu_pm_create_sup_cls_str(char **buf,
						struct iommu_pmon *pmon)
{
	unsigned long buf_size = ARRAY_SIZE(pmu_event_classes) *
				 MAX_EVEN_CLASS_NAME_LEN;
	unsigned int pos = 0;
	unsigned int nevent_cls = pmon->nevent_cls_supported;

	*buf = kzalloc(buf_size, GFP_KERNEL);
	if (*buf) {
		unsigned int j;
		int i;
		struct event_class *ptr;
		size_t array_len = ARRAY_SIZE(pmu_event_classes);

		ptr = pmu_event_classes;
		for (j = 0; j < nevent_cls; ++j) {
			for (i = 0; i < array_len; ++i) {
				if (ptr[i].event_number !=
						pmon->event_cls_supported[j])
					continue;

				if (pos < buf_size) {
					pos += snprintf(&(*buf)[pos],
							buf_size - pos,
							"[%u] %s\n",
							ptr[i].event_number,
							ptr[i].desc);
				}
				break;
			}
		}
	}
	return pos;
}
static int iommu_pm_event_class_supported(struct iommu_pmon *pmon,
					  int event_class)
{
	unsigned int nevent_cls = pmon->nevent_cls_supported;
	unsigned int i;

	for (i = 0; i < nevent_cls; ++i) {
		if (event_class == pmon->event_cls_supported[i])
			return event_class;
	}
	return MSM_IOMMU_PMU_NO_EVENT_CLASS;
}
static const char *iommu_pm_find_event_class_name(int event_class)
{
	size_t array_len;
	struct event_class *ptr;
	int i;
	const char *event_class_name = NO_EVENT_CLASS_NAME;

	if (event_class < 0)
		goto out;

	array_len = ARRAY_SIZE(pmu_event_classes);
	ptr = pmu_event_classes;

	for (i = 0; i < array_len; ++i) {
		if (ptr[i].event_number == event_class) {
			event_class_name = ptr[i].desc;
			break;
		}
	}
out:
	return event_class_name;
}
static int iommu_pm_find_event_class(struct iommu_pmon *pmon,
				     const char *event_class_name)
{
	size_t array_len;
	struct event_class *ptr;
	int i;
	int event_class = MSM_IOMMU_PMU_NO_EVENT_CLASS;

	if (strcmp(event_class_name, NO_EVENT_CLASS_NAME) == 0)
		goto out;

	array_len = ARRAY_SIZE(pmu_event_classes);
	ptr = pmu_event_classes;

	for (i = 0; i < array_len; ++i) {
		if (strcmp(ptr[i].desc, event_class_name) == 0) {
			event_class = ptr[i].event_number;
			goto out;
		}
	}
out:
	event_class = iommu_pm_event_class_supported(pmon, event_class);
	return event_class;
}
static inline void iommu_pm_add_to_iommu_list(struct iommu_pmon *iommu_pmon)
{
	list_add(&iommu_pmon->iommu_list, &iommu_list);
}

static inline void iommu_pm_del_from_iommu_list(struct iommu_pmon *iommu_pmon)
{
	list_del(&iommu_pmon->iommu_list);
}

static struct iommu_pmon *iommu_pm_get_pm_by_dev(struct device *dev)
{
	struct iommu_pmon *pmon;
	struct iommu_info *info;
	struct list_head *ent;

	list_for_each(ent, &iommu_list) {
		pmon = list_entry(ent, struct iommu_pmon, iommu_list);
		info = &pmon->iommu;
		if (dev == info->iommu_dev)
			return pmon;
	}
	return NULL;
}
static void iommu_pm_set_event_type(struct iommu_pmon *pmon,
				    struct iommu_pmon_counter *counter)
{
	int event_class;
	unsigned int count_no;
	struct iommu_info *iommu = &pmon->iommu;

	event_class = counter->current_event_class;
	count_no = counter->absolute_counter_no;

	if (event_class == MSM_IOMMU_PMU_NO_EVENT_CLASS) {
		if (iommu->hw_ops->is_hw_access_OK(pmon)) {
			iommu->ops->iommu_lock_acquire(1);
			iommu->hw_ops->counter_disable(iommu, counter);
			iommu->hw_ops->ovfl_int_disable(iommu, counter);
			iommu->hw_ops->set_event_class(pmon, count_no, 0);
			iommu->ops->iommu_lock_release(1);
		}
		counter->overflow_count = 0;
		counter->value = 0;
	} else {
		counter->overflow_count = 0;
		counter->value = 0;
		if (iommu->hw_ops->is_hw_access_OK(pmon)) {
			iommu->ops->iommu_lock_acquire(1);
			iommu->hw_ops->set_event_class(pmon, count_no,
						       event_class);
			iommu->hw_ops->ovfl_int_enable(iommu, counter);
			iommu->hw_ops->counter_enable(iommu, counter);
			iommu->ops->iommu_lock_release(1);
		}
	}
}
static void iommu_pm_reset_counts(struct iommu_pmon *pmon)
{
	unsigned int i;
	unsigned int j;

	for (i = 0; i < pmon->num_groups; ++i) {
		struct iommu_pmon_cnt_group *cnt_grp = &pmon->cnt_grp[i];

		for (j = 0; j < cnt_grp->num_counters; ++j) {
			cnt_grp->counters[j].value = 0;
			cnt_grp->counters[j].overflow_count = 0;
		}
	}
}

static void iommu_pm_set_all_counters(struct iommu_pmon *pmon)
{
	unsigned int i;
	unsigned int j;

	for (i = 0; i < pmon->num_groups; ++i) {
		struct iommu_pmon_cnt_group *cnt_grp = &pmon->cnt_grp[i];

		for (j = 0; j < cnt_grp->num_counters; ++j)
			iommu_pm_set_event_type(pmon, &cnt_grp->counters[j]);
	}
}

static void iommu_pm_read_all_counters(struct iommu_pmon *pmon)
{
	unsigned int i;
	unsigned int j;
	struct iommu_info *iommu = &pmon->iommu;

	for (i = 0; i < pmon->num_groups; ++i) {
		struct iommu_pmon_cnt_group *cnt_grp = &pmon->cnt_grp[i];

		for (j = 0; j < cnt_grp->num_counters; ++j) {
			struct iommu_pmon_counter *counter;

			counter = &cnt_grp->counters[j];
			counter->value = iommu->hw_ops->read_counter(counter);
		}
	}
}
static void iommu_pm_on(struct iommu_pmon *pmon)
{
	unsigned int i;
	struct iommu_info *iommu = &pmon->iommu;
	struct msm_iommu_drvdata *iommu_drvdata =
					dev_get_drvdata(iommu->iommu_dev);

	iommu->ops->iommu_power_on(iommu_drvdata);
	iommu->ops->iommu_bus_vote(iommu_drvdata, 1);
	iommu->ops->iommu_clk_on(iommu_drvdata);

	/* Reset counters in HW */
	iommu->ops->iommu_lock_acquire(1);
	iommu->hw_ops->reset_counters(&pmon->iommu);
	iommu->ops->iommu_lock_release(1);

	/* Reset SW counters */
	iommu_pm_reset_counts(pmon);

	pmon->enabled = 1;

	iommu_pm_set_all_counters(pmon);

	iommu->ops->iommu_lock_acquire(1);

	/* Enable all counter groups */
	for (i = 0; i < pmon->num_groups; ++i)
		iommu->hw_ops->grp_enable(iommu, i);

	/* Enable global counters */
	iommu->hw_ops->enable_pm(iommu);
	iommu->ops->iommu_lock_release(1);

	pr_info("%s: TLB performance monitoring turned ON\n",
		pmon->iommu.iommu_name);
}

static void iommu_pm_off(struct iommu_pmon *pmon)
{
	unsigned int i;
	struct iommu_info *iommu = &pmon->iommu;
	struct msm_iommu_drvdata *iommu_drvdata =
					dev_get_drvdata(iommu->iommu_dev);

	pmon->enabled = 0;

	iommu->ops->iommu_lock_acquire(1);

	/* Disable global counters */
	iommu->hw_ops->disable_pm(iommu);

	/* Check if we overflowed just before turning off pmon */
	iommu->hw_ops->check_for_overflow(pmon);

	/* Disable all counter groups */
	for (i = 0; i < pmon->num_groups; ++i)
		iommu->hw_ops->grp_disable(iommu, i);

	/* Update cached copy of counters before turning off power */
	iommu_pm_read_all_counters(pmon);

	iommu->ops->iommu_lock_release(1);

	iommu->ops->iommu_clk_off(iommu_drvdata);
	iommu->ops->iommu_bus_vote(iommu_drvdata, 0);
	iommu->ops->iommu_power_off(iommu_drvdata);

	pr_info("%s: TLB performance monitoring turned OFF\n",
		pmon->iommu.iommu_name);
}
static int iommu_pm_debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

static ssize_t iommu_pm_count_value_read(struct file *fp,
					 char __user *user_buff,
					 size_t count, loff_t *pos)
{
	size_t rd_cnt;
	unsigned long long full_count;
	struct iommu_pmon_counter *counter = fp->private_data;
	struct iommu_pmon *pmon = counter->cnt_group->pmon;
	struct iommu_info *iommu = &pmon->iommu;
	char buf[50];
	size_t len;

	mutex_lock(&pmon->lock);

	if (iommu->hw_ops->is_hw_access_OK(pmon)) {
		iommu->ops->iommu_lock_acquire(1);
		counter->value = iommu->hw_ops->read_counter(counter);
		iommu->ops->iommu_lock_release(1);
	}
	full_count = (unsigned long long)counter->value +
		     ((unsigned long long)counter->overflow_count *
		      0x100000000ULL);

	len = snprintf(buf, 50, "%llu\n", full_count);
	rd_cnt = simple_read_from_buffer(user_buff, count, pos, buf, len);

	mutex_unlock(&pmon->lock);
	return rd_cnt;
}

static const struct file_operations cnt_value_file_ops = {
	.open = iommu_pm_debug_open,
	.read = iommu_pm_count_value_read,
};
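/*
 * The "value" file reports value + overflow_count * 2^32, i.e. the cached
 * 32-bit hardware count widened by the number of recorded overflows. The
 * per-implementation overflow interrupt handler supplied through
 * hw_ops->evt_ovfl_int_handler (registered below) is presumably what keeps
 * overflow_count up to date; that handler lives outside this file.
 */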
static ssize_t iommu_pm_event_class_read(struct file *fp,
					 char __user *user_buff,
					 size_t count, loff_t *pos)
{
	size_t rd_cnt;
	struct iommu_pmon_counter *counter = fp->private_data;
	struct iommu_pmon *pmon = counter->cnt_group->pmon;
	char buf[50];
	const char *event_class_name;
	size_t len;

	mutex_lock(&pmon->lock);
	event_class_name = iommu_pm_find_event_class_name(
						counter->current_event_class);
	len = snprintf(buf, 50, "%s\n", event_class_name);

	rd_cnt = simple_read_from_buffer(user_buff, count, pos, buf, len);
	mutex_unlock(&pmon->lock);
	return rd_cnt;
}

static ssize_t iommu_pm_event_class_write(struct file *fp,
					  const char __user *user_buff,
					  size_t count, loff_t *pos)
{
	size_t wr_cnt;
	char buf[50];
	size_t buf_size = sizeof(buf);
	struct iommu_pmon_counter *counter = fp->private_data;
	struct iommu_pmon *pmon = counter->cnt_group->pmon;
	int current_event_class;

	if ((count + *pos) >= buf_size)
		return -EINVAL;

	mutex_lock(&pmon->lock);
	current_event_class = counter->current_event_class;
	wr_cnt = simple_write_to_buffer(buf, buf_size, pos, user_buff, count);
	if (wr_cnt >= 1) {
		int rv;
		long value;

		buf[wr_cnt - 1] = '\0';
		rv = kstrtol(buf, 10, &value);
		if (!rv) {
			counter->current_event_class =
				iommu_pm_find_event_class(pmon,
					iommu_pm_find_event_class_name(value));
		} else {
			counter->current_event_class =
				iommu_pm_find_event_class(pmon, buf);
		}
	}

	if (current_event_class != counter->current_event_class)
		iommu_pm_set_event_type(pmon, counter);

	mutex_unlock(&pmon->lock);
	return wr_cnt;
}

static const struct file_operations event_class_file_ops = {
	.open = iommu_pm_debug_open,
	.read = iommu_pm_event_class_read,
	.write = iommu_pm_event_class_write,
};
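/*
 * current_event_class accepts either the decimal event number or the class
 * name (anything kstrtol() cannot parse is treated as a name); "none" stops
 * counting on that counter, and classes not in event_cls_supported[] fall
 * back to "none". Example (assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *   echo tlb_refill > .../group0/counter0/current_event_class
 *   echo 8          > .../group0/counter0/current_event_class
 *   echo none       > .../group0/counter0/current_event_class
 */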
static ssize_t iommu_reset_counters_write(struct file *fp,
					  const char __user *user_buff,
					  size_t count, loff_t *pos)
{
	size_t wr_cnt;
	char buf[10];
	size_t buf_size = sizeof(buf);
	struct iommu_pmon *pmon = fp->private_data;
	struct iommu_info *iommu = &pmon->iommu;

	if ((count + *pos) >= buf_size)
		return -EINVAL;

	mutex_lock(&pmon->lock);
	wr_cnt = simple_write_to_buffer(buf, buf_size, pos, user_buff, count);
	if (wr_cnt >= 1) {
		unsigned long cmd = 0;
		int rv;

		buf[wr_cnt - 1] = '\0';
		rv = kstrtoul(buf, 10, &cmd);
		if (!rv && (cmd == 1)) {
			if (iommu->hw_ops->is_hw_access_OK(pmon)) {
				iommu->ops->iommu_lock_acquire(1);
				iommu->hw_ops->reset_counters(&pmon->iommu);
				iommu->ops->iommu_lock_release(1);
			}
			iommu_pm_reset_counts(pmon);
			pr_info("TLB performance counters reset\n");
		} else {
			pr_err("Unknown performance monitor command: %lu\n",
				cmd);
		}
	}
	mutex_unlock(&pmon->lock);
	return wr_cnt;
}

static const struct file_operations reset_file_ops = {
	.open = iommu_pm_debug_open,
	.write = iommu_reset_counters_write,
};
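/*
 * Writing "1" to reset_counters clears both the hardware counters (when the
 * IOMMU is currently accessible) and the cached software values; any other
 * value is rejected with an error in the kernel log.
 */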
static ssize_t iommu_pm_enable_counters_read(struct file *fp,
					     char __user *user_buff,
					     size_t count, loff_t *pos)
{
	size_t rd_cnt;
	char buf[5];
	size_t len;
	struct iommu_pmon *pmon = fp->private_data;

	mutex_lock(&pmon->lock);
	len = snprintf(buf, 5, "%u\n", pmon->enabled);
	rd_cnt = simple_read_from_buffer(user_buff, count, pos, buf, len);
	mutex_unlock(&pmon->lock);
	return rd_cnt;
}

static ssize_t iommu_pm_enable_counters_write(struct file *fp,
					      const char __user *user_buff,
					      size_t count, loff_t *pos)
{
	size_t wr_cnt;
	char buf[10];
	size_t buf_size = sizeof(buf);
	struct iommu_pmon *pmon = fp->private_data;

	if ((count + *pos) >= buf_size)
		return -EINVAL;

	mutex_lock(&pmon->lock);
	wr_cnt = simple_write_to_buffer(buf, buf_size, pos, user_buff, count);
	if (wr_cnt >= 1) {
		unsigned long cmd;
		int rv;

		buf[wr_cnt - 1] = '\0';
		rv = kstrtoul(buf, 10, &cmd);
		if (!rv && (cmd < 2)) {
			if (pmon->enabled == 1 && cmd == 0) {
				if (pmon->iommu.always_on ||
				    pmon->iommu_attach_count > 0)
					iommu_pm_off(pmon);
			} else if (pmon->enabled == 0 && cmd == 1) {
				/* We can only turn on perf. monitoring if
				 * iommu is attached (if not always on).
				 * Delay turning on perf. monitoring until
				 * we are attached.
				 */
				if (pmon->iommu.always_on ||
				    pmon->iommu_attach_count > 0)
					iommu_pm_on(pmon);
				else
					pmon->enabled = 1;
			}
		} else {
			pr_err("Unknown performance monitor command: %lu\n",
				cmd);
		}
	}
	mutex_unlock(&pmon->lock);
	return wr_cnt;
}

static const struct file_operations event_enable_file_ops = {
	.open = iommu_pm_debug_open,
	.read = iommu_pm_enable_counters_read,
	.write = iommu_pm_enable_counters_write,
};
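/*
 * enable_counters: write "1" to start monitoring and "0" to stop it;
 * reading the file returns the current state. If the IOMMU is not attached
 * yet (and is not always-on), enabling is deferred until
 * msm_iommu_attached() is called for this device.
 */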
static ssize_t iommu_pm_avail_event_cls_read(struct file *fp,
					     char __user *user_buff,
					     size_t count, loff_t *pos)
{
	size_t rd_cnt = 0;
	struct iommu_pmon *pmon = fp->private_data;
	char *buf;
	size_t len;

	mutex_lock(&pmon->lock);

	len = iommu_pm_create_sup_cls_str(&buf, pmon);
	if (buf) {
		rd_cnt = simple_read_from_buffer(user_buff, count, pos,
						 buf, len);
		kfree(buf);
	}
	mutex_unlock(&pmon->lock);
	return rd_cnt;
}

static const struct file_operations available_event_cls_file_ops = {
	.open = iommu_pm_debug_open,
	.read = iommu_pm_avail_event_cls_read,
};
static int iommu_pm_create_grp_debugfs_counters_hierarchy(
					struct iommu_pmon_cnt_group *cnt_grp,
					unsigned int *abs_counter_no)
{
	int ret = 0;
	int j;
	char name[20];

	for (j = 0; j < cnt_grp->num_counters; ++j) {
		struct dentry *grp_dir = cnt_grp->group_dir;
		struct dentry *counter_dir;

		cnt_grp->counters[j].cnt_group = cnt_grp;
		cnt_grp->counters[j].counter_no = j;
		cnt_grp->counters[j].absolute_counter_no = *abs_counter_no;
		(*abs_counter_no)++;
		cnt_grp->counters[j].value = 0;
		cnt_grp->counters[j].overflow_count = 0;
		cnt_grp->counters[j].current_event_class =
						MSM_IOMMU_PMU_NO_EVENT_CLASS;

		snprintf(name, 20, "counter%u", j);

		counter_dir = debugfs_create_dir(name, grp_dir);
		if (IS_ERR_OR_NULL(counter_dir)) {
			pr_err("unable to create counter debugfs dir %s\n",
				name);
			ret = -ENOMEM;
			goto out;
		}

		cnt_grp->counters[j].counter_dir = counter_dir;

		if (!debugfs_create_file("value", 0644, counter_dir,
					 &cnt_grp->counters[j],
					 &cnt_value_file_ops)) {
			ret = -EIO;
			goto out;
		}

		if (!debugfs_create_file("current_event_class", 0644,
					 counter_dir, &cnt_grp->counters[j],
					 &event_class_file_ops)) {
			ret = -EIO;
			goto out;
		}
	}
out:
	return ret;
}
static int iommu_pm_create_group_debugfs_hierarchy(struct iommu_info *iommu,
						   struct iommu_pmon *pmon_entry)
{
	int i;
	int ret = 0;
	char name[20];
	unsigned int abs_counter_no = 0;

	for (i = 0; i < pmon_entry->num_groups; ++i) {
		pmon_entry->cnt_grp[i].pmon = pmon_entry;
		pmon_entry->cnt_grp[i].grp_no = i;
		pmon_entry->cnt_grp[i].num_counters = pmon_entry->num_counters;
		pmon_entry->cnt_grp[i].counters =
			kzalloc(sizeof(*pmon_entry->cnt_grp[i].counters)
				* pmon_entry->cnt_grp[i].num_counters,
				GFP_KERNEL);
		if (!pmon_entry->cnt_grp[i].counters) {
			pr_err("Unable to allocate memory for counters\n");
			ret = -ENOMEM;
			goto out;
		}

		snprintf(name, 20, "group%u", i);
		pmon_entry->cnt_grp[i].group_dir = debugfs_create_dir(name,
						      pmon_entry->iommu_dir);
		if (IS_ERR_OR_NULL(pmon_entry->cnt_grp[i].group_dir)) {
			pr_err("unable to create group debugfs dir %s\n", name);
			ret = -ENOMEM;
			goto out;
		}

		ret = iommu_pm_create_grp_debugfs_counters_hierarchy(
						&pmon_entry->cnt_grp[i],
						&abs_counter_no);
		if (ret)
			goto out;
	}
out:
	return ret;
}
int msm_iommu_pm_iommu_register(struct iommu_pmon *pmon_entry)
{
	int ret = 0;
	struct iommu_info *iommu = &pmon_entry->iommu;
	int i;

	if (!iommu->ops || !iommu->iommu_name || !iommu->base
					|| !iommu->iommu_dev) {
		ret = -EINVAL;
		goto out;
	}

	if (!msm_iommu_root_debugfs_dir) {
		msm_iommu_root_debugfs_dir = debugfs_create_dir("iommu", NULL);
		if (IS_ERR_OR_NULL(msm_iommu_root_debugfs_dir)) {
			pr_err("Failed creating iommu debugfs dir \"iommu\"\n");
			ret = -EIO;
			goto out;
		}
	}

	pmon_entry->cnt_grp = kzalloc(sizeof(*pmon_entry->cnt_grp)
				      * pmon_entry->num_groups, GFP_KERNEL);
	if (!pmon_entry->cnt_grp) {
		pr_err("Unable to allocate memory for counter groups\n");
		ret = -ENOMEM;
		goto file_err;
	}

	pmon_entry->iommu_dir = debugfs_create_dir(iommu->iommu_name,
						   msm_iommu_root_debugfs_dir);
	if (IS_ERR_OR_NULL(pmon_entry->iommu_dir)) {
		pr_err("unable to create iommu debugfs dir %s\n",
			iommu->iommu_name);
		ret = -ENOMEM;
		goto free_mem;
	}

	if (!debugfs_create_file("reset_counters", 0644,
			pmon_entry->iommu_dir, pmon_entry, &reset_file_ops)) {
		ret = -EIO;
		goto free_mem;
	}

	if (!debugfs_create_file("enable_counters", 0644,
			pmon_entry->iommu_dir, pmon_entry,
			&event_enable_file_ops)) {
		ret = -EIO;
		goto free_mem;
	}

	if (!debugfs_create_file("available_event_classes", 0644,
			pmon_entry->iommu_dir, pmon_entry,
			&available_event_cls_file_ops)) {
		ret = -EIO;
		goto free_mem;
	}

	ret = iommu_pm_create_group_debugfs_hierarchy(iommu, pmon_entry);
	if (ret)
		goto free_mem;

	iommu->hw_ops->initialize_hw(pmon_entry);

	if (iommu->evt_irq > 0) {
		ret = request_threaded_irq(iommu->evt_irq, NULL,
				iommu->hw_ops->evt_ovfl_int_handler,
				IRQF_ONESHOT | IRQF_SHARED,
				"msm_iommu_pmon_nonsecure_irq", pmon_entry);
		if (ret) {
			pr_err("Request IRQ %d failed with ret=%d\n",
				iommu->evt_irq, ret);
			goto free_mem;
		}
	} else {
		pr_info("%s: Overflow interrupt not available\n", __func__);
	}

	dev_dbg(iommu->iommu_dev, "%s iommu registered\n", iommu->iommu_name);

	goto out;
free_mem:
	if (pmon_entry->cnt_grp) {
		for (i = 0; i < pmon_entry->num_groups; ++i) {
			kfree(pmon_entry->cnt_grp[i].counters);
			pmon_entry->cnt_grp[i].counters = NULL;
		}
	}
	kfree(pmon_entry->cnt_grp);
	pmon_entry->cnt_grp = NULL;
file_err:
	debugfs_remove_recursive(msm_iommu_root_debugfs_dir);
out:
	return ret;
}
EXPORT_SYMBOL(msm_iommu_pm_iommu_register);
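/*
 * Resulting debugfs layout (typically under /sys/kernel/debug):
 *
 *   iommu/
 *     <iommu_name>/
 *       reset_counters
 *       enable_counters
 *       available_event_classes
 *       group<N>/
 *         counter<M>/
 *           value
 *           current_event_class
 */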
void msm_iommu_pm_iommu_unregister(struct device *dev)
{
	int i;
	struct iommu_pmon *pmon_entry = iommu_pm_get_pm_by_dev(dev);

	if (!pmon_entry)
		return;

	/* dev_id must match what was passed to request_threaded_irq() */
	if (pmon_entry->iommu.evt_irq > 0)
		free_irq(pmon_entry->iommu.evt_irq, pmon_entry);

	if (pmon_entry->cnt_grp) {
		for (i = 0; i < pmon_entry->num_groups; ++i)
			kfree(pmon_entry->cnt_grp[i].counters);
	}
	kfree(pmon_entry->cnt_grp);

	debugfs_remove_recursive(msm_iommu_root_debugfs_dir);
}
EXPORT_SYMBOL(msm_iommu_pm_iommu_unregister);
struct iommu_pmon *msm_iommu_pm_alloc(struct device *dev)
{
	struct iommu_pmon *pmon_entry;
	struct iommu_info *info;

	pmon_entry = devm_kzalloc(dev, sizeof(*pmon_entry), GFP_KERNEL);
	if (!pmon_entry)
		return NULL;

	info = &pmon_entry->iommu;
	info->iommu_dev = dev;
	mutex_init(&pmon_entry->lock);
	iommu_pm_add_to_iommu_list(pmon_entry);
	return pmon_entry;
}
EXPORT_SYMBOL(msm_iommu_pm_alloc);

void msm_iommu_pm_free(struct device *dev)
{
	struct iommu_pmon *pmon = iommu_pm_get_pm_by_dev(dev);

	if (pmon)
		iommu_pm_del_from_iommu_list(pmon);
}
EXPORT_SYMBOL(msm_iommu_pm_free);
void msm_iommu_attached(struct device *dev)
{
	struct iommu_pmon *pmon = iommu_pm_get_pm_by_dev(dev);

	if (pmon) {
		mutex_lock(&pmon->lock);
		++pmon->iommu_attach_count;
		if (pmon->iommu_attach_count == 1) {
			/* If perf. mon was enabled before we attached we do
			 * the actual enabling after we attach.
			 */
			if (pmon->enabled && !pmon->iommu.always_on)
				iommu_pm_on(pmon);
		}
		mutex_unlock(&pmon->lock);
	}
}
EXPORT_SYMBOL(msm_iommu_attached);

void msm_iommu_detached(struct device *dev)
{
	struct iommu_pmon *pmon = iommu_pm_get_pm_by_dev(dev);

	if (pmon) {
		mutex_lock(&pmon->lock);
		if (pmon->iommu_attach_count == 1) {
			/* If perf. mon is still enabled we have to disable
			 * before we do the detach if iommu is not always on.
			 */
			if (pmon->enabled && !pmon->iommu.always_on)
				iommu_pm_off(pmon);
		}
		BUG_ON(pmon->iommu_attach_count == 0);
		--pmon->iommu_attach_count;
		mutex_unlock(&pmon->lock);
	}
}
EXPORT_SYMBOL(msm_iommu_detached);
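/*
 * Expected lifecycle from the IOMMU bus driver's point of view (a sketch
 * based on the calls in this file; the struct definitions live in
 * mach/iommu_perfmon.h):
 *
 *   pmon = msm_iommu_pm_alloc(dev);           probe: allocate and add to list
 *   ... fill in pmon->iommu.ops/hw_ops/iommu_name/base/evt_irq,
 *       pmon->num_groups, pmon->num_counters,
 *       pmon->event_cls_supported/nevent_cls_supported ...
 *   msm_iommu_pm_iommu_register(pmon);        create debugfs tree, request IRQ
 *
 *   msm_iommu_attached(dev);                  on domain attach
 *   msm_iommu_detached(dev);                  on domain detach
 *
 *   msm_iommu_pm_iommu_unregister(dev);       remove: free counters and IRQ
 *   msm_iommu_pm_free(dev);                   drop from iommu_list
 */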