arm_pmu.c

#undef DEBUG

/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/of_device.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

#include <asm/cputype.h>
#include <asm/irq_regs.h>
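
/*
 * Map a PERF_TYPE_HW_CACHE config onto the PMU-specific event number.
 * The generic config packs the cache type, operation and result into the
 * low three bytes (bits 0-7, 8-15 and 16-23 respectively).
 */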
static int
armpmu_map_cache_event(const unsigned (*cache_map)
				      [PERF_COUNT_HW_CACHE_MAX]
				      [PERF_COUNT_HW_CACHE_OP_MAX]
				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
		       u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}

static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
	int mapping;

	if (config >= PERF_COUNT_HW_MAX)
		return -EINVAL;

	mapping = (*event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
	return (int)(config & raw_event_mask);
}

int
armpmu_map_event(struct perf_event *event,
		 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		 const unsigned (*cache_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX],
		 u32 raw_event_mask)
{
	u64 config = event->attr.config;
	int type = event->attr.type;

	if (type == event->pmu->type)
		return armpmu_map_raw_event(raw_event_mask, config);

	switch (type) {
	case PERF_TYPE_HARDWARE:
		return armpmu_map_hw_event(event_map, config);
	case PERF_TYPE_HW_CACHE:
		return armpmu_map_cache_event(cache_map, config);
	case PERF_TYPE_RAW:
		return armpmu_map_raw_event(raw_event_mask, config);
	}

	return -ENOENT;
}
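
/*
 * Program the counter so that it overflows after the remaining sample
 * period: write the two's complement of the period left, capped to half
 * of max_period to leave headroom for interrupt latency.
 */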
int armpmu_event_set_period(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	/*
	 * Limit the maximum period to prevent the counter value
	 * from overtaking the one we are about to program. In
	 * effect we are reducing max_period to account for
	 * interrupt latency (and we are being very conservative).
	 */
	if (left > (armpmu->max_period >> 1))
		left = armpmu->max_period >> 1;

	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(event, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}
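
/*
 * Read the counter and fold the delta since the last read into the event
 * count. The cmpxchg loop guards against a concurrent update of
 * hwc->prev_count (e.g. from the overflow interrupt handler).
 */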
u64 armpmu_event_update(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static void
armpmu_read(struct perf_event *event)
{
	armpmu_event_update(event);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(event);
		armpmu_event_update(event);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static void armpmu_start(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;
	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were stopped we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event);
	armpmu->enable(event);
}

static void
armpmu_del(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	armpmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	clear_bit(idx, hw_events->used_mask);
	if (armpmu->clear_event_idx)
		armpmu->clear_event_idx(hw_events, event);

	perf_event_update_userpage(event);
}

static int
armpmu_add(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	/* An event following a process won't be stopped earlier */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return -ENOENT;

	perf_pmu_disable(event->pmu);

	/* If we don't have a space for the counter then finish early. */
	idx = armpmu->get_event_idx(hw_events, event);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(event);
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}

static int
validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
	       struct perf_event *event)
{
	struct arm_pmu *armpmu;

	if (is_software_event(event))
		return 1;

	/*
	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
	 * core perf code won't check that the pmu->ctx == leader->ctx
	 * until after pmu->event_init(event).
	 */
	if (event->pmu != pmu)
		return 0;

	if (event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
		return 1;

	armpmu = to_arm_pmu(event->pmu);
	return armpmu->get_event_idx(hw_events, event) >= 0;
}
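
/*
 * Check that the whole group can be scheduled at once by simulating the
 * counter allocation on a fake PMU that only tracks the used_mask.
 */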
static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct pmu_hw_events fake_pmu;

	/*
	 * Initialise the fake PMU. We only need to populate the
	 * used_mask for the purposes of validation.
	 */
	memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));

	if (!validate_event(event->pmu, &fake_pmu, leader))
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(event->pmu, &fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(event->pmu, &fake_pmu, event))
		return -EINVAL;

	return 0;
}

static struct arm_pmu_platdata *armpmu_get_platdata(struct arm_pmu *armpmu)
{
	struct platform_device *pdev = armpmu->plat_device;

	return pdev ? dev_get_platdata(&pdev->dev) : NULL;
}

static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
	struct arm_pmu *armpmu;
	struct arm_pmu_platdata *plat;
	int ret;
	u64 start_clock, finish_clock;

	/*
	 * we request the IRQ with a (possibly percpu) struct arm_pmu**, but
	 * the handlers expect a struct arm_pmu*. The percpu_irq framework will
	 * do any necessary shifting, we just need to perform the first
	 * dereference.
	 */
	armpmu = *(void **)dev;
	plat = armpmu_get_platdata(armpmu);

	start_clock = sched_clock();
	if (plat && plat->handle_irq)
		ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq);
	else
		ret = armpmu->handle_irq(irq, armpmu);
	finish_clock = sched_clock();

	perf_sample_event_took(finish_clock - start_clock);
	return ret;
}

static void
armpmu_release_hardware(struct arm_pmu *armpmu)
{
	armpmu->free_irq(armpmu);
}

static int
armpmu_reserve_hardware(struct arm_pmu *armpmu)
{
	int err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);

	if (err) {
		armpmu_release_hardware(armpmu);
		return err;
	}

	return 0;
}

static void
hw_perf_event_destroy(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	atomic_t *active_events = &armpmu->active_events;
	struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;

	if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
		armpmu_release_hardware(armpmu);
		mutex_unlock(pmu_reserve_mutex);
	}
}

static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
	return attr->exclude_idle || attr->exclude_user ||
	       attr->exclude_kernel || attr->exclude_hv;
}

static int
__hw_perf_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int mapping;

	mapping = armpmu->map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx = -1;
	hwc->config_base = 0;
	hwc->config = 0;
	hwc->event_base = 0;

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 */
	if ((!armpmu->set_event_filter ||
	     armpmu->set_event_filter(hwc, &event->attr)) &&
	     event_requires_mode_exclusion(&event->attr)) {
		pr_debug("ARM performance counters do not support "
			 "mode exclusion\n");
		return -EOPNOTSUPP;
	}

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base |= (unsigned long)mapping;

	if (!is_sampling_event(event)) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		hwc->sample_period = armpmu->max_period >> 1;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	if (event->group_leader != event) {
		if (validate_group(event) != 0)
			return -EINVAL;
	}

	return 0;
}

static int armpmu_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	int err = 0;
	atomic_t *active_events = &armpmu->active_events;

	/*
	 * Reject CPU-affine events for CPUs that are of a different class to
	 * that which this PMU handles. Process-following events (where
	 * event->cpu == -1) can be migrated between CPUs, and thus we have to
	 * reject them later (in armpmu_add) if they're scheduled on a
	 * different class of CPU.
	 */
	if (event->cpu != -1 &&
		!cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
		return -ENOENT;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (armpmu->map_event(event) == -ENOENT)
		return -ENOENT;

	event->destroy = hw_perf_event_destroy;

	if (!atomic_inc_not_zero(active_events)) {
		mutex_lock(&armpmu->reserve_mutex);
		if (atomic_read(active_events) == 0)
			err = armpmu_reserve_hardware(armpmu);

		if (!err)
			atomic_inc(active_events);
		mutex_unlock(&armpmu->reserve_mutex);
	}

	if (err)
		return err;

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err;
}

static void armpmu_enable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	if (enabled)
		armpmu->start(armpmu);
}

static void armpmu_disable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	armpmu->stop(armpmu);
}

/*
 * In heterogeneous systems, events are specific to a particular
 * microarchitecture, and aren't suitable for another. Thus, only match CPUs of
 * the same microarchitecture.
 */
static int armpmu_filter_match(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	unsigned int cpu = smp_processor_id();
	return cpumask_test_cpu(cpu, &armpmu->supported_cpus);
}

static ssize_t armpmu_cpumask_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct arm_pmu *armpmu = to_arm_pmu(dev_get_drvdata(dev));
	return cpumap_print_to_pagebuf(true, buf, &armpmu->supported_cpus);
}

static DEVICE_ATTR(cpus, S_IRUGO, armpmu_cpumask_show, NULL);

static struct attribute *armpmu_common_attrs[] = {
	&dev_attr_cpus.attr,
	NULL,
};

static struct attribute_group armpmu_common_attr_group = {
	.attrs = armpmu_common_attrs,
};

static void armpmu_init(struct arm_pmu *armpmu)
{
	atomic_set(&armpmu->active_events, 0);
	mutex_init(&armpmu->reserve_mutex);

	armpmu->pmu = (struct pmu) {
		.pmu_enable	= armpmu_enable,
		.pmu_disable	= armpmu_disable,
		.event_init	= armpmu_event_init,
		.add		= armpmu_add,
		.del		= armpmu_del,
		.start		= armpmu_start,
		.stop		= armpmu_stop,
		.read		= armpmu_read,
		.filter_match	= armpmu_filter_match,
		.attr_groups	= armpmu->attr_groups,
	};

	armpmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =
		&armpmu_common_attr_group;
}

/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *__oprofile_cpu_pmu;

/*
 * Despite the names, these two functions are CPU-specific and are used
 * by the OProfile/perf code.
 */
const char *perf_pmu_name(void)
{
	if (!__oprofile_cpu_pmu)
		return NULL;

	return __oprofile_cpu_pmu->name;
}
EXPORT_SYMBOL_GPL(perf_pmu_name);

int perf_num_counters(void)
{
	int max_events = 0;

	if (__oprofile_cpu_pmu != NULL)
		max_events = __oprofile_cpu_pmu->num_events;

	return max_events;
}
EXPORT_SYMBOL_GPL(perf_num_counters);

static void cpu_pmu_enable_percpu_irq(void *data)
{
	int irq = *(int *)data;

	enable_percpu_irq(irq, IRQ_TYPE_NONE);
}

static void cpu_pmu_disable_percpu_irq(void *data)
{
	int irq = *(int *)data;

	disable_percpu_irq(irq);
}
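
/*
 * Release the PMU interrupts: a single per-CPU IRQ (PPI) is disabled on
 * every supported CPU and freed with free_percpu_irq(), otherwise each
 * per-CPU SPI is freed individually.
 */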
static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
{
	int i, irq, irqs;
	struct platform_device *pmu_device = cpu_pmu->plat_device;
	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;

	irqs = min(pmu_device->num_resources, num_possible_cpus());

	irq = platform_get_irq(pmu_device, 0);
	if (irq > 0 && irq_is_percpu(irq)) {
		on_each_cpu_mask(&cpu_pmu->supported_cpus,
				 cpu_pmu_disable_percpu_irq, &irq, 1);
		free_percpu_irq(irq, &hw_events->percpu_pmu);
	} else {
		for (i = 0; i < irqs; ++i) {
			int cpu = i;

			if (cpu_pmu->irq_affinity)
				cpu = cpu_pmu->irq_affinity[i];

			if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
				continue;
			irq = platform_get_irq(pmu_device, i);
			if (irq > 0)
				free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
		}
	}
}
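
/*
 * Request the PMU interrupts: a per-CPU IRQ (PPI) is requested once and
 * enabled on every supported CPU, otherwise one SPI is requested per CPU
 * and routed to it via its IRQ affinity.
 */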
static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
{
	int i, err, irq, irqs;
	struct platform_device *pmu_device = cpu_pmu->plat_device;
	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;

	if (!pmu_device)
		return -ENODEV;

	irqs = min(pmu_device->num_resources, num_possible_cpus());
	if (irqs < 1) {
		pr_warn_once("perf/ARM: No irqs for PMU defined, sampling events not supported\n");
		return 0;
	}

	irq = platform_get_irq(pmu_device, 0);
	if (irq > 0 && irq_is_percpu(irq)) {
		err = request_percpu_irq(irq, handler, "arm-pmu",
					 &hw_events->percpu_pmu);
		if (err) {
			pr_err("unable to request IRQ%d for ARM PMU counters\n",
			       irq);
			return err;
		}

		on_each_cpu_mask(&cpu_pmu->supported_cpus,
				 cpu_pmu_enable_percpu_irq, &irq, 1);
	} else {
		for (i = 0; i < irqs; ++i) {
			int cpu = i;

			err = 0;
			irq = platform_get_irq(pmu_device, i);
			if (irq < 0)
				continue;

			if (cpu_pmu->irq_affinity)
				cpu = cpu_pmu->irq_affinity[i];

			/*
			 * If we have a single PMU interrupt that we can't shift,
			 * assume that we're running on a uniprocessor machine and
			 * continue. Otherwise, continue without this interrupt.
			 */
			if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
				pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
					irq, cpu);
				continue;
			}

			err = request_irq(irq, handler,
					  IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
					  per_cpu_ptr(&hw_events->percpu_pmu, cpu));
			if (err) {
				pr_err("unable to request IRQ%d for ARM PMU counters\n",
				       irq);
				return err;
			}

			cpumask_set_cpu(cpu, &cpu_pmu->active_irqs);
		}
	}

	return 0;
}

/*
 * PMU hardware loses all context when a CPU goes offline.
 * When a CPU is hotplugged back in, since some hardware registers are
 * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
 * junk values out of them.
 */
static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);

	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
		return 0;
	if (pmu->reset)
		pmu->reset(pmu);
	return 0;
}

#ifdef CONFIG_CPU_PM
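/*
 * Stop (CPU_PM_ENTER) or restart (CPU_PM_EXIT/CPU_PM_ENTER_FAILED) every
 * counter currently in use on this CPU, updating the event counts so that
 * nothing is lost across the power transition.
 */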
static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
{
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct perf_event *event;
	int idx;

	for (idx = 0; idx < armpmu->num_events; idx++) {
		/*
		 * If the counter is not used skip it, there is no
		 * need of stopping/restarting it.
		 */
		if (!test_bit(idx, hw_events->used_mask))
			continue;

		event = hw_events->events[idx];

		switch (cmd) {
		case CPU_PM_ENTER:
			/*
			 * Stop and update the counter
			 */
			armpmu_stop(event, PERF_EF_UPDATE);
			break;
		case CPU_PM_EXIT:
		case CPU_PM_ENTER_FAILED:
			/*
			 * Restore and enable the counter.
			 * armpmu_start() indirectly calls
			 *
			 * perf_event_update_userpage()
			 *
			 * that requires RCU read locking to be functional,
			 * wrap the call within RCU_NONIDLE to make the
			 * RCU subsystem aware this cpu is not idle from
			 * an RCU perspective for the armpmu_start() call
			 * duration.
			 */
			RCU_NONIDLE(armpmu_start(event, PERF_EF_RELOAD));
			break;
		default:
			break;
		}
	}
}

static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
			     void *v)
{
	struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return NOTIFY_DONE;

	/*
	 * Always reset the PMU registers on power-up even if
	 * there are no events running.
	 */
	if (cmd == CPU_PM_EXIT && armpmu->reset)
		armpmu->reset(armpmu);

	if (!enabled)
		return NOTIFY_OK;

	switch (cmd) {
	case CPU_PM_ENTER:
		armpmu->stop(armpmu);
		cpu_pm_pmu_setup(armpmu, cmd);
		break;
	case CPU_PM_EXIT:
		cpu_pm_pmu_setup(armpmu, cmd);
	case CPU_PM_ENTER_FAILED:
		armpmu->start(armpmu);
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->cpu_pm_nb.notifier_call = cpu_pm_pmu_notify;
	return cpu_pm_register_notifier(&cpu_pmu->cpu_pm_nb);
}

static void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu)
{
	cpu_pm_unregister_notifier(&cpu_pmu->cpu_pm_nb);
}
#else
static inline int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu) { return 0; }
static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { }
#endif
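
/*
 * Allocate the per-CPU hardware event state and hook the PMU up to the
 * CPU hotplug and CPU PM machinery before it is registered with perf.
 */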
static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
	int err;
	int cpu;
	struct pmu_hw_events __percpu *cpu_hw_events;

	cpu_hw_events = alloc_percpu(struct pmu_hw_events);
	if (!cpu_hw_events)
		return -ENOMEM;

	err = cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
					       &cpu_pmu->node);
	if (err)
		goto out_free;

	err = cpu_pm_pmu_register(cpu_pmu);
	if (err)
		goto out_unregister;

	for_each_possible_cpu(cpu) {
		struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu);
		raw_spin_lock_init(&events->pmu_lock);
		events->percpu_pmu = cpu_pmu;
	}

	cpu_pmu->hw_events = cpu_hw_events;
	cpu_pmu->request_irq = cpu_pmu_request_irq;
	cpu_pmu->free_irq = cpu_pmu_free_irq;

	/* Ensure the PMU has sane values out of reset. */
	if (cpu_pmu->reset)
		on_each_cpu_mask(&cpu_pmu->supported_cpus, cpu_pmu->reset,
				 cpu_pmu, 1);

	/* If no interrupts available, set the corresponding capability flag */
	if (!platform_get_irq(cpu_pmu->plat_device, 0))
		cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

	/*
	 * This is a CPU PMU potentially in a heterogeneous configuration (e.g.
	 * big.LITTLE). This is not an uncore PMU, and we have taken ctx
	 * sharing into account (e.g. with our pmu::filter_match callback and
	 * pmu::event_init group validation).
	 */
	cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_HETEROGENEOUS_CPUS;

	return 0;

out_unregister:
	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
					    &cpu_pmu->node);
out_free:
	free_percpu(cpu_hw_events);
	return err;
}

static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
{
	cpu_pm_pmu_unregister(cpu_pmu);
	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
					    &cpu_pmu->node);
	free_percpu(cpu_pmu->hw_events);
}

/*
 * CPU PMU identification and probing.
 */
static int probe_current_pmu(struct arm_pmu *pmu,
			     const struct pmu_probe_info *info)
{
	int cpu = get_cpu();
	unsigned int cpuid = read_cpuid_id();
	int ret = -ENODEV;

	pr_info("probing PMU on CPU %d\n", cpu);

	for (; info->init != NULL; info++) {
		if ((cpuid & info->mask) != info->cpuid)
			continue;
		ret = info->init(pmu);
		break;
	}

	put_cpu();
	return ret;
}
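
/*
 * Parse the optional "interrupt-affinity" property from the device tree to
 * work out which CPU each PMU interrupt targets, and record the set of
 * CPUs this PMU instance supports.
 */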
static int of_pmu_irq_cfg(struct arm_pmu *pmu)
{
	int *irqs, i = 0;
	bool using_spi = false;
	struct platform_device *pdev = pmu->plat_device;

	irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
	if (!irqs)
		return -ENOMEM;

	do {
		struct device_node *dn;
		int cpu, irq;

		/* See if we have an affinity entry */
		dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity", i);
		if (!dn)
			break;

		/* Check the IRQ type and prohibit a mix of PPIs and SPIs */
		irq = platform_get_irq(pdev, i);
		if (irq > 0) {
			bool spi = !irq_is_percpu(irq);

			if (i > 0 && spi != using_spi) {
				pr_err("PPI/SPI IRQ type mismatch for %s!\n",
					dn->name);
				of_node_put(dn);
				kfree(irqs);
				return -EINVAL;
			}

			using_spi = spi;
		}

		/* Now look up the logical CPU number */
		for_each_possible_cpu(cpu) {
			struct device_node *cpu_dn;

			cpu_dn = of_cpu_device_node_get(cpu);
			of_node_put(cpu_dn);

			if (dn == cpu_dn)
				break;
		}

		if (cpu >= nr_cpu_ids) {
			pr_warn("Failed to find logical CPU for %s\n",
				dn->name);
			of_node_put(dn);
			cpumask_setall(&pmu->supported_cpus);
			break;
		}
		of_node_put(dn);

		/* For SPIs, we need to track the affinity per IRQ */
		if (using_spi) {
			if (i >= pdev->num_resources)
				break;

			irqs[i] = cpu;
		}

		/* Keep track of the CPUs containing this PMU type */
		cpumask_set_cpu(cpu, &pmu->supported_cpus);
		i++;
	} while (1);

	/* If we didn't manage to parse anything, try the interrupt affinity */
	if (cpumask_weight(&pmu->supported_cpus) == 0) {
		int irq = platform_get_irq(pdev, 0);

		if (irq > 0 && irq_is_percpu(irq)) {
			/* If using PPIs, check the affinity of the partition */
			int ret;

			ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus);
			if (ret) {
				kfree(irqs);
				return ret;
			}
		} else {
			/* Otherwise default to all CPUs */
			cpumask_setall(&pmu->supported_cpus);
		}
	}

	/* If we matched up the IRQ affinities, use them to route the SPIs */
	if (using_spi && i == pdev->num_resources)
		pmu->irq_affinity = irqs;
	else
		kfree(irqs);

	return 0;
}
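
/*
 * Common probe routine for CPU PMU drivers: allocate and initialise the
 * arm_pmu, work out the supported CPUs and IRQs (from the device tree or
 * by probing the current CPU), then register the PMU with the perf core.
 */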
int arm_pmu_device_probe(struct platform_device *pdev,
			 const struct of_device_id *of_table,
			 const struct pmu_probe_info *probe_table)
{
	const struct of_device_id *of_id;
	const int (*init_fn)(struct arm_pmu *);
	struct device_node *node = pdev->dev.of_node;
	struct arm_pmu *pmu;
	int ret = -ENODEV;

	pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL);
	if (!pmu) {
		pr_info("failed to allocate PMU device!\n");
		return -ENOMEM;
	}

	armpmu_init(pmu);

	pmu->plat_device = pdev;

	if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
		init_fn = of_id->data;

		pmu->secure_access = of_property_read_bool(pdev->dev.of_node,
							   "secure-reg-access");

		/* arm64 systems boot only as non-secure */
		if (IS_ENABLED(CONFIG_ARM64) && pmu->secure_access) {
			pr_warn("ignoring \"secure-reg-access\" property for arm64\n");
			pmu->secure_access = false;
		}

		ret = of_pmu_irq_cfg(pmu);
		if (!ret)
			ret = init_fn(pmu);
	} else if (probe_table) {
		cpumask_setall(&pmu->supported_cpus);
		ret = probe_current_pmu(pmu, probe_table);
	}

	if (ret) {
		pr_info("%s: failed to probe PMU!\n", of_node_full_name(node));
		goto out_free;
	}

	ret = cpu_pmu_init(pmu);
	if (ret)
		goto out_free;

	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
	if (ret)
		goto out_destroy;

	if (!__oprofile_cpu_pmu)
		__oprofile_cpu_pmu = pmu;

	pr_info("enabled with %s PMU driver, %d counters available\n",
		pmu->name, pmu->num_events);

	return 0;

out_destroy:
	cpu_pmu_destroy(pmu);
out_free:
	pr_info("%s: failed to register PMU devices!\n",
		of_node_full_name(node));
	kfree(pmu->irq_affinity);
	kfree(pmu);
	return ret;
}

static int arm_pmu_hp_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING,
				      "AP_PERF_ARM_STARTING",
				      arm_perf_starting_cpu, NULL);
	if (ret)
		pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",
		       ret);
	return ret;
}
subsys_initcall(arm_pmu_hp_init);