core-fsl-emb.c

/*
 * Performance event support - Freescale Embedded Performance Monitor
 *
 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 * Copyright 2010 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/reg_fsl_emb.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>

struct cpu_hw_events {
	int n_events;
	int disabled;
	u8  pmcs_enabled;
	struct perf_event *event[MAX_HWEVENTS];
};
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

static struct fsl_emb_pmu *ppmu;

/* Number of perf_events counting hardware events */
static atomic_t num_events;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);

/*
 * If interrupts were soft-disabled when a PMU interrupt occurs, treat
 * it as an NMI.
 */
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
#ifdef __powerpc64__
	return !regs->softe;
#else
	return 0;
#endif
}

static void perf_event_interrupt(struct pt_regs *regs);

/*
 * Read one performance monitor counter (PMC).
 */
static unsigned long read_pmc(int idx)
{
	unsigned long val;

	switch (idx) {
	case 0:
		val = mfpmr(PMRN_PMC0);
		break;
	case 1:
		val = mfpmr(PMRN_PMC1);
		break;
	case 2:
		val = mfpmr(PMRN_PMC2);
		break;
	case 3:
		val = mfpmr(PMRN_PMC3);
		break;
	default:
		printk(KERN_ERR "oops trying to read PMC%d\n", idx);
		val = 0;
	}
	return val;
}

/*
 * Write one PMC.
 */
static void write_pmc(int idx, unsigned long val)
{
	switch (idx) {
	case 0:
		mtpmr(PMRN_PMC0, val);
		break;
	case 1:
		mtpmr(PMRN_PMC1, val);
		break;
	case 2:
		mtpmr(PMRN_PMC2, val);
		break;
	case 3:
		mtpmr(PMRN_PMC3, val);
		break;
	default:
		printk(KERN_ERR "oops trying to write PMC%d\n", idx);
	}

	isync();
}

/*
 * Write one local control A register
 */
static void write_pmlca(int idx, unsigned long val)
{
	switch (idx) {
	case 0:
		mtpmr(PMRN_PMLCA0, val);
		break;
	case 1:
		mtpmr(PMRN_PMLCA1, val);
		break;
	case 2:
		mtpmr(PMRN_PMLCA2, val);
		break;
	case 3:
		mtpmr(PMRN_PMLCA3, val);
		break;
	default:
		printk(KERN_ERR "oops trying to write PMLCA%d\n", idx);
	}

	isync();
}

/*
 * Write one local control B register
 */
static void write_pmlcb(int idx, unsigned long val)
{
	switch (idx) {
	case 0:
		mtpmr(PMRN_PMLCB0, val);
		break;
	case 1:
		mtpmr(PMRN_PMLCB1, val);
		break;
	case 2:
		mtpmr(PMRN_PMLCB2, val);
		break;
	case 3:
		mtpmr(PMRN_PMLCB3, val);
		break;
	default:
		printk(KERN_ERR "oops trying to write PMLCB%d\n", idx);
	}

	isync();
}

static void fsl_emb_pmu_read(struct perf_event *event)
{
	s64 val, delta, prev;

	if (event->hw.state & PERF_HES_STOPPED)
		return;

	/*
	 * Performance monitor interrupts come even when interrupts
	 * are soft-disabled, as long as interrupts are hard-enabled.
	 * Therefore we treat them like NMIs.
	 */
	do {
		prev = local64_read(&event->hw.prev_count);
		barrier();
		val = read_pmc(event->hw.idx);
	} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);

	/* The counters are only 32 bits wide */
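	/*
	 * The subtraction below is done modulo 2^32, so the delta is
	 * correct even when the counter wraps (e.g. prev = 0xfffffff0
	 * and val = 0x10 give a delta of 0x20).
	 */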
	delta = (val - prev) & 0xfffffffful;
	local64_add(delta, &event->count);
	local64_sub(delta, &event->hw.period_left);
}

/*
 * Disable all events to prevent PMU interrupts and to allow
 * events to be added or removed.
 */
static void fsl_emb_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw;
	unsigned long flags;

	local_irq_save(flags);
	cpuhw = &__get_cpu_var(cpu_hw_events);

	if (!cpuhw->disabled) {
		cpuhw->disabled = 1;

		/*
		 * Check if we ever enabled the PMU on this cpu.
		 */
		if (!cpuhw->pmcs_enabled) {
			ppc_enable_pmcs();
			cpuhw->pmcs_enabled = 1;
		}

		if (atomic_read(&num_events)) {
			/*
			 * Set the 'freeze all counters' bit, and disable
			 * interrupts.  The barrier is to make sure the
			 * mtpmr has been executed and the PMU has frozen
			 * the events before we return.
			 */
			mtpmr(PMRN_PMGC0, PMGC0_FAC);
			isync();
		}
	}
	local_irq_restore(flags);
}

/*
 * Re-enable all events if disable == 0.
 * If we were previously disabled and events were added, then
 * put the new config on the PMU.
 */
static void fsl_emb_pmu_enable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw;
	unsigned long flags;

	local_irq_save(flags);
	cpuhw = &__get_cpu_var(cpu_hw_events);
	if (!cpuhw->disabled)
		goto out;

	cpuhw->disabled = 0;
	ppc_set_pmu_inuse(cpuhw->n_events != 0);

	if (cpuhw->n_events > 0) {
		mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE);
		isync();
	}

 out:
	local_irq_restore(flags);
}

static int collect_events(struct perf_event *group, int max_count,
			  struct perf_event *ctrs[])
{
	int n = 0;
	struct perf_event *event;

	if (!is_software_event(group)) {
		if (n >= max_count)
			return -1;
		ctrs[n] = group;
		n++;
	}
	list_for_each_entry(event, &group->sibling_list, group_entry) {
		if (!is_software_event(event) &&
		    event->state != PERF_EVENT_STATE_OFF) {
			if (n >= max_count)
				return -1;
			ctrs[n] = event;
			n++;
		}
	}
	return n;
}

/* context locked on entry */
static int fsl_emb_pmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuhw;
	int ret = -EAGAIN;
	int num_counters = ppmu->n_counter;
	u64 val;
	int i;

	perf_pmu_disable(event->pmu);
	cpuhw = &get_cpu_var(cpu_hw_events);

	if (event->hw.config & FSL_EMB_EVENT_RESTRICTED)
		num_counters = ppmu->n_restricted;

	/*
	 * Allocate counters from top-down, so that restricted-capable
	 * counters are kept free as long as possible.
	 */
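	/*
	 * For example, on a back-end with four counters of which the
	 * first two are restricted-capable (illustrative numbers only),
	 * ordinary events fill counters 3, 2, 1, 0 in that order, while
	 * restricted events may only use counters 1 and 0.
	 */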
	for (i = num_counters - 1; i >= 0; i--) {
		if (cpuhw->event[i])
			continue;

		break;
	}

	if (i < 0)
		goto out;

	event->hw.idx = i;
	cpuhw->event[i] = event;
	++cpuhw->n_events;

	val = 0;
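	/*
	 * The overflow interrupt is taken once a counter's most-significant
	 * bit becomes set (see the (int)val < 0 test in
	 * perf_event_interrupt()), so priming the counter with
	 * 0x80000000 - left makes it fire after "left" more events.
	 */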
	if (event->hw.sample_period) {
		s64 left = local64_read(&event->hw.period_left);
		if (left < 0x80000000L)
			val = 0x80000000L - left;
	}
	local64_set(&event->hw.prev_count, val);

	if (!(flags & PERF_EF_START)) {
		event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
		val = 0;
	}

	write_pmc(i, val);
	perf_event_update_userpage(event);

	write_pmlcb(i, event->hw.config >> 32);
	write_pmlca(i, event->hw.config_base);

	ret = 0;
 out:
	put_cpu_var(cpu_hw_events);
	perf_pmu_enable(event->pmu);
	return ret;
}

/* context locked on entry */
static void fsl_emb_pmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuhw;
	int i = event->hw.idx;

	perf_pmu_disable(event->pmu);
	if (i < 0)
		goto out;

	fsl_emb_pmu_read(event);

	cpuhw = &get_cpu_var(cpu_hw_events);

	WARN_ON(event != cpuhw->event[event->hw.idx]);

	write_pmlca(i, 0);
	write_pmlcb(i, 0);
	write_pmc(i, 0);

	cpuhw->event[i] = NULL;
	event->hw.idx = -1;

	/*
	 * TODO: if at least one restricted event exists, and we
	 * just freed up a non-restricted-capable counter, and
	 * there is a restricted-capable counter occupied by
	 * a non-restricted event, migrate that event to the
	 * vacated counter.
	 */

	cpuhw->n_events--;

 out:
	perf_pmu_enable(event->pmu);
	put_cpu_var(cpu_hw_events);
}

static void fsl_emb_pmu_start(struct perf_event *event, int ef_flags)
{
	unsigned long flags;
	s64 left;

	if (event->hw.idx < 0 || !event->hw.sample_period)
		return;

	if (!(event->hw.state & PERF_HES_STOPPED))
		return;

	if (ef_flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	event->hw.state = 0;
	left = local64_read(&event->hw.period_left);
	write_pmc(event->hw.idx, left);

	perf_event_update_userpage(event);
	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}

static void fsl_emb_pmu_stop(struct perf_event *event, int ef_flags)
{
	unsigned long flags;

	if (event->hw.idx < 0 || !event->hw.sample_period)
		return;

	if (event->hw.state & PERF_HES_STOPPED)
		return;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	fsl_emb_pmu_read(event);
	event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	write_pmc(event->hw.idx, 0);

	perf_event_update_userpage(event);
	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}

/*
 * Release the PMU if this is the last perf_event.
 */
static void hw_perf_event_destroy(struct perf_event *event)
{
	if (!atomic_add_unless(&num_events, -1, 1)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_dec_return(&num_events) == 0)
			release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

/*
 * Translate a generic cache event_id config to a raw event_id code.
 */
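/*
 * The config value follows the usual perf cache-event encoding: cache id
 * in bits 0-7, operation in bits 8-15, result in bits 16-23.  For example,
 * L1-dcache read misses are encoded as
 * (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) |
 * (PERF_COUNT_HW_CACHE_OP_READ << 8) | PERF_COUNT_HW_CACHE_L1D.
 */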
static int hw_perf_cache_event(u64 config, u64 *eventp)
{
	unsigned long type, op, result;
	int ev;

	if (!ppmu->cache_events)
		return -EINVAL;

	/* unpack config */
	type = config & 0xff;
	op = (config >> 8) & 0xff;
	result = (config >> 16) & 0xff;

	if (type >= PERF_COUNT_HW_CACHE_MAX ||
	    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
	    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ev = (*ppmu->cache_events)[type][op][result];
	if (ev == 0)
		return -EOPNOTSUPP;
	if (ev == -1)
		return -EINVAL;
	*eventp = ev;
	return 0;
}

static int fsl_emb_pmu_event_init(struct perf_event *event)
{
	u64 ev;
	struct perf_event *events[MAX_HWEVENTS];
	int n;
	int err;
	int num_restricted;
	int i;

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		ev = event->attr.config;
		if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
			return -EOPNOTSUPP;
		ev = ppmu->generic_events[ev];
		break;

	case PERF_TYPE_HW_CACHE:
		err = hw_perf_cache_event(event->attr.config, &ev);
		if (err)
			return err;
		break;

	case PERF_TYPE_RAW:
		ev = event->attr.config;
		break;

	default:
		return -ENOENT;
	}

	event->hw.config = ppmu->xlate_event(ev);
	if (!(event->hw.config & FSL_EMB_EVENT_VALID))
		return -EINVAL;

	/*
	 * If this is in a group, check if it can go on with all the
	 * other hardware events in the group.  We assume the event
	 * hasn't been linked into its leader's sibling list at this point.
	 */
	n = 0;
	if (event->group_leader != event) {
		n = collect_events(event->group_leader,
				   ppmu->n_counter - 1, events);
		if (n < 0)
			return -EINVAL;
	}

	if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) {
		num_restricted = 0;
		for (i = 0; i < n; i++) {
			if (events[i]->hw.config & FSL_EMB_EVENT_RESTRICTED)
				num_restricted++;
		}

		if (num_restricted >= ppmu->n_restricted)
			return -EINVAL;
	}

	event->hw.idx = -1;

	event->hw.config_base = PMLCA_CE | PMLCA_FCM1 |
		(u32)((ev << 16) & PMLCA_EVENT_MASK);

	if (event->attr.exclude_user)
		event->hw.config_base |= PMLCA_FCU;
	if (event->attr.exclude_kernel)
		event->hw.config_base |= PMLCA_FCS;
	if (event->attr.exclude_idle)
		return -ENOTSUPP;

	event->hw.last_period = event->hw.sample_period;
	local64_set(&event->hw.period_left, event->hw.last_period);

	/*
	 * See if we need to reserve the PMU.
	 * If no events are currently in use, then we have to take a
	 * mutex to ensure that we don't race with another task doing
	 * reserve_pmc_hardware or release_pmc_hardware.
	 */
	err = 0;
	if (!atomic_inc_not_zero(&num_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&num_events) == 0 &&
		    reserve_pmc_hardware(perf_event_interrupt))
			err = -EBUSY;
		else
			atomic_inc(&num_events);
		mutex_unlock(&pmc_reserve_mutex);

		mtpmr(PMRN_PMGC0, PMGC0_FAC);
		isync();
	}

	event->destroy = hw_perf_event_destroy;

	return err;
}

static struct pmu fsl_emb_pmu = {
	.pmu_enable	= fsl_emb_pmu_enable,
	.pmu_disable	= fsl_emb_pmu_disable,
	.event_init	= fsl_emb_pmu_event_init,
	.add		= fsl_emb_pmu_add,
	.del		= fsl_emb_pmu_del,
	.start		= fsl_emb_pmu_start,
	.stop		= fsl_emb_pmu_stop,
	.read		= fsl_emb_pmu_read,
};

/*
 * A counter has overflowed; update its count and record
 * things if requested.  Note that interrupts are hard-disabled
 * here so there is no possibility of being interrupted.
 */
static void record_and_restart(struct perf_event *event, unsigned long val,
			       struct pt_regs *regs)
{
	u64 period = event->hw.sample_period;
	s64 prev, delta, left;
	int record = 0;

	if (event->hw.state & PERF_HES_STOPPED) {
		write_pmc(event->hw.idx, 0);
		return;
	}

	/* we don't have to worry about interrupts here */
	prev = local64_read(&event->hw.prev_count);
	delta = (val - prev) & 0xfffffffful;
	local64_add(delta, &event->count);

	/*
	 * See if the total period for this event has expired,
	 * and update for the next period.
	 */
	val = 0;
	left = local64_read(&event->hw.period_left) - delta;
	if (period) {
		if (left <= 0) {
			left += period;
			if (left <= 0)
				left = period;
			record = 1;
			event->hw.last_period = event->hw.sample_period;
		}
		if (left < 0x80000000LL)
			val = 0x80000000LL - left;
	}

	write_pmc(event->hw.idx, val);
	local64_set(&event->hw.prev_count, val);
	local64_set(&event->hw.period_left, left);
	perf_event_update_userpage(event);

	/*
	 * Finally record data if requested.
	 */
	if (record) {
		struct perf_sample_data data;

		perf_sample_data_init(&data, 0);
		data.period = event->hw.last_period;

		if (perf_event_overflow(event, &data, regs))
			fsl_emb_pmu_stop(event, 0);
	}
}

static void perf_event_interrupt(struct pt_regs *regs)
{
	int i;
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
	struct perf_event *event;
	unsigned long val;
	int found = 0;
	int nmi;

	nmi = perf_intr_is_nmi(regs);
	if (nmi)
		nmi_enter();
	else
		irq_enter();

	for (i = 0; i < ppmu->n_counter; ++i) {
		event = cpuhw->event[i];

		val = read_pmc(i);
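		/*
		 * A counter's overflow condition is its most-significant
		 * bit being set, which shows up here as a negative value
		 * when the 32-bit count is viewed as signed.
		 */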
		if ((int)val < 0) {
			if (event) {
				/* event has overflowed */
				found = 1;
				record_and_restart(event, val, regs);
			} else {
				/*
				 * Disabled counter is negative,
				 * reset it just in case.
				 */
				write_pmc(i, 0);
			}
		}
	}

	/* PMM will keep counters frozen until we return from the interrupt. */
	mtmsr(mfmsr() | MSR_PMM);
	mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE);
	isync();

	if (nmi)
		nmi_exit();
	else
		irq_exit();
}

void hw_perf_event_setup(int cpu)
{
	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);

	memset(cpuhw, 0, sizeof(*cpuhw));
}

int register_fsl_emb_pmu(struct fsl_emb_pmu *pmu)
{
	if (ppmu)
		return -EBUSY;		/* something's already registered */

	ppmu = pmu;
	pr_info("%s performance monitor hardware support registered\n",
		pmu->name);

	perf_pmu_register(&fsl_emb_pmu, "cpu", PERF_TYPE_RAW);

	return 0;
}
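
/*
 * Registration sketch (illustrative only, not compiled here): a
 * chip-specific back-end fills in a struct fsl_emb_pmu and hands it to
 * register_fsl_emb_pmu() at boot.  The names and event numbers below are
 * hypothetical; see the e500 back-end for a real example.
 *
 *	static u64 example_xlate_event(u64 event_id)
 *	{
 *		// Validate event_id against the chip's event list here,
 *		// setting FSL_EMB_EVENT_RESTRICTED where appropriate.
 *		return event_id | FSL_EMB_EVENT_VALID;
 *	}
 *
 *	static int example_generic_events[] = {
 *		[PERF_COUNT_HW_CPU_CYCLES]	= 1,
 *		[PERF_COUNT_HW_INSTRUCTIONS]	= 2,
 *	};
 *
 *	static struct fsl_emb_pmu example_pmu = {
 *		.name		= "example",
 *		.n_counter	= 4,
 *		.n_restricted	= 2,
 *		.xlate_event	= example_xlate_event,
 *		.n_generic	= ARRAY_SIZE(example_generic_events),
 *		.generic_events	= example_generic_events,
 *	};
 *
 *	static int __init init_example_pmu(void)
 *	{
 *		return register_fsl_emb_pmu(&example_pmu);
 *	}
 *	early_initcall(init_example_pmu);
 */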