perf_event.c

/*
 * Copyright 2014 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * Perf_events support for Tile processor.
 *
 * This code is based upon the x86 perf event
 * code, which is:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *  Copyright (C) 2009 Google, Inc., Stephane Eranian
 */

#include <linux/kprobes.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>
#include <linux/bitmap.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/perf_event.h>
#include <linux/atomic.h>
#include <asm/traps.h>
#include <asm/stack.h>
#include <asm/pmc.h>
#include <hv/hypervisor.h>

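/*
 * Each tile provides two base counters (SPR_PERF_COUNT_0/1) and two
 * auxiliary counters (SPR_AUX_PERF_COUNT_0/1), four counters in total.
 * The indices below select among them.
 */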
#define TILE_MAX_COUNTERS	4

#define PERF_COUNT_0_IDX	0
#define PERF_COUNT_1_IDX	1
#define AUX_PERF_COUNT_0_IDX	2
#define AUX_PERF_COUNT_1_IDX	3

struct cpu_hw_events {
        int n_events;
        struct perf_event *events[TILE_MAX_COUNTERS]; /* counter order */
        struct perf_event *event_list[TILE_MAX_COUNTERS]; /* enabled order */
        int assign[TILE_MAX_COUNTERS];
        unsigned long active_mask[BITS_TO_LONGS(TILE_MAX_COUNTERS)];
        unsigned long used_mask;
};

/* TILE arch specific performance monitor unit */
struct tile_pmu {
        const char *name;
        int version;
        const int *hw_events;	/* generic hw events table */
        /* generic hw cache events table */
        const int (*cache_events)[PERF_COUNT_HW_CACHE_MAX]
                                 [PERF_COUNT_HW_CACHE_OP_MAX]
                                 [PERF_COUNT_HW_CACHE_RESULT_MAX];
        int (*map_hw_event)(u64);	/* method used to map hw events */
        int (*map_cache_event)(u64);	/* method used to map cache events */
        u64 max_period;			/* max sampling period */
        u64 cntval_mask;		/* counter width mask */
        int cntval_bits;		/* counter width */
        int max_events;			/* max generic hw events in map */
        int num_counters;		/* number of base + aux counters */
        int num_base_counters;		/* number of base counters */
};

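/*
 * perf_irqs counts, per cpu, the performance-counter interrupts taken
 * (incremented in tile_pmu_handle_irq()).
 */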
DEFINE_PER_CPU(u64, perf_irqs);

static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

#define TILE_OP_UNSUPP (-1)

#ifndef __tilegx__
/* TILEPro hardware events map */
static const int tile_hw_event_map[] = {
        [PERF_COUNT_HW_CPU_CYCLES] = 0x01, /* ONE */
        [PERF_COUNT_HW_INSTRUCTIONS] = 0x06, /* MP_BUNDLE_RETIRED */
        [PERF_COUNT_HW_CACHE_REFERENCES] = TILE_OP_UNSUPP,
        [PERF_COUNT_HW_CACHE_MISSES] = TILE_OP_UNSUPP,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x16, /* MP_CONDITIONAL_BRANCH_ISSUED */
        [PERF_COUNT_HW_BRANCH_MISSES] = 0x14, /* MP_CONDITIONAL_BRANCH_MISSPREDICT */
        [PERF_COUNT_HW_BUS_CYCLES] = TILE_OP_UNSUPP,
};
#else
/* TILEGx hardware events map */
static const int tile_hw_event_map[] = {
        [PERF_COUNT_HW_CPU_CYCLES] = 0x181, /* ONE */
        [PERF_COUNT_HW_INSTRUCTIONS] = 0xdb, /* INSTRUCTION_BUNDLE */
        [PERF_COUNT_HW_CACHE_REFERENCES] = TILE_OP_UNSUPP,
        [PERF_COUNT_HW_CACHE_MISSES] = TILE_OP_UNSUPP,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0xd9, /* COND_BRANCH_PRED_CORRECT */
        [PERF_COUNT_HW_BRANCH_MISSES] = 0xda, /* COND_BRANCH_PRED_INCORRECT */
        [PERF_COUNT_HW_BUS_CYCLES] = TILE_OP_UNSUPP,
};
#endif

#define C(x) PERF_COUNT_HW_CACHE_##x

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per model basis.  A value of -1 means
 * 'not supported', any other value means the
 * raw hw_event ID.
 */
#ifndef __tilegx__
/* TILEPro hardware cache event map */
static const int tile_cache_event_map[PERF_COUNT_HW_CACHE_MAX]
                                     [PERF_COUNT_HW_CACHE_OP_MAX]
                                     [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
                [C(RESULT_MISS)] = 0x21, /* RD_MISS */
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
                [C(RESULT_MISS)] = 0x22, /* WR_MISS */
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
                [C(RESULT_MISS)] = TILE_OP_UNSUPP,
        },
},
[C(L1I)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = 0x12, /* MP_ICACHE_HIT_ISSUED */
                [C(RESULT_MISS)] = TILE_OP_UNSUPP,
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
                [C(RESULT_MISS)] = TILE_OP_UNSUPP,
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
                [C(RESULT_MISS)] = TILE_OP_UNSUPP,
        },
},
[C(LL)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
                [C(RESULT_MISS)] = TILE_OP_UNSUPP,
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
                [C(RESULT_MISS)] = TILE_OP_UNSUPP,
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
                [C(RESULT_MISS)] = TILE_OP_UNSUPP,
        },
},
[C(DTLB)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = 0x1d, /* TLB_CNT */
                [C(RESULT_MISS)] = 0x20, /* TLB_EXCEPTION */
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
                [C(RESULT_MISS)] = TILE_OP_UNSUPP,
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
                [C(RESULT_MISS)] = TILE_OP_UNSUPP,
        },
},
[C(ITLB)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = 0x13, /* MP_ITLB_HIT_ISSUED */
                [C(RESULT_MISS)] = TILE_OP_UNSUPP,
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
                [C(RESULT_MISS)] = TILE_OP_UNSUPP,
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
                [C(RESULT_MISS)] = TILE_OP_UNSUPP,
        },
},
[C(BPU)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
                [C(RESULT_MISS)] = TILE_OP_UNSUPP,
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
                [C(RESULT_MISS)] = TILE_OP_UNSUPP,
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
                [C(RESULT_MISS)] = TILE_OP_UNSUPP,
        },
},
};
#else
/* TILEGx hardware cache event map */
static const int tile_cache_event_map[PERF_COUNT_HW_CACHE_MAX]
                                     [PERF_COUNT_HW_CACHE_OP_MAX]
                                     [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
        /*
         * Like some other architectures (e.g. ARM), the performance
         * counters don't differentiate between read and write
         * accesses/misses, so this isn't strictly correct, but it's the
         * best we can do.  Writes and reads get combined.
         */
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
                [C(RESULT_MISS)] = 0x44, /* RD_MISS */
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
                [C(RESULT_MISS)] = 0x45, /* WR_MISS */
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
                [C(RESULT_MISS)] = TILE_OP_UNSUPP,
        },
},
[C(L1I)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
                [C(RESULT_MISS)] = TILE_OP_UNSUPP,
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
                [C(RESULT_MISS)] = TILE_OP_UNSUPP,
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
                [C(RESULT_MISS)] = TILE_OP_UNSUPP,
        },
},
[C(LL)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
                [C(RESULT_MISS)] = TILE_OP_UNSUPP,
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
                [C(RESULT_MISS)] = TILE_OP_UNSUPP,
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
                [C(RESULT_MISS)] = TILE_OP_UNSUPP,
        },
},
[C(DTLB)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = 0x40, /* TLB_CNT */
                [C(RESULT_MISS)] = 0x43, /* TLB_EXCEPTION */
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = 0x40, /* TLB_CNT */
                [C(RESULT_MISS)] = 0x43, /* TLB_EXCEPTION */
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
                [C(RESULT_MISS)] = TILE_OP_UNSUPP,
        },
},
[C(ITLB)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
                [C(RESULT_MISS)] = 0xd4, /* ITLB_MISS_INT */
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
                [C(RESULT_MISS)] = 0xd4, /* ITLB_MISS_INT */
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
                [C(RESULT_MISS)] = TILE_OP_UNSUPP,
        },
},
[C(BPU)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
                [C(RESULT_MISS)] = TILE_OP_UNSUPP,
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
                [C(RESULT_MISS)] = TILE_OP_UNSUPP,
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
                [C(RESULT_MISS)] = TILE_OP_UNSUPP,
        },
},
};
#endif

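/*
 * Number of perf events currently active system-wide.  The PMC hardware
 * is reserved when the first event is created (tile_event_init()) and
 * released when the last one is destroyed (tile_event_destroy()).
 */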
static atomic_t tile_active_events;
static DEFINE_MUTEX(perf_intr_reserve_mutex);

static int tile_map_hw_event(u64 config);
static int tile_map_cache_event(u64 config);

static int tile_pmu_handle_irq(struct pt_regs *regs, int fault);

/*
 * To avoid new_raw_count getting larger than prev_raw_count
 * in tile_perf_event_update(), we limit the value of max_period to 2^31 - 1.
 */
static const struct tile_pmu tilepmu = {
#ifndef __tilegx__
        .name = "tilepro",
#else
        .name = "tilegx",
#endif
        .max_events = ARRAY_SIZE(tile_hw_event_map),
        .map_hw_event = tile_map_hw_event,
        .hw_events = tile_hw_event_map,
        .map_cache_event = tile_map_cache_event,
        .cache_events = &tile_cache_event_map,
        .cntval_bits = 32,
        .cntval_mask = (1ULL << 32) - 1,
        .max_period = (1ULL << 31) - 1,
        .num_counters = TILE_MAX_COUNTERS,
        .num_base_counters = TILE_BASE_COUNTERS,
};

static const struct tile_pmu *tile_pmu __read_mostly;

/*
 * Check whether perf event is enabled.
 */
int tile_perf_enabled(void)
{
        return atomic_read(&tile_active_events) != 0;
}

/*
 * Read Performance Counters.
 */
static inline u64 read_counter(int idx)
{
        u64 val = 0;

        /* __insn_mfspr() only takes an immediate argument */
        switch (idx) {
        case PERF_COUNT_0_IDX:
                val = __insn_mfspr(SPR_PERF_COUNT_0);
                break;
        case PERF_COUNT_1_IDX:
                val = __insn_mfspr(SPR_PERF_COUNT_1);
                break;
        case AUX_PERF_COUNT_0_IDX:
                val = __insn_mfspr(SPR_AUX_PERF_COUNT_0);
                break;
        case AUX_PERF_COUNT_1_IDX:
                val = __insn_mfspr(SPR_AUX_PERF_COUNT_1);
                break;
        default:
                WARN_ON_ONCE(idx > AUX_PERF_COUNT_1_IDX ||
                             idx < PERF_COUNT_0_IDX);
        }

        return val;
}

/*
 * Write Performance Counters.
 */
static inline void write_counter(int idx, u64 value)
{
        /* __insn_mtspr() only takes an immediate argument */
        switch (idx) {
        case PERF_COUNT_0_IDX:
                __insn_mtspr(SPR_PERF_COUNT_0, value);
                break;
        case PERF_COUNT_1_IDX:
                __insn_mtspr(SPR_PERF_COUNT_1, value);
                break;
        case AUX_PERF_COUNT_0_IDX:
                __insn_mtspr(SPR_AUX_PERF_COUNT_0, value);
                break;
        case AUX_PERF_COUNT_1_IDX:
                __insn_mtspr(SPR_AUX_PERF_COUNT_1, value);
                break;
        default:
                WARN_ON_ONCE(idx > AUX_PERF_COUNT_1_IDX ||
                             idx < PERF_COUNT_0_IDX);
        }
}

/*
 * Enable performance event by setting
 * Performance Counter Control registers.
 */
static inline void tile_pmu_enable_event(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        unsigned long cfg, mask;
        int shift, idx = hwc->idx;

        /*
         * prevent early activation from tile_pmu_start() in hw_perf_enable
         */
        if (WARN_ON_ONCE(idx == -1))
                return;

        if (idx < tile_pmu->num_base_counters)
                cfg = __insn_mfspr(SPR_PERF_COUNT_CTL);
        else
                cfg = __insn_mfspr(SPR_AUX_PERF_COUNT_CTL);

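        /*
         * Each control SPR packs the fields for the two counters it
         * controls: counter 0 in the low 16 bits and counter 1 in the
         * high 16 bits, hence the shift of 0 or 16 below.
         */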
        switch (idx) {
        case PERF_COUNT_0_IDX:
        case AUX_PERF_COUNT_0_IDX:
                mask = TILE_EVENT_MASK;
                shift = 0;
                break;
        case PERF_COUNT_1_IDX:
        case AUX_PERF_COUNT_1_IDX:
                mask = TILE_EVENT_MASK << 16;
                shift = 16;
                break;
        default:
                WARN_ON_ONCE(idx < PERF_COUNT_0_IDX ||
                             idx > AUX_PERF_COUNT_1_IDX);
                return;
        }

        /* Clear mask bits to enable the event. */
        cfg &= ~mask;
        cfg |= hwc->config << shift;

        if (idx < tile_pmu->num_base_counters)
                __insn_mtspr(SPR_PERF_COUNT_CTL, cfg);
        else
                __insn_mtspr(SPR_AUX_PERF_COUNT_CTL, cfg);
}

/*
 * Disable performance event by clearing
 * Performance Counter Control registers.
 */
static inline void tile_pmu_disable_event(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        unsigned long cfg, mask;
        int idx = hwc->idx;

        if (idx == -1)
                return;

        if (idx < tile_pmu->num_base_counters)
                cfg = __insn_mfspr(SPR_PERF_COUNT_CTL);
        else
                cfg = __insn_mfspr(SPR_AUX_PERF_COUNT_CTL);

        switch (idx) {
        case PERF_COUNT_0_IDX:
        case AUX_PERF_COUNT_0_IDX:
                mask = TILE_PLM_MASK;
                break;
        case PERF_COUNT_1_IDX:
        case AUX_PERF_COUNT_1_IDX:
                mask = TILE_PLM_MASK << 16;
                break;
        default:
                WARN_ON_ONCE(idx < PERF_COUNT_0_IDX ||
                             idx > AUX_PERF_COUNT_1_IDX);
                return;
        }

        /* Set mask bits to disable the event. */
        cfg |= mask;

        if (idx < tile_pmu->num_base_counters)
                __insn_mtspr(SPR_PERF_COUNT_CTL, cfg);
        else
                __insn_mtspr(SPR_AUX_PERF_COUNT_CTL, cfg);
}

/*
 * Propagate event elapsed time into the generic event.
 * Can only be executed on the CPU where the event is active.
 * Returns the delta events processed.
 */
static u64 tile_perf_event_update(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        int shift = 64 - tile_pmu->cntval_bits;
        u64 prev_raw_count, new_raw_count;
        u64 oldval;
        int idx = hwc->idx;
        u64 delta;

        /*
         * Careful: an NMI might modify the previous event value.
         *
         * Our tactic to handle this is to first atomically read and
         * exchange a new raw count - then add that new-prev delta
         * count to the generic event atomically:
         */
again:
        prev_raw_count = local64_read(&hwc->prev_count);
        new_raw_count = read_counter(idx);

        oldval = local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                                 new_raw_count);
        if (oldval != prev_raw_count)
                goto again;

        /*
         * Now we have the new raw value and have updated the prev
         * timestamp already.  We can now calculate the elapsed delta
         * (event-)time and add that to the generic event.
         *
         * Careful, not all hw sign-extends above the physical width
         * of the count.
         */
        delta = (new_raw_count << shift) - (prev_raw_count << shift);
        delta >>= shift;

        local64_add(delta, &event->count);
        local64_sub(delta, &hwc->period_left);

        return new_raw_count;
}

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the event disabled in hw:
 */
static int tile_event_set_period(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;
        s64 left = local64_read(&hwc->period_left);
        s64 period = hwc->sample_period;
        int ret = 0;

        /*
         * If we are way outside a reasonable range then just skip forward:
         */
        if (unlikely(left <= -period)) {
                left = period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }

        if (unlikely(left <= 0)) {
                left += period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }
        if (left > tile_pmu->max_period)
                left = tile_pmu->max_period;

        /*
         * The hw event starts counting from this event offset,
         * mark it to be able to extract future deltas:
         */
        local64_set(&hwc->prev_count, (u64)-left);

        write_counter(idx, (u64)(-left) & tile_pmu->cntval_mask);

        perf_event_update_userpage(event);

        return ret;
}

/*
 * Stop the event but do not release the PMU counter.
 */
static void tile_pmu_stop(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;

        if (__test_and_clear_bit(idx, cpuc->active_mask)) {
                tile_pmu_disable_event(event);
                cpuc->events[hwc->idx] = NULL;
                WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
                hwc->state |= PERF_HES_STOPPED;
        }

        if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
                /*
                 * Drain the remaining delta count out of an event
                 * that we are disabling:
                 */
                tile_perf_event_update(event);
                hwc->state |= PERF_HES_UPTODATE;
        }
}

/*
 * Start an event (without re-assigning counter).
 */
static void tile_pmu_start(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        int idx = event->hw.idx;

        if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
                return;

        if (WARN_ON_ONCE(idx == -1))
                return;

        if (flags & PERF_EF_RELOAD) {
                WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
                tile_event_set_period(event);
        }

        event->hw.state = 0;

        cpuc->events[idx] = event;
        __set_bit(idx, cpuc->active_mask);

        unmask_pmc_interrupts();

        tile_pmu_enable_event(event);

        perf_event_update_userpage(event);
}

/*
 * Add a single event to the PMU.
 *
 * The event is added to the group of enabled events
 * but only if it can be scheduled with existing events.
 */
static int tile_pmu_add(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct hw_perf_event *hwc;
        unsigned long mask;
        int b, max_cnt;

        hwc = &event->hw;

        /*
         * We are full.
         */
        if (cpuc->n_events == tile_pmu->num_counters)
                return -ENOSPC;

        cpuc->event_list[cpuc->n_events] = event;
        cpuc->n_events++;

        hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
        if (!(flags & PERF_EF_START))
                hwc->state |= PERF_HES_ARCH;

        /*
         * Find first empty counter.
         */
        max_cnt = tile_pmu->num_counters;
        mask = ~cpuc->used_mask;

        /* Find next free counter. */
        b = find_next_bit(&mask, max_cnt, 0);

        /* Should not happen. */
        if (WARN_ON_ONCE(b == max_cnt))
                return -ENOSPC;

        /*
         * Assign counter to event.
         */
        event->hw.idx = b;
        __set_bit(b, &cpuc->used_mask);

        /*
         * Start if requested.
         */
        if (flags & PERF_EF_START)
                tile_pmu_start(event, PERF_EF_RELOAD);

        return 0;
}

/*
 * Delete a single event from the PMU.
 *
 * The event is deleted from the group of enabled events.
 * If it is the last event, disable PMU interrupt.
 */
static void tile_pmu_del(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        int i;

        /*
         * Remove event from list, compact list if necessary.
         */
        for (i = 0; i < cpuc->n_events; i++) {
                if (cpuc->event_list[i] == event) {
                        while (++i < cpuc->n_events)
                                cpuc->event_list[i-1] = cpuc->event_list[i];
                        --cpuc->n_events;
                        cpuc->events[event->hw.idx] = NULL;
                        __clear_bit(event->hw.idx, &cpuc->used_mask);
                        tile_pmu_stop(event, PERF_EF_UPDATE);
                        break;
                }
        }

        /*
         * If there are no events left, then mask PMU interrupt.
         */
        if (cpuc->n_events == 0)
                mask_pmc_interrupts();

        perf_event_update_userpage(event);
}

/*
 * Propagate event elapsed time into the event.
 */
static inline void tile_pmu_read(struct perf_event *event)
{
        tile_perf_event_update(event);
}

/*
 * Map generic events to Tile PMU.
 */
static int tile_map_hw_event(u64 config)
{
        if (config >= tile_pmu->max_events)
                return -EINVAL;
        return tile_pmu->hw_events[config];
}

/*
 * Map generic hardware cache events to Tile PMU.
 */
static int tile_map_cache_event(u64 config)
{
        unsigned int cache_type, cache_op, cache_result;
        int code;

        if (!tile_pmu->cache_events)
                return -ENOENT;

        cache_type = (config >> 0) & 0xff;
        if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
                return -EINVAL;

        cache_op = (config >> 8) & 0xff;
        if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
                return -EINVAL;

        cache_result = (config >> 16) & 0xff;
        if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
                return -EINVAL;

        code = (*tile_pmu->cache_events)[cache_type][cache_op][cache_result];
        if (code == TILE_OP_UNSUPP)
                return -EINVAL;

        return code;
}

static void tile_event_destroy(struct perf_event *event)
{
        if (atomic_dec_return(&tile_active_events) == 0)
                release_pmc_hardware();
}

static int __tile_event_init(struct perf_event *event)
{
        struct perf_event_attr *attr = &event->attr;
        struct hw_perf_event *hwc = &event->hw;
        int code;

        switch (attr->type) {
        case PERF_TYPE_HARDWARE:
                code = tile_pmu->map_hw_event(attr->config);
                break;
        case PERF_TYPE_HW_CACHE:
                code = tile_pmu->map_cache_event(attr->config);
                break;
        case PERF_TYPE_RAW:
                code = attr->config & TILE_EVENT_MASK;
                break;
        default:
                /* Should not happen. */
                return -EOPNOTSUPP;
        }

        if (code < 0)
                return code;

        hwc->config = code;
        hwc->idx = -1;

        if (attr->exclude_user)
                hwc->config |= TILE_CTL_EXCL_USER;

        if (attr->exclude_kernel)
                hwc->config |= TILE_CTL_EXCL_KERNEL;

        if (attr->exclude_hv)
                hwc->config |= TILE_CTL_EXCL_HV;

        if (!hwc->sample_period) {
                hwc->sample_period = tile_pmu->max_period;
                hwc->last_period = hwc->sample_period;
                local64_set(&hwc->period_left, hwc->sample_period);
        }
        event->destroy = tile_event_destroy;

        return 0;
}

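/*
 * Set up a new event.  Event types we do not handle are rejected before
 * touching the PMC hardware; the hardware itself is reserved (and the
 * overflow handler installed) when the first event in the system is created.
 */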
static int tile_event_init(struct perf_event *event)
{
        int err = 0;
        perf_irq_t old_irq_handler = NULL;

        switch (event->attr.type) {
        case PERF_TYPE_RAW:
        case PERF_TYPE_HARDWARE:
        case PERF_TYPE_HW_CACHE:
                break;

        default:
                return -ENOENT;
        }

        if (atomic_inc_return(&tile_active_events) == 1)
                old_irq_handler = reserve_pmc_hardware(tile_pmu_handle_irq);

        if (old_irq_handler) {
                pr_warn("PMC hardware busy (reserved by oprofile)\n");

                atomic_dec(&tile_active_events);
                return -EBUSY;
        }

        err = __tile_event_init(event);
        if (err) {
                if (event->destroy)
                        event->destroy(event);
        }
        return err;
}

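/* Tile PMU operations, registered with the perf core in init_hw_perf_events(). */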
static struct pmu tilera_pmu = {
        .event_init	= tile_event_init,
        .add		= tile_pmu_add,
        .del		= tile_pmu_del,
        .start		= tile_pmu_start,
        .stop		= tile_pmu_stop,
        .read		= tile_pmu_read,
};

/*
 * The PMU's IRQ handler.  The PMU has two interrupts; they share this handler.
 */
int tile_pmu_handle_irq(struct pt_regs *regs, int fault)
{
        struct perf_sample_data data;
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct perf_event *event;
        struct hw_perf_event *hwc;
        u64 val;
        unsigned long status;
        int bit;

        __this_cpu_inc(perf_irqs);

        if (!atomic_read(&tile_active_events))
                return 0;

        status = pmc_get_overflow();
        pmc_ack_overflow(status);

        for_each_set_bit(bit, &status, tile_pmu->num_counters) {
                event = cpuc->events[bit];

                if (!event)
                        continue;

                if (!test_bit(bit, cpuc->active_mask))
                        continue;

                hwc = &event->hw;

                val = tile_perf_event_update(event);
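
                /*
                 * The counter is programmed to a negative value and counts
                 * up; if the top bit of the raw count is still set it has
                 * not yet wrapped past zero, so this is not a real overflow
                 * for this event and we skip it.
                 */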
                if (val & (1ULL << (tile_pmu->cntval_bits - 1)))
                        continue;

                perf_sample_data_init(&data, 0, event->hw.last_period);
                if (!tile_event_set_period(event))
                        continue;

                if (perf_event_overflow(event, &data, regs))
                        tile_pmu_stop(event, 0);
        }

        return 0;
}

static bool __init supported_pmu(void)
{
        tile_pmu = &tilepmu;
        return true;
}

int __init init_hw_perf_events(void)
{
        supported_pmu();
        perf_pmu_register(&tilera_pmu, "cpu", PERF_TYPE_RAW);
        return 0;
}
arch_initcall(init_hw_perf_events);

/* Callchain handling code. */

/*
 * Tile specific backtracing code for perf_events.
 */
static inline void perf_callchain(struct perf_callchain_entry_ctx *entry,
                                  struct pt_regs *regs)
{
        struct KBacktraceIterator kbt;
        unsigned int i;

        /*
         * Get the address just after the "jalr" instruction that
         * jumps to the handler for a syscall.  When we find this
         * address in a backtrace, we silently ignore it, which gives
         * us a one-step backtrace connection from the sys_xxx()
         * function in the kernel to the xxx() function in libc.
         * Otherwise, we lose the ability to properly attribute time
         * from the libc calls to the kernel implementations, since
         * oprofile only considers PCs from backtraces a pair at a time.
         */
        unsigned long handle_syscall_pc = handle_syscall_link_address();

        KBacktraceIterator_init(&kbt, NULL, regs);
        kbt.profile = 1;

        /*
         * The sample for the pc is already recorded.  Now we are adding the
         * address of the callsites on the stack.  Our iterator starts
         * with the frame of the (already sampled) call site.  If our
         * iterator contained a "return address" field, we could have just
         * used it and wouldn't have needed to skip the first
         * frame.  That's in effect what the arm and x86 versions do.
         * Instead we peel off the first iteration to get the equivalent
         * behavior.
         */
        if (KBacktraceIterator_end(&kbt))
                return;
        KBacktraceIterator_next(&kbt);

        /*
         * Set stack depth to 16 for user and kernel space respectively, that
         * is, total 32 stack frames.
         */
        for (i = 0; i < 16; ++i) {
                unsigned long pc;

                if (KBacktraceIterator_end(&kbt))
                        break;
                pc = kbt.it.pc;
                if (pc != handle_syscall_pc)
                        perf_callchain_store(entry, pc);
                KBacktraceIterator_next(&kbt);
        }
}

void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
                         struct pt_regs *regs)
{
        perf_callchain(entry, regs);
}

void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
                           struct pt_regs *regs)
{
        perf_callchain(entry, regs);
}