uncore.c

/*
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Jacob Shin <jacob.shin@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>

#include <asm/cpufeature.h>
#include <asm/perf_event.h>
#include <asm/msr.h>

#define NUM_COUNTERS_NB		4
#define NUM_COUNTERS_L2		4
#define MAX_COUNTERS		NUM_COUNTERS_NB

#define RDPMC_BASE_NB		6
#define RDPMC_BASE_L2		10

#define COUNTER_SHIFT		16

static HLIST_HEAD(uncore_unused_list);

struct amd_uncore {
	int id;
	int refcnt;
	int cpu;
	int num_counters;
	int rdpmc_base;
	u32 msr_base;
	cpumask_t *active_mask;
	struct pmu *pmu;
	struct perf_event *events[MAX_COUNTERS];
	struct hlist_node node;
};

static struct amd_uncore * __percpu *amd_uncore_nb;
static struct amd_uncore * __percpu *amd_uncore_l2;

static struct pmu amd_nb_pmu;
static struct pmu amd_l2_pmu;

static cpumask_t amd_nb_active_mask;
static cpumask_t amd_l2_active_mask;

static bool is_nb_event(struct perf_event *event)
{
	return event->pmu->type == amd_nb_pmu.type;
}

static bool is_l2_event(struct perf_event *event)
{
	return event->pmu->type == amd_l2_pmu.type;
}

static struct amd_uncore *event_to_amd_uncore(struct perf_event *event)
{
	if (is_nb_event(event) && amd_uncore_nb)
		return *per_cpu_ptr(amd_uncore_nb, event->cpu);
	else if (is_l2_event(event) && amd_uncore_l2)
		return *per_cpu_ptr(amd_uncore_l2, event->cpu);

	return NULL;
}
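
/*
 * Note on the delta computation below: the raw values are shifted up by
 * COUNTER_SHIFT (16) before the subtraction and the signed delta is shifted
 * back down, which discards everything above the 48-bit hardware counter
 * width so the accumulated count stays correct across counter wraparound.
 */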
static void amd_uncore_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev, new;
	s64 delta;

	/*
	 * since we do not enable counter overflow interrupts,
	 * we do not have to worry about prev_count changing on us
	 */

	prev = local64_read(&hwc->prev_count);
	rdpmcl(hwc->event_base_rdpmc, new);
	local64_set(&hwc->prev_count, new);
	delta = (new << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
	delta >>= COUNTER_SHIFT;
	local64_add(delta, &event->count);
}

static void amd_uncore_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (flags & PERF_EF_RELOAD)
		wrmsrl(hwc->event_base, (u64)local64_read(&hwc->prev_count));

	hwc->state = 0;
	wrmsrl(hwc->config_base, (hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE));
	perf_event_update_userpage(event);
}

static void amd_uncore_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
	hwc->state |= PERF_HES_STOPPED;

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		amd_uncore_read(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}
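
/*
 * Claim a counter slot for the event. The events[] array is shared by every
 * CPU that maps to the same uncore instance, so free slots are taken with
 * cmpxchg() rather than a plain store.
 */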
static int amd_uncore_add(struct perf_event *event, int flags)
{
	int i;
	struct amd_uncore *uncore = event_to_amd_uncore(event);
	struct hw_perf_event *hwc = &event->hw;

	/* are we already assigned? */
	if (hwc->idx != -1 && uncore->events[hwc->idx] == event)
		goto out;

	for (i = 0; i < uncore->num_counters; i++) {
		if (uncore->events[i] == event) {
			hwc->idx = i;
			goto out;
		}
	}

	/* if not, take the first available counter */
	hwc->idx = -1;
	for (i = 0; i < uncore->num_counters; i++) {
		if (cmpxchg(&uncore->events[i], NULL, event) == NULL) {
			hwc->idx = i;
			break;
		}
	}

out:
	if (hwc->idx == -1)
		return -EBUSY;

	hwc->config_base = uncore->msr_base + (2 * hwc->idx);
	hwc->event_base = uncore->msr_base + 1 + (2 * hwc->idx);
	hwc->event_base_rdpmc = uncore->rdpmc_base + hwc->idx;
	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		amd_uncore_start(event, PERF_EF_RELOAD);

	return 0;
}

static void amd_uncore_del(struct perf_event *event, int flags)
{
	int i;
	struct amd_uncore *uncore = event_to_amd_uncore(event);
	struct hw_perf_event *hwc = &event->hw;

	amd_uncore_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < uncore->num_counters; i++) {
		if (cmpxchg(&uncore->events[i], event, NULL) == event)
			break;
	}

	hwc->idx = -1;
}

static int amd_uncore_event_init(struct perf_event *event)
{
	struct amd_uncore *uncore;
	struct hw_perf_event *hwc = &event->hw;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * NB and L2 counters (MSRs) are shared across all cores that share the
	 * same NB / L2 cache. Interrupts can be directed to a single target
	 * core, however, event counts generated by processes running on other
	 * cores cannot be masked out. So we do not support sampling and
	 * per-thread events.
	 */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

	/* NB and L2 counters do not have usr/os/guest/host bits */
	if (event->attr.exclude_user || event->attr.exclude_kernel ||
	    event->attr.exclude_host || event->attr.exclude_guest)
		return -EINVAL;

	/* and we do not enable counter overflow interrupts */
	hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
	hwc->idx = -1;

	if (event->cpu < 0)
		return -EINVAL;

	uncore = event_to_amd_uncore(event);
	if (!uncore)
		return -ENODEV;

	/*
	 * since request can come in to any of the shared cores, we will remap
	 * to a single common cpu.
	 */
	event->cpu = uncore->cpu;

	return 0;
}
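
/*
 * Sysfs "cpumask" attribute: reports the one CPU per NB / L2 instance on
 * which events for that PMU are actually counted.
 */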
static ssize_t amd_uncore_attr_show_cpumask(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	cpumask_t *active_mask;
	struct pmu *pmu = dev_get_drvdata(dev);

	if (pmu->type == amd_nb_pmu.type)
		active_mask = &amd_nb_active_mask;
	else if (pmu->type == amd_l2_pmu.type)
		active_mask = &amd_l2_active_mask;
	else
		return 0;

	return cpumap_print_to_pagebuf(true, buf, active_mask);
}
static DEVICE_ATTR(cpumask, S_IRUGO, amd_uncore_attr_show_cpumask, NULL);

static struct attribute *amd_uncore_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group amd_uncore_attr_group = {
	.attrs = amd_uncore_attrs,
};

PMU_FORMAT_ATTR(event, "config:0-7,32-35");
PMU_FORMAT_ATTR(umask, "config:8-15");

static struct attribute *amd_uncore_format_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	NULL,
};

static struct attribute_group amd_uncore_format_group = {
	.name = "format",
	.attrs = amd_uncore_format_attr,
};

static const struct attribute_group *amd_uncore_attr_groups[] = {
	&amd_uncore_attr_group,
	&amd_uncore_format_group,
	NULL,
};

static struct pmu amd_nb_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.attr_groups	= amd_uncore_attr_groups,
	.name		= "amd_nb",
	.event_init	= amd_uncore_event_init,
	.add		= amd_uncore_add,
	.del		= amd_uncore_del,
	.start		= amd_uncore_start,
	.stop		= amd_uncore_stop,
	.read		= amd_uncore_read,
};

static struct pmu amd_l2_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.attr_groups	= amd_uncore_attr_groups,
	.name		= "amd_l2",
	.event_init	= amd_uncore_event_init,
	.add		= amd_uncore_add,
	.del		= amd_uncore_del,
	.start		= amd_uncore_start,
	.stop		= amd_uncore_stop,
	.read		= amd_uncore_read,
};

static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
{
	return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL,
			cpu_to_node(cpu));
}

static int amd_uncore_cpu_up_prepare(unsigned int cpu)
{
	struct amd_uncore *uncore_nb = NULL, *uncore_l2;

	if (amd_uncore_nb) {
		uncore_nb = amd_uncore_alloc(cpu);
		if (!uncore_nb)
			goto fail;
		uncore_nb->cpu = cpu;
		uncore_nb->num_counters = NUM_COUNTERS_NB;
		uncore_nb->rdpmc_base = RDPMC_BASE_NB;
		uncore_nb->msr_base = MSR_F15H_NB_PERF_CTL;
		uncore_nb->active_mask = &amd_nb_active_mask;
		uncore_nb->pmu = &amd_nb_pmu;
		uncore_nb->id = -1;
		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;
	}

	if (amd_uncore_l2) {
		uncore_l2 = amd_uncore_alloc(cpu);
		if (!uncore_l2)
			goto fail;
		uncore_l2->cpu = cpu;
		uncore_l2->num_counters = NUM_COUNTERS_L2;
		uncore_l2->rdpmc_base = RDPMC_BASE_L2;
		uncore_l2->msr_base = MSR_F16H_L2I_PERF_CTL;
		uncore_l2->active_mask = &amd_l2_active_mask;
		uncore_l2->pmu = &amd_l2_pmu;
		uncore_l2->id = -1;
		*per_cpu_ptr(amd_uncore_l2, cpu) = uncore_l2;
	}

	return 0;

fail:
	if (amd_uncore_nb)
		*per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
	kfree(uncore_nb);
	return -ENOMEM;
}
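
/*
 * If another online CPU already owns an uncore structure with the same id,
 * reuse that one and park this CPU's now-redundant copy on
 * uncore_unused_list; it is freed later from uncore_clean_online().
 */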
static struct amd_uncore *
amd_uncore_find_online_sibling(struct amd_uncore *this,
			       struct amd_uncore * __percpu *uncores)
{
	unsigned int cpu;
	struct amd_uncore *that;

	for_each_online_cpu(cpu) {
		that = *per_cpu_ptr(uncores, cpu);

		if (!that)
			continue;

		if (this == that)
			continue;

		if (this->id == that->id) {
			hlist_add_head(&this->node, &uncore_unused_list);
			this = that;
			break;
		}
	}

	this->refcnt++;
	return this;
}
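
/*
 * Derive the instance id for this CPU: the NB id comes from CPUID leaf
 * 0x8000001e (NodeId in ECX[7:0]), the L2 id from the APIC id rounded down
 * to the group of cores sharing the cache (CPUID leaf 0x8000001d).
 */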
static int amd_uncore_cpu_starting(unsigned int cpu)
{
	unsigned int eax, ebx, ecx, edx;
	struct amd_uncore *uncore;

	if (amd_uncore_nb) {
		uncore = *per_cpu_ptr(amd_uncore_nb, cpu);
		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
		uncore->id = ecx & 0xff;

		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_nb);
		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore;
	}

	if (amd_uncore_l2) {
		unsigned int apicid = cpu_data(cpu).apicid;
		unsigned int nshared;

		uncore = *per_cpu_ptr(amd_uncore_l2, cpu);
		cpuid_count(0x8000001d, 2, &eax, &ebx, &ecx, &edx);
		nshared = ((eax >> 14) & 0xfff) + 1;
		uncore->id = apicid - (apicid % nshared);

		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_l2);
		*per_cpu_ptr(amd_uncore_l2, cpu) = uncore;
	}

	return 0;
}

static void uncore_clean_online(void)
{
	struct amd_uncore *uncore;
	struct hlist_node *n;

	hlist_for_each_entry_safe(uncore, n, &uncore_unused_list, node) {
		hlist_del(&uncore->node);
		kfree(uncore);
	}
}

static void uncore_online(unsigned int cpu,
			  struct amd_uncore * __percpu *uncores)
{
	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

	uncore_clean_online();

	if (cpu == uncore->cpu)
		cpumask_set_cpu(cpu, uncore->active_mask);
}

static int amd_uncore_cpu_online(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_online(cpu, amd_uncore_nb);

	if (amd_uncore_l2)
		uncore_online(cpu, amd_uncore_l2);

	return 0;
}
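
/*
 * When the CPU that currently counts for an uncore instance goes offline,
 * hand the perf context and the active_mask slot over to another online
 * CPU that shares the same instance.
 */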
static void uncore_down_prepare(unsigned int cpu,
				struct amd_uncore * __percpu *uncores)
{
	unsigned int i;
	struct amd_uncore *this = *per_cpu_ptr(uncores, cpu);

	if (this->cpu != cpu)
		return;

	/* this cpu is going down, migrate to a shared sibling if possible */
	for_each_online_cpu(i) {
		struct amd_uncore *that = *per_cpu_ptr(uncores, i);

		if (cpu == i)
			continue;

		if (this == that) {
			perf_pmu_migrate_context(this->pmu, cpu, i);
			cpumask_clear_cpu(cpu, that->active_mask);
			cpumask_set_cpu(i, that->active_mask);
			that->cpu = i;
			break;
		}
	}
}

static int amd_uncore_cpu_down_prepare(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_down_prepare(cpu, amd_uncore_nb);

	if (amd_uncore_l2)
		uncore_down_prepare(cpu, amd_uncore_l2);

	return 0;
}

static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores)
{
	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

	if (cpu == uncore->cpu)
		cpumask_clear_cpu(cpu, uncore->active_mask);

	if (!--uncore->refcnt)
		kfree(uncore);

	*per_cpu_ptr(uncores, cpu) = NULL;
}

static int amd_uncore_cpu_dead(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_dead(cpu, amd_uncore_nb);

	if (amd_uncore_l2)
		uncore_dead(cpu, amd_uncore_l2);

	return 0;
}
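
/*
 * Driver init: requires an AMD CPU with topology extensions (TOPOEXT). The
 * NB and L2 PMUs are registered independently, gated on the PERFCTR_NB and
 * PERFCTR_L2 feature bits, and per-CPU state is wired up through the CPU
 * hotplug state machine.
 */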
static int __init amd_uncore_init(void)
{
	int ret = -ENODEV;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		goto fail_nodev;

	if (!boot_cpu_has(X86_FEATURE_TOPOEXT))
		goto fail_nodev;

	if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) {
		amd_uncore_nb = alloc_percpu(struct amd_uncore *);
		if (!amd_uncore_nb) {
			ret = -ENOMEM;
			goto fail_nb;
		}
		ret = perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1);
		if (ret)
			goto fail_nb;

		pr_info("perf: AMD NB counters detected\n");
		ret = 0;
	}

	if (boot_cpu_has(X86_FEATURE_PERFCTR_L2)) {
		amd_uncore_l2 = alloc_percpu(struct amd_uncore *);
		if (!amd_uncore_l2) {
			ret = -ENOMEM;
			goto fail_l2;
		}
		ret = perf_pmu_register(&amd_l2_pmu, amd_l2_pmu.name, -1);
		if (ret)
			goto fail_l2;

		pr_info("perf: AMD L2I counters detected\n");
		ret = 0;
	}

	/*
	 * Install callbacks. Core will call them for each online cpu.
	 */
	if (cpuhp_setup_state(CPUHP_PERF_X86_AMD_UNCORE_PREP,
			      "PERF_X86_AMD_UNCORE_PREP",
			      amd_uncore_cpu_up_prepare, amd_uncore_cpu_dead))
		goto fail_l2;

	if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
			      "AP_PERF_X86_AMD_UNCORE_STARTING",
			      amd_uncore_cpu_starting, NULL))
		goto fail_prep;

	if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
			      "AP_PERF_X86_AMD_UNCORE_ONLINE",
			      amd_uncore_cpu_online,
			      amd_uncore_cpu_down_prepare))
		goto fail_start;

	return 0;

fail_start:
	cpuhp_remove_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING);
fail_prep:
	cpuhp_remove_state(CPUHP_PERF_X86_AMD_UNCORE_PREP);
fail_l2:
	if (boot_cpu_has(X86_FEATURE_PERFCTR_NB))
		perf_pmu_unregister(&amd_nb_pmu);
	if (amd_uncore_l2)
		free_percpu(amd_uncore_l2);
fail_nb:
	if (amd_uncore_nb)
		free_percpu(amd_uncore_nb);

fail_nodev:
	return ret;
}
device_initcall(amd_uncore_init);