uncore_snb.c

/* Nehalem/SandyBridge/Haswell/Broadwell/Skylake uncore support */
#include "uncore.h"

/* Uncore IMC PCI IDs */
#define PCI_DEVICE_ID_INTEL_SNB_IMC	0x0100
#define PCI_DEVICE_ID_INTEL_IVB_IMC	0x0154
#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC	0x0150
#define PCI_DEVICE_ID_INTEL_HSW_IMC	0x0c00
#define PCI_DEVICE_ID_INTEL_HSW_U_IMC	0x0a04
#define PCI_DEVICE_ID_INTEL_BDW_IMC	0x1604
#define PCI_DEVICE_ID_INTEL_SKL_U_IMC	0x1904
#define PCI_DEVICE_ID_INTEL_SKL_Y_IMC	0x190c
#define PCI_DEVICE_ID_INTEL_SKL_HD_IMC	0x1900
#define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC	0x1910
#define PCI_DEVICE_ID_INTEL_SKL_SD_IMC	0x190f
#define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC	0x191f

/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK		0x000000ff
#define SNB_UNC_CTL_UMASK_MASK		0x0000ff00
#define SNB_UNC_CTL_EDGE_DET		(1 << 18)
#define SNB_UNC_CTL_EN			(1 << 22)
#define SNB_UNC_CTL_INVERT		(1 << 23)
#define SNB_UNC_CTL_CMASK_MASK		0x1f000000
#define NHM_UNC_CTL_CMASK_MASK		0xff000000
#define NHM_UNC_FIXED_CTR_CTL_EN	(1 << 0)

#define SNB_UNC_RAW_EVENT_MASK		(SNB_UNC_CTL_EV_SEL_MASK | \
					 SNB_UNC_CTL_UMASK_MASK | \
					 SNB_UNC_CTL_EDGE_DET | \
					 SNB_UNC_CTL_INVERT | \
					 SNB_UNC_CTL_CMASK_MASK)

#define NHM_UNC_RAW_EVENT_MASK		(SNB_UNC_CTL_EV_SEL_MASK | \
					 SNB_UNC_CTL_UMASK_MASK | \
					 SNB_UNC_CTL_EDGE_DET | \
					 SNB_UNC_CTL_INVERT | \
					 NHM_UNC_CTL_CMASK_MASK)

/* SNB global control register */
#define SNB_UNC_PERF_GLOBAL_CTL		0x391
#define SNB_UNC_FIXED_CTR_CTRL		0x394
#define SNB_UNC_FIXED_CTR		0x395

/* SNB uncore global control */
#define SNB_UNC_GLOBAL_CTL_CORE_ALL	((1 << 4) - 1)
#define SNB_UNC_GLOBAL_CTL_EN		(1 << 29)

/* SNB Cbo register */
#define SNB_UNC_CBO_0_PERFEVTSEL0	0x700
#define SNB_UNC_CBO_0_PER_CTR0		0x706
#define SNB_UNC_CBO_MSR_OFFSET		0x10

/* SNB ARB register */
#define SNB_UNC_ARB_PER_CTR0		0x3b0
#define SNB_UNC_ARB_PERFEVTSEL0		0x3b2
#define SNB_UNC_ARB_MSR_OFFSET		0x10

/* NHM global control register */
#define NHM_UNC_PERF_GLOBAL_CTL		0x391
#define NHM_UNC_FIXED_CTR		0x394
#define NHM_UNC_FIXED_CTR_CTRL		0x395

/* NHM uncore global control */
#define NHM_UNC_GLOBAL_CTL_EN_PC_ALL	((1ULL << 8) - 1)
#define NHM_UNC_GLOBAL_CTL_EN_FC	(1ULL << 32)

/* NHM uncore register */
#define NHM_UNC_PERFEVTSEL0		0x3c0
#define NHM_UNC_UNCORE_PMC0		0x3b0

/* SKL uncore global control */
#define SKL_UNC_PERF_GLOBAL_CTL		0xe01
#define SKL_UNC_GLOBAL_CTL_CORE_ALL	((1 << 5) - 1)

DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
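/*
 * The format attributes above describe how a raw uncore event is encoded
 * into perf_event_attr::config: event select in bits 0-7, umask in bits
 * 8-15, edge detect in bit 18, invert in bit 23 and the counter mask in
 * bits 24-28 (24-31 on Nehalem).  Assuming the usual sysfs PMU naming
 * (uncore_cbox_<n>, uncore_arb), a C-box event could be requested from
 * userspace roughly like:
 *
 *	perf stat -a -e 'uncore_cbox_0/event=0xff,umask=0x00/' -- sleep 1
 *
 * (illustrative command line, not part of this file)
 */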
/* Sandy Bridge uncore support */
static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
}

static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	wrmsrl(event->hw.config_base, 0);
}

static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
	}
}

static void snb_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
		SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
}

static void snb_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, 0);
}

static struct uncore_event_desc snb_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
	{ /* end: all zeroes */ },
};

static struct attribute *snb_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask5.attr,
	NULL,
};

static struct attribute_group snb_uncore_format_group = {
	.name = "format",
	.attrs = snb_uncore_formats_attr,
};

static struct intel_uncore_ops snb_uncore_msr_ops = {
	.init_box	= snb_uncore_msr_init_box,
	.enable_box	= snb_uncore_msr_enable_box,
	.exit_box	= snb_uncore_msr_exit_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};
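/*
 * ARB events 0x80 and 0x83 are constrained to the first counter: the
 * second argument of UNCORE_EVENT_CONSTRAINT() is the bitmask of counters
 * the event may be scheduled on, and 0x1 selects only counter 0.
 */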
static struct event_constraint snb_uncore_arb_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type snb_uncore_cbox = {
	.name		= "cbox",
	.num_counters   = 2,
	.num_boxes	= 4,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
	.event_descs	= snb_uncore_events,
};

static struct intel_uncore_type snb_uncore_arb = {
	.name		= "arb",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.perf_ctr	= SNB_UNC_ARB_PER_CTR0,
	.event_ctl	= SNB_UNC_ARB_PERFEVTSEL0,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_ARB_MSR_OFFSET,
	.constraints	= snb_uncore_arb_constraints,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};

static struct intel_uncore_type *snb_msr_uncores[] = {
	&snb_uncore_cbox,
	&snb_uncore_arb,
	NULL,
};

void snb_uncore_cpu_init(void)
{
	uncore_msr_uncores = snb_msr_uncores;
	if (snb_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		snb_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
}
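/*
 * There is one C-box per core slice, so snb_uncore_cpu_init() above clamps
 * the statically declared num_boxes to the core count reported in
 * boot_cpu_data.  The Skylake variant below does the same, but drives its
 * own global control MSR (0xe01) with a five-bit core-enable mask.
 */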
static void skl_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
	}
}

static void skl_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
		SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
}

static void skl_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, 0);
}

static struct intel_uncore_ops skl_uncore_msr_ops = {
	.init_box	= skl_uncore_msr_init_box,
	.enable_box	= skl_uncore_msr_enable_box,
	.exit_box	= skl_uncore_msr_exit_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct intel_uncore_type skl_uncore_cbox = {
	.name		= "cbox",
	.num_counters   = 4,
	.num_boxes	= 5,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &skl_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
	.event_descs	= snb_uncore_events,
};

static struct intel_uncore_type *skl_msr_uncores[] = {
	&skl_uncore_cbox,
	&snb_uncore_arb,
	NULL,
};

void skl_uncore_cpu_init(void)
{
	uncore_msr_uncores = skl_msr_uncores;
	if (skl_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		skl_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	snb_uncore_arb.ops = &skl_uncore_msr_ops;
}
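/*
 * Skylake reuses the Sandy Bridge ARB type verbatim, but its box-level
 * enable lives in the Skylake global control MSR, so skl_uncore_cpu_init()
 * repoints snb_uncore_arb.ops at skl_uncore_msr_ops.
 */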
enum {
	SNB_PCI_UNCORE_IMC,
};

static struct uncore_event_desc snb_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(data_reads, "event=0x01"),
	INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
	INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),

	{ /* end: all zeroes */ },
};
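/*
 * Each IMC count corresponds to a 64-byte cache line transferred to/from
 * memory; the scale 6.103515625e-5 is 64 / 2^20, which converts the raw
 * line count into MiB for the perf tool.
 */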
#define SNB_UNCORE_PCI_IMC_EVENT_MASK		0xff
#define SNB_UNCORE_PCI_IMC_BAR_OFFSET		0x48

/* page size multiple covering all config regs */
#define SNB_UNCORE_PCI_IMC_MAP_SIZE		0x6000

#define SNB_UNCORE_PCI_IMC_DATA_READS		0x1
#define SNB_UNCORE_PCI_IMC_DATA_READS_BASE	0x5050
#define SNB_UNCORE_PCI_IMC_DATA_WRITES		0x2
#define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE	0x5054
#define SNB_UNCORE_PCI_IMC_CTR_BASE		SNB_UNCORE_PCI_IMC_DATA_READS_BASE

static struct attribute *snb_uncore_imc_formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group snb_uncore_imc_format_group = {
	.name = "format",
	.attrs = snb_uncore_imc_formats_attr,
};
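/*
 * snb_uncore_imc_init_box() below reads the IMC BAR from PCI config space
 * at offset 0x48 (plus the upper dword when physical addresses are 64-bit),
 * page-aligns it and ioremaps SNB_UNCORE_PCI_IMC_MAP_SIZE bytes so that the
 * free-running counters at offsets 0x5050/0x5054 can be read via MMIO.
 */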
static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET;
	resource_size_t addr;
	u32 pci_dword;

	pci_read_config_dword(pdev, where, &pci_dword);
	addr = pci_dword;

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	pci_read_config_dword(pdev, where + 4, &pci_dword);
	addr |= ((resource_size_t)pci_dword << 32);
#endif

	addr &= ~(PAGE_SIZE - 1);

	box->io_addr = ioremap(addr, SNB_UNCORE_PCI_IMC_MAP_SIZE);
	box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
}

static void snb_uncore_imc_exit_box(struct intel_uncore_box *box)
{
	iounmap(box->io_addr);
}
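/*
 * The IMC counters are free running and cannot be started, stopped or
 * reprogrammed, so the box/event enable and disable callbacks below are
 * deliberately empty; the read callback simply returns the current 32-bit
 * MMIO value at the event's base offset.
 */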
static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_disable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	return (u64)*(unsigned int *)(box->io_addr + hwc->event_base);
}

/*
 * custom event_init() function because we define our own fixed, free
 * running counters, so we do not want to conflict with generic uncore
 * logic. Also simplifies processing
 */
static int snb_uncore_imc_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK;
	int idx, base;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (pmu->func_id < 0)
		return -ENOENT;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;

	/* unsupported modes and filters */
	if (event->attr.exclude_user   ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv     ||
	    event->attr.exclude_idle   ||
	    event->attr.exclude_host   ||
	    event->attr.exclude_guest  ||
	    event->attr.sample_period) /* no sampling */
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu
	 */
	if (event->cpu < 0)
		return -EINVAL;

	/* check only supported bits are set */
	if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK)
		return -EINVAL;

	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;

	event->cpu = box->cpu;
	event->pmu_private = box;

	event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;

	/*
	 * check event is known (whitelist, determines counter)
	 */
	switch (cfg) {
	case SNB_UNCORE_PCI_IMC_DATA_READS:
		base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE;
		idx = UNCORE_PMC_IDX_FIXED;
		break;
	case SNB_UNCORE_PCI_IMC_DATA_WRITES:
		base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
		idx = UNCORE_PMC_IDX_FIXED + 1;
		break;
	default:
		return -EINVAL;
	}

	/* must be done before validate_group */
	event->hw.event_base = base;
	event->hw.config = cfg;
	event->hw.idx = idx;

	/* no group validation needed, we have free running counters */
	return 0;
}
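/*
 * From userspace, and assuming the uncore framework registers this PMU
 * under the usual "uncore_imc" sysfs name, the two whitelisted events can
 * be requested with something like:
 *
 *	perf stat -a -e 'uncore_imc/data_reads/,uncore_imc/data_writes/' -- sleep 1
 *
 * (illustrative command line, not part of this file)
 */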
static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	return 0;
}

static void snb_uncore_imc_event_start(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	u64 count;

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	event->hw.state = 0;
	box->n_active++;

	list_add_tail(&event->active_entry, &box->active_list);

	count = snb_uncore_imc_read_counter(box, event);
	local64_set(&event->hw.prev_count, count);

	if (box->n_active == 1)
		uncore_pmu_start_hrtimer(box);
}

static void snb_uncore_imc_event_stop(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	if (!(hwc->state & PERF_HES_STOPPED)) {
		box->n_active--;

		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;

		list_del(&event->active_entry);

		if (box->n_active == 0)
			uncore_pmu_cancel_hrtimer(box);
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		uncore_perf_event_update(box, event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

static int snb_uncore_imc_event_add(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	if (!box)
		return -ENODEV;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	snb_uncore_imc_event_start(event, 0);

	return 0;
}

static void snb_uncore_imc_event_del(struct perf_event *event, int flags)
{
	snb_uncore_imc_event_stop(event, PERF_EF_UPDATE);
}
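/*
 * Client parts have a single package, so snb_pci2phy_map_init() below only
 * has to record that the bus holding the IMC device maps to physical
 * package id 0.  It returns -ENOTTY when no matching PCI device is present,
 * which the probe loop further down uses to skip IMC ids that do not exist
 * on this system.
 */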
int snb_pci2phy_map_init(int devid)
{
	struct pci_dev *dev = NULL;
	struct pci2phy_map *map;
	int bus, segment;

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev);
	if (!dev)
		return -ENOTTY;

	bus = dev->bus->number;
	segment = pci_domain_nr(dev->bus);

	raw_spin_lock(&pci2phy_map_lock);
	map = __find_pci2phy_map(segment);
	if (!map) {
		raw_spin_unlock(&pci2phy_map_lock);
		pci_dev_put(dev);
		return -ENOMEM;
	}
	map->pbus_to_physid[bus] = 0;
	raw_spin_unlock(&pci2phy_map_lock);

	pci_dev_put(dev);

	return 0;
}
static struct pmu snb_uncore_imc_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= snb_uncore_imc_event_init,
	.add		= snb_uncore_imc_event_add,
	.del		= snb_uncore_imc_event_del,
	.start		= snb_uncore_imc_event_start,
	.stop		= snb_uncore_imc_event_stop,
	.read		= uncore_pmu_event_read,
};

static struct intel_uncore_ops snb_uncore_imc_ops = {
	.init_box	= snb_uncore_imc_init_box,
	.exit_box	= snb_uncore_imc_exit_box,
	.enable_box	= snb_uncore_imc_enable_box,
	.disable_box	= snb_uncore_imc_disable_box,
	.disable_event	= snb_uncore_imc_disable_event,
	.enable_event	= snb_uncore_imc_enable_event,
	.hw_config	= snb_uncore_imc_hw_config,
	.read_counter	= snb_uncore_imc_read_counter,
};

static struct intel_uncore_type snb_uncore_imc = {
	.name		= "imc",
	.num_counters   = 2,
	.num_boxes	= 1,
	.fixed_ctr_bits	= 32,
	.fixed_ctr	= SNB_UNCORE_PCI_IMC_CTR_BASE,
	.event_descs	= snb_uncore_imc_events,
	.format_group	= &snb_uncore_imc_format_group,
	.perf_ctr	= SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
	.event_mask	= SNB_UNCORE_PCI_IMC_EVENT_MASK,
	.ops		= &snb_uncore_imc_ops,
	.pmu		= &snb_uncore_imc_pmu,
};

static struct intel_uncore_type *snb_pci_uncores[] = {
	[SNB_PCI_UNCORE_IMC]	= &snb_uncore_imc,
	NULL,
};

static const struct pci_device_id snb_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SNB_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id ivb_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_E3_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id hsw_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id bdw_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id skl_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_Y_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HD_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SD_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static struct pci_driver snb_uncore_pci_driver = {
	.name		= "snb_uncore",
	.id_table	= snb_uncore_pci_ids,
};

static struct pci_driver ivb_uncore_pci_driver = {
	.name		= "ivb_uncore",
	.id_table	= ivb_uncore_pci_ids,
};

static struct pci_driver hsw_uncore_pci_driver = {
	.name		= "hsw_uncore",
	.id_table	= hsw_uncore_pci_ids,
};

static struct pci_driver bdw_uncore_pci_driver = {
	.name		= "bdw_uncore",
	.id_table	= bdw_uncore_pci_ids,
};

static struct pci_driver skl_uncore_pci_driver = {
	.name		= "skl_uncore",
	.id_table	= skl_uncore_pci_ids,
};

struct imc_uncore_pci_dev {
	__u32 pci_id;
	struct pci_driver *driver;
};
#define IMC_DEV(a, d) \
	{ .pci_id = PCI_DEVICE_ID_INTEL_##a, .driver = (d) }

static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
	IMC_DEV(SNB_IMC, &snb_uncore_pci_driver),
	IMC_DEV(IVB_IMC, &ivb_uncore_pci_driver),    /* 3rd Gen Core processor */
	IMC_DEV(IVB_E3_IMC, &ivb_uncore_pci_driver), /* Xeon E3-1200 v2/3rd Gen Core processor */
	IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver),    /* 4th Gen Core Processor */
	IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver),  /* 4th Gen Core ULT Mobile Processor */
	IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver),    /* 5th Gen Core U */
	IMC_DEV(SKL_Y_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core Y */
	IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core U */
	IMC_DEV(SKL_HD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Dual Core */
	IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Quad Core */
	IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Dual Core */
	IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Quad Core */
	{  /* end marker */ }
};

#define for_each_imc_pci_id(x, t) \
	for (x = (t); (x)->pci_id; x++)
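/*
 * imc_uncore_find_dev() walks the table of known IMC PCI device ids above,
 * uses snb_pci2phy_map_init() to check whether the device is actually
 * present on this system, and returns the matching pci_driver so that
 * imc_uncore_pci_init() can register it generically for all client parts.
 */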
static struct pci_driver *imc_uncore_find_dev(void)
{
	const struct imc_uncore_pci_dev *p;
	int ret;

	for_each_imc_pci_id(p, desktop_imc_pci_ids) {
		ret = snb_pci2phy_map_init(p->pci_id);
		if (ret == 0)
			return p->driver;
	}
	return NULL;
}

static int imc_uncore_pci_init(void)
{
	struct pci_driver *imc_drv = imc_uncore_find_dev();

	if (!imc_drv)
		return -ENODEV;

	uncore_pci_uncores = snb_pci_uncores;
	uncore_pci_driver = imc_drv;

	return 0;
}

int snb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int ivb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int hsw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int bdw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int skl_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

/* end of Sandy Bridge uncore support */

/* Nehalem uncore support */
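/*
 * Nehalem exposes a single uncore box with eight general-purpose counters
 * plus one fixed counter, all gated by NHM_UNC_PERF_GLOBAL_CTL: bits 0-7
 * enable the general counters and bit 32 enables the fixed counter.
 */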
static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
}

static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
}

static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
}

static struct attribute *nhm_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask8.attr,
	NULL,
};

static struct attribute_group nhm_uncore_format_group = {
	.name = "format",
	.attrs = nhm_uncore_formats_attr,
};

static struct uncore_event_desc nhm_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,                "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any,       "event=0x2f,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any,      "event=0x2c,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads,     "event=0x20,umask=0x01"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes,    "event=0x20,umask=0x02"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads,  "event=0x20,umask=0x04"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads,   "event=0x20,umask=0x10"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes,  "event=0x20,umask=0x20"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_ops nhm_uncore_msr_ops = {
	.disable_box	= nhm_uncore_msr_disable_box,
	.enable_box	= nhm_uncore_msr_enable_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= nhm_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct intel_uncore_type nhm_uncore = {
	.name		= "",
	.num_counters   = 8,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.event_ctl	= NHM_UNC_PERFEVTSEL0,
	.perf_ctr	= NHM_UNC_UNCORE_PMC0,
	.fixed_ctr	= NHM_UNC_FIXED_CTR,
	.fixed_ctl	= NHM_UNC_FIXED_CTR_CTRL,
	.event_mask	= NHM_UNC_RAW_EVENT_MASK,
	.event_descs	= nhm_uncore_events,
	.ops		= &nhm_uncore_msr_ops,
	.format_group	= &nhm_uncore_format_group,
};

static struct intel_uncore_type *nhm_msr_uncores[] = {
	&nhm_uncore,
	NULL,
};

void nhm_uncore_cpu_init(void)
{
	uncore_msr_uncores = nhm_msr_uncores;
}
/* end of Nehalem uncore support */