#include <linux/slab.h>
#include <linux/pci.h>
#include <asm/apicdef.h>
#include <linux/perf_event.h>
#include "../perf_event.h"

#define UNCORE_PMU_NAME_LEN		32
#define UNCORE_PMU_HRTIMER_INTERVAL	(60LL * NSEC_PER_SEC)
#define UNCORE_SNB_IMC_HRTIMER_INTERVAL (5ULL * NSEC_PER_SEC)

#define UNCORE_FIXED_EVENT		0xff
#define UNCORE_PMC_IDX_MAX_GENERIC	8
#define UNCORE_PMC_IDX_FIXED		UNCORE_PMC_IDX_MAX_GENERIC
#define UNCORE_PMC_IDX_MAX		(UNCORE_PMC_IDX_FIXED + 1)

#define UNCORE_PCI_DEV_FULL_DATA(dev, func, type, idx)	\
		((dev << 24) | (func << 16) | (type << 8) | idx)
#define UNCORE_PCI_DEV_DATA(type, idx)	((type << 8) | idx)
#define UNCORE_PCI_DEV_DEV(data)	((data >> 24) & 0xff)
#define UNCORE_PCI_DEV_FUNC(data)	((data >> 16) & 0xff)
#define UNCORE_PCI_DEV_TYPE(data)	((data >> 8) & 0xff)
#define UNCORE_PCI_DEV_IDX(data)	(data & 0xff)
#define UNCORE_EXTRA_PCI_DEV		0xff
#define UNCORE_EXTRA_PCI_DEV_MAX	3

#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)
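
/*
 * Illustrative example (not part of the original header): packing a PCI
 * device reference into a driver_data word with the macros above, then
 * unpacking it again.  The field values are made up.
 *
 *	unsigned int data = UNCORE_PCI_DEV_FULL_DATA(0x1e, 3, 1, 0);
 *
 *	UNCORE_PCI_DEV_DEV(data);	// 0x1e (device number)
 *	UNCORE_PCI_DEV_FUNC(data);	// 0x03 (function number)
 *	UNCORE_PCI_DEV_TYPE(data);	// 0x01 (uncore type index)
 *	UNCORE_PCI_DEV_IDX(data);	// 0x00 (box index)
 */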
struct pci_extra_dev {
	struct pci_dev *dev[UNCORE_EXTRA_PCI_DEV_MAX];
};

struct intel_uncore_ops;
struct intel_uncore_pmu;
struct intel_uncore_box;
struct uncore_event_desc;

struct intel_uncore_type {
	const char *name;
	int num_counters;
	int num_boxes;
	int perf_ctr_bits;
	int fixed_ctr_bits;
	unsigned perf_ctr;
	unsigned event_ctl;
	unsigned event_mask;
	unsigned event_mask_ext;
	unsigned fixed_ctr;
	unsigned fixed_ctl;
	unsigned box_ctl;
	unsigned msr_offset;
	unsigned num_shared_regs:8;
	unsigned single_fixed:1;
	unsigned pair_ctr_ctl:1;
	unsigned *msr_offsets;
	struct event_constraint unconstrainted;
	struct event_constraint *constraints;
	struct intel_uncore_pmu *pmus;
	struct intel_uncore_ops *ops;
	struct uncore_event_desc *event_descs;
	const struct attribute_group *attr_groups[4];
	struct pmu *pmu; /* for custom pmu ops */
};
#define pmu_group attr_groups[0]
#define format_group attr_groups[1]
#define events_group attr_groups[2]

struct intel_uncore_ops {
	void (*init_box)(struct intel_uncore_box *);
	void (*exit_box)(struct intel_uncore_box *);
	void (*disable_box)(struct intel_uncore_box *);
	void (*enable_box)(struct intel_uncore_box *);
	void (*disable_event)(struct intel_uncore_box *, struct perf_event *);
	void (*enable_event)(struct intel_uncore_box *, struct perf_event *);
	u64 (*read_counter)(struct intel_uncore_box *, struct perf_event *);
	int (*hw_config)(struct intel_uncore_box *, struct perf_event *);
	struct event_constraint *(*get_constraint)(struct intel_uncore_box *,
						   struct perf_event *);
	void (*put_constraint)(struct intel_uncore_box *, struct perf_event *);
};
struct intel_uncore_pmu {
	struct pmu pmu;
	char name[UNCORE_PMU_NAME_LEN];
	int pmu_idx;
	int func_id;
	bool registered;
	atomic_t activeboxes;
	struct intel_uncore_type *type;
	struct intel_uncore_box **boxes;
};

struct intel_uncore_extra_reg {
	raw_spinlock_t lock;
	u64 config, config1, config2;
	atomic_t ref;
};

struct intel_uncore_box {
	int pci_phys_id;
	int pkgid;
	int n_active;	/* number of active events */
	int n_events;
	int cpu;	/* cpu to collect events */
	unsigned long flags;
	atomic_t refcnt;
	struct perf_event *events[UNCORE_PMC_IDX_MAX];
	struct perf_event *event_list[UNCORE_PMC_IDX_MAX];
	struct event_constraint *event_constraint[UNCORE_PMC_IDX_MAX];
	unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	u64 tags[UNCORE_PMC_IDX_MAX];
	struct pci_dev *pci_dev;
	struct intel_uncore_pmu *pmu;
	u64 hrtimer_duration; /* hrtimer timeout for this box */
	struct hrtimer hrtimer;
	struct list_head list;
	struct list_head active_list;
	void *io_addr;
	struct intel_uncore_extra_reg shared_regs[0];
};
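
/*
 * Note (illustrative sketch of how a caller sizes the allocation):
 * shared_regs is a flexible array, so a box must be allocated with room
 * for type->num_shared_regs trailing extra-reg slots, roughly:
 *
 *	int size = sizeof(struct intel_uncore_box) +
 *		   type->num_shared_regs * sizeof(struct intel_uncore_extra_reg);
 *	box = kzalloc_node(size, GFP_KERNEL, node);
 */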
#define UNCORE_BOX_FLAG_INITIATED	0
#define UNCORE_BOX_FLAG_CTL_OFFS8	1 /* event config registers are 8 bytes apart */

struct uncore_event_desc {
	struct kobj_attribute attr;
	const char *config;
};

struct pci2phy_map {
	struct list_head list;
	int segment;
	int pbus_to_physid[256];
};
struct pci2phy_map *__find_pci2phy_map(int segment);

ssize_t uncore_event_show(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf);

#define INTEL_UNCORE_EVENT_DESC(_name, _config)			\
{									\
	.attr	= __ATTR(_name, 0444, uncore_event_show, NULL),		\
	.config	= _config,						\
}
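
/*
 * Example usage (illustrative; mirrors how a driver defines its
 * NULL-terminated event list):
 *
 *	static struct uncore_event_desc snb_uncore_events[] = {
 *		INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff"),
 *		{ },	// terminating all-zero entry
 *	};
 */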
#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format)			\
static ssize_t __uncore_##_var##_show(struct kobject *kobj,		\
				      struct kobj_attribute *attr,	\
				      char *page)			\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
static struct kobj_attribute format_attr_##_var =			\
	__ATTR(_name, 0444, __uncore_##_var##_show, NULL)
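
/*
 * Example usage (illustrative; the "config:0-7" bit-range string follows
 * the perf sysfs format convention):
 *
 *	DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
 *
 *	static struct attribute *snb_uncore_formats_attr[] = {
 *		&format_attr_event.attr,
 *		NULL,
 *	};
 */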
static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->box_ctl;
}

static inline unsigned uncore_pci_fixed_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctl;
}

static inline unsigned uncore_pci_fixed_ctr(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr;
}

static inline
unsigned uncore_pci_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (test_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags))
		return idx * 8 + box->pmu->type->event_ctl;
	return idx * 4 + box->pmu->type->event_ctl;
}

static inline
unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx)
{
	return idx * 8 + box->pmu->type->perf_ctr;
}
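
/*
 * Worked example (illustrative register values): for a PCI box whose
 * control registers start at 0xd8 with the default 4-byte stride,
 * counter 2's control register lives at 0xd8 + 2 * 4 = 0xe0; with
 * UNCORE_BOX_FLAG_CTL_OFFS8 set the stride doubles, giving
 * 0xd8 + 2 * 8 = 0xe8.  The counters themselves are 64-bit, hence the
 * fixed 8-byte stride in uncore_pci_perf_ctr().
 */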
static inline unsigned uncore_msr_box_offset(struct intel_uncore_box *box)
{
	struct intel_uncore_pmu *pmu = box->pmu;

	return pmu->type->msr_offsets ?
		pmu->type->msr_offsets[pmu->pmu_idx] :
		pmu->type->msr_offset * pmu->pmu_idx;
}

static inline unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
{
	if (!box->pmu->type->box_ctl)
		return 0;
	return box->pmu->type->box_ctl + uncore_msr_box_offset(box);
}

static inline unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
{
	if (!box->pmu->type->fixed_ctl)
		return 0;
	return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box);
}

static inline unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box);
}

static inline
unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
{
	return box->pmu->type->event_ctl +
		(box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
		uncore_msr_box_offset(box);
}

static inline
unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
{
	return box->pmu->type->perf_ctr +
		(box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
		uncore_msr_box_offset(box);
}
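
/*
 * Worked example (purely illustrative MSR numbers): an MSR address is
 * composed as type base + per-counter step + per-box offset.  With
 * perf_ctr = 0xd16, msr_offset = 0x20 and pair_ctr_ctl clear, counter 2
 * of box 1 sits at 0xd16 + 2 + 1 * 0x20 = 0xd38.  When pair_ctr_ctl is
 * set, control and counter MSRs are interleaved, so the per-counter
 * step doubles to 2 * idx.
 */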
static inline
unsigned uncore_fixed_ctl(struct intel_uncore_box *box)
{
	if (box->pci_dev)
		return uncore_pci_fixed_ctl(box);
	else
		return uncore_msr_fixed_ctl(box);
}

static inline
unsigned uncore_fixed_ctr(struct intel_uncore_box *box)
{
	if (box->pci_dev)
		return uncore_pci_fixed_ctr(box);
	else
		return uncore_msr_fixed_ctr(box);
}

static inline
unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (box->pci_dev)
		return uncore_pci_event_ctl(box, idx);
	else
		return uncore_msr_event_ctl(box, idx);
}

static inline
unsigned uncore_perf_ctr(struct intel_uncore_box *box, int idx)
{
	if (box->pci_dev)
		return uncore_pci_perf_ctr(box, idx);
	else
		return uncore_msr_perf_ctr(box, idx);
}
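
/*
 * A box is either PCI-based (box->pci_dev set) or MSR-based, and the
 * helpers above route a counter index to the right register space.
 * A minimal sketch of an MSR-side read, assuming event->hw.idx holds
 * the counter index:
 *
 *	u64 count;
 *
 *	rdmsrl(uncore_msr_perf_ctr(box, event->hw.idx), count);
 */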
static inline int uncore_perf_ctr_bits(struct intel_uncore_box *box)
{
	return box->pmu->type->perf_ctr_bits;
}

static inline int uncore_fixed_ctr_bits(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr_bits;
}

static inline int uncore_num_counters(struct intel_uncore_box *box)
{
	return box->pmu->type->num_counters;
}

static inline void uncore_disable_box(struct intel_uncore_box *box)
{
	if (box->pmu->type->ops->disable_box)
		box->pmu->type->ops->disable_box(box);
}

static inline void uncore_enable_box(struct intel_uncore_box *box)
{
	if (box->pmu->type->ops->enable_box)
		box->pmu->type->ops->enable_box(box);
}

static inline void uncore_disable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	box->pmu->type->ops->disable_event(box, event);
}

static inline void uncore_enable_event(struct intel_uncore_box *box,
				       struct perf_event *event)
{
	box->pmu->type->ops->enable_event(box, event);
}

static inline u64 uncore_read_counter(struct intel_uncore_box *box,
				      struct perf_event *event)
{
	return box->pmu->type->ops->read_counter(box, event);
}

static inline void uncore_box_init(struct intel_uncore_box *box)
{
	if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
		if (box->pmu->type->ops->init_box)
			box->pmu->type->ops->init_box(box);
	}
}

static inline void uncore_box_exit(struct intel_uncore_box *box)
{
	if (test_and_clear_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
		if (box->pmu->type->ops->exit_box)
			box->pmu->type->ops->exit_box(box);
	}
}

static inline bool uncore_box_is_fake(struct intel_uncore_box *box)
{
	return (box->pkgid < 0);
}

static inline struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
{
	return container_of(event->pmu, struct intel_uncore_pmu, pmu);
}

static inline struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
{
	return event->pmu_private;
}

struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu);
u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event);
void uncore_pmu_start_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_event_read(struct perf_event *event);
void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event);
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event);
void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event);
u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx);

extern struct intel_uncore_type **uncore_msr_uncores;
extern struct intel_uncore_type **uncore_pci_uncores;
extern struct pci_driver *uncore_pci_driver;
extern raw_spinlock_t pci2phy_map_lock;
extern struct list_head pci2phy_map_head;
extern struct pci_extra_dev *uncore_extra_pci_dev;
extern struct event_constraint uncore_constraint_empty;

/* perf_event_intel_uncore_snb.c */
int snb_uncore_pci_init(void);
int ivb_uncore_pci_init(void);
int hsw_uncore_pci_init(void);
int bdw_uncore_pci_init(void);
int skl_uncore_pci_init(void);
void snb_uncore_cpu_init(void);
void nhm_uncore_cpu_init(void);
void skl_uncore_cpu_init(void);
int snb_pci2phy_map_init(int devid);

/* perf_event_intel_uncore_snbep.c */
int snbep_uncore_pci_init(void);
void snbep_uncore_cpu_init(void);
int ivbep_uncore_pci_init(void);
void ivbep_uncore_cpu_init(void);
int hswep_uncore_pci_init(void);
void hswep_uncore_cpu_init(void);
int bdx_uncore_pci_init(void);
void bdx_uncore_cpu_init(void);
int knl_uncore_pci_init(void);
void knl_uncore_cpu_init(void);
int skx_uncore_pci_init(void);
void skx_uncore_cpu_init(void);

/* perf_event_intel_uncore_nhmex.c */
void nhmex_uncore_cpu_init(void);