perf_event.h

/*
 * Performance events x86 architecture header
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2009 Jaswinder Singh Rajput
 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 * Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 * For licencing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>

/* To enable MSR tracing please use the generic trace points. */

/*
 *          |   NHM/WSM    |      SNB     |
 * register -------------------------------
 *          |  HT  | no HT |  HT  | no HT |
 *-----------------------------------------
 * offcore  | core | core  | cpu  | core  |
 * lbr_sel  | core | core  | cpu  | core  |
 * ld_lat   | cpu  | core  | cpu  | core  |
 *-----------------------------------------
 *
 * Given that there is a small number of shared regs,
 * we can pre-allocate their slot in the per-cpu
 * per-core reg tables.
 */
enum extra_reg_type {
	EXTRA_REG_NONE  = -1,	/* not used */

	EXTRA_REG_RSP_0 = 0,	/* offcore_response_0 */
	EXTRA_REG_RSP_1 = 1,	/* offcore_response_1 */
	EXTRA_REG_LBR   = 2,	/* lbr_select */
	EXTRA_REG_LDLAT = 3,	/* ld_lat_threshold */
	EXTRA_REG_FE    = 4,	/* fe_* */

	EXTRA_REG_MAX		/* number of entries needed */
};

struct event_constraint {
	union {
		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
		u64		idxmsk64;
	};
	u64	code;
	u64	cmask;
	int	weight;
	int	overlap;
	int	flags;
};

/*
 * struct hw_perf_event.flags flags
 */
#define PERF_X86_EVENT_PEBS_LDLAT	0x0001 /* ld+ldlat data address sampling */
#define PERF_X86_EVENT_PEBS_ST		0x0002 /* st data address sampling */
#define PERF_X86_EVENT_PEBS_ST_HSW	0x0004 /* haswell style datala, store */
#define PERF_X86_EVENT_COMMITTED	0x0008 /* event passed commit_txn */
#define PERF_X86_EVENT_PEBS_LD_HSW	0x0010 /* haswell style datala, load */
#define PERF_X86_EVENT_PEBS_NA_HSW	0x0020 /* haswell style datala, unknown */
#define PERF_X86_EVENT_EXCL		0x0040 /* HT exclusivity on counter */
#define PERF_X86_EVENT_DYNAMIC		0x0080 /* dynamic alloc'd constraint */
#define PERF_X86_EVENT_RDPMC_ALLOWED	0x0100 /* grant rdpmc permission */
#define PERF_X86_EVENT_EXCL_ACCT	0x0200 /* accounted EXCL event */
#define PERF_X86_EVENT_AUTO_RELOAD	0x0400 /* use PEBS auto-reload */
#define PERF_X86_EVENT_FREERUNNING	0x0800 /* use freerunning PEBS */

struct amd_nb {
	int nb_id;  /* NorthBridge id */
	int refcnt; /* reference count */
	struct perf_event *owners[X86_PMC_IDX_MAX];
	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};
/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS		8

/*
 * Flags PEBS can handle without a PMI.
 *
 * TID can only be handled by flushing at context switch.
 *
 */
#define PEBS_FREERUNNING_FLAGS \
	(PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ADDR | \
	PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID | \
	PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER | \
	PERF_SAMPLE_TRANSACTION)
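
/*
 * Illustrative sketch (not part of the original header): freerunning/large
 * PEBS is only usable when every requested sample_type bit is covered by the
 * mask above, i.e. nothing in the sample requires a PMI to capture. A check
 * of that shape could look like:
 *
 *	if (!(event->attr.sample_type & ~PEBS_FREERUNNING_FLAGS))
 *		event->hw.flags |= PERF_X86_EVENT_FREERUNNING;
 */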

/*
 * A debug store configuration.
 *
 * We only support architectures that use 64bit fields.
 */
struct debug_store {
	u64	bts_buffer_base;
	u64	bts_index;
	u64	bts_absolute_maximum;
	u64	bts_interrupt_threshold;
	u64	pebs_buffer_base;
	u64	pebs_index;
	u64	pebs_absolute_maximum;
	u64	pebs_interrupt_threshold;
	u64	pebs_event_reset[MAX_PEBS_EVENTS];
};

/*
 * Per register state.
 */
struct er_account {
	raw_spinlock_t	lock;	/* per-core: protect structure */
	u64		config;	/* extra MSR config */
	u64		reg;	/* extra MSR number */
	atomic_t	ref;	/* reference count */
};

/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */
struct intel_shared_regs {
	struct er_account	regs[EXTRA_REG_MAX];
	int			refcnt;		/* per-core: #HT threads */
	unsigned		core_id;	/* per-core: core id */
};

enum intel_excl_state_type {
	INTEL_EXCL_UNUSED    = 0, /* counter is unused */
	INTEL_EXCL_SHARED    = 1, /* counter can be used by both threads */
	INTEL_EXCL_EXCLUSIVE = 2, /* counter can be used by one thread only */
};

struct intel_excl_states {
	enum intel_excl_state_type state[X86_PMC_IDX_MAX];
	bool sched_started; /* true if scheduling has started */
};

struct intel_excl_cntrs {
	raw_spinlock_t	lock;

	struct intel_excl_states states[2];

	union {
		u16	has_exclusive[2];
		u32	exclusive_present;
	};

	int		refcnt;		/* per-core: #HT threads */
	unsigned	core_id;	/* per-core: core id */
};

#define MAX_LBR_ENTRIES		32

enum {
	X86_PERF_KFREE_SHARED = 0,
	X86_PERF_KFREE_EXCL   = 1,

	X86_PERF_KFREE_MAX
};
struct cpu_hw_events {
	/*
	 * Generic x86 PMC bits
	 */
	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int			enabled;

	int			n_events; /* the # of events in the below arrays */
	int			n_added;  /* the # last events in the below arrays;
					     they've never been enabled yet */
	int			n_txn;    /* the # last events in the below arrays;
					     added in the current transaction */
	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
	u64			tags[X86_PMC_IDX_MAX];

	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */
	struct event_constraint	*event_constraint[X86_PMC_IDX_MAX];

	int			n_excl; /* the number of exclusive events */

	unsigned int		txn_flags;
	int			is_fake;

	/*
	 * Intel DebugStore bits
	 */
	struct debug_store	*ds;
	u64			pebs_enabled;
	int			n_pebs;
	int			n_large_pebs;

	/*
	 * Intel LBR bits
	 */
	int				lbr_users;
	struct perf_branch_stack	lbr_stack;
	struct perf_branch_entry	lbr_entries[MAX_LBR_ENTRIES];
	struct er_account		*lbr_sel;
	u64				br_sel;

	/*
	 * Intel host/guest exclude bits
	 */
	u64				intel_ctrl_guest_mask;
	u64				intel_ctrl_host_mask;
	struct perf_guest_switch_msr	guest_switch_msrs[X86_PMC_IDX_MAX];

	/*
	 * Intel checkpoint mask
	 */
	u64				intel_cp_status;

	/*
	 * manage shared (per-core, per-cpu) registers
	 * used on Intel NHM/WSM/SNB
	 */
	struct intel_shared_regs	*shared_regs;
	/*
	 * manage exclusive counter access between hyperthreads
	 */
	struct event_constraint *constraint_list; /* in enable order */
	struct intel_excl_cntrs	*excl_cntrs;
	int excl_thread_id; /* 0 or 1 */

	/*
	 * AMD specific bits
	 */
	struct amd_nb			*amd_nb;
	/* Inverted mask of bits to clear in the perf_ctr ctrl registers */
	u64				perf_ctr_virt_mask;

	void				*kfree_on_online[X86_PERF_KFREE_MAX];
};
#define __EVENT_CONSTRAINT(c, n, m, w, o, f) {\
	{ .idxmsk64 = (n) },		\
	.code = (c),			\
	.cmask = (m),			\
	.weight = (w),			\
	.overlap = (o),			\
	.flags = f,			\
}

#define EVENT_CONSTRAINT(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)

#define INTEL_EXCLEVT_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\
			   0, PERF_X86_EVENT_EXCL)

/*
 * The overlap flag marks event constraints with overlapping counter
 * masks. This is the case if the counter mask of such an event is not
 * a subset of any other counter mask of a constraint with an equal or
 * higher weight, e.g.:
 *
 *  c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
 *  c_another1 = EVENT_CONSTRAINT(0, 0x07, 0);
 *  c_another2 = EVENT_CONSTRAINT(0, 0x38, 0);
 *
 * The event scheduler may not select the correct counter in the first
 * cycle because it needs to know which subsequent events will be
 * scheduled. It may fail to schedule the events then. So we set the
 * overlap flag for such constraints to give the scheduler a hint which
 * events to select for counter rescheduling.
 *
 * Care must be taken as the rescheduling algorithm is O(n!) which
 * will increase scheduling cycles for an over-committed system
 * dramatically. The number of such EVENT_CONSTRAINT_OVERLAP() macros
 * and its counter masks must be kept at a minimum.
 */
#define EVENT_CONSTRAINT_OVERLAP(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1, 0)

/*
 * Constraint on the Event code.
 */
#define INTEL_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)

/*
 * Constraint on the Event code + UMask + fixed-mask
 *
 * filter mask to validate fixed counter events.
 * the following filters disqualify for fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 *  - in_tx
 *  - in_tx_checkpointed
 * The other filters are supported by fixed counters.
 * The any-thread option is supported starting with v3.
 */
#define FIXED_EVENT_FLAGS (X86_RAW_EVENT_MASK|HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)
#define FIXED_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, (1ULL << (32+n)), FIXED_EVENT_FLAGS)
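
/*
 * Illustrative note (not part of the original header): fixed counters sit
 * above the general-purpose counters in the generic index space, so bit
 * (32 + n) in the index mask selects fixed counter n. A typical entry in an
 * Intel model's constraint table looks roughly like:
 *
 *	FIXED_EVENT_CONSTRAINT(0x00c0, 0),	(INST_RETIRED.ANY)
 *	FIXED_EVENT_CONSTRAINT(0x003c, 1),	(CPU_CLK_UNHALTED.CORE)
 */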

/*
 * Constraint on the Event code + UMask
 */
#define INTEL_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)

/* Constraint on specific umask bit only + event */
#define INTEL_UBIT_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|(c))

/* Like UEVENT_CONSTRAINT, but match flags too */
#define INTEL_FLAGS_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)

#define INTEL_EXCLUEVT_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_EXCL)

#define INTEL_PLD_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT)

#define INTEL_PST_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST)

/* Event constraint, but match on all event flags too. */
#define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)

/* Check only flags, but allow all event/umask */
#define INTEL_ALL_EVENT_CONSTRAINT(code, n)	\
	EVENT_CONSTRAINT(code, n, X86_ALL_EVENT_FLAGS)

/* Check flags and event code, and set the HSW store flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_ST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			   ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

/* Check flags and event code, and set the HSW load flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			   ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			   ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, \
			   PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW store flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			   INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			   INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, \
			   PERF_X86_EVENT_PEBS_ST_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW load flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			   INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			   INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, \
			   PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW N/A flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			   INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)

/*
 * We define the end marker as having a weight of -1
 * to enable blacklisting of events using a counter bitmask
 * of zero and thus a weight of zero.
 * The end marker has a weight that cannot possibly be
 * obtained from counting the bits in the bitmask.
 */
#define EVENT_CONSTRAINT_END { .weight = -1 }

/*
 * Check for end marker with weight == -1
 */
#define for_each_event_constraint(e, c)	\
	for ((e) = (c); (e)->weight != -1; (e)++)
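
/*
 * Illustrative sketch (not part of the original header): a constraint table
 * is a plain array terminated by EVENT_CONSTRAINT_END, so a lookup loop
 * typically has this shape:
 *
 *	struct event_constraint *c;
 *
 *	for_each_event_constraint(c, x86_pmu.event_constraints) {
 *		if ((event->hw.config & c->cmask) == c->code)
 *			return c;
 *	}
 */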

/*
 * Extra registers for specific events.
 *
 * Some events need large masks and require external MSRs.
 * Those extra MSRs end up being shared for all events on
 * a PMU and sometimes between PMU of sibling HT threads.
 * In either case, the kernel needs to handle conflicting
 * accesses to those extra, shared, regs. The data structure
 * to manage those registers is stored in cpu_hw_event.
 */
struct extra_reg {
	unsigned int		event;
	unsigned int		msr;
	u64			config_mask;
	u64			valid_mask;
	int			idx;  /* per_xxx->regs[] reg index */
	bool			extra_msr_access;
};

#define EVENT_EXTRA_REG(e, ms, m, vm, i) {	\
	.event = (e),			\
	.msr = (ms),			\
	.config_mask = (m),		\
	.valid_mask = (vm),		\
	.idx = EXTRA_REG_##i,		\
	.extra_msr_access = true,	\
	}

#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)	\
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)

#define INTEL_UEVENT_EXTRA_REG(event, msr, vm, idx) \
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT | \
			ARCH_PERFMON_EVENTSEL_UMASK, vm, idx)

#define INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(c) \
	INTEL_UEVENT_EXTRA_REG(c, \
			       MSR_PEBS_LD_LAT_THRESHOLD, \
			       0xffff, \
			       LDLAT)

#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)
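
/*
 * Illustrative sketch (not part of the original header): a model's extra_regs
 * table is an array of these entries terminated by EVENT_EXTRA_END, e.g.
 * (the event codes and valid masks below are only indicative):
 *
 *	static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
 *		INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
 *		INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
 *		INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
 *		EVENT_EXTRA_END
 *	};
 */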

union perf_capabilities {
	struct {
		u64	lbr_format:6;
		u64	pebs_trap:1;
		u64	pebs_arch_reg:1;
		u64	pebs_format:4;
		u64	smm_freeze:1;
		/*
		 * PMU supports separate counter range for writing
		 * values > 32bit.
		 */
		u64	full_width_write:1;
	};
	u64	capabilities;
};

struct x86_pmu_quirk {
	struct x86_pmu_quirk *next;
	void (*func)(void);
};

union x86_pmu_config {
	struct {
		u64 event:8,
		    umask:8,
		    usr:1,
		    os:1,
		    edge:1,
		    pc:1,
		    interrupt:1,
		    __reserved1:1,
		    en:1,
		    inv:1,
		    cmask:8,
		    event2:4,
		    __reserved2:4,
		    go:1,
		    ho:1;
	} bits;
	u64 value;
};

#define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value
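
/*
 * Illustrative sketch (not part of the original header): X86_CONFIG() builds
 * a raw event-select value from named bitfields, so model code can write e.g.
 *
 *	event->hw.config = X86_CONFIG(.event = 0xc0, .umask = 0x01,
 *				      .inv = 1, .cmask = 16);
 *
 * instead of open-coding the bit layout of the MSR.
 */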

enum {
	x86_lbr_exclusive_lbr,
	x86_lbr_exclusive_bts,
	x86_lbr_exclusive_pt,
	x86_lbr_exclusive_max,
};

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	/*
	 * Generic x86 PMC bits
	 */
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(int added);
	void		(*enable)(struct perf_event *);
	void		(*disable)(struct perf_event *);
	void		(*add)(struct perf_event *);
	void		(*del)(struct perf_event *);
	int		(*hw_config)(struct perf_event *event);
	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
	unsigned	eventsel;
	unsigned	perfctr;
	int		(*addr_offset)(int index, bool eventsel);
	int		(*rdpmc_index)(int index);
	u64		(*event_map)(int);
	int		max_events;
	int		num_counters;
	int		num_counters_fixed;
	int		cntval_bits;
	u64		cntval_mask;
	union {
			unsigned long events_maskl;
			unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)];
	};
	int		events_mask_len;
	int		apic;
	u64		max_period;
	struct event_constraint *
			(*get_event_constraints)(struct cpu_hw_events *cpuc,
						 int idx,
						 struct perf_event *event);

	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);

	void		(*start_scheduling)(struct cpu_hw_events *cpuc);

	void		(*commit_scheduling)(struct cpu_hw_events *cpuc, int idx, int cntr);

	void		(*stop_scheduling)(struct cpu_hw_events *cpuc);

	struct event_constraint *event_constraints;
	struct x86_pmu_quirk *quirks;
	int		perfctr_second_write;
	bool		late_ack;
	u64		(*limit_period)(struct perf_event *event, u64 l);

	/*
	 * sysfs attrs
	 */
	int		attr_rdpmc_broken;
	int		attr_rdpmc;
	struct attribute **format_attrs;
	struct attribute **event_attrs;

	ssize_t		(*events_sysfs_show)(char *page, u64 config);
	struct attribute **cpu_events;

	/*
	 * CPU Hotplug hooks
	 */
	int		(*cpu_prepare)(int cpu);
	void		(*cpu_starting)(int cpu);
	void		(*cpu_dying)(int cpu);
	void		(*cpu_dead)(int cpu);

	void		(*check_microcode)(void);
	void		(*sched_task)(struct perf_event_context *ctx,
				      bool sched_in);

	/*
	 * Intel Arch Perfmon v2+
	 */
	u64		intel_ctrl;
	union perf_capabilities intel_cap;

	/*
	 * Intel DebugStore bits
	 */
	unsigned int	bts		:1,
			bts_active	:1,
			pebs		:1,
			pebs_active	:1,
			pebs_broken	:1,
			pebs_prec_dist	:1;
	int		pebs_record_size;
	int		pebs_buffer_size;
	void		(*drain_pebs)(struct pt_regs *regs);
	struct event_constraint *pebs_constraints;
	void		(*pebs_aliases)(struct perf_event *event);
	int		max_pebs_events;
	unsigned long	free_running_flags;

	/*
	 * Intel LBR
	 */
	unsigned long	lbr_tos, lbr_from, lbr_to; /* MSR base regs */
	int		lbr_nr;			   /* hardware stack size */
	u64		lbr_sel_mask;		   /* LBR_SELECT valid bits */
	const int	*lbr_sel_map;		   /* lbr_select mappings */
	bool		lbr_double_abort;	   /* duplicated lbr aborts */
	bool		lbr_pt_coexist;		   /* (LBR|BTS) may coexist with PT */

	/*
	 * Intel PT/LBR/BTS are exclusive
	 */
	atomic_t	lbr_exclusive[x86_lbr_exclusive_max];

	/*
	 * AMD bits
	 */
	unsigned int	amd_nb_constraints : 1;

	/*
	 * Extra registers for events
	 */
	struct extra_reg *extra_regs;
	unsigned int flags;

	/*
	 * Intel host/guest support (KVM)
	 */
	struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
};

struct x86_perf_task_context {
	u64 lbr_from[MAX_LBR_ENTRIES];
	u64 lbr_to[MAX_LBR_ENTRIES];
	u64 lbr_info[MAX_LBR_ENTRIES];
	int tos;
	int lbr_callstack_users;
	int lbr_stack_state;
};

#define x86_add_quirk(func_)						\
do {									\
	static struct x86_pmu_quirk __quirk __initdata = {		\
		.func = func_,						\
	};								\
	__quirk.next = x86_pmu.quirks;					\
	x86_pmu.quirks = &__quirk;					\
} while (0)
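
/*
 * Illustrative sketch (not part of the original header): quirks are queued
 * from a model's init path and run once during PMU setup, e.g. (the quirk
 * name below is only an example):
 *
 *	static __init void intel_clovertown_quirk(void)
 *	{
 *		... adjust x86_pmu for the erratum ...
 *	}
 *
 *	x86_add_quirk(intel_clovertown_quirk);
 */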

/*
 * x86_pmu flags
 */
#define PMU_FL_NO_HT_SHARING	0x1 /* no hyper-threading resource sharing */
#define PMU_FL_HAS_RSP_1	0x2 /* has 2 equivalent offcore_rsp regs */
#define PMU_FL_EXCL_CNTRS	0x4 /* has exclusive counter requirements  */
#define PMU_FL_EXCL_ENABLED	0x8 /* exclusive counter active */

#define EVENT_VAR(_id)  event_attr_##_id
#define EVENT_PTR(_id) &event_attr_##_id.attr.attr

#define EVENT_ATTR(_name, _id)						\
static struct perf_pmu_events_attr EVENT_VAR(_id) = {			\
	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
	.id		= PERF_COUNT_HW_##_id,				\
	.event_str	= NULL,						\
};

#define EVENT_ATTR_STR(_name, v, str)					\
static struct perf_pmu_events_attr event_attr_##v = {			\
	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
	.id		= 0,						\
	.event_str	= str,						\
};
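
/*
 * Illustrative sketch (not part of the original header): model code uses
 * these to export named events through sysfs, e.g.
 *
 *	EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
 *
 * which then shows up as /sys/bus/event_source/devices/cpu/events/mem-loads.
 */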

#define EVENT_ATTR_STR_HT(_name, v, noht, ht)				\
static struct perf_pmu_events_ht_attr event_attr_##v = {		\
	.attr		= __ATTR(_name, 0444, events_ht_sysfs_show, NULL),\
	.id		= 0,						\
	.event_str_noht	= noht,						\
	.event_str_ht	= ht,						\
}

extern struct x86_pmu x86_pmu __read_mostly;

static inline bool x86_pmu_has_lbr_callstack(void)
{
	return  x86_pmu.lbr_sel_map &&
		x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0;
}

DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

int x86_perf_event_set_period(struct perf_event *event);

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

extern u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
extern u64 __read_mostly hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
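
/*
 * Illustrative sketch (not part of the original header): model code fills
 * and indexes these tables with the C() shorthand, e.g.
 *
 *	hw_cache_event_ids[C(LL)][C(OP_READ)][C(RESULT_MISS)] = 0x412e;
 *
 * The raw event value above is only indicative; each model file supplies
 * its own encodings.
 */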

u64 x86_perf_event_update(struct perf_event *event);

static inline unsigned int x86_pmu_config_addr(int index)
{
	return x86_pmu.eventsel + (x86_pmu.addr_offset ?
				   x86_pmu.addr_offset(index, true) : index);
}

static inline unsigned int x86_pmu_event_addr(int index)
{
	return x86_pmu.perfctr + (x86_pmu.addr_offset ?
				  x86_pmu.addr_offset(index, false) : index);
}

static inline int x86_pmu_rdpmc_index(int index)
{
	return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
}
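
/*
 * Illustrative note (not part of the original header): on a plain
 * architectural PMU, x86_pmu.eventsel is MSR_ARCH_PERFMON_EVENTSEL0 and
 * x86_pmu.perfctr is MSR_ARCH_PERFMON_PERFCTR0, and the MSRs are contiguous,
 * so with no addr_offset() hook x86_pmu_config_addr(1) is simply
 * MSR_ARCH_PERFMON_EVENTSEL0 + 1. Models whose counter MSRs are not
 * contiguous provide addr_offset() to map the index instead.
 */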

int x86_add_exclusive(unsigned int what);

void x86_del_exclusive(unsigned int what);

int x86_reserve_hardware(void);

void x86_release_hardware(void);

void hw_perf_lbr_event_destroy(struct perf_event *event);

int x86_setup_perfctr(struct perf_event *event);

int x86_pmu_hw_config(struct perf_event *event);

void x86_pmu_disable_all(void);

static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
					  u64 enable_mask)
{
	u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);

	if (hwc->extra_reg.reg)
		wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
	wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
}

void x86_pmu_enable_all(int added);

int perf_assign_events(struct event_constraint **constraints, int n,
			int wmin, int wmax, int gpmax, int *assign);

int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);

void x86_pmu_stop(struct perf_event *event, int flags);

static inline void x86_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}

void x86_pmu_enable_event(struct perf_event *event);

int x86_pmu_handle_irq(struct pt_regs *regs);

extern struct event_constraint emptyconstraint;

extern struct event_constraint unconstrained;

static inline bool kernel_ip(unsigned long ip)
{
#ifdef CONFIG_X86_32
	return ip > PAGE_OFFSET;
#else
	return (long)ip < 0;
#endif
}
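
/*
 * Illustrative note (not part of the original header): on 64-bit, kernel
 * text and data live in the upper half of the canonical address space, so
 * the sign bit of the address distinguishes the two, e.g.
 *
 *	kernel_ip(0xffffffff81000000)	-> true  (kernel mapping)
 *	kernel_ip(0x00007f1234567890)	-> false (user mapping)
 *
 * On 32-bit the split is at PAGE_OFFSET instead.
 */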

/*
 * Not all PMUs provide the right context information to place the reported IP
 * into full context. Specifically segment registers are typically not
 * supplied.
 *
 * Assuming the address is a linear address (it is for IBS), we fake the CS and
 * vm86 mode using the known zero-based code segment and 'fix up' the registers
 * to reflect this.
 *
 * Intel PEBS/LBR appear to typically provide the effective address, nothing
 * much we can do about that but pray and treat it like a linear address.
 */
static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip)
{
	regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS;
	if (regs->flags & X86_VM_MASK)
		regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
	regs->ip = ip;
}

ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event);
ssize_t intel_event_sysfs_show(char *page, u64 config);

struct attribute **merge_attr(struct attribute **a, struct attribute **b);

ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
			  char *page);
ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
			  char *page);

#ifdef CONFIG_CPU_SUP_AMD

int amd_pmu_init(void);

#else /* CONFIG_CPU_SUP_AMD */

static inline int amd_pmu_init(void)
{
	return 0;
}

#endif /* CONFIG_CPU_SUP_AMD */

#ifdef CONFIG_CPU_SUP_INTEL

static inline bool intel_pmu_has_bts(struct perf_event *event)
{
	if (event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
	    !event->attr.freq && event->hw.sample_period == 1)
		return true;

	return false;
}

int intel_pmu_save_and_restart(struct perf_event *event);

struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
			  struct perf_event *event);

struct intel_shared_regs *allocate_shared_regs(int cpu);

int intel_pmu_init(void);

void init_debug_store_on_cpu(int cpu);

void fini_debug_store_on_cpu(int cpu);

void release_ds_buffers(void);

void reserve_ds_buffers(void);

extern struct event_constraint bts_constraint;

void intel_pmu_enable_bts(u64 config);

void intel_pmu_disable_bts(void);

int intel_pmu_drain_bts_buffer(void);

extern struct event_constraint intel_core2_pebs_event_constraints[];
extern struct event_constraint intel_atom_pebs_event_constraints[];
extern struct event_constraint intel_slm_pebs_event_constraints[];
extern struct event_constraint intel_glm_pebs_event_constraints[];
extern struct event_constraint intel_nehalem_pebs_event_constraints[];
extern struct event_constraint intel_westmere_pebs_event_constraints[];
extern struct event_constraint intel_snb_pebs_event_constraints[];
extern struct event_constraint intel_ivb_pebs_event_constraints[];
extern struct event_constraint intel_hsw_pebs_event_constraints[];
extern struct event_constraint intel_bdw_pebs_event_constraints[];
extern struct event_constraint intel_skl_pebs_event_constraints[];

struct event_constraint *intel_pebs_constraints(struct perf_event *event);

void intel_pmu_pebs_add(struct perf_event *event);

void intel_pmu_pebs_del(struct perf_event *event);

void intel_pmu_pebs_enable(struct perf_event *event);

void intel_pmu_pebs_disable(struct perf_event *event);

void intel_pmu_pebs_enable_all(void);

void intel_pmu_pebs_disable_all(void);

void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in);

void intel_ds_init(void);

void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);

u64 lbr_from_signext_quirk_wr(u64 val);

void intel_pmu_lbr_reset(void);

void intel_pmu_lbr_add(struct perf_event *event);

void intel_pmu_lbr_del(struct perf_event *event);

void intel_pmu_lbr_enable_all(bool pmi);

void intel_pmu_lbr_disable_all(void);

void intel_pmu_lbr_read(void);

void intel_pmu_lbr_init_core(void);
void intel_pmu_lbr_init_nhm(void);
void intel_pmu_lbr_init_atom(void);
void intel_pmu_lbr_init_slm(void);
void intel_pmu_lbr_init_snb(void);
void intel_pmu_lbr_init_hsw(void);
void intel_pmu_lbr_init_skl(void);
void intel_pmu_lbr_init_knl(void);

void intel_pmu_pebs_data_source_nhm(void);

int intel_pmu_setup_lbr_filter(struct perf_event *event);

void intel_pt_interrupt(void);

int intel_bts_interrupt(void);

void intel_bts_enable_local(void);

void intel_bts_disable_local(void);

int p4_pmu_init(void);

int p6_pmu_init(void);

int knc_pmu_init(void);

static inline int is_ht_workaround_enabled(void)
{
	return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED);
}

#else /* CONFIG_CPU_SUP_INTEL */

static inline void reserve_ds_buffers(void)
{
}

static inline void release_ds_buffers(void)
{
}

static inline int intel_pmu_init(void)
{
	return 0;
}

static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
{
	return NULL;
}

static inline int is_ht_workaround_enabled(void)
{
	return 0;
}

#endif /* CONFIG_CPU_SUP_INTEL */