uncore_nhmex.c
/* Nehalem-EX/Westmere-EX uncore support */
#include "uncore.h"

/* NHM-EX event control */
#define NHMEX_PMON_CTL_EV_SEL_MASK	0x000000ff
#define NHMEX_PMON_CTL_UMASK_MASK	0x0000ff00
#define NHMEX_PMON_CTL_EN_BIT0		(1 << 0)
#define NHMEX_PMON_CTL_EDGE_DET		(1 << 18)
#define NHMEX_PMON_CTL_PMI_EN		(1 << 20)
#define NHMEX_PMON_CTL_EN_BIT22		(1 << 22)
#define NHMEX_PMON_CTL_INVERT		(1 << 23)
#define NHMEX_PMON_CTL_THRESH_MASK	0xff000000
#define NHMEX_PMON_RAW_EVENT_MASK	(NHMEX_PMON_CTL_EV_SEL_MASK | \
					 NHMEX_PMON_CTL_UMASK_MASK | \
					 NHMEX_PMON_CTL_EDGE_DET | \
					 NHMEX_PMON_CTL_INVERT | \
					 NHMEX_PMON_CTL_THRESH_MASK)
/* NHM-EX Ubox */
#define NHMEX_U_MSR_PMON_GLOBAL_CTL		0xc00
#define NHMEX_U_MSR_PMON_CTR			0xc11
#define NHMEX_U_MSR_PMON_EV_SEL			0xc10

#define NHMEX_U_PMON_GLOBAL_EN			(1 << 0)
#define NHMEX_U_PMON_GLOBAL_PMI_CORE_SEL	0x0000001e
#define NHMEX_U_PMON_GLOBAL_EN_ALL		(1 << 28)
#define NHMEX_U_PMON_GLOBAL_RST_ALL		(1 << 29)
#define NHMEX_U_PMON_GLOBAL_FRZ_ALL		(1 << 31)

#define NHMEX_U_PMON_RAW_EVENT_MASK		\
		(NHMEX_PMON_CTL_EV_SEL_MASK |	\
		 NHMEX_PMON_CTL_EDGE_DET)

/* NHM-EX Cbox */
#define NHMEX_C0_MSR_PMON_GLOBAL_CTL		0xd00
#define NHMEX_C0_MSR_PMON_CTR0			0xd11
#define NHMEX_C0_MSR_PMON_EV_SEL0		0xd10
#define NHMEX_C_MSR_OFFSET			0x20

/* NHM-EX Bbox */
#define NHMEX_B0_MSR_PMON_GLOBAL_CTL		0xc20
#define NHMEX_B0_MSR_PMON_CTR0			0xc31
#define NHMEX_B0_MSR_PMON_CTL0			0xc30
#define NHMEX_B_MSR_OFFSET			0x40
#define NHMEX_B0_MSR_MATCH			0xe45
#define NHMEX_B0_MSR_MASK			0xe46
#define NHMEX_B1_MSR_MATCH			0xe4d
#define NHMEX_B1_MSR_MASK			0xe4e

#define NHMEX_B_PMON_CTL_EN			(1 << 0)
#define NHMEX_B_PMON_CTL_EV_SEL_SHIFT		1
#define NHMEX_B_PMON_CTL_EV_SEL_MASK		\
		(0x1f << NHMEX_B_PMON_CTL_EV_SEL_SHIFT)
#define NHMEX_B_PMON_CTR_SHIFT			6
#define NHMEX_B_PMON_CTR_MASK			\
		(0x3 << NHMEX_B_PMON_CTR_SHIFT)
#define NHMEX_B_PMON_RAW_EVENT_MASK		\
		(NHMEX_B_PMON_CTL_EV_SEL_MASK | \
		 NHMEX_B_PMON_CTR_MASK)

/* NHM-EX Sbox */
#define NHMEX_S0_MSR_PMON_GLOBAL_CTL		0xc40
#define NHMEX_S0_MSR_PMON_CTR0			0xc51
#define NHMEX_S0_MSR_PMON_CTL0			0xc50
#define NHMEX_S_MSR_OFFSET			0x80
#define NHMEX_S0_MSR_MM_CFG			0xe48
#define NHMEX_S0_MSR_MATCH			0xe49
#define NHMEX_S0_MSR_MASK			0xe4a
#define NHMEX_S1_MSR_MM_CFG			0xe58
#define NHMEX_S1_MSR_MATCH			0xe59
#define NHMEX_S1_MSR_MASK			0xe5a

#define NHMEX_S_PMON_MM_CFG_EN			(0x1ULL << 63)
#define NHMEX_S_EVENT_TO_R_PROG_EV		0

/* NHM-EX Mbox */
#define NHMEX_M0_MSR_GLOBAL_CTL			0xca0
#define NHMEX_M0_MSR_PMU_DSP			0xca5
#define NHMEX_M0_MSR_PMU_ISS			0xca6
#define NHMEX_M0_MSR_PMU_MAP			0xca7
#define NHMEX_M0_MSR_PMU_MSC_THR		0xca8
#define NHMEX_M0_MSR_PMU_PGT			0xca9
#define NHMEX_M0_MSR_PMU_PLD			0xcaa
#define NHMEX_M0_MSR_PMU_ZDP_CTL_FVC		0xcab
#define NHMEX_M0_MSR_PMU_CTL0			0xcb0
#define NHMEX_M0_MSR_PMU_CNT0			0xcb1
#define NHMEX_M_MSR_OFFSET			0x40
#define NHMEX_M0_MSR_PMU_MM_CFG			0xe54
#define NHMEX_M1_MSR_PMU_MM_CFG			0xe5c

#define NHMEX_M_PMON_MM_CFG_EN			(1ULL << 63)
#define NHMEX_M_PMON_ADDR_MATCH_MASK		0x3ffffffffULL
#define NHMEX_M_PMON_ADDR_MASK_MASK		0x7ffffffULL
#define NHMEX_M_PMON_ADDR_MASK_SHIFT		34

#define NHMEX_M_PMON_CTL_EN			(1 << 0)
#define NHMEX_M_PMON_CTL_PMI_EN			(1 << 1)
#define NHMEX_M_PMON_CTL_COUNT_MODE_SHIFT	2
#define NHMEX_M_PMON_CTL_COUNT_MODE_MASK	\
	(0x3 << NHMEX_M_PMON_CTL_COUNT_MODE_SHIFT)
#define NHMEX_M_PMON_CTL_STORAGE_MODE_SHIFT	4
#define NHMEX_M_PMON_CTL_STORAGE_MODE_MASK	\
	(0x3 << NHMEX_M_PMON_CTL_STORAGE_MODE_SHIFT)
#define NHMEX_M_PMON_CTL_WRAP_MODE		(1 << 6)
#define NHMEX_M_PMON_CTL_FLAG_MODE		(1 << 7)
#define NHMEX_M_PMON_CTL_INC_SEL_SHIFT		9
#define NHMEX_M_PMON_CTL_INC_SEL_MASK		\
	(0x1f << NHMEX_M_PMON_CTL_INC_SEL_SHIFT)
#define NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT	19
#define NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK	\
	(0x7 << NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT)
#define NHMEX_M_PMON_RAW_EVENT_MASK			\
		(NHMEX_M_PMON_CTL_COUNT_MODE_MASK |	\
		 NHMEX_M_PMON_CTL_STORAGE_MODE_MASK |	\
		 NHMEX_M_PMON_CTL_WRAP_MODE |		\
		 NHMEX_M_PMON_CTL_FLAG_MODE |		\
		 NHMEX_M_PMON_CTL_INC_SEL_MASK |	\
		 NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK)

#define NHMEX_M_PMON_ZDP_CTL_FVC_MASK		(((1 << 11) - 1) | (1 << 23))
#define NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n)	(0x7ULL << (11 + 3 * (n)))

#define WSMEX_M_PMON_ZDP_CTL_FVC_MASK		(((1 << 12) - 1) | (1 << 24))
#define WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n)	(0x7ULL << (12 + 3 * (n)))
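/*
 * Layout sketch, as implied by the masks above (not taken from the
 * datasheet): ZDP_CTL_FVC carries one 3-bit event-select field per FVC
 * event n = 0..3, at bits 11+3n..13+3n on NHM-EX and 12+3n..14+3n on
 * WSM-EX; the remaining masked bits (0-10 plus bit 23, resp. 0-11 plus
 * bit 24) are fields shared by all four events.  For example,
 * NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(1) == 0x7ULL << 14.
 */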
/*
 * Use bits 9~13 to select the event if bit 7 is not set;
 * otherwise use bits 19~21 to select the event.
 */
#define MBOX_INC_SEL(x) ((x) << NHMEX_M_PMON_CTL_INC_SEL_SHIFT)
#define MBOX_SET_FLAG_SEL(x) (((x) << NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT) | \
				NHMEX_M_PMON_CTL_FLAG_MODE)
#define MBOX_INC_SEL_MASK (NHMEX_M_PMON_CTL_INC_SEL_MASK | \
			   NHMEX_M_PMON_CTL_FLAG_MODE)
#define MBOX_SET_FLAG_SEL_MASK (NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK | \
				NHMEX_M_PMON_CTL_FLAG_MODE)
#define MBOX_INC_SEL_EXTRA_REG(c, r)	\
		EVENT_EXTRA_REG(MBOX_INC_SEL(c), NHMEX_M0_MSR_PMU_##r, \
				MBOX_INC_SEL_MASK, (u64)-1, NHMEX_M_##r)
#define MBOX_SET_FLAG_SEL_EXTRA_REG(c, r) \
		EVENT_EXTRA_REG(MBOX_SET_FLAG_SEL(c), NHMEX_M0_MSR_PMU_##r, \
				MBOX_SET_FLAG_SEL_MASK, \
				(u64)-1, NHMEX_M_##r)
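/*
 * A worked expansion, for illustration: MBOX_INC_SEL_EXTRA_REG(0xd,
 * ZDP_CTL_FVC) matches events whose inc_sel field (bits 9-13) equals
 * 0xd while flag_mode (bit 7) is clear, i.e.
 * (config & MBOX_INC_SEL_MASK) == 0xd << 9, and routes the event's
 * config1 to the NHMEX_M0_MSR_PMU_ZDP_CTL_FVC extra register (0xcab).
 */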
/* NHM-EX Rbox */
#define NHMEX_R_MSR_GLOBAL_CTL			0xe00
#define NHMEX_R_MSR_PMON_CTL0			0xe10
#define NHMEX_R_MSR_PMON_CNT0			0xe11
#define NHMEX_R_MSR_OFFSET			0x20

#define NHMEX_R_MSR_PORTN_QLX_CFG(n)	\
		((n) < 4 ? (0xe0c + (n)) : (0xe2c + (n) - 4))
#define NHMEX_R_MSR_PORTN_IPERF_CFG0(n)	(0xe04 + (n))
#define NHMEX_R_MSR_PORTN_IPERF_CFG1(n)	(0xe24 + (n))
#define NHMEX_R_MSR_PORTN_XBR_OFFSET(n)	\
		(((n) < 4 ? 0 : 0x10) + (n) * 4)
#define NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n)	\
		(0xe60 + NHMEX_R_MSR_PORTN_XBR_OFFSET(n))
#define NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(n)	\
		(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) + 1)
#define NHMEX_R_MSR_PORTN_XBR_SET1_MASK(n)	\
		(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) + 2)
#define NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n)	\
		(0xe70 + NHMEX_R_MSR_PORTN_XBR_OFFSET(n))
#define NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(n)	\
		(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) + 1)
#define NHMEX_R_MSR_PORTN_XBR_SET2_MASK(n)	\
		(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) + 2)

#define NHMEX_R_PMON_CTL_EN			(1 << 0)
#define NHMEX_R_PMON_CTL_EV_SEL_SHIFT		1
#define NHMEX_R_PMON_CTL_EV_SEL_MASK		\
		(0x1f << NHMEX_R_PMON_CTL_EV_SEL_SHIFT)
#define NHMEX_R_PMON_CTL_PMI_EN			(1 << 6)
#define NHMEX_R_PMON_RAW_EVENT_MASK		NHMEX_R_PMON_CTL_EV_SEL_MASK
/* NHM-EX Wbox */
#define NHMEX_W_MSR_GLOBAL_CTL			0xc80
/* event select at 0xc90, counter at 0xc91 (even/odd, like the other boxes) */
#define NHMEX_W_MSR_PMON_EVT_SEL0		0xc90
#define NHMEX_W_MSR_PMON_CNT0			0xc91
#define NHMEX_W_MSR_PMON_FIXED_CTR		0x394
#define NHMEX_W_MSR_PMON_FIXED_CTL		0x395

#define NHMEX_W_PMON_GLOBAL_FIXED_EN		(1ULL << 31)
#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
				((1ULL << (n)) - 1)))
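/*
 * __BITS_VALUE(x, i, n) extracts the i-th n-bit-wide field of x; for
 * example, __BITS_VALUE(0xaabbccdd, 1, 8) == 0xcc.  The code below uses
 * it to pack several small indices and refcounts into a single scalar.
 */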
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7");
DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63");
DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63");

static void nhmex_uncore_msr_init_box(struct intel_uncore_box *box)
{
	wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL);
}

static void nhmex_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, 0);
}

static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);
	u64 config;

	if (msr) {
		rdmsrl(msr, config);
		config &= ~((1ULL << uncore_num_counters(box)) - 1);
		/* WBox has a fixed counter */
		if (uncore_msr_fixed_ctl(box))
			config &= ~NHMEX_W_PMON_GLOBAL_FIXED_EN;
		wrmsrl(msr, config);
	}
}

static void nhmex_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);
	u64 config;

	if (msr) {
		rdmsrl(msr, config);
		config |= (1ULL << uncore_num_counters(box)) - 1;
		/* WBox has a fixed counter */
		if (uncore_msr_fixed_ctl(box))
			config |= NHMEX_W_PMON_GLOBAL_FIXED_EN;
		wrmsrl(msr, config);
	}
}

static void nhmex_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	wrmsrl(event->hw.config_base, 0);
}

static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx == UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0);
	else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0)
		wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
	else
		wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
}
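/*
 * Note, inferred from the logic above: boxes whose raw event mask
 * already uses bit 0 as part of the event select (Ubox, Cbox, Sbox and
 * Wbox, whose ev_sel field spans bits 0-7) are enabled via bit 22, so
 * the enable bit never collides with the event encoding; the other
 * boxes (Bbox, Mbox, Rbox) keep their enable in bit 0.
 */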
#define NHMEX_UNCORE_OPS_COMMON_INIT()				\
	.init_box	= nhmex_uncore_msr_init_box,		\
	.exit_box	= nhmex_uncore_msr_exit_box,		\
	.disable_box	= nhmex_uncore_msr_disable_box,		\
	.enable_box	= nhmex_uncore_msr_enable_box,		\
	.disable_event	= nhmex_uncore_msr_disable_event,	\
	.read_counter	= uncore_msr_read_counter

static struct intel_uncore_ops nhmex_uncore_ops = {
	NHMEX_UNCORE_OPS_COMMON_INIT(),
	.enable_event	= nhmex_uncore_msr_enable_event,
};

static struct attribute *nhmex_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_edge.attr,
	NULL,
};

static struct attribute_group nhmex_uncore_ubox_format_group = {
	.name		= "format",
	.attrs		= nhmex_uncore_ubox_formats_attr,
};

static struct intel_uncore_type nhmex_uncore_ubox = {
	.name		= "ubox",
	.num_counters	= 1,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.event_ctl	= NHMEX_U_MSR_PMON_EV_SEL,
	.perf_ctr	= NHMEX_U_MSR_PMON_CTR,
	.event_mask	= NHMEX_U_PMON_RAW_EVENT_MASK,
	.box_ctl	= NHMEX_U_MSR_PMON_GLOBAL_CTL,
	.ops		= &nhmex_uncore_ops,
	.format_group	= &nhmex_uncore_ubox_format_group
};

static struct attribute *nhmex_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static struct attribute_group nhmex_uncore_cbox_format_group = {
	.name = "format",
	.attrs = nhmex_uncore_cbox_formats_attr,
};

/* msr offset for each instance of cbox */
static unsigned nhmex_cbox_msr_offsets[] = {
	0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0,
};

static struct intel_uncore_type nhmex_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 6,
	.num_boxes		= 10,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_C0_MSR_PMON_EV_SEL0,
	.perf_ctr		= NHMEX_C0_MSR_PMON_CTR0,
	.event_mask		= NHMEX_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_C0_MSR_PMON_GLOBAL_CTL,
	.msr_offsets		= nhmex_cbox_msr_offsets,
	.pair_ctr_ctl		= 1,
	.ops			= &nhmex_uncore_ops,
	.format_group		= &nhmex_uncore_cbox_format_group
};

static struct uncore_event_desc nhmex_uncore_wbox_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0"),
	{ /* end: all zeroes */ },
};
static struct intel_uncore_type nhmex_uncore_wbox = {
	.name			= "wbox",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_W_MSR_PMON_EVT_SEL0,
	.perf_ctr		= NHMEX_W_MSR_PMON_CNT0,
	.fixed_ctr		= NHMEX_W_MSR_PMON_FIXED_CTR,
	.fixed_ctl		= NHMEX_W_MSR_PMON_FIXED_CTL,
	.event_mask		= NHMEX_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_W_MSR_GLOBAL_CTL,
	.pair_ctr_ctl		= 1,
	.event_descs		= nhmex_uncore_wbox_events,
	.ops			= &nhmex_uncore_ops,
	.format_group		= &nhmex_uncore_cbox_format_group
};
static int nhmex_bbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
	int ctr, ev_sel;

	ctr = (hwc->config & NHMEX_B_PMON_CTR_MASK) >>
		NHMEX_B_PMON_CTR_SHIFT;
	ev_sel = (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK) >>
		  NHMEX_B_PMON_CTL_EV_SEL_SHIFT;

	/* events that do not use the match/mask registers */
	if ((ctr == 0 && ev_sel > 0x3) || (ctr == 1 && ev_sel > 0x6) ||
	    (ctr == 2 && ev_sel != 0x4) || ctr == 3)
		return 0;

	if (box->pmu->pmu_idx == 0)
		reg1->reg = NHMEX_B0_MSR_MATCH;
	else
		reg1->reg = NHMEX_B1_MSR_MATCH;
	reg1->idx = 0;
	reg1->config = event->attr.config1;
	reg2->config = event->attr.config2;
	return 0;
}

static void nhmex_bbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		wrmsrl(reg1->reg, reg1->config);
		wrmsrl(reg1->reg + 1, reg2->config);
	}

	wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
		(hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK));
}

/*
 * The Bbox has 4 counters, but each counter monitors different events.
 * Use bits 6-7 in the event config to select the counter.
 */
static struct event_constraint nhmex_uncore_bbox_constraints[] = {
	EVENT_CONSTRAINT(0,    1, 0xc0),
	EVENT_CONSTRAINT(0x40, 2, 0xc0),
	EVENT_CONSTRAINT(0x80, 4, 0xc0),
	EVENT_CONSTRAINT(0xc0, 8, 0xc0),
	EVENT_CONSTRAINT_END,
};
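/*
 * A worked example, for illustration: an event coded with counter=2 has
 * (config & 0xc0) == 0x80, so the third constraint above applies and
 * its counter mask 0x4 pins the event to counter 2.
 */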
static struct attribute *nhmex_uncore_bbox_formats_attr[] = {
	&format_attr_event5.attr,
	&format_attr_counter.attr,
	&format_attr_match.attr,
	&format_attr_mask.attr,
	NULL,
};

static struct attribute_group nhmex_uncore_bbox_format_group = {
	.name = "format",
	.attrs = nhmex_uncore_bbox_formats_attr,
};

static struct intel_uncore_ops nhmex_uncore_bbox_ops = {
	NHMEX_UNCORE_OPS_COMMON_INIT(),
	.enable_event		= nhmex_bbox_msr_enable_event,
	.hw_config		= nhmex_bbox_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

static struct intel_uncore_type nhmex_uncore_bbox = {
	.name			= "bbox",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_B0_MSR_PMON_CTL0,
	.perf_ctr		= NHMEX_B0_MSR_PMON_CTR0,
	.event_mask		= NHMEX_B_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_B0_MSR_PMON_GLOBAL_CTL,
	.msr_offset		= NHMEX_B_MSR_OFFSET,
	.pair_ctr_ctl		= 1,
	.num_shared_regs	= 1,
	.constraints		= nhmex_uncore_bbox_constraints,
	.ops			= &nhmex_uncore_bbox_ops,
	.format_group		= &nhmex_uncore_bbox_format_group
};
static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	/* only TO_R_PROG_EV event uses the match/mask register */
	if ((hwc->config & NHMEX_PMON_CTL_EV_SEL_MASK) !=
	    NHMEX_S_EVENT_TO_R_PROG_EV)
		return 0;

	if (box->pmu->pmu_idx == 0)
		reg1->reg = NHMEX_S0_MSR_MM_CFG;
	else
		reg1->reg = NHMEX_S1_MSR_MM_CFG;
	reg1->idx = 0;
	reg1->config = event->attr.config1;
	reg2->config = event->attr.config2;
	return 0;
}

static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		wrmsrl(reg1->reg, 0);
		wrmsrl(reg1->reg + 1, reg1->config);
		wrmsrl(reg1->reg + 2, reg2->config);
		wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN);
	}

	wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
}

static struct attribute *nhmex_uncore_sbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match.attr,
	&format_attr_mask.attr,
	NULL,
};

static struct attribute_group nhmex_uncore_sbox_format_group = {
	.name			= "format",
	.attrs			= nhmex_uncore_sbox_formats_attr,
};

static struct intel_uncore_ops nhmex_uncore_sbox_ops = {
	NHMEX_UNCORE_OPS_COMMON_INIT(),
	.enable_event		= nhmex_sbox_msr_enable_event,
	.hw_config		= nhmex_sbox_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

static struct intel_uncore_type nhmex_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_S0_MSR_PMON_CTL0,
	.perf_ctr		= NHMEX_S0_MSR_PMON_CTR0,
	.event_mask		= NHMEX_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_S0_MSR_PMON_GLOBAL_CTL,
	.msr_offset		= NHMEX_S_MSR_OFFSET,
	.pair_ctr_ctl		= 1,
	.num_shared_regs	= 1,
	.ops			= &nhmex_uncore_sbox_ops,
	.format_group		= &nhmex_uncore_sbox_format_group
};
enum {
	EXTRA_REG_NHMEX_M_FILTER,
	EXTRA_REG_NHMEX_M_DSP,
	EXTRA_REG_NHMEX_M_ISS,
	EXTRA_REG_NHMEX_M_MAP,
	EXTRA_REG_NHMEX_M_MSC_THR,
	EXTRA_REG_NHMEX_M_PGT,
	EXTRA_REG_NHMEX_M_PLD,
	EXTRA_REG_NHMEX_M_ZDP_CTL_FVC,
};

static struct extra_reg nhmex_uncore_mbox_extra_regs[] = {
	MBOX_INC_SEL_EXTRA_REG(0x0, DSP),
	MBOX_INC_SEL_EXTRA_REG(0x4, MSC_THR),
	MBOX_INC_SEL_EXTRA_REG(0x5, MSC_THR),
	MBOX_INC_SEL_EXTRA_REG(0x9, ISS),
	/* event 0xa uses two extra registers */
	MBOX_INC_SEL_EXTRA_REG(0xa, ISS),
	MBOX_INC_SEL_EXTRA_REG(0xa, PLD),
	MBOX_INC_SEL_EXTRA_REG(0xb, PLD),
	/* events 0xd ~ 0x10 use the same extra register */
	MBOX_INC_SEL_EXTRA_REG(0xd, ZDP_CTL_FVC),
	MBOX_INC_SEL_EXTRA_REG(0xe, ZDP_CTL_FVC),
	MBOX_INC_SEL_EXTRA_REG(0xf, ZDP_CTL_FVC),
	MBOX_INC_SEL_EXTRA_REG(0x10, ZDP_CTL_FVC),
	MBOX_INC_SEL_EXTRA_REG(0x16, PGT),
	MBOX_SET_FLAG_SEL_EXTRA_REG(0x0, DSP),
	MBOX_SET_FLAG_SEL_EXTRA_REG(0x1, ISS),
	MBOX_SET_FLAG_SEL_EXTRA_REG(0x5, PGT),
	MBOX_SET_FLAG_SEL_EXTRA_REG(0x6, MAP),
	EVENT_EXTRA_END
};
/* Nehalem-EX or Westmere-EX? */
static bool uncore_nhmex;

static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config)
{
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	bool ret = false;
	u64 mask;

	if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
		er = &box->shared_regs[idx];
		raw_spin_lock_irqsave(&er->lock, flags);
		if (!atomic_read(&er->ref) || er->config == config) {
			atomic_inc(&er->ref);
			er->config = config;
			ret = true;
		}
		raw_spin_unlock_irqrestore(&er->lock, flags);

		return ret;
	}
	/*
	 * The ZDP_CTL_FVC MSR has 4 fields which are used to control
	 * events 0xd ~ 0x10. Besides these 4 fields, there are additional
	 * fields which are shared.
	 */
	idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
	if (WARN_ON_ONCE(idx >= 4))
		return false;

	/* mask of the shared fields */
	if (uncore_nhmex)
		mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK;
	else
		mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK;
	er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];

	raw_spin_lock_irqsave(&er->lock, flags);
	/* add mask of the non-shared field if it's in use */
	if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) {
		if (uncore_nhmex)
			mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
		else
			mask |= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
	}

	if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		if (uncore_nhmex)
			mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK |
				NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
		else
			mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK |
				WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
		er->config &= ~mask;
		er->config |= (config & mask);
		ret = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	return ret;
}
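/*
 * For illustration: the ZDP_CTL_FVC shared reg keeps one 8-bit refcount
 * per FVC event field inside the single atomic er->ref, so a reference
 * on field 2 is taken with atomic_add(1 << 16, &er->ref) and read back
 * with __BITS_VALUE(atomic_read(&er->ref), 2, 8).
 */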
static void nhmex_mbox_put_shared_reg(struct intel_uncore_box *box, int idx)
{
	struct intel_uncore_extra_reg *er;

	if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
		er = &box->shared_regs[idx];
		atomic_dec(&er->ref);
		return;
	}

	idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
	er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
	atomic_sub(1 << (idx * 8), &er->ref);
}

static u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	u64 idx, orig_idx = __BITS_VALUE(reg1->idx, 0, 8);
	u64 config = reg1->config;

	/* get the non-shared control bits and shift them */
	idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
	if (uncore_nhmex)
		config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
	else
		config &= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
	if (new_idx > orig_idx) {
		idx = new_idx - orig_idx;
		config <<= 3 * idx;
	} else {
		idx = orig_idx - new_idx;
		config >>= 3 * idx;
	}

	/* add the shared control bits back */
	if (uncore_nhmex)
		config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
	else
		config |= WSMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;

	if (modify) {
		/* adjust the main event selector */
		if (new_idx > orig_idx)
			hwc->config += idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
		else
			hwc->config -= idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
		reg1->config = config;
		reg1->idx = ~0xff | new_idx;
	}
	return config;
}
static struct event_constraint *
nhmex_mbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	int i, idx[2], alloc = 0;
	u64 config1 = reg1->config;

	idx[0] = __BITS_VALUE(reg1->idx, 0, 8);
	idx[1] = __BITS_VALUE(reg1->idx, 1, 8);
again:
	for (i = 0; i < 2; i++) {
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			idx[i] = 0xff;

		if (idx[i] == 0xff)
			continue;

		if (!nhmex_mbox_get_shared_reg(box, idx[i],
				__BITS_VALUE(config1, i, 32)))
			goto fail;
		alloc |= (0x1 << i);
	}

	/* for the match/mask registers */
	if (reg2->idx != EXTRA_REG_NONE &&
	    (uncore_box_is_fake(box) || !reg2->alloc) &&
	    !nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config))
		goto fail;

	/*
	 * If it's a fake box -- as per validate_{group,event}() we
	 * shouldn't touch event state and we can avoid doing so
	 * since both will only call get_event_constraints() once
	 * on each event, this avoids the need for reg->alloc.
	 */
	if (!uncore_box_is_fake(box)) {
		if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8))
			nhmex_mbox_alter_er(event, idx[0], true);
		reg1->alloc |= alloc;
		if (reg2->idx != EXTRA_REG_NONE)
			reg2->alloc = 1;
	}
	return NULL;
fail:
	if (idx[0] != 0xff && !(alloc & 0x1) &&
	    idx[0] >= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
		/*
		 * events 0xd ~ 0x10 are functionally identical, but are
		 * controlled by different fields in the ZDP_CTL_FVC
		 * register. If we failed to take one field, try the
		 * other 3 choices.
		 */
		BUG_ON(__BITS_VALUE(reg1->idx, 1, 8) != 0xff);
		idx[0] -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
		idx[0] = (idx[0] + 1) % 4;
		idx[0] += EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
		if (idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) {
			config1 = nhmex_mbox_alter_er(event, idx[0], false);
			goto again;
		}
	}

	if (alloc & 0x1)
		nhmex_mbox_put_shared_reg(box, idx[0]);
	if (alloc & 0x2)
		nhmex_mbox_put_shared_reg(box, idx[1]);
	return &uncore_constraint_empty;
}

static void nhmex_mbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;

	if (uncore_box_is_fake(box))
		return;

	if (reg1->alloc & 0x1)
		nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 0, 8));
	if (reg1->alloc & 0x2)
		nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 1, 8));
	reg1->alloc = 0;

	if (reg2->alloc) {
		nhmex_mbox_put_shared_reg(box, reg2->idx);
		reg2->alloc = 0;
	}
}

static int nhmex_mbox_extra_reg_idx(struct extra_reg *er)
{
	if (er->idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
		return er->idx;

	return er->idx + (er->event >> NHMEX_M_PMON_CTL_INC_SEL_SHIFT) - 0xd;
}
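/*
 * A worked example, for illustration: the ZDP_CTL_FVC entries in
 * nhmex_uncore_mbox_extra_regs encode inc_sel in er->event, so for
 * MBOX_INC_SEL_EXTRA_REG(0xe, ZDP_CTL_FVC) the function above returns
 * EXTRA_REG_NHMEX_M_ZDP_CTL_FVC + (0xe - 0xd), giving each of the
 * events 0xd ~ 0x10 its own pseudo index past the enum values.
 */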
static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_type *type = box->pmu->type;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	struct extra_reg *er;
	unsigned msr;
	int reg_idx = 0;

	/*
	 * The mbox events may require at most two extra MSRs. But only
	 * the lower 32 bits in these MSRs are significant, so we can use
	 * config1 to pass both MSRs' config.
	 */
	for (er = nhmex_uncore_mbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		if (event->attr.config1 & ~er->valid_mask)
			return -EINVAL;

		msr = er->msr + type->msr_offset * box->pmu->pmu_idx;
		if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff))
			return -EINVAL;

		/* always use bits 32~63 to pass the PLD config */
		if (er->idx == EXTRA_REG_NHMEX_M_PLD)
			reg_idx = 1;
		else if (WARN_ON_ONCE(reg_idx > 0))
			return -EINVAL;

		reg1->idx &= ~(0xff << (reg_idx * 8));
		reg1->reg &= ~(0xffff << (reg_idx * 16));
		reg1->idx |= nhmex_mbox_extra_reg_idx(er) << (reg_idx * 8);
		reg1->reg |= msr << (reg_idx * 16);
		reg1->config = event->attr.config1;
		reg_idx++;
	}
	/*
	 * The mbox only provides the ability to perform address matching
	 * for the PLD events.
	 */
	if (reg_idx == 2) {
		reg2->idx = EXTRA_REG_NHMEX_M_FILTER;
		if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN)
			reg2->config = event->attr.config2;
		else
			reg2->config = ~0ULL;
		if (box->pmu->pmu_idx == 0)
			reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG;
		else
			reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG;
	}
	return 0;
}
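/*
 * For illustration: after a successful hw_config, reg1->reg packs up to
 * two 16-bit MSR addresses (slot 0 in bits 0-15, the PLD slot in bits
 * 16-31) and reg1->idx the two matching 8-bit extra-register indices;
 * the enable path below unpacks them with __BITS_VALUE(reg1->reg, i, 16)
 * and __BITS_VALUE(reg1->idx, i, 8).
 */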
static u64 nhmex_mbox_shared_reg_config(struct intel_uncore_box *box, int idx)
{
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	u64 config;

	if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
		return box->shared_regs[idx].config;

	er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
	raw_spin_lock_irqsave(&er->lock, flags);
	config = er->config;
	raw_spin_unlock_irqrestore(&er->lock, flags);

	return config;
}

static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
	int idx;

	idx = __BITS_VALUE(reg1->idx, 0, 8);
	if (idx != 0xff)
		wrmsrl(__BITS_VALUE(reg1->reg, 0, 16),
			nhmex_mbox_shared_reg_config(box, idx));
	idx = __BITS_VALUE(reg1->idx, 1, 8);
	if (idx != 0xff)
		wrmsrl(__BITS_VALUE(reg1->reg, 1, 16),
			nhmex_mbox_shared_reg_config(box, idx));

	if (reg2->idx != EXTRA_REG_NONE) {
		wrmsrl(reg2->reg, 0);
		if (reg2->config != ~0ULL) {
			wrmsrl(reg2->reg + 1,
				reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK);
			wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK &
				(reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT));
			wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN);
		}
	}

	wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
}

DEFINE_UNCORE_FORMAT_ATTR(count_mode,	count_mode,	"config:2-3");
DEFINE_UNCORE_FORMAT_ATTR(storage_mode,	storage_mode,	"config:4-5");
DEFINE_UNCORE_FORMAT_ATTR(wrap_mode,	wrap_mode,	"config:6");
DEFINE_UNCORE_FORMAT_ATTR(flag_mode,	flag_mode,	"config:7");
DEFINE_UNCORE_FORMAT_ATTR(inc_sel,	inc_sel,	"config:9-13");
DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel,	set_flag_sel,	"config:19-21");
DEFINE_UNCORE_FORMAT_ATTR(filter_cfg_en, filter_cfg_en,	"config2:63");
DEFINE_UNCORE_FORMAT_ATTR(filter_match,	filter_match,	"config2:0-33");
DEFINE_UNCORE_FORMAT_ATTR(filter_mask,	filter_mask,	"config2:34-61");
DEFINE_UNCORE_FORMAT_ATTR(dsp,		dsp,		"config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(thr,		thr,		"config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(fvc,		fvc,		"config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(pgt,		pgt,		"config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(map,		map,		"config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(iss,		iss,		"config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(pld,		pld,		"config1:32-63");

static struct attribute *nhmex_uncore_mbox_formats_attr[] = {
	&format_attr_count_mode.attr,
	&format_attr_storage_mode.attr,
	&format_attr_wrap_mode.attr,
	&format_attr_flag_mode.attr,
	&format_attr_inc_sel.attr,
	&format_attr_set_flag_sel.attr,
	&format_attr_filter_cfg_en.attr,
	&format_attr_filter_match.attr,
	&format_attr_filter_mask.attr,
	&format_attr_dsp.attr,
	&format_attr_thr.attr,
	&format_attr_fvc.attr,
	&format_attr_pgt.attr,
	&format_attr_map.attr,
	&format_attr_iss.attr,
	&format_attr_pld.attr,
	NULL,
};

static struct attribute_group nhmex_uncore_mbox_format_group = {
	.name		= "format",
	.attrs		= nhmex_uncore_mbox_formats_attr,
};

static struct uncore_event_desc nhmex_uncore_mbox_events[] = {
	INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x2800"),
	INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x2820"),
	{ /* end: all zeroes */ },
};

static struct uncore_event_desc wsmex_uncore_mbox_events[] = {
	INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x5000"),
	INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x5040"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_ops nhmex_uncore_mbox_ops = {
	NHMEX_UNCORE_OPS_COMMON_INIT(),
	.enable_event	= nhmex_mbox_msr_enable_event,
	.hw_config	= nhmex_mbox_hw_config,
	.get_constraint	= nhmex_mbox_get_constraint,
	.put_constraint	= nhmex_mbox_put_constraint,
};

static struct intel_uncore_type nhmex_uncore_mbox = {
	.name			= "mbox",
	.num_counters		= 6,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_M0_MSR_PMU_CTL0,
	.perf_ctr		= NHMEX_M0_MSR_PMU_CNT0,
	.event_mask		= NHMEX_M_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_M0_MSR_GLOBAL_CTL,
	.msr_offset		= NHMEX_M_MSR_OFFSET,
	.pair_ctr_ctl		= 1,
	.num_shared_regs	= 8,
	.event_descs		= nhmex_uncore_mbox_events,
	.ops			= &nhmex_uncore_mbox_ops,
	.format_group		= &nhmex_uncore_mbox_format_group,
};

static void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	/* adjust the main event selector and extra register index */
	if (reg1->idx % 2) {
		reg1->idx--;
		hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
	} else {
		reg1->idx++;
		hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
	}

	/* adjust extra register config */
	switch (reg1->idx % 6) {
	case 2:
		/* shift bits 8~15 down to bits 0~7 */
		reg1->config >>= 8;
		break;
	case 3:
		/* shift bits 0~7 up to bits 8~15 */
		reg1->config <<= 8;
		break;
	}
}

/*
 * Each rbox has 4 event sets, which monitor PQI ports 0~3 or 4~7.
 * An event set consists of 6 events; the 3rd and 4th events in an
 * event set share an extra register, so an event set uses 5 extra
 * registers.
 */
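/*
 * For illustration: within one event set, idx % 6 selects the extra
 * register as 0 -> IPERF_CFG0, 1 -> IPERF_CFG1, 2 and 3 -> QLX_CFG
 * (low and high byte), 4 -> XBR set 1, 5 -> XBR set 2; hence the
 * shared-reg index below is idx, minus one when idx > 2, plus 5 for
 * each preceding event set.
 */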
static struct event_constraint *
nhmex_rbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	int idx, er_idx;
	u64 config1;
	bool ok = false;

	if (!uncore_box_is_fake(box) && reg1->alloc)
		return NULL;

	idx = reg1->idx % 6;
	config1 = reg1->config;
again:
	er_idx = idx;
	/* the 3rd and 4th events use the same extra register */
	if (er_idx > 2)
		er_idx--;
	er_idx += (reg1->idx / 6) * 5;

	er = &box->shared_regs[er_idx];
	raw_spin_lock_irqsave(&er->lock, flags);
	if (idx < 2) {
		if (!atomic_read(&er->ref) || er->config == reg1->config) {
			atomic_inc(&er->ref);
			er->config = reg1->config;
			ok = true;
		}
	} else if (idx == 2 || idx == 3) {
		/*
		 * these two events use different fields in an extra
		 * register, bits 0~7 and bits 8~15 respectively.
		 */
		u64 mask = 0xff << ((idx - 2) * 8);
		if (!__BITS_VALUE(atomic_read(&er->ref), idx - 2, 8) ||
		    !((er->config ^ config1) & mask)) {
			atomic_add(1 << ((idx - 2) * 8), &er->ref);
			er->config &= ~mask;
			er->config |= config1 & mask;
			ok = true;
		}
	} else {
		if (!atomic_read(&er->ref) ||
		    (er->config == (hwc->config >> 32) &&
		     er->config1 == reg1->config &&
		     er->config2 == reg2->config)) {
			atomic_inc(&er->ref);
			er->config = (hwc->config >> 32);
			er->config1 = reg1->config;
			er->config2 = reg2->config;
			ok = true;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		/*
		 * The Rbox events are always in pairs. The paired
		 * events are functionally identical, but use different
		 * extra registers. If we failed to take an extra
		 * register, try the alternative.
		 */
		idx ^= 1;
		if (idx != reg1->idx % 6) {
			if (idx == 2)
				config1 >>= 8;
			else if (idx == 3)
				config1 <<= 8;
			goto again;
		}
	} else {
		if (!uncore_box_is_fake(box)) {
			if (idx != reg1->idx % 6)
				nhmex_rbox_alter_er(box, event);
			reg1->alloc = 1;
		}
		return NULL;
	}
	return &uncore_constraint_empty;
}
static void nhmex_rbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	int idx, er_idx;

	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	idx = reg1->idx % 6;
	er_idx = idx;
	if (er_idx > 2)
		er_idx--;
	er_idx += (reg1->idx / 6) * 5;

	er = &box->shared_regs[er_idx];
	if (idx == 2 || idx == 3)
		atomic_sub(1 << ((idx - 2) * 8), &er->ref);
	else
		atomic_dec(&er->ref);

	reg1->alloc = 0;
}

static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	int idx;

	idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >>
		NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
	if (idx >= 0x18)
		return -EINVAL;

	reg1->idx = idx;
	reg1->config = event->attr.config1;

	switch (idx % 6) {
	case 4:
	case 5:
		hwc->config |= event->attr.config & (~0ULL << 32);
		reg2->config = event->attr.config2;
		break;
	}
	return 0;
}

static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
	int idx, port;

	idx = reg1->idx;
	port = idx / 6 + box->pmu->pmu_idx * 4;

	switch (idx % 6) {
	case 0:
		wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG0(port), reg1->config);
		break;
	case 1:
		wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG1(port), reg1->config);
		break;
	case 2:
	case 3:
		wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port),
			uncore_shared_reg_config(box, 2 + (idx / 6) * 5));
		break;
	case 4:
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port),
			hwc->config >> 32);
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(port), reg1->config);
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MASK(port), reg2->config);
		break;
	case 5:
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port),
			hwc->config >> 32);
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(port), reg1->config);
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MASK(port), reg2->config);
		break;
	}

	wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
		(hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK));
}
DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config:32-63");
DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config1:0-63");
DEFINE_UNCORE_FORMAT_ATTR(xbr_mask, xbr_mask, "config2:0-63");
DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg, qlx_cfg, "config1:0-15");
DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg, iperf_cfg, "config1:0-31");

static struct attribute *nhmex_uncore_rbox_formats_attr[] = {
	&format_attr_event5.attr,
	&format_attr_xbr_mm_cfg.attr,
	&format_attr_xbr_match.attr,
	&format_attr_xbr_mask.attr,
	&format_attr_qlx_cfg.attr,
	&format_attr_iperf_cfg.attr,
	NULL,
};

static struct attribute_group nhmex_uncore_rbox_format_group = {
	.name = "format",
	.attrs = nhmex_uncore_rbox_formats_attr,
};
static struct uncore_event_desc nhmex_uncore_rbox_events[] = {
	INTEL_UNCORE_EVENT_DESC(qpi0_flit_send,		"event=0x0,iperf_cfg=0x80000000"),
	INTEL_UNCORE_EVENT_DESC(qpi1_flit_send,		"event=0x6,iperf_cfg=0x80000000"),
	INTEL_UNCORE_EVENT_DESC(qpi0_idle_filt,		"event=0x0,iperf_cfg=0x40000000"),
	INTEL_UNCORE_EVENT_DESC(qpi1_idle_filt,		"event=0x6,iperf_cfg=0x40000000"),
	INTEL_UNCORE_EVENT_DESC(qpi0_data_response,	"event=0x0,iperf_cfg=0xc4"),
	INTEL_UNCORE_EVENT_DESC(qpi1_data_response,	"event=0x6,iperf_cfg=0xc4"),
	{ /* end: all zeroes */ },
};
static struct intel_uncore_ops nhmex_uncore_rbox_ops = {
	NHMEX_UNCORE_OPS_COMMON_INIT(),
	.enable_event		= nhmex_rbox_msr_enable_event,
	.hw_config		= nhmex_rbox_hw_config,
	.get_constraint		= nhmex_rbox_get_constraint,
	.put_constraint		= nhmex_rbox_put_constraint,
};

static struct intel_uncore_type nhmex_uncore_rbox = {
	.name			= "rbox",
	.num_counters		= 8,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_R_MSR_PMON_CTL0,
	.perf_ctr		= NHMEX_R_MSR_PMON_CNT0,
	.event_mask		= NHMEX_R_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_R_MSR_GLOBAL_CTL,
	.msr_offset		= NHMEX_R_MSR_OFFSET,
	.pair_ctr_ctl		= 1,
	.num_shared_regs	= 20,
	.event_descs		= nhmex_uncore_rbox_events,
	.ops			= &nhmex_uncore_rbox_ops,
	.format_group		= &nhmex_uncore_rbox_format_group
};

static struct intel_uncore_type *nhmex_msr_uncores[] = {
	&nhmex_uncore_ubox,
	&nhmex_uncore_cbox,
	&nhmex_uncore_bbox,
	&nhmex_uncore_sbox,
	&nhmex_uncore_mbox,
	&nhmex_uncore_rbox,
	&nhmex_uncore_wbox,
	NULL,
};

void nhmex_uncore_cpu_init(void)
{
	if (boot_cpu_data.x86_model == 46)
		uncore_nhmex = true;
	else
		nhmex_uncore_mbox.event_descs = wsmex_uncore_mbox_events;
	if (nhmex_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		nhmex_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = nhmex_msr_uncores;
}
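/*
 * Usage sketch (assuming these boxes register through the common uncore
 * framework as uncore_<name> or uncore_<name>_<N> PMUs): the Wbox fixed
 * clockticks event, for instance, would then be counted system-wide with
 *
 *	perf stat -a -e uncore_wbox/clockticks/ -- sleep 1
 *
 * with the event string decoded via the "format" attribute groups above.
 */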
/* end of Nehalem-EX uncore support */