/* SandyBridge-EP/IvyTown uncore support */
#include "uncore.h"

/* SNB-EP pci bus to socket mapping */
#define SNBEP_CPUNODEID				0x40
#define SNBEP_GIDNIDMAP				0x54

/* SNB-EP Box level control */
#define SNBEP_PMON_BOX_CTL_RST_CTRL		(1 << 0)
#define SNBEP_PMON_BOX_CTL_RST_CTRS		(1 << 1)
#define SNBEP_PMON_BOX_CTL_FRZ			(1 << 8)
#define SNBEP_PMON_BOX_CTL_FRZ_EN		(1 << 16)
#define SNBEP_PMON_BOX_CTL_INT			(SNBEP_PMON_BOX_CTL_RST_CTRL | \
						 SNBEP_PMON_BOX_CTL_RST_CTRS | \
						 SNBEP_PMON_BOX_CTL_FRZ_EN)

/* SNB-EP event control */
#define SNBEP_PMON_CTL_EV_SEL_MASK		0x000000ff
#define SNBEP_PMON_CTL_UMASK_MASK		0x0000ff00
#define SNBEP_PMON_CTL_RST			(1 << 17)
#define SNBEP_PMON_CTL_EDGE_DET			(1 << 18)
#define SNBEP_PMON_CTL_EV_SEL_EXT		(1 << 21)
#define SNBEP_PMON_CTL_EN			(1 << 22)
#define SNBEP_PMON_CTL_INVERT			(1 << 23)
#define SNBEP_PMON_CTL_TRESH_MASK		0xff000000
#define SNBEP_PMON_RAW_EVENT_MASK		(SNBEP_PMON_CTL_EV_SEL_MASK | \
						 SNBEP_PMON_CTL_UMASK_MASK | \
						 SNBEP_PMON_CTL_EDGE_DET | \
						 SNBEP_PMON_CTL_INVERT | \
						 SNBEP_PMON_CTL_TRESH_MASK)

/* SNB-EP Ubox event control */
#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK		0x1f000000
#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK		\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)

#define SNBEP_CBO_PMON_CTL_TID_EN		(1 << 19)
#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* SNB-EP PCU event control */
#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK	0x0000c000
#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK	0x1f000000
#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT	(1 << 30)
#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET	(1 << 31)
#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

#define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)

/* SNB-EP pci control register */
#define SNBEP_PCI_PMON_BOX_CTL			0xf4
#define SNBEP_PCI_PMON_CTL0			0xd8
/* SNB-EP pci counter register */
#define SNBEP_PCI_PMON_CTR0			0xa0

/* SNB-EP home agent register */
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0	0x40
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1	0x44
#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH	0x48
/* SNB-EP memory controller register */
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL		0xf0
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR		0xd0
/* SNB-EP QPI register */
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0		0x228
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1		0x22c
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0		0x238
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1		0x23c

/* SNB-EP Ubox register */
#define SNBEP_U_MSR_PMON_CTR0			0xc16
#define SNBEP_U_MSR_PMON_CTL0			0xc10
#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL		0xc08
#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR		0xc09

/* SNB-EP Cbo register */
#define SNBEP_C0_MSR_PMON_CTR0			0xd16
#define SNBEP_C0_MSR_PMON_CTL0			0xd10
#define SNBEP_C0_MSR_PMON_BOX_CTL		0xd04
#define SNBEP_C0_MSR_PMON_BOX_FILTER		0xd14
#define SNBEP_CBO_MSR_OFFSET			0x20

#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID	0x1f
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID	0x3fc00
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE	0x7c0000
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC	0xff800000

#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {	\
	.event = (e),				\
	.msr = SNBEP_C0_MSR_PMON_BOX_FILTER,	\
	.config_mask = (m),			\
	.idx = (i)				\
}

/* SNB-EP PCU register */
#define SNBEP_PCU_MSR_PMON_CTR0			0xc36
#define SNBEP_PCU_MSR_PMON_CTL0			0xc30
#define SNBEP_PCU_MSR_PMON_BOX_CTL		0xc24
#define SNBEP_PCU_MSR_PMON_BOX_FILTER		0xc34
#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK	0xffffffff
#define SNBEP_PCU_MSR_CORE_C3_CTR		0x3fc
#define SNBEP_PCU_MSR_CORE_C6_CTR		0x3fd

/* IVBEP event control */
#define IVBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS)
#define IVBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_TRESH_MASK)
/* IVBEP Ubox */
#define IVBEP_U_MSR_PMON_GLOBAL_CTL		0xc00
#define IVBEP_U_PMON_GLOBAL_FRZ_ALL		(1 << 31)
#define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL		(1 << 29)

#define IVBEP_U_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
/* IVBEP Cbo */
#define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(IVBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

#define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID	(0x1fULL << 0)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 5)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x3fULL << 17)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID	(0xffffULL << 32)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC	(0x1ffULL << 52)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6	(0x1ULL << 61)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC	(0x1ULL << 62)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* IVBEP home agent */
#define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST		(1 << 16)
#define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK	\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
/* IVBEP PCU */
#define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
/* IVBEP QPI */
#define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)
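
/*
 * Helper used by the constraint code below: extract field @i, each @n
 * bits wide, from the packed value @x (e.g. one of the per-field
 * reference counts stored together in a single atomic_t).
 */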
#define __BITS_VALUE(x, i, n)	((typeof(x))(((x) >> ((i) * (n))) & \
				((1ULL << (n)) - 1)))

/* Haswell-EP Ubox */
#define HSWEP_U_MSR_PMON_CTR0			0x709
#define HSWEP_U_MSR_PMON_CTL0			0x705
#define HSWEP_U_MSR_PMON_FILTER			0x707

#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL		0x703
#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR		0x704

#define HSWEP_U_MSR_PMON_BOX_FILTER_TID		(0x1 << 0)
#define HSWEP_U_MSR_PMON_BOX_FILTER_CID		(0x1fULL << 1)
#define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
				(HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
				 HSWEP_U_MSR_PMON_BOX_FILTER_CID)

/* Haswell-EP CBo */
#define HSWEP_C0_MSR_PMON_CTR0			0xe08
#define HSWEP_C0_MSR_PMON_CTL0			0xe01
#define HSWEP_C0_MSR_PMON_BOX_CTL		0xe00
#define HSWEP_C0_MSR_PMON_BOX_FILTER0		0xe05
#define HSWEP_CBO_MSR_OFFSET			0x10

#define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID	(0x3fULL << 0)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 6)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x7fULL << 17)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID	(0xffffULL << 32)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC	(0x1ffULL << 52)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6	(0x1ULL << 61)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC	(0x1ULL << 62)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* Haswell-EP Sbox */
#define HSWEP_S0_MSR_PMON_CTR0			0x726
#define HSWEP_S0_MSR_PMON_CTL0			0x721
#define HSWEP_S0_MSR_PMON_BOX_CTL		0x720
#define HSWEP_SBOX_MSR_OFFSET			0xa
#define HSWEP_S_MSR_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* Haswell-EP PCU */
#define HSWEP_PCU_MSR_PMON_CTR0			0x717
#define HSWEP_PCU_MSR_PMON_CTL0			0x711
#define HSWEP_PCU_MSR_PMON_BOX_CTL		0x710
#define HSWEP_PCU_MSR_PMON_BOX_FILTER		0x715

/* KNL Ubox */
#define KNL_U_MSR_PMON_RAW_EVENT_MASK \
				(SNBEP_U_MSR_PMON_RAW_EVENT_MASK | \
				 SNBEP_CBO_PMON_CTL_TID_EN)
/* KNL CHA */
#define KNL_CHA_MSR_OFFSET			0xc
#define KNL_CHA_MSR_PMON_CTL_QOR		(1 << 16)
#define KNL_CHA_MSR_PMON_RAW_EVENT_MASK \
				(SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK | \
				 KNL_CHA_MSR_PMON_CTL_QOR)
#define KNL_CHA_MSR_PMON_BOX_FILTER_TID		0x1ff
#define KNL_CHA_MSR_PMON_BOX_FILTER_STATE	(7 << 18)
#define KNL_CHA_MSR_PMON_BOX_FILTER_OP		(0xfffffe2aULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE	(0x1ULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE	(0x1ULL << 33)
#define KNL_CHA_MSR_PMON_BOX_FILTER_NNC		(0x1ULL << 37)

/* KNL EDC/MC UCLK */
#define KNL_UCLK_MSR_PMON_CTR0_LOW		0x400
#define KNL_UCLK_MSR_PMON_CTL0			0x420
#define KNL_UCLK_MSR_PMON_BOX_CTL		0x430
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW	0x44c
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL	0x454
#define KNL_PMON_FIXED_CTL_EN			0x1

/* KNL EDC */
#define KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW		0xa00
#define KNL_EDC0_ECLK_MSR_PMON_CTL0		0xa20
#define KNL_EDC0_ECLK_MSR_PMON_BOX_CTL		0xa30
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW	0xa3c
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL	0xa44

/* KNL MC */
#define KNL_MC0_CH0_MSR_PMON_CTR0_LOW		0xb00
#define KNL_MC0_CH0_MSR_PMON_CTL0		0xb20
#define KNL_MC0_CH0_MSR_PMON_BOX_CTL		0xb30
#define KNL_MC0_CH0_MSR_PMON_FIXED_LOW		0xb3c
#define KNL_MC0_CH0_MSR_PMON_FIXED_CTL		0xb44

/* KNL IRP */
#define KNL_IRP_PCI_PMON_BOX_CTL		0xf0
#define KNL_IRP_PCI_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 KNL_CHA_MSR_PMON_CTL_QOR)
/* KNL PCU */
#define KNL_PCU_PMON_CTL_EV_SEL_MASK		0x0000007f
#define KNL_PCU_PMON_CTL_USE_OCC_CTR		(1 << 7)
#define KNL_PCU_MSR_PMON_CTL_TRESH_MASK		0x3f000000
#define KNL_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(KNL_PCU_PMON_CTL_EV_SEL_MASK | \
				 KNL_PCU_PMON_CTL_USE_OCC_CTR | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_CBO_PMON_CTL_TID_EN | \
				 SNBEP_PMON_CTL_INVERT | \
				 KNL_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

/* SKX pci bus to socket mapping */
#define SKX_CPUNODEID				0xc0
#define SKX_GIDNIDMAP				0xd4

/* SKX CHA */
#define SKX_CHA_MSR_PMON_BOX_FILTER_TID		(0x1ffULL << 0)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 9)
#define SKX_CHA_MSR_PMON_BOX_FILTER_STATE	(0x3ffULL << 17)
#define SKX_CHA_MSR_PMON_BOX_FILTER_REM		(0x1ULL << 32)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LOC		(0x1ULL << 33)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC	(0x1ULL << 35)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NM		(0x1ULL << 36)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM	(0x1ULL << 37)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC0	(0x3ffULL << 41)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC1	(0x3ffULL << 51)
#define SKX_CHA_MSR_PMON_BOX_FILTER_C6		(0x1ULL << 61)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NC		(0x1ULL << 62)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* SKX IIO */
#define SKX_IIO0_MSR_PMON_CTL0			0xa48
#define SKX_IIO0_MSR_PMON_CTR0			0xa41
#define SKX_IIO0_MSR_PMON_BOX_CTL		0xa40
#define SKX_IIO_MSR_OFFSET			0x20

#define SKX_PMON_CTL_TRESH_MASK			(0xff << 24)
#define SKX_PMON_CTL_TRESH_MASK_EXT		(0xf)
#define SKX_PMON_CTL_CH_MASK			(0xff << 4)
#define SKX_PMON_CTL_FC_MASK			(0x7 << 12)
#define SKX_IIO_PMON_RAW_EVENT_MASK		(SNBEP_PMON_CTL_EV_SEL_MASK | \
						 SNBEP_PMON_CTL_UMASK_MASK | \
						 SNBEP_PMON_CTL_EDGE_DET | \
						 SNBEP_PMON_CTL_INVERT | \
						 SKX_PMON_CTL_TRESH_MASK)
#define SKX_IIO_PMON_RAW_EVENT_MASK_EXT		(SKX_PMON_CTL_TRESH_MASK_EXT | \
						 SKX_PMON_CTL_CH_MASK | \
						 SKX_PMON_CTL_FC_MASK)

/* SKX IRP */
#define SKX_IRP0_MSR_PMON_CTL0			0xa5b
#define SKX_IRP0_MSR_PMON_CTR0			0xa59
#define SKX_IRP0_MSR_PMON_BOX_CTL		0xa58
#define SKX_IRP_MSR_OFFSET			0x20

/* SKX UPI */
#define SKX_UPI_PCI_PMON_CTL0			0x350
#define SKX_UPI_PCI_PMON_CTR0			0x318
#define SKX_UPI_PCI_PMON_BOX_CTL		0x378
#define SKX_PMON_CTL_UMASK_EXT			0xff

/* SKX M2M */
#define SKX_M2M_PCI_PMON_CTL0			0x228
#define SKX_M2M_PCI_PMON_CTR0			0x200
#define SKX_M2M_PCI_PMON_BOX_CTL		0x258
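
/*
 * The format attributes below back the per-PMU sysfs "format" directories,
 * telling perf which bits of attr->config/config1/config2 each named
 * event field occupies (e.g. "event=0xNN" maps to config:0-7).
 */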
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-39");
DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
DEFINE_UNCORE_FORMAT_ATTR(filter_link4, filter_link, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
DEFINE_UNCORE_FORMAT_ATTR(filter_state5, filter_state, "config1:17-26");
DEFINE_UNCORE_FORMAT_ATTR(filter_rem, filter_rem, "config1:32");
DEFINE_UNCORE_FORMAT_ATTR(filter_loc, filter_loc, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_nm, filter_nm, "config1:36");
DEFINE_UNCORE_FORMAT_ATTR(filter_not_nm, filter_not_nm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_0, filter_opc0, "config1:41-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_1, filter_opc1, "config1:51-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
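
/*
 * Boxes are stopped and restarted by toggling the FRZ (freeze) bit in the
 * per-box control register; counter programming is preserved while frozen.
 */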
static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
	u32 config = 0;

	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
		config |= SNBEP_PMON_BOX_CTL_FRZ;
		pci_write_config_dword(pdev, box_ctl, config);
	}
}

static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
	u32 config = 0;

	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
		pci_write_config_dword(pdev, box_ctl, config);
	}
}

static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config);
}
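
/*
 * PCI config accesses are 32 bits wide, so the wider counter value is
 * assembled from two consecutive dword reads into one u64.
 */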
static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
	pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);

	return count;
}

static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, SNBEP_PMON_BOX_CTL_INT);
}

static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	u64 config;
	unsigned msr;

	msr = uncore_msr_box_ctl(box);
	if (msr) {
		rdmsrl(msr, config);
		config |= SNBEP_PMON_BOX_CTL_FRZ;
		wrmsrl(msr, config);
	}
}

static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	u64 config;
	unsigned msr;

	msr = uncore_msr_box_ctl(box);
	if (msr) {
		rdmsrl(msr, config);
		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
		wrmsrl(msr, config);
	}
}

static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE)
		wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}

static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);

	if (msr)
		wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
}

static struct attribute *snbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static struct attribute *snbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static struct attribute *snbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_nid.attr,
	&format_attr_filter_state.attr,
	&format_attr_filter_opc.attr,
	NULL,
};

static struct attribute *snbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

static struct attribute *snbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};
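
/*
 * Each IMC CAS event counts 64-byte cache-line transfers; the scale
 * 6.103515625e-5 is 64/2^20, converting the raw count to MiB.
 */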
static struct uncore_event_desc snbep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};

static struct uncore_event_desc snbep_uncore_qpi_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
	INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
	INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
	{ /* end: all zeroes */ },
};

static struct attribute_group snbep_uncore_format_group = {
	.name = "format",
	.attrs = snbep_uncore_formats_attr,
};

static struct attribute_group snbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_ubox_formats_attr,
};

static struct attribute_group snbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_cbox_formats_attr,
};

static struct attribute_group snbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = snbep_uncore_pcu_formats_attr,
};

static struct attribute_group snbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = snbep_uncore_qpi_formats_attr,
};
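
/*
 * The shared MSR/PCI callbacks are wrapped in initializer macros so that
 * the per-box ops structures below only spell out the callbacks that
 * actually differ.
 */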
#define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),			\
	.init_box	= snbep_uncore_msr_init_box

static struct intel_uncore_ops snbep_uncore_msr_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()			\
	.init_box	= snbep_uncore_pci_init_box,		\
	.disable_box	= snbep_uncore_pci_disable_box,		\
	.enable_box	= snbep_uncore_pci_enable_box,		\
	.disable_event	= snbep_uncore_pci_disable_event,	\
	.read_counter	= snbep_uncore_pci_read_counter

static struct intel_uncore_ops snbep_uncore_pci_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event	= snbep_uncore_pci_enable_event,
};

static struct event_constraint snbep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
	EVENT_CONSTRAINT_OVERLAP(0x1f, 0xe, 0xff),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type snbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters	= 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &snbep_uncore_msr_ops,
	.format_group	= &snbep_uncore_ubox_format_group,
};
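
/*
 * Table mapping event/umask encodings to the Cbox filter fields they
 * need; idx is a bitmask over the fields decoded by
 * snbep_cbox_filter_mask() (0x1 TID, 0x2 NID, 0x4 STATE, 0x8 OPC).
 */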
static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
	EVENT_EXTRA_END
};
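
/*
 * A Cbox has a single filter register shared by all of its counters.
 * Each 6-bit slice of er->ref counts how many events are using one
 * filter field; a field can be shared only by events that program
 * identical filter bits, otherwise the event gets an empty constraint.
 */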
static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i;

	if (uncore_box_is_fake(box))
		return;

	for (i = 0; i < 5; i++) {
		if (reg1->alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	reg1->alloc = 0;
}

static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
			    u64 (*cbox_filter_mask)(int fields))
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i, alloc = 0;
	unsigned long flags;
	u64 mask;

	if (reg1->idx == EXTRA_REG_NONE)
		return NULL;

	raw_spin_lock_irqsave(&er->lock, flags);
	for (i = 0; i < 5; i++) {
		if (!(reg1->idx & (0x1 << i)))
			continue;
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			continue;

		mask = cbox_filter_mask(0x1 << i);
		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
		    !((reg1->config ^ er->config) & mask)) {
			atomic_add(1 << (i * 6), &er->ref);
			er->config &= ~mask;
			er->config |= reg1->config & mask;
			alloc |= (0x1 << i);
		} else {
			break;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);
	if (i < 5)
		goto fail;

	if (!uncore_box_is_fake(box))
		reg1->alloc |= alloc;

	return NULL;
fail:
	for (; i >= 0; i--) {
		if (alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	return &uncore_constraint_empty;
}

static u64 snbep_cbox_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
	if (fields & 0x4)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;

	return mask;
}

static struct event_constraint *
snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
}

static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}

static struct intel_uncore_ops snbep_uncore_cbox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_cbox_hw_config,
	.get_constraint		= snbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

static struct intel_uncore_type snbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &snbep_uncore_cbox_ops,
	.format_group		= &snbep_uncore_cbox_format_group,
};
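
/*
 * PCU band filters live in four byte-wide slots of a single filter
 * register. If the slot implied by the event select (0xb-0xe) is already
 * taken with a different value, snbep_pcu_alter_er() rotates the event to
 * the next free slot, adjusting ev_sel and the filter value to match.
 */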
static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	u64 config = reg1->config;

	if (new_idx > reg1->idx)
		config <<= 8 * (new_idx - reg1->idx);
	else
		config >>= 8 * (reg1->idx - new_idx);

	if (modify) {
		hwc->config += new_idx - reg1->idx;
		reg1->config = config;
		reg1->idx = new_idx;
	}
	return config;
}

static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	unsigned long flags;
	int idx = reg1->idx;
	u64 mask, config1 = reg1->config;
	bool ok = false;

	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;
again:
	mask = 0xffULL << (idx * 8);
	raw_spin_lock_irqsave(&er->lock, flags);
	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
	    !((config1 ^ er->config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		er->config &= ~mask;
		er->config |= config1 & mask;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		idx = (idx + 1) % 4;
		if (idx != reg1->idx) {
			config1 = snbep_pcu_alter_er(event, idx, false);
			goto again;
		}
		return &uncore_constraint_empty;
	}

	if (!uncore_box_is_fake(box)) {
		if (idx != reg1->idx)
			snbep_pcu_alter_er(event, idx, true);
		reg1->alloc = 1;
	}
	return NULL;
}

static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];

	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	atomic_sub(1 << (reg1->idx * 8), &er->ref);
	reg1->alloc = 0;
}

static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

	if (ev_sel >= 0xb && ev_sel <= 0xe) {
		reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
		reg1->idx = ev_sel - 0xb;
		reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
	}
	return 0;
}

static struct intel_uncore_ops snbep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

static struct intel_uncore_type snbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};

static struct intel_uncore_type *snbep_msr_uncores[] = {
	&snbep_uncore_ubox,
	&snbep_uncore_cbox,
	&snbep_uncore_pcu,
	NULL,
};

void snbep_uncore_cpu_init(void)
{
	if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = snbep_msr_uncores;
}

enum {
	SNBEP_PCI_QPI_PORT0_FILTER,
	SNBEP_PCI_QPI_PORT1_FILTER,
	HSWEP_PCI_PCU_3,
};
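
/*
 * The QPI packet match/mask registers sit on a separate "filter" PCI
 * device, one per port; it is looked up via uncore_extra_pci_dev[] and
 * programmed at enable time, before the event itself is enabled.
 */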
static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
		reg1->idx = 0;
		reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
		reg1->config = event->attr.config1;
		reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
		reg2->config = event->attr.config2;
	}
	return 0;
}

static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
		int pkg = topology_phys_to_logical_pkg(box->pci_phys_id);
		struct pci_dev *filter_pdev = uncore_extra_pci_dev[pkg].dev[idx];

		if (filter_pdev) {
			pci_write_config_dword(filter_pdev, reg1->reg,
						(u32)reg1->config);
			pci_write_config_dword(filter_pdev, reg1->reg + 4,
						(u32)(reg1->config >> 32));
			pci_write_config_dword(filter_pdev, reg2->reg,
						(u32)reg2->config);
			pci_write_config_dword(filter_pdev, reg2->reg + 4,
						(u32)(reg2->config >> 32));
		}
	}

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops snbep_uncore_qpi_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event		= snbep_qpi_enable_event,
	.hw_config		= snbep_qpi_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

#define SNBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &snbep_uncore_pci_ops,		\
	.format_group	= &snbep_uncore_format_group

static struct intel_uncore_type snbep_uncore_ha = {
	.name		= "ha",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.event_descs		= snbep_uncore_qpi_events,
	.format_group		= &snbep_uncore_qpi_format_group,
};

static struct intel_uncore_type snbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters	= 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

enum {
	SNBEP_PCI_UNCORE_HA,
	SNBEP_PCI_UNCORE_IMC,
	SNBEP_PCI_UNCORE_QPI,
	SNBEP_PCI_UNCORE_R2PCIE,
	SNBEP_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *snbep_pci_uncores[] = {
	[SNBEP_PCI_UNCORE_HA]		= &snbep_uncore_ha,
	[SNBEP_PCI_UNCORE_IMC]		= &snbep_uncore_imc,
	[SNBEP_PCI_UNCORE_QPI]		= &snbep_uncore_qpi,
	[SNBEP_PCI_UNCORE_R2PCIE]	= &snbep_uncore_r2pcie,
	[SNBEP_PCI_UNCORE_R3QPI]	= &snbep_uncore_r3qpi,
	NULL,
};
static const struct pci_device_id snbep_uncore_pci_ids[] = {
	{ /* Home Agent */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
	},
	{ /* MC Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* QPI Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* QPI Port 0 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
static struct pci_driver snbep_uncore_pci_driver = {
	.name		= "snbep_uncore",
	.id_table	= snbep_uncore_pci_ids,
};

/*
 * build pci bus to socket mapping
 */
static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool reverse)
{
	struct pci_dev *ubox_dev = NULL;
	int i, bus, nodeid, segment;
	struct pci2phy_map *map;
	int err = 0;
	u32 config = 0;

	while (1) {
		/* find the UBOX device */
		ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
		if (!ubox_dev)
			break;
		bus = ubox_dev->bus->number;
		/* get the Node ID of the local register */
		err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
		if (err)
			break;
		nodeid = config;
		/* get the Node ID mapping */
		err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
		if (err)
			break;

		segment = pci_domain_nr(ubox_dev->bus);
		raw_spin_lock(&pci2phy_map_lock);
		map = __find_pci2phy_map(segment);
		if (!map) {
			raw_spin_unlock(&pci2phy_map_lock);
			err = -ENOMEM;
			break;
		}

		/*
		 * every three bits in the Node ID mapping register maps
		 * to a particular node.
		 */
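		/*
		 * Example: if the local Node ID read above is 1 and bits
		 * 5:3 of the mapping register also hold the value 1, the
		 * loop below stops at i = 1, i.e. this bus belongs to
		 * physical package 1.
		 */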
		for (i = 0; i < 8; i++) {
			if (nodeid == ((config >> (3 * i)) & 0x7)) {
				map->pbus_to_physid[bus] = i;
				break;
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	if (!err) {
		/*
		 * For PCI bus with no UBOX device, find the next bus
		 * that has UBOX device and use its mapping.
		 */
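		/*
		 * With reverse == true the buses are walked from 255 down
		 * to 0, so an unmapped bus inherits the mapping of the
		 * nearest higher-numbered bus that has a UBOX; with
		 * reverse == false it inherits from the nearest
		 * lower-numbered one.
		 */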
		raw_spin_lock(&pci2phy_map_lock);
		list_for_each_entry(map, &pci2phy_map_head, list) {
			i = -1;
			if (reverse) {
				for (bus = 255; bus >= 0; bus--) {
					if (map->pbus_to_physid[bus] >= 0)
						i = map->pbus_to_physid[bus];
					else
						map->pbus_to_physid[bus] = i;
				}
			} else {
				for (bus = 0; bus <= 255; bus++) {
					if (map->pbus_to_physid[bus] >= 0)
						i = map->pbus_to_physid[bus];
					else
						map->pbus_to_physid[bus] = i;
				}
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	pci_dev_put(ubox_dev);

	return err ? pcibios_err_to_errno(err) : 0;
}
int snbep_uncore_pci_init(void)
{
	int ret = snbep_pci2phy_map_init(0x3ce0, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
	if (ret)
		return ret;
	uncore_pci_uncores = snbep_pci_uncores;
	uncore_pci_driver = &snbep_uncore_pci_driver;
	return 0;
}
/* end of Sandy Bridge-EP uncore support */

/* IvyTown uncore support */
static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);
	if (msr)
		wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
}

static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}

#define IVBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.init_box	= ivbep_uncore_msr_init_box,		\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

static struct intel_uncore_ops ivbep_uncore_msr_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

static struct intel_uncore_ops ivbep_uncore_pci_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

#define IVBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= IVBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &ivbep_uncore_pci_ops,		\
	.format_group	= &ivbep_uncore_format_group
static struct attribute *ivbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_link.attr,
	&format_attr_filter_state2.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};

static struct attribute_group ivbep_uncore_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_formats_attr,
};

static struct attribute_group ivbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_ubox_formats_attr,
};

static struct attribute_group ivbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_cbox_formats_attr,
};

static struct attribute_group ivbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_pcu_formats_attr,
};

static struct attribute_group ivbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_qpi_formats_attr,
};

static struct intel_uncore_type ivbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters	= 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_ubox_format_group,
};
static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	EVENT_EXTRA_END
};
static u64 ivbep_cbox_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
	if (fields & 0x4)
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8)
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
	if (fields & 0x10) {
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
	}
	return mask;
}
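
/*
 * The "fields" bitmap above mirrors the er->idx values in
 * ivbep_uncore_cbox_extra_regs: bit 0 = TID, bit 1 = LINK, bit 2 = STATE,
 * bit 3 = NID and bit 4 = the OPC/NC/C6/ISOC group.
 */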
static struct event_constraint *
ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
}

static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}

static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		wrmsrl(reg1->reg, filter & 0xffffffff);
		wrmsrl(reg1->reg + 6, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= ivbep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= ivbep_cbox_hw_config,
	.get_constraint		= ivbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

static struct intel_uncore_type ivbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 15,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &ivbep_uncore_cbox_ops,
	.format_group		= &ivbep_uncore_cbox_format_group,
};

static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

static struct intel_uncore_type ivbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_pcu_ops,
	.format_group		= &ivbep_uncore_pcu_format_group,
};

static struct intel_uncore_type *ivbep_msr_uncores[] = {
	&ivbep_uncore_ubox,
	&ivbep_uncore_cbox,
	&ivbep_uncore_pcu,
	NULL,
};

void ivbep_uncore_cpu_init(void)
{
	if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = ivbep_msr_uncores;
}
static struct intel_uncore_type ivbep_uncore_ha = {
	.name		= "ha",
	.num_counters	= 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type ivbep_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

/* registers in IRP boxes are not properly aligned */
static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
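
/*
 * Both tables are indexed by the counter index (hwc->idx, 0..3).  Each
 * 64-bit counter is read back as two 32-bit config-space accesses: the
 * low half at the listed offset and the high half at offset + 4.
 */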
static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
			       hwc->config | SNBEP_PMON_CTL_EN);
}

static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
}

static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);

	return count;
}

static struct intel_uncore_ops ivbep_uncore_irp_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= ivbep_uncore_irp_read_counter,
};

static struct intel_uncore_type ivbep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= IVBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &ivbep_uncore_irp_ops,
	.format_group		= &ivbep_uncore_format_group,
};

static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_qpi_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
	.hw_config	= snbep_qpi_hw_config,
	.get_constraint	= uncore_get_constraint,
	.put_constraint	= uncore_put_constraint,
};

static struct intel_uncore_type ivbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_qpi_ops,
	.format_group		= &ivbep_uncore_qpi_format_group,
};

static struct intel_uncore_type ivbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type ivbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters	= 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

enum {
	IVBEP_PCI_UNCORE_HA,
	IVBEP_PCI_UNCORE_IMC,
	IVBEP_PCI_UNCORE_IRP,
	IVBEP_PCI_UNCORE_QPI,
	IVBEP_PCI_UNCORE_R2PCIE,
	IVBEP_PCI_UNCORE_R3QPI,
};
static struct intel_uncore_type *ivbep_pci_uncores[] = {
	[IVBEP_PCI_UNCORE_HA]		= &ivbep_uncore_ha,
	[IVBEP_PCI_UNCORE_IMC]		= &ivbep_uncore_imc,
	[IVBEP_PCI_UNCORE_IRP]		= &ivbep_uncore_irp,
	[IVBEP_PCI_UNCORE_QPI]		= &ivbep_uncore_qpi,
	[IVBEP_PCI_UNCORE_R2PCIE]	= &ivbep_uncore_r2pcie,
	[IVBEP_PCI_UNCORE_R3QPI]	= &ivbep_uncore_r3qpi,
	NULL,
};

static const struct pci_device_id ivbep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
static struct pci_driver ivbep_uncore_pci_driver = {
	.name		= "ivbep_uncore",
	.id_table	= ivbep_uncore_pci_ids,
};

int ivbep_uncore_pci_init(void)
{
	int ret = snbep_pci2phy_map_init(0x0e1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
	if (ret)
		return ret;
	uncore_pci_uncores = ivbep_pci_uncores;
	uncore_pci_driver = &ivbep_uncore_pci_driver;
	return 0;
}
/* end of IvyTown uncore support */

/* KNL uncore support */
static struct attribute *knl_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static struct attribute_group knl_uncore_ubox_format_group = {
	.name = "format",
	.attrs = knl_uncore_ubox_formats_attr,
};

static struct intel_uncore_type knl_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= KNL_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_ubox_format_group,
};

static struct attribute *knl_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid4.attr,
	&format_attr_filter_link3.attr,
	&format_attr_filter_state4.attr,
	&format_attr_filter_local.attr,
	&format_attr_filter_all_op.attr,
	&format_attr_filter_nnm.attr,
	&format_attr_filter_opc3.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static struct attribute_group knl_uncore_cha_format_group = {
	.name = "format",
	.attrs = knl_uncore_cha_formats_attr,
};

static struct event_constraint knl_uncore_cha_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};

static struct extra_reg knl_uncore_cha_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x3d, 0xff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x4),
	EVENT_EXTRA_END
};

static u64 knl_cha_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x4)
		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_OP;
	return mask;
}
static struct event_constraint *
knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask);
}
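
/*
 * Besides the user-requested TID/STATE/OP fields, hw_config below always
 * sets the REMOTE_NODE, LOCAL_NODE and NNC bits in the filter, so that
 * enabling a filter does not silently drop the corresponding traffic
 * classes.
 */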
static int knl_cha_hw_config(struct intel_uncore_box *box,
			     struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = knl_uncore_cha_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);
		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE;
		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE;
		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_NNC;
		reg1->idx = idx;
	}
	return 0;
}

static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				    struct perf_event *event);

static struct intel_uncore_ops knl_uncore_cha_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= knl_cha_hw_config,
	.get_constraint		= knl_cha_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

static struct intel_uncore_type knl_uncore_cha = {
	.name			= "cha",
	.num_counters		= 4,
	.num_boxes		= 38,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= KNL_CHA_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= KNL_CHA_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= knl_uncore_cha_constraints,
	.ops			= &knl_uncore_cha_ops,
	.format_group		= &knl_uncore_cha_format_group,
};

static struct attribute *knl_uncore_pcu_formats_attr[] = {
	&format_attr_event2.attr,
	&format_attr_use_occ_ctr.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh6.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge_det.attr,
	NULL,
};

static struct attribute_group knl_uncore_pcu_format_group = {
	.name = "format",
	.attrs = knl_uncore_pcu_formats_attr,
};

static struct intel_uncore_type knl_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= KNL_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_pcu_format_group,
};

static struct intel_uncore_type *knl_msr_uncores[] = {
	&knl_uncore_ubox,
	&knl_uncore_cha,
	&knl_uncore_pcu,
	NULL,
};

void knl_uncore_cpu_init(void)
{
	uncore_msr_uncores = knl_msr_uncores;
}
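
/*
 * KNL enables an IMC/EDC box by writing 0 to the box control register,
 * clearing the freeze bit unconditionally instead of read-modify-writing
 * it as the common snbep enable path does.
 */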
static void knl_uncore_imc_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, 0);
}

static void knl_uncore_imc_enable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	if ((event->attr.config & SNBEP_PMON_CTL_EV_SEL_MASK)
							== UNCORE_FIXED_EVENT)
		pci_write_config_dword(pdev, hwc->config_base,
				       hwc->config | KNL_PMON_FIXED_CTL_EN);
	else
		pci_write_config_dword(pdev, hwc->config_base,
				       hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops knl_uncore_imc_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= knl_uncore_imc_enable_box,
	.read_counter	= snbep_uncore_pci_read_counter,
	.enable_event	= knl_uncore_imc_enable_event,
	.disable_event	= snbep_uncore_pci_disable_event,
};

static struct intel_uncore_type knl_uncore_imc_uclk = {
	.name			= "imc_uclk",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

static struct intel_uncore_type knl_uncore_imc_dclk = {
	.name			= "imc",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_MC0_CH0_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_MC0_CH0_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_MC0_CH0_MSR_PMON_FIXED_LOW,
	.fixed_ctl		= KNL_MC0_CH0_MSR_PMON_FIXED_CTL,
	.box_ctl		= KNL_MC0_CH0_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

static struct intel_uncore_type knl_uncore_edc_uclk = {
	.name			= "edc_uclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

static struct intel_uncore_type knl_uncore_edc_eclk = {
	.name			= "edc_eclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_EDC0_ECLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW,
	.fixed_ctl		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL,
	.box_ctl		= KNL_EDC0_ECLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

static struct event_constraint knl_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type knl_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= knl_uncore_m2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct attribute *knl_uncore_irp_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static struct attribute_group knl_uncore_irp_format_group = {
	.name = "format",
	.attrs = knl_uncore_irp_formats_attr,
};

static struct intel_uncore_type knl_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= KNL_IRP_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= KNL_IRP_PCI_PMON_BOX_CTL,
	.ops			= &snbep_uncore_pci_ops,
	.format_group		= &knl_uncore_irp_format_group,
};

enum {
	KNL_PCI_UNCORE_MC_UCLK,
	KNL_PCI_UNCORE_MC_DCLK,
	KNL_PCI_UNCORE_EDC_UCLK,
	KNL_PCI_UNCORE_EDC_ECLK,
	KNL_PCI_UNCORE_M2PCIE,
	KNL_PCI_UNCORE_IRP,
};

static struct intel_uncore_type *knl_pci_uncores[] = {
	[KNL_PCI_UNCORE_MC_UCLK]	= &knl_uncore_imc_uclk,
	[KNL_PCI_UNCORE_MC_DCLK]	= &knl_uncore_imc_dclk,
	[KNL_PCI_UNCORE_EDC_UCLK]	= &knl_uncore_edc_uclk,
	[KNL_PCI_UNCORE_EDC_ECLK]	= &knl_uncore_edc_eclk,
	[KNL_PCI_UNCORE_M2PCIE]		= &knl_uncore_m2pcie,
	[KNL_PCI_UNCORE_IRP]		= &knl_uncore_irp,
	NULL,
};

/*
 * KNL uses a common PCI device ID for multiple instances of an Uncore PMU
 * device type. Prior to KNL, each instance of a PMU device type had a unique
 * device ID.
 *
 *	PCI Device ID	Uncore PMU Devices
 *	----------------------------------
 *	0x7841		MC0 UClk, MC1 UClk
 *	0x7843		MC0 DClk CH 0, MC0 DClk CH 1, MC0 DClk CH 2,
 *			MC1 DClk CH 0, MC1 DClk CH 1, MC1 DClk CH 2
 *	0x7833		EDC0 UClk, EDC1 UClk, EDC2 UClk, EDC3 UClk,
 *			EDC4 UClk, EDC5 UClk, EDC6 UClk, EDC7 UClk
 *	0x7835		EDC0 EClk, EDC1 EClk, EDC2 EClk, EDC3 EClk,
 *			EDC4 EClk, EDC5 EClk, EDC6 EClk, EDC7 EClk
 *	0x7817		M2PCIe
 *	0x7814		IRP
 */
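
/*
 * Because the device IDs are shared, each table entry below additionally
 * pins the expected PCI device/function via UNCORE_PCI_DEV_FULL_DATA();
 * e.g. MC0 UClk is the 0x7841 instance at device 10, function 0.
 */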
static const struct pci_device_id knl_uncore_pci_ids[] = {
	{ /* MC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK, 0),
	},
	{ /* MC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 0, KNL_PCI_UNCORE_MC_UCLK, 1),
	},
	{ /* MC0 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK, 0),
	},
	{ /* MC0 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 3, KNL_PCI_UNCORE_MC_DCLK, 1),
	},
	{ /* MC0 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 4, KNL_PCI_UNCORE_MC_DCLK, 2),
	},
	{ /* MC1 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 2, KNL_PCI_UNCORE_MC_DCLK, 3),
	},
	{ /* MC1 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 3, KNL_PCI_UNCORE_MC_DCLK, 4),
	},
	{ /* MC1 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 4, KNL_PCI_UNCORE_MC_DCLK, 5),
	},
	{ /* EDC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, KNL_PCI_UNCORE_EDC_UCLK, 0),
	},
	{ /* EDC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, KNL_PCI_UNCORE_EDC_UCLK, 1),
	},
	{ /* EDC2 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(17, 0, KNL_PCI_UNCORE_EDC_UCLK, 2),
	},
	{ /* EDC3 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, KNL_PCI_UNCORE_EDC_UCLK, 3),
	},
	{ /* EDC4 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(19, 0, KNL_PCI_UNCORE_EDC_UCLK, 4),
	},
	{ /* EDC5 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(20, 0, KNL_PCI_UNCORE_EDC_UCLK, 5),
	},
	{ /* EDC6 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 0, KNL_PCI_UNCORE_EDC_UCLK, 6),
	},
	{ /* EDC7 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 0, KNL_PCI_UNCORE_EDC_UCLK, 7),
	},
	{ /* EDC0 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(24, 2, KNL_PCI_UNCORE_EDC_ECLK, 0),
	},
	{ /* EDC1 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(25, 2, KNL_PCI_UNCORE_EDC_ECLK, 1),
	},
	{ /* EDC2 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(26, 2, KNL_PCI_UNCORE_EDC_ECLK, 2),
	},
	{ /* EDC3 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(27, 2, KNL_PCI_UNCORE_EDC_ECLK, 3),
	},
	{ /* EDC4 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(28, 2, KNL_PCI_UNCORE_EDC_ECLK, 4),
	},
	{ /* EDC5 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(29, 2, KNL_PCI_UNCORE_EDC_ECLK, 5),
	},
	{ /* EDC6 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(30, 2, KNL_PCI_UNCORE_EDC_ECLK, 6),
	},
	{ /* EDC7 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(31, 2, KNL_PCI_UNCORE_EDC_ECLK, 7),
	},
	{ /* M2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_M2PCIE, 0),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7814),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_IRP, 0),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver knl_uncore_pci_driver = {
	.name		= "knl_uncore",
	.id_table	= knl_uncore_pci_ids,
};

int knl_uncore_pci_init(void)
{
	int ret;

	/* All KNL PCI based PMON units are on the same PCI bus except IRP */
	ret = snb_pci2phy_map_init(0x7814); /* IRP */
	if (ret)
		return ret;
	ret = snb_pci2phy_map_init(0x7817); /* M2PCIe */
	if (ret)
		return ret;
	uncore_pci_uncores = knl_pci_uncores;
	uncore_pci_driver = &knl_uncore_pci_driver;
	return 0;
}
/* end of KNL uncore support */

/* Haswell-EP uncore support */
static struct attribute *hswep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_filter_tid2.attr,
	&format_attr_filter_cid.attr,
	NULL,
};

static struct attribute_group hswep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_ubox_formats_attr,
};

static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;

	reg1->reg = HSWEP_U_MSR_PMON_FILTER;
	reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
	reg1->idx = 0;
	return 0;
}

static struct intel_uncore_ops hswep_uncore_ubox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_ubox_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

static struct intel_uncore_type hswep_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 44,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,
	.ops			= &hswep_uncore_ubox_ops,
	.format_group		= &hswep_uncore_ubox_format_group,
};

static struct attribute *hswep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid3.attr,
	&format_attr_filter_link2.attr,
	&format_attr_filter_state3.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static struct attribute_group hswep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_cbox_formats_attr,
};

static struct event_constraint hswep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};

static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	EVENT_EXTRA_END
};

static u64 hswep_cbox_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
	if (fields & 0x4)
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8)
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
	if (fields & 0x10) {
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
	}
	return mask;
}

static struct event_constraint *
hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
}

static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}

static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				    struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		wrmsrl(reg1->reg, filter & 0xffffffff);
		wrmsrl(reg1->reg + 1, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops hswep_uncore_cbox_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= hswep_cbox_hw_config,
	.get_constraint		= hswep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

static struct intel_uncore_type hswep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 18,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= hswep_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};

/*
 * Write SBOX Initialization register bit by bit to avoid spurious #GPs
 */
static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);

	if (msr) {
		u64 init = SNBEP_PMON_BOX_CTL_INT;
		u64 flags = 0;
		int i;

		for_each_set_bit(i, (unsigned long *)&init, 64) {
			flags |= (1ULL << i);
			wrmsrl(msr, flags);
		}
	}
}

static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.init_box		= hswep_uncore_sbox_msr_init_box
};

static struct attribute *hswep_uncore_sbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static struct attribute_group hswep_uncore_sbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_sbox_formats_attr,
};

static struct intel_uncore_type hswep_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 4,
	.perf_ctr_bits		= 44,
	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
	.ops			= &hswep_uncore_sbox_msr_ops,
	.format_group		= &hswep_uncore_sbox_format_group,
};
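
/*
 * PCU event selects 0xb-0xe take an extra band threshold from config1
 * (cf. the filter_band0-3 format attributes in the shared snbep PCU
 * format group); hw_config below records which band an event uses in
 * reg1->idx.
 */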
  2369. static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
  2370. {
  2371. struct hw_perf_event *hwc = &event->hw;
  2372. struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
  2373. int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
  2374. if (ev_sel >= 0xb && ev_sel <= 0xe) {
  2375. reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
  2376. reg1->idx = ev_sel - 0xb;
  2377. reg1->config = event->attr.config1 & (0xff << reg1->idx);
  2378. }
  2379. return 0;
  2380. }
  2381. static struct intel_uncore_ops hswep_uncore_pcu_ops = {
  2382. SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
  2383. .hw_config = hswep_pcu_hw_config,
  2384. .get_constraint = snbep_pcu_get_constraint,
  2385. .put_constraint = snbep_pcu_put_constraint,
  2386. };
  2387. static struct intel_uncore_type hswep_uncore_pcu = {
  2388. .name = "pcu",
  2389. .num_counters = 4,
  2390. .num_boxes = 1,
  2391. .perf_ctr_bits = 48,
  2392. .perf_ctr = HSWEP_PCU_MSR_PMON_CTR0,
  2393. .event_ctl = HSWEP_PCU_MSR_PMON_CTL0,
  2394. .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
  2395. .box_ctl = HSWEP_PCU_MSR_PMON_BOX_CTL,
  2396. .num_shared_regs = 1,
  2397. .ops = &hswep_uncore_pcu_ops,
  2398. .format_group = &snbep_uncore_pcu_format_group,
  2399. };
  2400. static struct intel_uncore_type *hswep_msr_uncores[] = {
  2401. &hswep_uncore_ubox,
  2402. &hswep_uncore_cbox,
  2403. &hswep_uncore_sbox,
  2404. &hswep_uncore_pcu,
  2405. NULL,
  2406. };
  2407. void hswep_uncore_cpu_init(void)
  2408. {
  2409. int pkg = boot_cpu_data.logical_proc_id;
  2410. if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
  2411. hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
  2412. /* Detect 6-8 core systems with only two SBOXes */
  2413. if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
  2414. u32 capid4;
  2415. pci_read_config_dword(uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3],
  2416. 0x94, &capid4);
		if (((capid4 >> 6) & 0x3) == 0)
			hswep_uncore_sbox.num_boxes = 2;
	}

	uncore_msr_uncores = hswep_msr_uncores;
}

static struct intel_uncore_type hswep_uncore_ha = {
	.name		= "ha",
	.num_counters	= 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct uncore_event_desc hswep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
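
/*
 * Each CAS transfers one 64-byte cache line, so the cas_count_* scale
 * above is 64 / 2^20 = 6.103515625e-5: multiplying a raw count by it
 * yields MiB. Example, assuming the standard uncore sysfs naming for
 * these PMUs:
 *
 *	perf stat -a -e uncore_imc_0/cas_count_read/ -- sleep 1
 */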

static struct intel_uncore_type hswep_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};

static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;
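
	/*
	 * The IRP counters live in PCI config space and can only be read
	 * one dword at a time: assemble the 64-bit value from two 32-bit
	 * reads into the halves of count (fine on little-endian x86,
	 * though the pair of reads is not atomic).
	 */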
	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);

	return count;
}

static struct intel_uncore_ops hswep_uncore_irp_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= hswep_uncore_irp_read_counter,
};

static struct intel_uncore_type hswep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};

static struct intel_uncore_type hswep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.format_group		= &snbep_uncore_qpi_format_group,
};

static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type hswep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= hswep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type hswep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters	= 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 44,
	.constraints	= hswep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

enum {
	HSWEP_PCI_UNCORE_HA,
	HSWEP_PCI_UNCORE_IMC,
	HSWEP_PCI_UNCORE_IRP,
	HSWEP_PCI_UNCORE_QPI,
	HSWEP_PCI_UNCORE_R2PCIE,
	HSWEP_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *hswep_pci_uncores[] = {
	[HSWEP_PCI_UNCORE_HA]	  = &hswep_uncore_ha,
	[HSWEP_PCI_UNCORE_IMC]	  = &hswep_uncore_imc,
	[HSWEP_PCI_UNCORE_IRP]	  = &hswep_uncore_irp,
	[HSWEP_PCI_UNCORE_QPI]	  = &hswep_uncore_qpi,
	[HSWEP_PCI_UNCORE_R2PCIE] = &hswep_uncore_r2pcie,
	[HSWEP_PCI_UNCORE_R3QPI]  = &hswep_uncore_r3qpi,
	NULL,
};

static const struct pci_device_id hswep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* PCU.3 (for Capability registers) */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   HSWEP_PCI_PCU_3),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver hswep_uncore_pci_driver = {
	.name		= "hswep_uncore",
	.id_table	= hswep_uncore_pci_ids,
};

int hswep_uncore_pci_init(void)
{
	int ret = snbep_pci2phy_map_init(0x2f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);

	if (ret)
		return ret;
	uncore_pci_uncores = hswep_pci_uncores;
	uncore_pci_driver = &hswep_uncore_pci_driver;

	return 0;
}

/* end of Haswell-EP uncore support */

/* BDX uncore support */
static struct intel_uncore_type bdx_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_ubox_format_group,
};

static struct event_constraint bdx_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type bdx_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 24,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= bdx_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};

static struct intel_uncore_type *bdx_msr_uncores[] = {
	&bdx_uncore_ubox,
	&bdx_uncore_cbox,
	&hswep_uncore_pcu,
	NULL,
};

void bdx_uncore_cpu_init(void)
{
	if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = bdx_msr_uncores;
}

static struct intel_uncore_type bdx_uncore_ha = {
	.name		= "ha",
	.num_counters	= 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type bdx_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type bdx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};

static struct intel_uncore_type bdx_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.format_group		= &snbep_uncore_qpi_format_group,
};

static struct event_constraint bdx_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type bdx_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= bdx_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct event_constraint bdx_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type bdx_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters	= 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.constraints	= bdx_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

enum {
	BDX_PCI_UNCORE_HA,
	BDX_PCI_UNCORE_IMC,
	BDX_PCI_UNCORE_IRP,
	BDX_PCI_UNCORE_QPI,
	BDX_PCI_UNCORE_R2PCIE,
	BDX_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *bdx_pci_uncores[] = {
	[BDX_PCI_UNCORE_HA]	= &bdx_uncore_ha,
	[BDX_PCI_UNCORE_IMC]	= &bdx_uncore_imc,
	[BDX_PCI_UNCORE_IRP]	= &bdx_uncore_irp,
	[BDX_PCI_UNCORE_QPI]	= &bdx_uncore_qpi,
	[BDX_PCI_UNCORE_R2PCIE]	= &bdx_uncore_r2pcie,
	[BDX_PCI_UNCORE_R3QPI]	= &bdx_uncore_r3qpi,
	NULL,
};

static const struct pci_device_id bdx_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f38),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb4),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb5),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd0),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd1),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd4),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd5),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f32),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f33),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3a),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f36),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f37),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3e),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 0),
	},
	{ /* QPI Port 1 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 1),
	},
	{ /* QPI Port 2 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 2),
	},
	{ /* end: all zeroes */ }
};
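
/*
 * The three QPI port filter entries above pass literal slot indices 0-2
 * in their driver_data; the assumption is that these line up with the
 * SNBEP_PCI_QPI_PORTn_FILTER slots the Haswell-EP table uses, so the QPI
 * hw_config code can find the filter devices by the same index.
 */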

static struct pci_driver bdx_uncore_pci_driver = {
	.name		= "bdx_uncore",
	.id_table	= bdx_uncore_pci_ids,
};

int bdx_uncore_pci_init(void)
{
	int ret = snbep_pci2phy_map_init(0x6f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);

	if (ret)
		return ret;
	uncore_pci_uncores = bdx_pci_uncores;
	uncore_pci_driver = &bdx_uncore_pci_driver;

	return 0;
}

/* end of BDX uncore support */

/* SKX uncore support */
static struct intel_uncore_type skx_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_ubox_format_group,
};

static struct attribute *skx_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid4.attr,
	&format_attr_filter_link4.attr,
	&format_attr_filter_state5.attr,
	&format_attr_filter_rem.attr,
	&format_attr_filter_loc.attr,
	&format_attr_filter_nm.attr,
	&format_attr_filter_all_op.attr,
	&format_attr_filter_not_nm.attr,
	&format_attr_filter_opc_0.attr,
	&format_attr_filter_opc_1.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static struct attribute_group skx_uncore_chabox_format_group = {
	.name = "format",
	.attrs = skx_uncore_cha_formats_attr,
};

static struct event_constraint skx_uncore_chabox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};

static struct extra_reg skx_uncore_cha_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8134, 0xffff, 0x4),
};
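
/*
 * Every entry matches event 0x34 with one specific umask (the low byte of
 * the pattern is the event select, the high byte the umask) and requests
 * idx 0x4, which skx_cha_filter_mask() below translates to the state
 * filter field. The umasks are assumed to be the LLC-lookup flavours
 * that need state filtering.
 */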

static u64 skx_cha_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK;
	if (fields & 0x4)
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE;

	return mask;
}

static struct event_constraint *
skx_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, skx_cha_filter_mask);
}

static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;
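
	/*
	 * Accumulate the filter fields this event needs: every extra_reg
	 * whose event/umask pattern matches contributes its idx bits.
	 */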
	for (er = skx_uncore_cha_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & skx_cha_filter_mask(idx);
		reg1->idx = idx;
	}

	return 0;
}

static struct intel_uncore_ops skx_uncore_chabox_ops = {
	/* There is no frz_en for chabox ctl */
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= skx_cha_hw_config,
	.get_constraint		= skx_cha_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

static struct intel_uncore_type skx_uncore_chabox = {
	.name			= "cha",
	.num_counters		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= skx_uncore_chabox_constraints,
	.ops			= &skx_uncore_chabox_ops,
	.format_group		= &skx_uncore_chabox_format_group,
};

static struct attribute *skx_uncore_iio_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh9.attr,
	&format_attr_ch_mask.attr,
	&format_attr_fc_mask.attr,
	NULL,
};

static struct attribute_group skx_uncore_iio_format_group = {
	.name = "format",
	.attrs = skx_uncore_iio_formats_attr,
};

static struct event_constraint skx_uncore_iio_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x95, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xd4, 0xc),
	EVENT_CONSTRAINT_END
};

static void skx_iio_enable_event(struct intel_uncore_box *box,
				 struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
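
	/*
	 * A single 64-bit write suffices: the SKX IIO control register
	 * also takes the ch_mask/fc_mask bits from event_mask_ext, which
	 * are assumed to already be merged into hwc->config.
	 */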
	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops skx_uncore_iio_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= skx_iio_enable_event,
	.read_counter		= uncore_msr_read_counter,
};

static struct intel_uncore_type skx_uncore_iio = {
	.name			= "iio",
	.num_counters		= 4,
	.num_boxes		= 5,
	.perf_ctr_bits		= 48,
	.event_ctl		= SKX_IIO0_MSR_PMON_CTL0,
	.perf_ctr		= SKX_IIO0_MSR_PMON_CTR0,
	.event_mask		= SKX_IIO_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl		= SKX_IIO0_MSR_PMON_BOX_CTL,
	.msr_offset		= SKX_IIO_MSR_OFFSET,
	.constraints		= skx_uncore_iio_constraints,
	.ops			= &skx_uncore_iio_ops,
	.format_group		= &skx_uncore_iio_format_group,
};

static struct attribute *skx_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static struct attribute_group skx_uncore_format_group = {
	.name = "format",
	.attrs = skx_uncore_formats_attr,
};

static struct intel_uncore_type skx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 5,
	.perf_ctr_bits		= 48,
	.event_ctl		= SKX_IRP0_MSR_PMON_CTL0,
	.perf_ctr		= SKX_IRP0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SKX_IRP0_MSR_PMON_BOX_CTL,
	.msr_offset		= SKX_IRP_MSR_OFFSET,
	.ops			= &skx_uncore_iio_ops,
	.format_group		= &skx_uncore_format_group,
};

static struct intel_uncore_ops skx_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

static struct intel_uncore_type skx_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &skx_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};

static struct intel_uncore_type *skx_msr_uncores[] = {
	&skx_uncore_ubox,
	&skx_uncore_chabox,
	&skx_uncore_iio,
	&skx_uncore_irp,
	&skx_uncore_pcu,
	NULL,
};

/*
 * To determine the number of CHAs, read bits 27:0 of the CAPID6
 * register, which is located at Device 30, Function 3, Offset 0x9C
 * (PCI device ID 0x2083).
 */
#define SKX_CAPID6		0x9c
#define SKX_CHA_BIT_MASK	GENMASK(27, 0)

static int skx_count_chabox(void)
{
	struct pci_dev *dev = NULL;
	u32 val = 0;
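
	/*
	 * Any instance of device 0x2083 will do: each bit set in CAPID6
	 * marks an enabled CHA. pci_dev_put(NULL) is a no-op, so the
	 * not-found path can share the exit below.
	 */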
	dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2083, dev);
	if (!dev)
		goto out;

	pci_read_config_dword(dev, SKX_CAPID6, &val);
	val &= SKX_CHA_BIT_MASK;
out:
	pci_dev_put(dev);
	return hweight32(val);
}

void skx_uncore_cpu_init(void)
{
	skx_uncore_chabox.num_boxes = skx_count_chabox();
	uncore_msr_uncores = skx_msr_uncores;
}

static struct intel_uncore_type skx_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 6,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};

static struct attribute *skx_upi_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static struct attribute_group skx_upi_uncore_format_group = {
	.name = "format",
	.attrs = skx_upi_uncore_formats_attr,
};

static void skx_upi_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
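
	/*
	 * UNCORE_BOX_FLAG_CTL_OFFS8 tells the generic PCI helpers that this
	 * box's event control registers are laid out 8 bytes apart rather
	 * than the usual 4 (skx_m2m_uncore_pci_init_box() below relies on
	 * the same flag).
	 */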
	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
	pci_write_config_dword(pdev, SKX_UPI_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}

static struct intel_uncore_ops skx_upi_uncore_pci_ops = {
	.init_box	= skx_upi_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

static struct intel_uncore_type skx_uncore_upi = {
	.name		= "upi",
	.num_counters	= 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SKX_UPI_PCI_PMON_CTR0,
	.event_ctl	= SKX_UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SKX_PMON_CTL_UMASK_EXT,
	.box_ctl	= SKX_UPI_PCI_PMON_BOX_CTL,
	.ops		= &skx_upi_uncore_pci_ops,
	.format_group	= &skx_upi_uncore_format_group,
};

static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
	pci_write_config_dword(pdev, SKX_M2M_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}

static struct intel_uncore_ops skx_m2m_uncore_pci_ops = {
	.init_box	= skx_m2m_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

static struct intel_uncore_type skx_uncore_m2m = {
	.name		= "m2m",
	.num_counters	= 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SKX_M2M_PCI_PMON_CTR0,
	.event_ctl	= SKX_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SKX_M2M_PCI_PMON_BOX_CTL,
	.ops		= &skx_m2m_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};

static struct event_constraint skx_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type skx_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.constraints	= skx_uncore_m2pcie_constraints,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};

static struct event_constraint skx_uncore_m3upi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x51, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x52, 0x7),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type skx_uncore_m3upi = {
	.name		= "m3upi",
	.num_counters	= 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.constraints	= skx_uncore_m3upi_constraints,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};

enum {
	SKX_PCI_UNCORE_IMC,
	SKX_PCI_UNCORE_M2M,
	SKX_PCI_UNCORE_UPI,
	SKX_PCI_UNCORE_M2PCIE,
	SKX_PCI_UNCORE_M3UPI,
};

static struct intel_uncore_type *skx_pci_uncores[] = {
	[SKX_PCI_UNCORE_IMC]	= &skx_uncore_imc,
	[SKX_PCI_UNCORE_M2M]	= &skx_uncore_m2m,
	[SKX_PCI_UNCORE_UPI]	= &skx_uncore_upi,
	[SKX_PCI_UNCORE_M2PCIE]	= &skx_uncore_m2pcie,
	[SKX_PCI_UNCORE_M3UPI]	= &skx_uncore_m3upi,
	NULL,
};
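
/*
 * SKX reuses PCI device IDs across instances (e.g. all three UPI links
 * are 0x2058), so the entries below are keyed by device/function through
 * UNCORE_PCI_DEV_FULL_DATA(dev, func, type, idx) rather than by device
 * ID alone.
 */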

static const struct pci_device_id skx_uncore_pci_ids[] = {
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 2, SKX_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 6, SKX_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 2, SKX_PCI_UNCORE_IMC, 2),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 2, SKX_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 6, SKX_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 2, SKX_PCI_UNCORE_IMC, 5),
	},
	{ /* M2M0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 0, SKX_PCI_UNCORE_M2M, 0),
	},
	{ /* M2M1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 0, SKX_PCI_UNCORE_M2M, 1),
	},
	{ /* UPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, SKX_PCI_UNCORE_UPI, 0),
	},
	{ /* UPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, SKX_PCI_UNCORE_UPI, 1),
	},
	{ /* UPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, SKX_PCI_UNCORE_UPI, 2),
	},
	{ /* M2PCIe 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 1, SKX_PCI_UNCORE_M2PCIE, 0),
	},
	{ /* M2PCIe 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 1, SKX_PCI_UNCORE_M2PCIE, 1),
	},
	{ /* M2PCIe 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(23, 1, SKX_PCI_UNCORE_M2PCIE, 2),
	},
	{ /* M2PCIe 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
	},
	{ /* M3UPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204C),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, SKX_PCI_UNCORE_M3UPI, 0),
	},
	{ /* M3UPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 1),
	},
	{ /* M3UPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204C),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 4, SKX_PCI_UNCORE_M3UPI, 2),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver skx_uncore_pci_driver = {
	.name		= "skx_uncore",
	.id_table	= skx_uncore_pci_ids,
};

int skx_uncore_pci_init(void)
{
	/* need to double check pci address */
	int ret = snbep_pci2phy_map_init(0x2014, SKX_CPUNODEID, SKX_GIDNIDMAP, false);

	if (ret)
		return ret;
	uncore_pci_uncores = skx_pci_uncores;
	uncore_pci_driver = &skx_uncore_pci_driver;

	return 0;
}

/* end of SKX uncore support */