// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2019-2020 Realtek Corporation
 */

#include "coex.h"
#include "debug.h"
#include "fw.h"
#include "mac.h"
#include "phy.h"
#include "ps.h"
#include "reg.h"
#include "sar.h"
#include "txrx.h"
#include "util.h"
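
/* Choose an A-MSDU length limit from the current rate-adaptation report.
 * Assuming report->bit_rate is in units of 100 kbps, the first threshold
 * (550) sits just above the fastest legacy OFDM rate of 54 Mbps.
 */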
static u16 get_max_amsdu_len(struct rtw89_dev *rtwdev,
			     const struct rtw89_ra_report *report)
{
	u32 bit_rate = report->bit_rate;

	/* lower than ofdm, do not aggregate */
	if (bit_rate < 550)
		return 1;

	/* avoid AMSDU for legacy rate */
	if (report->might_fallback_legacy)
		return 1;

	/* lower than 20M vht 2ss mcs8, make it small */
	if (bit_rate < 1800)
		return 1200;

	/* lower than 40M vht 2ss mcs9, make it medium */
	if (bit_rate < 4000)
		return 2600;

	/* not yet 80M vht 2ss mcs8/9, make it twice regular packet size */
	if (bit_rate < 7000)
		return 3500;

	return rtwdev->chip->max_amsdu_limit;
}
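
/* Expand a VHT/HE-style RX MCS map (2 bits per spatial stream) into the
 * driver's 64-bit rate-adaptation mask. Bits 0-11 of the mask carry legacy
 * rates, so the first stream's MCS bits start at offset 12 and each further
 * stream shifts by another 12 bits.
 */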
static u64 get_mcs_ra_mask(u16 mcs_map, u8 highest_mcs, u8 gap)
{
	u64 ra_mask = 0;
	u8 mcs_cap;
	int i, nss;

	for (i = 0, nss = 12; i < 4; i++, mcs_map >>= 2, nss += 12) {
		mcs_cap = mcs_map & 0x3;
		switch (mcs_cap) {
		case 2:
			ra_mask |= GENMASK_ULL(highest_mcs, 0) << nss;
			break;
		case 1:
			ra_mask |= GENMASK_ULL(highest_mcs - gap, 0) << nss;
			break;
		case 0:
			ra_mask |= GENMASK_ULL(highest_mcs - gap * 2, 0) << nss;
			break;
		default:
			break;
		}
	}

	return ra_mask;
}

static u64 get_he_ra_mask(struct ieee80211_sta *sta)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 19, 0)
	struct ieee80211_sta_he_cap cap = sta->he_cap;
#else
	struct ieee80211_sta_he_cap cap = sta->deflink.he_cap;
#endif
	u16 mcs_map;

#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 19, 0)
	switch (sta->bandwidth) {
#else
	switch (sta->deflink.bandwidth) {
#endif
	case IEEE80211_STA_RX_BW_160:
		if (cap.he_cap_elem.phy_cap_info[0] &
		    IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
			mcs_map = le16_to_cpu(cap.he_mcs_nss_supp.rx_mcs_80p80);
		else
			mcs_map = le16_to_cpu(cap.he_mcs_nss_supp.rx_mcs_160);
		break;
	default:
		mcs_map = le16_to_cpu(cap.he_mcs_nss_supp.rx_mcs_80);
	}

	/* MCS11, MCS9, MCS7 */
	return get_mcs_ra_mask(mcs_map, 11, 2);
}

#define RA_FLOOR_TABLE_SIZE	7
#define RA_FLOOR_UP_GAP		3
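
/* Map the averaged RSSI onto one of RA_FLOOR_TABLE_SIZE floor levels and
 * return a mask that prunes more of the lowest rates as the level rises.
 * Table entries at or above ratr_state are lifted by RA_FLOOR_UP_GAP to add
 * hysteresis; the incoming rssi is assumed to arrive in the driver's doubled
 * ewma scale, hence the >> 1 before comparing against the table.
 */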
static u64 rtw89_phy_ra_mask_rssi(struct rtw89_dev *rtwdev, u8 rssi,
				  u8 ratr_state)
{
	u8 rssi_lv_t[RA_FLOOR_TABLE_SIZE] = {30, 44, 48, 52, 56, 60, 100};
	u8 rssi_lv = 0;
	u8 i;

	rssi >>= 1;

	for (i = 0; i < RA_FLOOR_TABLE_SIZE; i++) {
		if (i >= ratr_state)
			rssi_lv_t[i] += RA_FLOOR_UP_GAP;
		if (rssi < rssi_lv_t[i]) {
			rssi_lv = i;
			break;
		}
	}

	if (rssi_lv == 0)
		return 0xffffffffffffffffULL;
	else if (rssi_lv == 1)
		return 0xfffffffffffffff0ULL;
	else if (rssi_lv == 2)
		return 0xffffffffffffefe0ULL;
	else if (rssi_lv == 3)
		return 0xffffffffffffcfc0ULL;
	else if (rssi_lv == 4)
		return 0xffffffffffff8f80ULL;
	else if (rssi_lv >= 5)
		return 0xffffffffffff0f00ULL;

	return 0xffffffffffffffffULL;
}
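
/* If masking removed every non-legacy rate, restore those bits from the
 * backup mask; if the mask ended up completely empty, restore the legacy
 * bits too, so rate adaptation is never left without a usable rate.
 */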
static u64 rtw89_phy_ra_mask_recover(u64 ra_mask, u64 ra_mask_bak)
{
	if ((ra_mask & ~(RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES)) == 0)
		ra_mask |= (ra_mask_bak & ~(RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES));

	if (ra_mask == 0)
		ra_mask |= (ra_mask_bak & (RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES));

	return ra_mask;
}

static u64 rtw89_phy_ra_mask_cfg(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
				 const struct rtw89_chan *chan)
{
	struct ieee80211_sta *sta = rtwsta_to_sta(rtwsta);
	struct cfg80211_bitrate_mask *mask = &rtwsta->mask;
	enum nl80211_band band;
	u64 cfg_mask;

	if (!rtwsta->use_cfg_mask)
		return -1;

	switch (chan->band_type) {
	case RTW89_BAND_2G:
		band = NL80211_BAND_2GHZ;
		cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_2GHZ].legacy,
					   RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES);
		break;
	case RTW89_BAND_5G:
		band = NL80211_BAND_5GHZ;
		cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_5GHZ].legacy,
					   RA_MASK_OFDM_RATES);
		break;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)
	case RTW89_BAND_6G:
		band = NL80211_BAND_6GHZ;
		cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_6GHZ].legacy,
					   RA_MASK_OFDM_RATES);
		break;
#endif
	default:
		rtw89_warn(rtwdev, "unhandled band type %d\n", chan->band_type);
		return -1;
	}

#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 19, 0)
	if (sta->he_cap.has_he) {
#else
	if (sta->deflink.he_cap.has_he) {
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
		cfg_mask |= u64_encode_bits(mask->control[band].he_mcs[0],
					    RA_MASK_HE_1SS_RATES);
		cfg_mask |= u64_encode_bits(mask->control[band].he_mcs[1],
					    RA_MASK_HE_2SS_RATES);
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 19, 0)
	} else if (sta->vht_cap.vht_supported) {
#else
	} else if (sta->deflink.vht_cap.vht_supported) {
#endif
		cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[0],
					    RA_MASK_VHT_1SS_RATES);
		cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[1],
					    RA_MASK_VHT_2SS_RATES);
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 19, 0)
	} else if (sta->ht_cap.ht_supported) {
#else
	} else if (sta->deflink.ht_cap.ht_supported) {
#endif
		cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[0],
					    RA_MASK_HT_1SS_RATES);
		cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[1],
					    RA_MASK_HT_2SS_RATES);
	}

	return cfg_mask;
}

static const u64
rtw89_ra_mask_ht_rates[4] = {RA_MASK_HT_1SS_RATES, RA_MASK_HT_2SS_RATES,
			     RA_MASK_HT_3SS_RATES, RA_MASK_HT_4SS_RATES};
static const u64
rtw89_ra_mask_vht_rates[4] = {RA_MASK_VHT_1SS_RATES, RA_MASK_VHT_2SS_RATES,
			      RA_MASK_VHT_3SS_RATES, RA_MASK_VHT_4SS_RATES};
static const u64
rtw89_ra_mask_he_rates[4] = {RA_MASK_HE_1SS_RATES, RA_MASK_HE_2SS_RATES,
			     RA_MASK_HE_3SS_RATES, RA_MASK_HE_4SS_RATES};

#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
static void rtw89_phy_ra_gi_ltf(struct rtw89_dev *rtwdev,
				struct rtw89_sta *rtwsta,
				const struct rtw89_chan *chan,
				bool *fix_giltf_en, u8 *fix_giltf)
{
	struct cfg80211_bitrate_mask *mask = &rtwsta->mask;
	u8 band = chan->band_type;
	enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
	u8 he_gi = mask->control[nl_band].he_gi;
	u8 he_ltf = mask->control[nl_band].he_ltf;

	if (!rtwsta->use_cfg_mask)
		return;

	if (he_ltf == 2 && he_gi == 2) {
		*fix_giltf = RTW89_GILTF_LGI_4XHE32;
	} else if (he_ltf == 2 && he_gi == 0) {
		*fix_giltf = RTW89_GILTF_SGI_4XHE08;
	} else if (he_ltf == 1 && he_gi == 1) {
		*fix_giltf = RTW89_GILTF_2XHE16;
	} else if (he_ltf == 1 && he_gi == 0) {
		*fix_giltf = RTW89_GILTF_2XHE08;
	} else if (he_ltf == 0 && he_gi == 1) {
		*fix_giltf = RTW89_GILTF_1XHE16;
	} else if (he_ltf == 0 && he_gi == 0) {
		*fix_giltf = RTW89_GILTF_1XHE08;
	} else {
		*fix_giltf_en = false;
		return;
	}

	*fix_giltf_en = true;
}
#endif

static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
				    struct ieee80211_sta *sta, bool csi)
{
	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
	struct rtw89_phy_rate_pattern *rate_pattern = &rtwvif->rate_pattern;
	struct rtw89_ra_info *ra = &rtwsta->ra;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
						       rtwvif->sub_entity_idx);
	struct ieee80211_vif *vif = rtwvif_to_vif(rtwsta->rtwvif);
	const u64 *high_rate_masks = rtw89_ra_mask_ht_rates;
	u8 rssi = ewma_rssi_read(&rtwsta->avg_rssi);
	u64 ra_mask = 0;
	u64 ra_mask_bak;
	u8 mode = 0;
	u8 csi_mode = RTW89_RA_RPT_MODE_LEGACY;
	u8 bw_mode = 0;
	u8 stbc_en = 0;
	u8 ldpc_en = 0;
	u8 fix_giltf = 0;
	u8 i;
	bool sgi = false;
	bool fix_giltf_en = false;

	memset(ra, 0, sizeof(*ra));
	/* Set the ra mask from sta's capability */
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 19, 0)
	if (sta->he_cap.has_he) {
#else
	if (sta->deflink.he_cap.has_he) {
#endif
		mode |= RTW89_RA_MODE_HE;
		csi_mode = RTW89_RA_RPT_MODE_HE;
		ra_mask |= get_he_ra_mask(sta);
		high_rate_masks = rtw89_ra_mask_he_rates;
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 19, 0)
		if (sta->he_cap.he_cap_elem.phy_cap_info[2] &
#else
		if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[2] &
#endif
		    IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
			stbc_en = 1;
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 19, 0)
		if (sta->he_cap.he_cap_elem.phy_cap_info[1] &
#else
		if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[1] &
#endif
		    IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)
			ldpc_en = 1;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
		rtw89_phy_ra_gi_ltf(rtwdev, rtwsta, chan, &fix_giltf_en, &fix_giltf);
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 19, 0)
	} else if (sta->vht_cap.vht_supported) {
		u16 mcs_map = le16_to_cpu(sta->vht_cap.vht_mcs.rx_mcs_map);
#else
	} else if (sta->deflink.vht_cap.vht_supported) {
		u16 mcs_map = le16_to_cpu(sta->deflink.vht_cap.vht_mcs.rx_mcs_map);
#endif
		mode |= RTW89_RA_MODE_VHT;
		csi_mode = RTW89_RA_RPT_MODE_VHT;
		/* MCS9, MCS8, MCS7 */
		ra_mask |= get_mcs_ra_mask(mcs_map, 9, 1);
		high_rate_masks = rtw89_ra_mask_vht_rates;
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 19, 0)
		if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK)
#else
		if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK)
#endif
			stbc_en = 1;
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 19, 0)
		if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC)
#else
		if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC)
#endif
			ldpc_en = 1;
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 19, 0)
	} else if (sta->ht_cap.ht_supported) {
#else
	} else if (sta->deflink.ht_cap.ht_supported) {
#endif
		mode |= RTW89_RA_MODE_HT;
		csi_mode = RTW89_RA_RPT_MODE_HT;
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 19, 0)
		ra_mask |= ((u64)sta->ht_cap.mcs.rx_mask[3] << 48) |
			   ((u64)sta->ht_cap.mcs.rx_mask[2] << 36) |
			   (sta->ht_cap.mcs.rx_mask[1] << 24) |
			   (sta->ht_cap.mcs.rx_mask[0] << 12);
#else
		ra_mask |= ((u64)sta->deflink.ht_cap.mcs.rx_mask[3] << 48) |
			   ((u64)sta->deflink.ht_cap.mcs.rx_mask[2] << 36) |
			   (sta->deflink.ht_cap.mcs.rx_mask[1] << 24) |
			   (sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
#endif
		high_rate_masks = rtw89_ra_mask_ht_rates;
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 19, 0)
		if (sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
#else
		if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
#endif
			stbc_en = 1;
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 19, 0)
		if (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)
#else
		if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)
#endif
			ldpc_en = 1;
	}

	switch (chan->band_type) {
	case RTW89_BAND_2G:
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 19, 0)
		ra_mask |= sta->supp_rates[NL80211_BAND_2GHZ];
		if (sta->supp_rates[NL80211_BAND_2GHZ] & 0xf)
#else
		ra_mask |= sta->deflink.supp_rates[NL80211_BAND_2GHZ];
		if (sta->deflink.supp_rates[NL80211_BAND_2GHZ] & 0xf)
#endif
			mode |= RTW89_RA_MODE_CCK;
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 19, 0)
		if (sta->supp_rates[NL80211_BAND_2GHZ] & 0xff0)
#else
		if (sta->deflink.supp_rates[NL80211_BAND_2GHZ] & 0xff0)
#endif
			mode |= RTW89_RA_MODE_OFDM;
		break;
	case RTW89_BAND_5G:
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 19, 0)
		ra_mask |= (u64)sta->supp_rates[NL80211_BAND_5GHZ] << 4;
#else
		ra_mask |= (u64)sta->deflink.supp_rates[NL80211_BAND_5GHZ] << 4;
#endif
		mode |= RTW89_RA_MODE_OFDM;
		break;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)
	case RTW89_BAND_6G:
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 19, 0)
		ra_mask |= (u64)sta->supp_rates[NL80211_BAND_6GHZ] << 4;
#else
		ra_mask |= (u64)sta->deflink.supp_rates[NL80211_BAND_6GHZ] << 4;
#endif
		mode |= RTW89_RA_MODE_OFDM;
		break;
#endif
	default:
		rtw89_err(rtwdev, "Unknown band type\n");
		break;
	}

	ra_mask_bak = ra_mask;

	if (mode >= RTW89_RA_MODE_HT) {
		u64 mask = 0;

		for (i = 0; i < rtwdev->hal.tx_nss; i++)
			mask |= high_rate_masks[i];
		if (mode & RTW89_RA_MODE_OFDM)
			mask |= RA_MASK_SUBOFDM_RATES;
		if (mode & RTW89_RA_MODE_CCK)
			mask |= RA_MASK_SUBCCK_RATES;
		ra_mask &= mask;
	} else if (mode & RTW89_RA_MODE_OFDM) {
		ra_mask &= (RA_MASK_OFDM_RATES | RA_MASK_SUBCCK_RATES);
	}

	if (mode != RTW89_RA_MODE_CCK)
		ra_mask &= rtw89_phy_ra_mask_rssi(rtwdev, rssi, 0);

	ra_mask = rtw89_phy_ra_mask_recover(ra_mask, ra_mask_bak);
	ra_mask &= rtw89_phy_ra_mask_cfg(rtwdev, rtwsta, chan);

#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 19, 0)
	switch (sta->bandwidth) {
#else
	switch (sta->deflink.bandwidth) {
#endif
	case IEEE80211_STA_RX_BW_160:
		bw_mode = RTW89_CHANNEL_WIDTH_160;
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 19, 0)
		sgi = sta->vht_cap.vht_supported &&
		      (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160);
#else
		sgi = sta->deflink.vht_cap.vht_supported &&
		      (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160);
#endif
		break;
	case IEEE80211_STA_RX_BW_80:
		bw_mode = RTW89_CHANNEL_WIDTH_80;
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 19, 0)
		sgi = sta->vht_cap.vht_supported &&
		      (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80);
#else
		sgi = sta->deflink.vht_cap.vht_supported &&
		      (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80);
#endif
		break;
	case IEEE80211_STA_RX_BW_40:
		bw_mode = RTW89_CHANNEL_WIDTH_40;
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 19, 0)
		sgi = sta->ht_cap.ht_supported &&
		      (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40);
#else
		sgi = sta->deflink.ht_cap.ht_supported &&
		      (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40);
#endif
		break;
	default:
		bw_mode = RTW89_CHANNEL_WIDTH_20;
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 19, 0)
		sgi = sta->ht_cap.ht_supported &&
		      (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
#else
		sgi = sta->deflink.ht_cap.ht_supported &&
		      (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
#endif
		break;
	}

#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 19, 0)
	if (sta->he_cap.he_cap_elem.phy_cap_info[3] &
	    IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM)
#else
	if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[3] &
	    IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM)
#endif
		ra->dcm_cap = 1;

	if (rate_pattern->enable && !vif->p2p) {
		ra_mask = rtw89_phy_ra_mask_cfg(rtwdev, rtwsta, chan);
		ra_mask &= rate_pattern->ra_mask;
		mode = rate_pattern->ra_mode;
	}

	ra->bw_cap = bw_mode;
	ra->er_cap = rtwsta->er_cap;
	ra->mode_ctrl = mode;
	ra->macid = rtwsta->mac_id;
	ra->stbc_cap = stbc_en;
	ra->ldpc_cap = ldpc_en;
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 19, 0)
	ra->ss_num = min(sta->rx_nss, rtwdev->hal.tx_nss) - 1;
#else
	ra->ss_num = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1;
#endif
	ra->en_sgi = sgi;
	ra->ra_mask = ra_mask;
	ra->fix_giltf_en = fix_giltf_en;
	ra->fix_giltf = fix_giltf;

	if (!csi)
		return;

	ra->fixed_csi_rate_en = false;
	ra->ra_csi_rate_en = true;
	ra->cr_tbl_sel = false;
	ra->band_num = rtwvif->phy_idx;
	ra->csi_bw = bw_mode;
	ra->csi_gi_ltf = RTW89_GILTF_LGI_4XHE32;
	ra->csi_mcs_ss_idx = 5;
	ra->csi_mode = csi_mode;
}

void rtw89_phy_ra_updata_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta,
			     u32 changed)
{
	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
	struct rtw89_ra_info *ra = &rtwsta->ra;

	rtw89_phy_ra_sta_update(rtwdev, sta, false);
	if (changed & IEEE80211_RC_SUPP_RATES_CHANGED)
		ra->upd_mask = 1;
	if (changed & (IEEE80211_RC_BW_CHANGED | IEEE80211_RC_NSS_CHANGED))
		ra->upd_bw_nss_mask = 1;

	rtw89_debug(rtwdev, RTW89_DBG_RA,
		    "ra update: macid = %d, bw = %d, nss = %d, gi = %d %d",
		    ra->macid,
		    ra->bw_cap,
		    ra->ss_num,
		    ra->en_sgi,
		    ra->giltf);

	rtw89_fw_h2c_ra(rtwdev, ra, false);
}

static bool __check_rate_pattern(struct rtw89_phy_rate_pattern *next,
				 u16 rate_base, u64 ra_mask, u8 ra_mode,
				 u32 rate_ctrl, u32 ctrl_skip, bool force)
{
	u8 n, c;

	if (rate_ctrl == ctrl_skip)
		return true;

	n = hweight32(rate_ctrl);
	if (n == 0)
		return true;

	if (force && n != 1)
		return false;

	if (next->enable)
		return false;

	c = __fls(rate_ctrl);
	next->rate = rate_base + c;
	next->ra_mode = ra_mode;
	next->ra_mask = ra_mask;
	next->enable = true;

	return true;
}

#define RTW89_HW_RATE_BY_CHIP_GEN(rate) \
{ \
	[RTW89_CHIP_AX] = RTW89_HW_RATE_ ## rate, \
	[RTW89_CHIP_BE] = RTW89_HW_RATE_V1_ ## rate, \
}

void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
				struct ieee80211_vif *vif,
				const struct cfg80211_bitrate_mask *mask)
{
	struct ieee80211_supported_band *sband;
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
	struct rtw89_phy_rate_pattern next_pattern = {0};
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
						       rtwvif->sub_entity_idx);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
	static const u16 hw_rate_he[][RTW89_CHIP_GEN_NUM] = {
		RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS1_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS2_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS3_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS4_MCS0),
	};
#endif
	static const u16 hw_rate_vht[][RTW89_CHIP_GEN_NUM] = {
		RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS1_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS2_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS3_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS4_MCS0),
	};
	static const u16 hw_rate_ht[][RTW89_CHIP_GEN_NUM] = {
		RTW89_HW_RATE_BY_CHIP_GEN(MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(MCS8),
		RTW89_HW_RATE_BY_CHIP_GEN(MCS16),
		RTW89_HW_RATE_BY_CHIP_GEN(MCS24),
	};
	u8 band = chan->band_type;
	enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
	enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
	u8 tx_nss = rtwdev->hal.tx_nss;
	u8 i;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
	for (i = 0; i < tx_nss; i++)
		if (!__check_rate_pattern(&next_pattern, hw_rate_he[i][chip_gen],
					  RA_MASK_HE_RATES, RTW89_RA_MODE_HE,
					  mask->control[nl_band].he_mcs[i],
					  0, true))
			goto out;
#endif

	for (i = 0; i < tx_nss; i++)
		if (!__check_rate_pattern(&next_pattern, hw_rate_vht[i][chip_gen],
					  RA_MASK_VHT_RATES, RTW89_RA_MODE_VHT,
					  mask->control[nl_band].vht_mcs[i],
					  0, true))
			goto out;

	for (i = 0; i < tx_nss; i++)
		if (!__check_rate_pattern(&next_pattern, hw_rate_ht[i][chip_gen],
					  RA_MASK_HT_RATES, RTW89_RA_MODE_HT,
					  mask->control[nl_band].ht_mcs[i],
					  0, true))
			goto out;

	/* legacy cannot be empty for nl80211_parse_tx_bitrate_mask(), and
	 * ieee80211_set_bitrate_mask() requires at least one basic rate,
	 * so the decision just depends on whether all bitrates are set or not.
	 */
	sband = rtwdev->hw->wiphy->bands[nl_band];
	if (band == RTW89_BAND_2G) {
		if (!__check_rate_pattern(&next_pattern, RTW89_HW_RATE_CCK1,
					  RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES,
					  RTW89_RA_MODE_CCK | RTW89_RA_MODE_OFDM,
					  mask->control[nl_band].legacy,
					  BIT(sband->n_bitrates) - 1, false))
			goto out;
	} else {
		if (!__check_rate_pattern(&next_pattern, RTW89_HW_RATE_OFDM6,
					  RA_MASK_OFDM_RATES, RTW89_RA_MODE_OFDM,
					  mask->control[nl_band].legacy,
					  BIT(sband->n_bitrates) - 1, false))
			goto out;
	}

	if (!next_pattern.enable)
		goto out;

	rtwvif->rate_pattern = next_pattern;
	rtw89_debug(rtwdev, RTW89_DBG_RA,
		    "configure pattern: rate 0x%x, mask 0x%llx, mode 0x%x\n",
		    next_pattern.rate,
		    next_pattern.ra_mask,
		    next_pattern.ra_mode);
	return;

out:
	rtwvif->rate_pattern.enable = false;
	rtw89_debug(rtwdev, RTW89_DBG_RA, "unset rate pattern\n");
}

static void rtw89_phy_ra_updata_sta_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw89_dev *rtwdev = (struct rtw89_dev *)data;

	rtw89_phy_ra_updata_sta(rtwdev, sta, IEEE80211_RC_SUPP_RATES_CHANGED);
}

void rtw89_phy_ra_update(struct rtw89_dev *rtwdev)
{
	ieee80211_iterate_stations_atomic(rtwdev->hw,
					  rtw89_phy_ra_updata_sta_iter,
					  rtwdev);
}

void rtw89_phy_ra_assoc(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta)
{
	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
	struct rtw89_ra_info *ra = &rtwsta->ra;
	u8 rssi = ewma_rssi_read(&rtwsta->avg_rssi) >> RSSI_FACTOR;
	bool csi = rtw89_sta_has_beamformer_cap(sta);

	rtw89_phy_ra_sta_update(rtwdev, sta, csi);

	if (rssi > 40)
		ra->init_rate_lv = 1;
	else if (rssi > 20)
		ra->init_rate_lv = 2;
	else if (rssi > 1)
		ra->init_rate_lv = 3;
	else
		ra->init_rate_lv = 0;
	ra->upd_all = 1;
	rtw89_debug(rtwdev, RTW89_DBG_RA,
		    "ra assoc: macid = %d, mode = %d, bw = %d, nss = %d, lv = %d",
		    ra->macid,
		    ra->mode_ctrl,
		    ra->bw_cap,
		    ra->ss_num,
		    ra->init_rate_lv);
	rtw89_debug(rtwdev, RTW89_DBG_RA,
		    "ra assoc: dcm = %d, er = %d, ldpc = %d, stbc = %d, gi = %d %d",
		    ra->dcm_cap,
		    ra->er_cap,
		    ra->ldpc_cap,
		    ra->stbc_cap,
		    ra->en_sgi,
		    ra->giltf);

	rtw89_fw_h2c_ra(rtwdev, ra, csi);
}
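
/* Compute the TX subchannel (TXSC) index describing where a transmission of
 * data bandwidth @dbw sits inside the current channel of control bandwidth
 * @cbw, based on the primary channel's position relative to the center
 * channel. 0 means full bandwidth; 0xff marks an unexpected layout.
 */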
u8 rtw89_phy_get_txsc(struct rtw89_dev *rtwdev,
		      const struct rtw89_chan *chan,
		      enum rtw89_bandwidth dbw)
{
	enum rtw89_bandwidth cbw = chan->band_width;
	u8 pri_ch = chan->primary_channel;
	u8 central_ch = chan->channel;
	u8 txsc_idx = 0;
	u8 tmp = 0;

	if (cbw == dbw || cbw == RTW89_CHANNEL_WIDTH_20)
		return txsc_idx;

	switch (cbw) {
	case RTW89_CHANNEL_WIDTH_40:
		txsc_idx = pri_ch > central_ch ? 1 : 2;
		break;
	case RTW89_CHANNEL_WIDTH_80:
		if (dbw == RTW89_CHANNEL_WIDTH_20) {
			if (pri_ch > central_ch)
				txsc_idx = (pri_ch - central_ch) >> 1;
			else
				txsc_idx = ((central_ch - pri_ch) >> 1) + 1;
		} else {
			txsc_idx = pri_ch > central_ch ? 9 : 10;
		}
		break;
	case RTW89_CHANNEL_WIDTH_160:
		if (pri_ch > central_ch)
			tmp = (pri_ch - central_ch) >> 1;
		else
			tmp = ((central_ch - pri_ch) >> 1) + 1;

		if (dbw == RTW89_CHANNEL_WIDTH_20) {
			txsc_idx = tmp;
		} else if (dbw == RTW89_CHANNEL_WIDTH_40) {
			if (tmp == 1 || tmp == 3)
				txsc_idx = 9;
			else if (tmp == 5 || tmp == 7)
				txsc_idx = 11;
			else if (tmp == 2 || tmp == 4)
				txsc_idx = 10;
			else if (tmp == 6 || tmp == 8)
				txsc_idx = 12;
			else
				return 0xff;
		} else {
			txsc_idx = pri_ch > central_ch ? 13 : 14;
		}
		break;
	case RTW89_CHANNEL_WIDTH_80_80:
		if (dbw == RTW89_CHANNEL_WIDTH_20) {
			if (pri_ch > central_ch)
				txsc_idx = (10 - (pri_ch - central_ch)) >> 1;
			else
				txsc_idx = ((central_ch - pri_ch) >> 1) + 5;
		} else if (dbw == RTW89_CHANNEL_WIDTH_40) {
			txsc_idx = pri_ch > central_ch ? 10 : 12;
		} else {
			txsc_idx = 14;
		}
		break;
	default:
		break;
	}

	return txsc_idx;
}
EXPORT_SYMBOL(rtw89_phy_get_txsc);

static bool rtw89_phy_check_swsi_busy(struct rtw89_dev *rtwdev)
{
	return !!rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, B_SWSI_W_BUSY_V1) ||
	       !!rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, B_SWSI_R_BUSY_V1);
}
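
/* Direct RF register read: each RF register is shadowed into the BB/PHY
 * address space at chip->rf_base_addr[path] + (addr << 2), so it can be
 * fetched with a plain 32-bit PHY read instead of the serial SWSI interface
 * used by rtw89_phy_read_rf_a() below.
 */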
u32 rtw89_phy_read_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
		      u32 addr, u32 mask)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const u32 *base_addr = chip->rf_base_addr;
	u32 val, direct_addr;

	if (rf_path >= rtwdev->chip->rf_path_num) {
		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return INV_RF_DATA;
	}

	addr &= 0xff;
	direct_addr = base_addr[rf_path] + (addr << 2);
	mask &= RFREG_MASK;

	val = rtw89_phy_read32_mask(rtwdev, direct_addr, mask);

	return val;
}
EXPORT_SYMBOL(rtw89_phy_read_rf);

static u32 rtw89_phy_read_rf_a(struct rtw89_dev *rtwdev,
			       enum rtw89_rf_path rf_path, u32 addr, u32 mask)
{
	bool busy;
	bool done;
	u32 val;
	int ret;

	ret = read_poll_timeout_atomic(rtw89_phy_check_swsi_busy, busy, !busy,
				       1, 30, false, rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "read rf busy swsi\n");
		return INV_RF_DATA;
	}

	mask &= RFREG_MASK;

	val = FIELD_PREP(B_SWSI_READ_ADDR_PATH_V1, rf_path) |
	      FIELD_PREP(B_SWSI_READ_ADDR_ADDR_V1, addr);
	rtw89_phy_write32_mask(rtwdev, R_SWSI_READ_ADDR_V1, B_SWSI_READ_ADDR_V1, val);
	udelay(2);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, done, done, 1,
				       30, false, rtwdev, R_SWSI_V1,
				       B_SWSI_R_DATA_DONE_V1);
	if (ret) {
		rtw89_err(rtwdev, "read swsi busy\n");
		return INV_RF_DATA;
	}

	return rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, mask);
}

u32 rtw89_phy_read_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
			 u32 addr, u32 mask)
{
	bool ad_sel = FIELD_GET(RTW89_RF_ADDR_ADSEL_MASK, addr);

	if (rf_path >= rtwdev->chip->rf_path_num) {
		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return INV_RF_DATA;
	}

	if (ad_sel)
		return rtw89_phy_read_rf(rtwdev, rf_path, addr, mask);
	else
		return rtw89_phy_read_rf_a(rtwdev, rf_path, addr, mask);
}
EXPORT_SYMBOL(rtw89_phy_read_rf_v1);

bool rtw89_phy_write_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
			u32 addr, u32 mask, u32 data)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const u32 *base_addr = chip->rf_base_addr;
	u32 direct_addr;

	if (rf_path >= rtwdev->chip->rf_path_num) {
		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return false;
	}

	addr &= 0xff;
	direct_addr = base_addr[rf_path] + (addr << 2);
	mask &= RFREG_MASK;

	rtw89_phy_write32_mask(rtwdev, direct_addr, mask, data);

	/* delay to ensure writing properly */
	udelay(1);

	return true;
}
EXPORT_SYMBOL(rtw89_phy_write_rf);

static bool rtw89_phy_write_rf_a(struct rtw89_dev *rtwdev,
				 enum rtw89_rf_path rf_path, u32 addr, u32 mask,
				 u32 data)
{
	u8 bit_shift;
	u32 val;
	bool busy, b_msk_en = false;
	int ret;

	ret = read_poll_timeout_atomic(rtw89_phy_check_swsi_busy, busy, !busy,
				       1, 30, false, rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "write rf busy swsi\n");
		return false;
	}

	data &= RFREG_MASK;
	mask &= RFREG_MASK;

	if (mask != RFREG_MASK) {
		b_msk_en = true;
		rtw89_phy_write32_mask(rtwdev, R_SWSI_BIT_MASK_V1, RFREG_MASK,
				       mask);
		bit_shift = __ffs(mask);
		data = (data << bit_shift) & RFREG_MASK;
	}

	val = FIELD_PREP(B_SWSI_DATA_BIT_MASK_EN_V1, b_msk_en) |
	      FIELD_PREP(B_SWSI_DATA_PATH_V1, rf_path) |
	      FIELD_PREP(B_SWSI_DATA_ADDR_V1, addr) |
	      FIELD_PREP(B_SWSI_DATA_VAL_V1, data);

	rtw89_phy_write32_mask(rtwdev, R_SWSI_DATA_V1, MASKDWORD, val);

	return true;
}

bool rtw89_phy_write_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
			   u32 addr, u32 mask, u32 data)
{
	bool ad_sel = FIELD_GET(RTW89_RF_ADDR_ADSEL_MASK, addr);

	if (rf_path >= rtwdev->chip->rf_path_num) {
		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return false;
	}

	if (ad_sel)
		return rtw89_phy_write_rf(rtwdev, rf_path, addr, mask, data);
	else
		return rtw89_phy_write_rf_a(rtwdev, rf_path, addr, mask, data);
}
EXPORT_SYMBOL(rtw89_phy_write_rf_v1);

static bool rtw89_chip_rf_v1(struct rtw89_dev *rtwdev)
{
	return rtwdev->chip->ops->write_rf == rtw89_phy_write_rf_v1;
}

static void rtw89_phy_bb_reset(struct rtw89_dev *rtwdev,
			       enum rtw89_phy_idx phy_idx)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;

	chip->ops->bb_reset(rtwdev, phy_idx);
}
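
/* Apply one entry of a BB parameter table. Addresses 0xf9-0xfe are not real
 * registers: they encode fixed delays (1 us up to 50 ms) used by the table
 * for flow control between register writes.
 */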
static void rtw89_phy_config_bb_reg(struct rtw89_dev *rtwdev,
				    const struct rtw89_reg2_def *reg,
				    enum rtw89_rf_path rf_path,
				    void *extra_data)
{
	if (reg->addr == 0xfe)
		mdelay(50);
	else if (reg->addr == 0xfd)
		mdelay(5);
	else if (reg->addr == 0xfc)
		mdelay(1);
	else if (reg->addr == 0xfb)
		udelay(50);
	else if (reg->addr == 0xfa)
		udelay(5);
	else if (reg->addr == 0xf9)
		udelay(1);
	else
		rtw89_phy_write32(rtwdev, reg->addr, reg->data);
}
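
/* BB gain tables reuse the 32-bit "address" word as a descriptor: the first
 * byte holds either a type id or an rxsc_start/bw pair, followed by the RF
 * path, gain band, and config type bytes, decoded via the union below.
 */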
union rtw89_phy_bb_gain_arg {
	u32 addr;
	struct {
		union {
			u8 type;
			struct {
				u8 rxsc_start:4;
				u8 bw:4;
			};
		};
		u8 path;
		u8 gain_band;
		u8 cfg_type;
	};
} __packed;

static void
rtw89_phy_cfg_bb_gain_error(struct rtw89_dev *rtwdev,
			    union rtw89_phy_bb_gain_arg arg, u32 data)
{
	struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
	u8 type = arg.type;
	u8 path = arg.path;
	u8 gband = arg.gain_band;
	int i;

	switch (type) {
	case 0:
		for (i = 0; i < 4; i++, data >>= 8)
			gain->lna_gain[gband][path][i] = data & 0xff;
		break;
	case 1:
		for (i = 4; i < 7; i++, data >>= 8)
			gain->lna_gain[gband][path][i] = data & 0xff;
		break;
	case 2:
		for (i = 0; i < 2; i++, data >>= 8)
			gain->tia_gain[gband][path][i] = data & 0xff;
		break;
	default:
		rtw89_warn(rtwdev,
			   "bb gain error {0x%x:0x%x} with unknown type: %d\n",
			   arg.addr, data, type);
		break;
	}
}

enum rtw89_phy_bb_rxsc_start_idx {
	RTW89_BB_RXSC_START_IDX_FULL = 0,
	RTW89_BB_RXSC_START_IDX_20 = 1,
	RTW89_BB_RXSC_START_IDX_20_1 = 5,
	RTW89_BB_RXSC_START_IDX_40 = 9,
	RTW89_BB_RXSC_START_IDX_80 = 13,
};

static void
rtw89_phy_cfg_bb_rpl_ofst(struct rtw89_dev *rtwdev,
			  union rtw89_phy_bb_gain_arg arg, u32 data)
{
	struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
	u8 rxsc_start = arg.rxsc_start;
	u8 bw = arg.bw;
	u8 path = arg.path;
	u8 gband = arg.gain_band;
	u8 rxsc;
	s8 ofst;
	int i;

	switch (bw) {
	case RTW89_CHANNEL_WIDTH_20:
		gain->rpl_ofst_20[gband][path] = (s8)data;
		break;
	case RTW89_CHANNEL_WIDTH_40:
		if (rxsc_start == RTW89_BB_RXSC_START_IDX_FULL) {
			gain->rpl_ofst_40[gband][path][0] = (s8)data;
		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20) {
			for (i = 0; i < 2; i++, data >>= 8) {
				rxsc = RTW89_BB_RXSC_START_IDX_20 + i;
				ofst = (s8)(data & 0xff);
				gain->rpl_ofst_40[gband][path][rxsc] = ofst;
			}
		}
		break;
	case RTW89_CHANNEL_WIDTH_80:
		if (rxsc_start == RTW89_BB_RXSC_START_IDX_FULL) {
			gain->rpl_ofst_80[gband][path][0] = (s8)data;
		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20) {
			for (i = 0; i < 4; i++, data >>= 8) {
				rxsc = RTW89_BB_RXSC_START_IDX_20 + i;
				ofst = (s8)(data & 0xff);
				gain->rpl_ofst_80[gband][path][rxsc] = ofst;
			}
		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_40) {
			for (i = 0; i < 2; i++, data >>= 8) {
				rxsc = RTW89_BB_RXSC_START_IDX_40 + i;
				ofst = (s8)(data & 0xff);
				gain->rpl_ofst_80[gband][path][rxsc] = ofst;
			}
		}
		break;
	case RTW89_CHANNEL_WIDTH_160:
		if (rxsc_start == RTW89_BB_RXSC_START_IDX_FULL) {
			gain->rpl_ofst_160[gband][path][0] = (s8)data;
		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20) {
			for (i = 0; i < 4; i++, data >>= 8) {
				rxsc = RTW89_BB_RXSC_START_IDX_20 + i;
				ofst = (s8)(data & 0xff);
				gain->rpl_ofst_160[gband][path][rxsc] = ofst;
			}
		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20_1) {
			for (i = 0; i < 4; i++, data >>= 8) {
				rxsc = RTW89_BB_RXSC_START_IDX_20_1 + i;
				ofst = (s8)(data & 0xff);
				gain->rpl_ofst_160[gband][path][rxsc] = ofst;
			}
		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_40) {
			for (i = 0; i < 4; i++, data >>= 8) {
				rxsc = RTW89_BB_RXSC_START_IDX_40 + i;
				ofst = (s8)(data & 0xff);
				gain->rpl_ofst_160[gband][path][rxsc] = ofst;
			}
		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_80) {
			for (i = 0; i < 2; i++, data >>= 8) {
				rxsc = RTW89_BB_RXSC_START_IDX_80 + i;
				ofst = (s8)(data & 0xff);
				gain->rpl_ofst_160[gband][path][rxsc] = ofst;
			}
		}
		break;
	default:
		rtw89_warn(rtwdev,
			   "bb rpl ofst {0x%x:0x%x} with unknown bw: %d\n",
			   arg.addr, data, bw);
		break;
	}
}

static void
rtw89_phy_cfg_bb_gain_bypass(struct rtw89_dev *rtwdev,
			     union rtw89_phy_bb_gain_arg arg, u32 data)
{
	struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
	u8 type = arg.type;
	u8 path = arg.path;
	u8 gband = arg.gain_band;
	int i;

	switch (type) {
	case 0:
		for (i = 0; i < 4; i++, data >>= 8)
			gain->lna_gain_bypass[gband][path][i] = data & 0xff;
		break;
	case 1:
		for (i = 4; i < 7; i++, data >>= 8)
			gain->lna_gain_bypass[gband][path][i] = data & 0xff;
		break;
	default:
		rtw89_warn(rtwdev,
			   "bb gain bypass {0x%x:0x%x} with unknown type: %d\n",
			   arg.addr, data, type);
		break;
	}
}

static void
rtw89_phy_cfg_bb_gain_op1db(struct rtw89_dev *rtwdev,
			    union rtw89_phy_bb_gain_arg arg, u32 data)
{
	struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
	u8 type = arg.type;
	u8 path = arg.path;
	u8 gband = arg.gain_band;
	int i;

	switch (type) {
	case 0:
		for (i = 0; i < 4; i++, data >>= 8)
			gain->lna_op1db[gband][path][i] = data & 0xff;
		break;
	case 1:
		for (i = 4; i < 7; i++, data >>= 8)
			gain->lna_op1db[gband][path][i] = data & 0xff;
		break;
	case 2:
		for (i = 0; i < 4; i++, data >>= 8)
			gain->tia_lna_op1db[gband][path][i] = data & 0xff;
		break;
	case 3:
		for (i = 4; i < 8; i++, data >>= 8)
			gain->tia_lna_op1db[gband][path][i] = data & 0xff;
		break;
	default:
		rtw89_warn(rtwdev,
			   "bb gain op1db {0x%x:0x%x} with unknown type: %d\n",
			   arg.addr, data, type);
		break;
	}
}

static void rtw89_phy_config_bb_gain(struct rtw89_dev *rtwdev,
				     const struct rtw89_reg2_def *reg,
				     enum rtw89_rf_path rf_path,
				     void *extra_data)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	union rtw89_phy_bb_gain_arg arg = { .addr = reg->addr };
	struct rtw89_efuse *efuse = &rtwdev->efuse;

	if (arg.gain_band >= RTW89_BB_GAIN_BAND_NR)
		return;

	if (arg.path >= chip->rf_path_num)
		return;

	if (arg.addr >= 0xf9 && arg.addr <= 0xfe) {
		rtw89_warn(rtwdev, "bb gain table with flow ctrl\n");
		return;
	}

	switch (arg.cfg_type) {
	case 0:
		rtw89_phy_cfg_bb_gain_error(rtwdev, arg, reg->data);
		break;
	case 1:
		rtw89_phy_cfg_bb_rpl_ofst(rtwdev, arg, reg->data);
		break;
	case 2:
		rtw89_phy_cfg_bb_gain_bypass(rtwdev, arg, reg->data);
		break;
	case 3:
		rtw89_phy_cfg_bb_gain_op1db(rtwdev, arg, reg->data);
		break;
	case 4:
		/* This cfg_type is only used by rfe_type >= 50 with eFEM */
		if (efuse->rfe_type < 50)
			break;
		fallthrough;
	default:
		rtw89_warn(rtwdev,
			   "bb gain {0x%x:0x%x} with unknown cfg type: %d\n",
			   arg.addr, reg->data, arg.cfg_type);
		break;
	}
}
  1101. static void
  1102. rtw89_phy_cofig_rf_reg_store(struct rtw89_dev *rtwdev,
  1103. const struct rtw89_reg2_def *reg,
  1104. enum rtw89_rf_path rf_path,
  1105. struct rtw89_fw_h2c_rf_reg_info *info)
  1106. {
  1107. u16 idx = info->curr_idx % RTW89_H2C_RF_PAGE_SIZE;
  1108. u8 page = info->curr_idx / RTW89_H2C_RF_PAGE_SIZE;
  1109. if (page >= RTW89_H2C_RF_PAGE_NUM) {
  1110. rtw89_warn(rtwdev, "RF parameters exceed size. path=%d, idx=%d",
  1111. rf_path, info->curr_idx);
  1112. return;
  1113. }
  1114. info->rtw89_phy_config_rf_h2c[page][idx] =
  1115. cpu_to_le32((reg->addr << 20) | reg->data);
  1116. info->curr_idx++;
  1117. }
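
/* Each queued H2C word carries the RF register address in bits 31:20
 * and the 20-bit register value in bits 19:0, per the
 * (reg->addr << 20) | reg->data encoding above; curr_idx indexes the
 * flat stream, which page/idx split into fixed-size H2C pages.
 */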
static int rtw89_phy_config_rf_reg_fw(struct rtw89_dev *rtwdev,
                                      struct rtw89_fw_h2c_rf_reg_info *info)
{
    u16 remain = info->curr_idx;
    u16 len = 0;
    u8 i;
    int ret = 0;

    if (remain > RTW89_H2C_RF_PAGE_NUM * RTW89_H2C_RF_PAGE_SIZE) {
        rtw89_warn(rtwdev,
                   "rf reg h2c total len %d larger than %d\n",
                   remain, RTW89_H2C_RF_PAGE_NUM * RTW89_H2C_RF_PAGE_SIZE);
        ret = -EINVAL;
        goto out;
    }

    for (i = 0; i < RTW89_H2C_RF_PAGE_NUM && remain; i++, remain -= len) {
        len = remain > RTW89_H2C_RF_PAGE_SIZE ? RTW89_H2C_RF_PAGE_SIZE : remain;
        ret = rtw89_fw_h2c_rf_reg(rtwdev, info, len * 4, i);
        if (ret)
            goto out;
    }
out:
    info->curr_idx = 0;
    return ret;
}
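
/* The stored words are flushed page by page: each rtw89_fw_h2c_rf_reg()
 * call sends at most RTW89_H2C_RF_PAGE_SIZE entries (len * 4 bytes),
 * and curr_idx is reset on the way out so the same buffer can be
 * reused for the next RF path.
 */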
static void rtw89_phy_config_rf_reg_noio(struct rtw89_dev *rtwdev,
                                         const struct rtw89_reg2_def *reg,
                                         enum rtw89_rf_path rf_path,
                                         void *extra_data)
{
    u32 addr = reg->addr;

    if (addr == 0xfe || addr == 0xfd || addr == 0xfc || addr == 0xfb ||
        addr == 0xfa || addr == 0xf9)
        return;

    if (rtw89_chip_rf_v1(rtwdev) && addr < 0x100)
        return;

    rtw89_phy_cofig_rf_reg_store(rtwdev, reg, rf_path,
                                 (struct rtw89_fw_h2c_rf_reg_info *)extra_data);
}

static void rtw89_phy_config_rf_reg(struct rtw89_dev *rtwdev,
                                    const struct rtw89_reg2_def *reg,
                                    enum rtw89_rf_path rf_path,
                                    void *extra_data)
{
    if (reg->addr == 0xfe) {
        mdelay(50);
    } else if (reg->addr == 0xfd) {
        mdelay(5);
    } else if (reg->addr == 0xfc) {
        mdelay(1);
    } else if (reg->addr == 0xfb) {
        udelay(50);
    } else if (reg->addr == 0xfa) {
        udelay(5);
    } else if (reg->addr == 0xf9) {
        udelay(1);
    } else {
        rtw89_write_rf(rtwdev, rf_path, reg->addr, 0xfffff, reg->data);
        rtw89_phy_cofig_rf_reg_store(rtwdev, reg, rf_path,
                                     (struct rtw89_fw_h2c_rf_reg_info *)extra_data);
    }
}
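
/* Pseudo-addresses 0xf9-0xfe in the RF tables are not real registers:
 * they encode delays to insert between writes (1/5/50 us for
 * 0xf9/0xfa/0xfb and 1/5/50 ms for 0xfc/0xfd/0xfe), which is why the
 * noio variant above simply skips them instead of storing them.
 */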
void rtw89_phy_config_rf_reg_v1(struct rtw89_dev *rtwdev,
                                const struct rtw89_reg2_def *reg,
                                enum rtw89_rf_path rf_path,
                                void *extra_data)
{
    rtw89_write_rf(rtwdev, rf_path, reg->addr, RFREG_MASK, reg->data);

    if (reg->addr < 0x100)
        return;

    rtw89_phy_cofig_rf_reg_store(rtwdev, reg, rf_path,
                                 (struct rtw89_fw_h2c_rf_reg_info *)extra_data);
}
EXPORT_SYMBOL(rtw89_phy_config_rf_reg_v1);

static int rtw89_phy_sel_headline(struct rtw89_dev *rtwdev,
                                  const struct rtw89_phy_table *table,
                                  u32 *headline_size, u32 *headline_idx,
                                  u8 rfe, u8 cv)
{
    const struct rtw89_reg2_def *reg;
    u32 headline;
    u32 compare, target;
    u8 rfe_para, cv_para;
    u8 cv_max = 0;
    bool case_matched = false;
    u32 i;

    for (i = 0; i < table->n_regs; i++) {
        reg = &table->regs[i];
        headline = get_phy_headline(reg->addr);
        if (headline != PHY_HEADLINE_VALID)
            break;
    }
    *headline_size = i;
    if (*headline_size == 0)
        return 0;

    /* case 1: RFE match, CV match */
    compare = get_phy_compare(rfe, cv);
    for (i = 0; i < *headline_size; i++) {
        reg = &table->regs[i];
        target = get_phy_target(reg->addr);
        if (target == compare) {
            *headline_idx = i;
            return 0;
        }
    }

    /* case 2: RFE match, CV don't care */
    compare = get_phy_compare(rfe, PHY_COND_DONT_CARE);
    for (i = 0; i < *headline_size; i++) {
        reg = &table->regs[i];
        target = get_phy_target(reg->addr);
        if (target == compare) {
            *headline_idx = i;
            return 0;
        }
    }

    /* case 3: RFE match, CV max in table */
    for (i = 0; i < *headline_size; i++) {
        reg = &table->regs[i];
        rfe_para = get_phy_cond_rfe(reg->addr);
        cv_para = get_phy_cond_cv(reg->addr);
        if (rfe_para == rfe) {
            if (cv_para >= cv_max) {
                cv_max = cv_para;
                *headline_idx = i;
                case_matched = true;
            }
        }
    }
    if (case_matched)
        return 0;

    /* case 4: RFE don't care, CV max in table */
    for (i = 0; i < *headline_size; i++) {
        reg = &table->regs[i];
        rfe_para = get_phy_cond_rfe(reg->addr);
        cv_para = get_phy_cond_cv(reg->addr);
        if (rfe_para == PHY_COND_DONT_CARE) {
            if (cv_para >= cv_max) {
                cv_max = cv_para;
                *headline_idx = i;
                case_matched = true;
            }
        }
    }
    if (case_matched)
        return 0;

    return -EINVAL;
}
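
/* Headline selection thus degrades gracefully: an exact RFE+CV match
 * wins first, then this RFE with a don't-care CV, then the highest CV
 * listed for this RFE, and finally the highest CV among don't-care RFE
 * entries; only when all four passes fail is the table rejected with
 * -EINVAL.
 */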
static void rtw89_phy_init_reg(struct rtw89_dev *rtwdev,
                               const struct rtw89_phy_table *table,
                               void (*config)(struct rtw89_dev *rtwdev,
                                              const struct rtw89_reg2_def *reg,
                                              enum rtw89_rf_path rf_path,
                                              void *data),
                               void *extra_data)
{
    const struct rtw89_reg2_def *reg;
    enum rtw89_rf_path rf_path = table->rf_path;
    u8 rfe = rtwdev->efuse.rfe_type;
    u8 cv = rtwdev->hal.cv;
    u32 i;
    u32 headline_size = 0, headline_idx = 0;
    u32 target = 0, cfg_target;
    u8 cond;
    bool is_matched = true;
    bool target_found = false;
    int ret;

    ret = rtw89_phy_sel_headline(rtwdev, table, &headline_size,
                                 &headline_idx, rfe, cv);
    if (ret) {
        rtw89_err(rtwdev, "invalid PHY package: %d/%d\n", rfe, cv);
        return;
    }

    cfg_target = get_phy_target(table->regs[headline_idx].addr);
    for (i = headline_size; i < table->n_regs; i++) {
        reg = &table->regs[i];
        cond = get_phy_cond(reg->addr);
        switch (cond) {
        case PHY_COND_BRANCH_IF:
        case PHY_COND_BRANCH_ELIF:
            target = get_phy_target(reg->addr);
            break;
        case PHY_COND_BRANCH_ELSE:
            is_matched = false;
            if (!target_found) {
                rtw89_warn(rtwdev, "failed to load CR %x/%x\n",
                           reg->addr, reg->data);
                return;
            }
            break;
        case PHY_COND_BRANCH_END:
            is_matched = true;
            target_found = false;
            break;
        case PHY_COND_CHECK:
            if (target_found) {
                is_matched = false;
                break;
            }

            if (target == cfg_target) {
                is_matched = true;
                target_found = true;
            } else {
                is_matched = false;
                target_found = false;
            }
            break;
        default:
            if (is_matched)
                config(rtwdev, reg, rf_path, extra_data);
            break;
        }
    }
}
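
/* The loop above is a small interpreter for the conditional PHY
 * tables: IF/ELIF entries latch a candidate target, CHECK compares it
 * against the headline-selected cfg_target, ELSE only passes if some
 * earlier branch matched, END re-arms matching, and every plain entry
 * is applied through config() while is_matched holds.
 */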
void rtw89_phy_init_bb_reg(struct rtw89_dev *rtwdev)
{
    struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
    const struct rtw89_chip_info *chip = rtwdev->chip;
    const struct rtw89_phy_table *bb_table;
    const struct rtw89_phy_table *bb_gain_table;

    bb_table = elm_info->bb_tbl ? elm_info->bb_tbl : chip->bb_table;
    rtw89_phy_init_reg(rtwdev, bb_table, rtw89_phy_config_bb_reg, NULL);
    rtw89_chip_init_txpwr_unit(rtwdev, RTW89_PHY_0);

    bb_gain_table = elm_info->bb_gain ? elm_info->bb_gain : chip->bb_gain_table;
    if (bb_gain_table)
        rtw89_phy_init_reg(rtwdev, bb_gain_table,
                           rtw89_phy_config_bb_gain, NULL);
    rtw89_phy_bb_reset(rtwdev, RTW89_PHY_0);
}

static u32 rtw89_phy_nctl_poll(struct rtw89_dev *rtwdev)
{
    rtw89_phy_write32(rtwdev, 0x8080, 0x4);
    udelay(1);
    return rtw89_phy_read32(rtwdev, 0x8080);
}

void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev, bool noio)
{
    void (*config)(struct rtw89_dev *rtwdev, const struct rtw89_reg2_def *reg,
                   enum rtw89_rf_path rf_path, void *data);
    struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
    const struct rtw89_chip_info *chip = rtwdev->chip;
    const struct rtw89_phy_table *rf_table;
    struct rtw89_fw_h2c_rf_reg_info *rf_reg_info;
    u8 path;

    rf_reg_info = kzalloc(sizeof(*rf_reg_info), GFP_KERNEL);
    if (!rf_reg_info)
        return;

    for (path = RF_PATH_A; path < chip->rf_path_num; path++) {
        rf_table = elm_info->rf_radio[path] ?
                   elm_info->rf_radio[path] : chip->rf_table[path];
        rf_reg_info->rf_path = rf_table->rf_path;
        if (noio)
            config = rtw89_phy_config_rf_reg_noio;
        else
            config = rf_table->config ? rf_table->config :
                     rtw89_phy_config_rf_reg;
        rtw89_phy_init_reg(rtwdev, rf_table, config, (void *)rf_reg_info);
        if (rtw89_phy_config_rf_reg_fw(rtwdev, rf_reg_info))
            rtw89_warn(rtwdev, "rf path %d reg h2c config failed\n",
                       rf_reg_info->rf_path);
    }
    kfree(rf_reg_info);
}

static void rtw89_phy_init_rf_nctl(struct rtw89_dev *rtwdev)
{
    struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
    const struct rtw89_chip_info *chip = rtwdev->chip;
    const struct rtw89_phy_table *nctl_table;
    u32 val;
    int ret;

    /* IQK/DPK clock & reset */
    rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, 0x3);
    rtw89_phy_write32_set(rtwdev, R_GNT_BT_WGT_EN, 0x1);
    rtw89_phy_write32_set(rtwdev, R_P0_PATH_RST, 0x8000000);
    if (chip->chip_id != RTL8851B)
        rtw89_phy_write32_set(rtwdev, R_P1_PATH_RST, 0x8000000);
    if (chip->chip_id == RTL8852B)
        rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, 0x2);

    /* check 0x8080 */
    rtw89_phy_write32(rtwdev, R_NCTL_CFG, 0x8);

    ret = read_poll_timeout(rtw89_phy_nctl_poll, val, val == 0x4, 10,
                            1000, false, rtwdev);
    if (ret)
        rtw89_err(rtwdev, "failed to poll nctl block\n");

    nctl_table = elm_info->rf_nctl ? elm_info->rf_nctl : chip->nctl_table;
    rtw89_phy_init_reg(rtwdev, nctl_table, rtw89_phy_config_bb_reg, NULL);

    if (chip->nctl_post_table)
        rtw89_rfk_parser(rtwdev, chip->nctl_post_table);
}

static u32 rtw89_phy0_phy1_offset(struct rtw89_dev *rtwdev, u32 addr)
{
    u32 phy_page = addr >> 8;
    u32 ofst = 0;

    if (rtwdev->chip->chip_gen == RTW89_CHIP_BE)
        return addr < 0x10000 ? 0x20000 : 0;

    switch (phy_page) {
    case 0x6:
    case 0x7:
    case 0x8:
    case 0x9:
    case 0xa:
    case 0xb:
    case 0xc:
    case 0xd:
    case 0x19:
    case 0x1a:
    case 0x1b:
        ofst = 0x2000;
        break;
    default:
        /* warning case */
        ofst = 0;
        break;
    }

    if (phy_page >= 0x40 && phy_page <= 0x4f)
        ofst = 0x2000;

    return ofst;
}
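
/* PHY1 apparently mirrors the listed PHY0 register pages at a fixed
 * stride (0x2000 here for the AX generation, 0x20000 for BE), so when
 * DBCC is enabled an access aimed at RTW89_PHY_1 only needs this
 * offset added on top of the PHY0 address, as the idx accessors below
 * do.
 */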
void rtw89_phy_write32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
                           u32 data, enum rtw89_phy_idx phy_idx)
{
    if (rtwdev->dbcc_en && phy_idx == RTW89_PHY_1)
        addr += rtw89_phy0_phy1_offset(rtwdev, addr);
    rtw89_phy_write32_mask(rtwdev, addr, mask, data);
}
EXPORT_SYMBOL(rtw89_phy_write32_idx);

u32 rtw89_phy_read32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
                         enum rtw89_phy_idx phy_idx)
{
    if (rtwdev->dbcc_en && phy_idx == RTW89_PHY_1)
        addr += rtw89_phy0_phy1_offset(rtwdev, addr);
    return rtw89_phy_read32_mask(rtwdev, addr, mask);
}
EXPORT_SYMBOL(rtw89_phy_read32_idx);

void rtw89_phy_set_phy_regs(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
                            u32 val)
{
    rtw89_phy_write32_idx(rtwdev, addr, mask, val, RTW89_PHY_0);

    if (!rtwdev->dbcc_en)
        return;

    rtw89_phy_write32_idx(rtwdev, addr, mask, val, RTW89_PHY_1);
}

void rtw89_phy_write_reg3_tbl(struct rtw89_dev *rtwdev,
                              const struct rtw89_phy_reg3_tbl *tbl)
{
    const struct rtw89_reg3_def *reg3;
    int i;

    for (i = 0; i < tbl->size; i++) {
        reg3 = &tbl->reg3[i];
        rtw89_phy_write32_mask(rtwdev, reg3->addr, reg3->mask, reg3->data);
    }
}
EXPORT_SYMBOL(rtw89_phy_write_reg3_tbl);

static const u8 rtw89_rs_idx_num[] = {
    [RTW89_RS_CCK] = RTW89_RATE_CCK_NUM,
    [RTW89_RS_OFDM] = RTW89_RATE_OFDM_NUM,
    [RTW89_RS_MCS] = RTW89_RATE_MCS_NUM_AX,
    [RTW89_RS_HEDCM] = RTW89_RATE_HEDCM_NUM,
    [RTW89_RS_OFFSET] = RTW89_RATE_OFFSET_NUM_AX,
};

static const u8 rtw89_rs_nss_num[] = {
    [RTW89_RS_CCK] = 1,
    [RTW89_RS_OFDM] = 1,
    [RTW89_RS_MCS] = RTW89_NSS_NUM,
    [RTW89_RS_HEDCM] = RTW89_NSS_HEDCM_NUM,
    [RTW89_RS_OFFSET] = 1,
};

s8 *rtw89_phy_raw_byr_seek(struct rtw89_dev *rtwdev,
                           struct rtw89_txpwr_byrate *head,
                           const struct rtw89_rate_desc *desc)
{
    switch (desc->rs) {
    case RTW89_RS_CCK:
        return &head->cck[desc->idx];
    case RTW89_RS_OFDM:
        return &head->ofdm[desc->idx];
    case RTW89_RS_MCS:
        return &head->mcs[desc->ofdma][desc->nss][desc->idx];
    case RTW89_RS_HEDCM:
        return &head->hedcm[desc->ofdma][desc->nss][desc->idx];
    case RTW89_RS_OFFSET:
        return &head->offset[desc->idx];
    default:
        rtw89_warn(rtwdev, "unrecognized byr rs: %d\n", desc->rs);
        return &head->trap;
    }
}

void rtw89_phy_load_txpwr_byrate(struct rtw89_dev *rtwdev,
                                 const struct rtw89_txpwr_table *tbl)
{
    const struct rtw89_txpwr_byrate_cfg *cfg = tbl->data;
    const struct rtw89_txpwr_byrate_cfg *end = cfg + tbl->size;
    struct rtw89_txpwr_byrate *byr_head;
    struct rtw89_rate_desc desc = {};
    s8 *byr;
    u32 data;
    u8 i;

    for (; cfg < end; cfg++) {
        byr_head = &rtwdev->byr[cfg->band][0];
        desc.rs = cfg->rs;
        desc.nss = cfg->nss;
        data = cfg->data;

        for (i = 0; i < cfg->len; i++, data >>= 8) {
            desc.idx = cfg->shf + i;
            byr = rtw89_phy_raw_byr_seek(rtwdev, byr_head, &desc);
            *byr = data & 0xff;
        }
    }
}
EXPORT_SYMBOL(rtw89_phy_load_txpwr_byrate);
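
/* Like the gain tables, each byrate cfg entry packs consecutive s8
 * power values into one u32, low byte first; cfg->shf gives the
 * starting rate index and cfg->len how many bytes are meaningful, so a
 * single entry seeds a short run of the by-rate array.
 */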
static s8 rtw89_phy_txpwr_rf_to_mac(struct rtw89_dev *rtwdev, s8 txpwr_rf)
{
    const struct rtw89_chip_info *chip = rtwdev->chip;

    return txpwr_rf >> (chip->txpwr_factor_rf - chip->txpwr_factor_mac);
}

static
s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev, u8 band, u8 bw,
                               const struct rtw89_rate_desc *rate_desc)
{
    struct rtw89_txpwr_byrate *byr_head;
    s8 *byr;

    if (rate_desc->rs == RTW89_RS_CCK)
        band = RTW89_BAND_2G;

    byr_head = &rtwdev->byr[band][bw];
    byr = rtw89_phy_raw_byr_seek(rtwdev, byr_head, rate_desc);

    return rtw89_phy_txpwr_rf_to_mac(rtwdev, *byr);
}

static u8 rtw89_channel_6g_to_idx(struct rtw89_dev *rtwdev, u8 channel_6g)
{
    switch (channel_6g) {
    case 1 ... 29:
        return (channel_6g - 1) / 2;
    case 33 ... 61:
        return (channel_6g - 3) / 2;
    case 65 ... 93:
        return (channel_6g - 5) / 2;
    case 97 ... 125:
        return (channel_6g - 7) / 2;
    case 129 ... 157:
        return (channel_6g - 9) / 2;
    case 161 ... 189:
        return (channel_6g - 11) / 2;
    case 193 ... 221:
        return (channel_6g - 13) / 2;
    case 225 ... 253:
        return (channel_6g - 15) / 2;
    default:
        rtw89_warn(rtwdev, "unknown 6g channel: %d\n", channel_6g);
        return 0;
    }
}

static u8 rtw89_channel_to_idx(struct rtw89_dev *rtwdev, u8 band, u8 channel)
{
    if (band == RTW89_BAND_6G)
        return rtw89_channel_6g_to_idx(rtwdev, channel);

    switch (channel) {
    case 1 ... 14:
        return channel - 1;
    case 36 ... 64:
        return (channel - 36) / 2;
    case 100 ... 144:
        return ((channel - 100) / 2) + 15;
    case 149 ... 177:
        return ((channel - 149) / 2) + 38;
    default:
        rtw89_warn(rtwdev, "unknown channel: %d\n", channel);
        return 0;
    }
}
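
/* The mapping packs the sparse regulatory channel lists into dense
 * table indexes, e.g. 5 GHz channel 36 -> 0, 64 -> 14, 100 -> 15,
 * 144 -> 37, 149 -> 38, so every valid channel lands on a unique slot
 * of the limit arrays indexed below.
 */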
s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
                              u8 bw, u8 ntx, u8 rs, u8 bf, u8 ch)
{
    const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms;
    const struct rtw89_txpwr_rule_2ghz *rule_2ghz = &rfe_parms->rule_2ghz;
    const struct rtw89_txpwr_rule_5ghz *rule_5ghz = &rfe_parms->rule_5ghz;
    const struct rtw89_txpwr_rule_6ghz *rule_6ghz = &rfe_parms->rule_6ghz;
    struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
    u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch);
    u8 regd = rtw89_regd_get(rtwdev, band);
    u8 reg6 = regulatory->reg_6ghz_power;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
    enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
    u32 freq = ieee80211_channel_to_frequency(ch, nl_band);
    s8 lmt = 0, sar;
#else
    s8 lmt = 0;
#endif

    switch (band) {
    case RTW89_BAND_2G:
        lmt = (*rule_2ghz->lmt)[bw][ntx][rs][bf][regd][ch_idx];
        if (lmt)
            break;

        lmt = (*rule_2ghz->lmt)[bw][ntx][rs][bf][RTW89_WW][ch_idx];
        break;
    case RTW89_BAND_5G:
        lmt = (*rule_5ghz->lmt)[bw][ntx][rs][bf][regd][ch_idx];
        if (lmt)
            break;

        lmt = (*rule_5ghz->lmt)[bw][ntx][rs][bf][RTW89_WW][ch_idx];
        break;
    case RTW89_BAND_6G:
        lmt = (*rule_6ghz->lmt)[bw][ntx][rs][bf][regd][reg6][ch_idx];
        if (lmt)
            break;

        lmt = (*rule_6ghz->lmt)[bw][ntx][rs][bf][RTW89_WW]
                               [RTW89_REG_6GHZ_POWER_DFLT]
                               [ch_idx];
        break;
    default:
        rtw89_warn(rtwdev, "unknown band type: %d\n", band);
        return 0;
    }

    lmt = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
    sar = rtw89_query_sar(rtwdev, freq);

    return min(lmt, sar);
#else
    return lmt;
#endif
}
EXPORT_SYMBOL(rtw89_phy_read_txpwr_limit);

#define __fill_txpwr_limit_nonbf_bf(ptr, band, bw, ntx, rs, ch) \
    do { \
        u8 __i; \
        for (__i = 0; __i < RTW89_BF_NUM; __i++) \
            ptr[__i] = rtw89_phy_read_txpwr_limit(rtwdev, \
                                                  band, \
                                                  bw, ntx, \
                                                  rs, __i, \
                                                  (ch)); \
    } while (0)
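
/* The helper macro fills one limit slot per beamforming state
 * (RTW89_BF_NUM entries), passing the loop index through as the bf
 * argument, so every __fill_txpwr_limit_nonbf_bf() call below yields a
 * non-BF/BF pair for the given rate section and channel.
 */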
static void rtw89_phy_fill_txpwr_limit_20m(struct rtw89_dev *rtwdev,
                                           struct rtw89_txpwr_limit *lmt,
                                           u8 band, u8 ntx, u8 ch)
{
    __fill_txpwr_limit_nonbf_bf(lmt->cck_20m, band, RTW89_CHANNEL_WIDTH_20,
                                ntx, RTW89_RS_CCK, ch);
    __fill_txpwr_limit_nonbf_bf(lmt->cck_40m, band, RTW89_CHANNEL_WIDTH_40,
                                ntx, RTW89_RS_CCK, ch);
    __fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20,
                                ntx, RTW89_RS_OFDM, ch);
    __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band,
                                RTW89_CHANNEL_WIDTH_20,
                                ntx, RTW89_RS_MCS, ch);
}

static void rtw89_phy_fill_txpwr_limit_40m(struct rtw89_dev *rtwdev,
                                           struct rtw89_txpwr_limit *lmt,
                                           u8 band, u8 ntx, u8 ch, u8 pri_ch)
{
    __fill_txpwr_limit_nonbf_bf(lmt->cck_20m, band, RTW89_CHANNEL_WIDTH_20,
                                ntx, RTW89_RS_CCK, ch - 2);
    __fill_txpwr_limit_nonbf_bf(lmt->cck_40m, band, RTW89_CHANNEL_WIDTH_40,
                                ntx, RTW89_RS_CCK, ch);
    __fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20,
                                ntx, RTW89_RS_OFDM, pri_ch);
    __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band,
                                RTW89_CHANNEL_WIDTH_20,
                                ntx, RTW89_RS_MCS, ch - 2);
    __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], band,
                                RTW89_CHANNEL_WIDTH_20,
                                ntx, RTW89_RS_MCS, ch + 2);
    __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], band,
                                RTW89_CHANNEL_WIDTH_40,
                                ntx, RTW89_RS_MCS, ch);
}

static void rtw89_phy_fill_txpwr_limit_80m(struct rtw89_dev *rtwdev,
                                           struct rtw89_txpwr_limit *lmt,
                                           u8 band, u8 ntx, u8 ch, u8 pri_ch)
{
    s8 val_0p5_n[RTW89_BF_NUM];
    s8 val_0p5_p[RTW89_BF_NUM];
    u8 i;

    __fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20,
                                ntx, RTW89_RS_OFDM, pri_ch);
    __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band,
                                RTW89_CHANNEL_WIDTH_20,
                                ntx, RTW89_RS_MCS, ch - 6);
    __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], band,
                                RTW89_CHANNEL_WIDTH_20,
                                ntx, RTW89_RS_MCS, ch - 2);
    __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[2], band,
                                RTW89_CHANNEL_WIDTH_20,
                                ntx, RTW89_RS_MCS, ch + 2);
    __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[3], band,
                                RTW89_CHANNEL_WIDTH_20,
                                ntx, RTW89_RS_MCS, ch + 6);
    __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], band,
                                RTW89_CHANNEL_WIDTH_40,
                                ntx, RTW89_RS_MCS, ch - 4);
    __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[1], band,
                                RTW89_CHANNEL_WIDTH_40,
                                ntx, RTW89_RS_MCS, ch + 4);
    __fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[0], band,
                                RTW89_CHANNEL_WIDTH_80,
                                ntx, RTW89_RS_MCS, ch);
    __fill_txpwr_limit_nonbf_bf(val_0p5_n, band, RTW89_CHANNEL_WIDTH_40,
                                ntx, RTW89_RS_MCS, ch - 4);
    __fill_txpwr_limit_nonbf_bf(val_0p5_p, band, RTW89_CHANNEL_WIDTH_40,
                                ntx, RTW89_RS_MCS, ch + 4);

    for (i = 0; i < RTW89_BF_NUM; i++)
        lmt->mcs_40m_0p5[i] = min_t(s8, val_0p5_n[i], val_0p5_p[i]);
}
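
/* An 80 MHz channel centered at ch spans 20 MHz subchannels at
 * ch-6/-2/+2/+6 and 40 MHz halves at ch-4/+4; the 0p5 entry takes the
 * minimum over the two 40 MHz halves so one shared limit is safe for
 * either placement.
 */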
static void rtw89_phy_fill_txpwr_limit_160m(struct rtw89_dev *rtwdev,
                                            struct rtw89_txpwr_limit *lmt,
                                            u8 band, u8 ntx, u8 ch, u8 pri_ch)
{
    s8 val_0p5_n[RTW89_BF_NUM];
    s8 val_0p5_p[RTW89_BF_NUM];
    s8 val_2p5_n[RTW89_BF_NUM];
    s8 val_2p5_p[RTW89_BF_NUM];
    u8 i;

    /* fill ofdm section */
    __fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20,
                                ntx, RTW89_RS_OFDM, pri_ch);

    /* fill mcs 20m section */
    __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band,
                                RTW89_CHANNEL_WIDTH_20,
                                ntx, RTW89_RS_MCS, ch - 14);
    __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], band,
                                RTW89_CHANNEL_WIDTH_20,
                                ntx, RTW89_RS_MCS, ch - 10);
    __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[2], band,
                                RTW89_CHANNEL_WIDTH_20,
                                ntx, RTW89_RS_MCS, ch - 6);
    __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[3], band,
                                RTW89_CHANNEL_WIDTH_20,
                                ntx, RTW89_RS_MCS, ch - 2);
    __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[4], band,
                                RTW89_CHANNEL_WIDTH_20,
                                ntx, RTW89_RS_MCS, ch + 2);
    __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[5], band,
                                RTW89_CHANNEL_WIDTH_20,
                                ntx, RTW89_RS_MCS, ch + 6);
    __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[6], band,
                                RTW89_CHANNEL_WIDTH_20,
                                ntx, RTW89_RS_MCS, ch + 10);
    __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[7], band,
                                RTW89_CHANNEL_WIDTH_20,
                                ntx, RTW89_RS_MCS, ch + 14);

    /* fill mcs 40m section */
    __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], band,
                                RTW89_CHANNEL_WIDTH_40,
                                ntx, RTW89_RS_MCS, ch - 12);
    __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[1], band,
                                RTW89_CHANNEL_WIDTH_40,
                                ntx, RTW89_RS_MCS, ch - 4);
    __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[2], band,
                                RTW89_CHANNEL_WIDTH_40,
                                ntx, RTW89_RS_MCS, ch + 4);
    __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[3], band,
                                RTW89_CHANNEL_WIDTH_40,
                                ntx, RTW89_RS_MCS, ch + 12);

    /* fill mcs 80m section */
    __fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[0], band,
                                RTW89_CHANNEL_WIDTH_80,
                                ntx, RTW89_RS_MCS, ch - 8);
    __fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[1], band,
                                RTW89_CHANNEL_WIDTH_80,
                                ntx, RTW89_RS_MCS, ch + 8);

    /* fill mcs 160m section */
    __fill_txpwr_limit_nonbf_bf(lmt->mcs_160m, band,
                                RTW89_CHANNEL_WIDTH_160,
                                ntx, RTW89_RS_MCS, ch);

    /* fill mcs 40m 0p5 section */
    __fill_txpwr_limit_nonbf_bf(val_0p5_n, band, RTW89_CHANNEL_WIDTH_40,
                                ntx, RTW89_RS_MCS, ch - 4);
    __fill_txpwr_limit_nonbf_bf(val_0p5_p, band, RTW89_CHANNEL_WIDTH_40,
                                ntx, RTW89_RS_MCS, ch + 4);

    for (i = 0; i < RTW89_BF_NUM; i++)
        lmt->mcs_40m_0p5[i] = min_t(s8, val_0p5_n[i], val_0p5_p[i]);

    /* fill mcs 40m 2p5 section */
    __fill_txpwr_limit_nonbf_bf(val_2p5_n, band, RTW89_CHANNEL_WIDTH_40,
                                ntx, RTW89_RS_MCS, ch - 8);
    __fill_txpwr_limit_nonbf_bf(val_2p5_p, band, RTW89_CHANNEL_WIDTH_40,
                                ntx, RTW89_RS_MCS, ch + 8);

    for (i = 0; i < RTW89_BF_NUM; i++)
        lmt->mcs_40m_2p5[i] = min_t(s8, val_2p5_n[i], val_2p5_p[i]);
}

static
void rtw89_phy_fill_txpwr_limit(struct rtw89_dev *rtwdev,
                                const struct rtw89_chan *chan,
                                struct rtw89_txpwr_limit *lmt,
                                u8 ntx)
{
    u8 band = chan->band_type;
    u8 pri_ch = chan->primary_channel;
    u8 ch = chan->channel;
    u8 bw = chan->band_width;

    memset(lmt, 0, sizeof(*lmt));

    switch (bw) {
    case RTW89_CHANNEL_WIDTH_20:
        rtw89_phy_fill_txpwr_limit_20m(rtwdev, lmt, band, ntx, ch);
        break;
    case RTW89_CHANNEL_WIDTH_40:
        rtw89_phy_fill_txpwr_limit_40m(rtwdev, lmt, band, ntx, ch,
                                       pri_ch);
        break;
    case RTW89_CHANNEL_WIDTH_80:
        rtw89_phy_fill_txpwr_limit_80m(rtwdev, lmt, band, ntx, ch,
                                       pri_ch);
        break;
    case RTW89_CHANNEL_WIDTH_160:
        rtw89_phy_fill_txpwr_limit_160m(rtwdev, lmt, band, ntx, ch,
                                        pri_ch);
        break;
    }
}

static s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band,
                                        u8 ru, u8 ntx, u8 ch)
{
    const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms;
    const struct rtw89_txpwr_rule_2ghz *rule_2ghz = &rfe_parms->rule_2ghz;
    const struct rtw89_txpwr_rule_5ghz *rule_5ghz = &rfe_parms->rule_5ghz;
    const struct rtw89_txpwr_rule_6ghz *rule_6ghz = &rfe_parms->rule_6ghz;
    struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
    u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch);
    u8 regd = rtw89_regd_get(rtwdev, band);
    u8 reg6 = regulatory->reg_6ghz_power;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
    enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
    u32 freq = ieee80211_channel_to_frequency(ch, nl_band);
    s8 lmt_ru = 0, sar;
#else
    s8 lmt_ru = 0;
#endif

    switch (band) {
    case RTW89_BAND_2G:
        lmt_ru = (*rule_2ghz->lmt_ru)[ru][ntx][regd][ch_idx];
        if (lmt_ru)
            break;

        lmt_ru = (*rule_2ghz->lmt_ru)[ru][ntx][RTW89_WW][ch_idx];
        break;
    case RTW89_BAND_5G:
        lmt_ru = (*rule_5ghz->lmt_ru)[ru][ntx][regd][ch_idx];
        if (lmt_ru)
            break;

        lmt_ru = (*rule_5ghz->lmt_ru)[ru][ntx][RTW89_WW][ch_idx];
        break;
    case RTW89_BAND_6G:
        lmt_ru = (*rule_6ghz->lmt_ru)[ru][ntx][regd][reg6][ch_idx];
        if (lmt_ru)
            break;

        lmt_ru = (*rule_6ghz->lmt_ru)[ru][ntx][RTW89_WW]
                                     [RTW89_REG_6GHZ_POWER_DFLT]
                                     [ch_idx];
        break;
    default:
        rtw89_warn(rtwdev, "unknown band type: %d\n", band);
        return 0;
    }

    lmt_ru = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt_ru);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
    sar = rtw89_query_sar(rtwdev, freq);

    return min(lmt_ru, sar);
#else
    return lmt_ru;
#endif
}
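
/* The RU lookup mirrors rtw89_phy_read_txpwr_limit(): a zero entry for
 * the current regulatory domain falls back to the worldwide (RTW89_WW)
 * row, the result is rescaled to MAC units, and, on kernels with SAR
 * support, clamped against the SAR limit for the channel's frequency.
 */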
static void
rtw89_phy_fill_txpwr_limit_ru_20m(struct rtw89_dev *rtwdev,
                                  struct rtw89_txpwr_limit_ru *lmt_ru,
                                  u8 band, u8 ntx, u8 ch)
{
    lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
                                                    RTW89_RU26,
                                                    ntx, ch);
    lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
                                                    RTW89_RU52,
                                                    ntx, ch);
    lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
                                                     RTW89_RU106,
                                                     ntx, ch);
}

static void
rtw89_phy_fill_txpwr_limit_ru_40m(struct rtw89_dev *rtwdev,
                                  struct rtw89_txpwr_limit_ru *lmt_ru,
                                  u8 band, u8 ntx, u8 ch)
{
    lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
                                                    RTW89_RU26,
                                                    ntx, ch - 2);
    lmt_ru->ru26[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
                                                    RTW89_RU26,
                                                    ntx, ch + 2);
    lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
                                                    RTW89_RU52,
                                                    ntx, ch - 2);
    lmt_ru->ru52[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
                                                    RTW89_RU52,
                                                    ntx, ch + 2);
    lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
                                                     RTW89_RU106,
                                                     ntx, ch - 2);
    lmt_ru->ru106[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
                                                     RTW89_RU106,
                                                     ntx, ch + 2);
}

static void
rtw89_phy_fill_txpwr_limit_ru_80m(struct rtw89_dev *rtwdev,
                                  struct rtw89_txpwr_limit_ru *lmt_ru,
                                  u8 band, u8 ntx, u8 ch)
{
    lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
                                                    RTW89_RU26,
                                                    ntx, ch - 6);
    lmt_ru->ru26[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
                                                    RTW89_RU26,
                                                    ntx, ch - 2);
    lmt_ru->ru26[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
                                                    RTW89_RU26,
                                                    ntx, ch + 2);
    lmt_ru->ru26[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
                                                    RTW89_RU26,
                                                    ntx, ch + 6);
    lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
                                                    RTW89_RU52,
                                                    ntx, ch - 6);
    lmt_ru->ru52[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
                                                    RTW89_RU52,
                                                    ntx, ch - 2);
    lmt_ru->ru52[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
                                                    RTW89_RU52,
                                                    ntx, ch + 2);
    lmt_ru->ru52[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
                                                    RTW89_RU52,
                                                    ntx, ch + 6);
    lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
                                                     RTW89_RU106,
                                                     ntx, ch - 6);
    lmt_ru->ru106[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
                                                     RTW89_RU106,
                                                     ntx, ch - 2);
    lmt_ru->ru106[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
                                                     RTW89_RU106,
                                                     ntx, ch + 2);
    lmt_ru->ru106[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
                                                     RTW89_RU106,
                                                     ntx, ch + 6);
}

static void
rtw89_phy_fill_txpwr_limit_ru_160m(struct rtw89_dev *rtwdev,
                                   struct rtw89_txpwr_limit_ru *lmt_ru,
                                   u8 band, u8 ntx, u8 ch)
{
    static const int ofst[] = { -14, -10, -6, -2, 2, 6, 10, 14 };
    int i;

    for (i = 0; i < RTW89_RU_SEC_NUM; i++) {
        lmt_ru->ru26[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
                                                        RTW89_RU26,
                                                        ntx,
                                                        ch + ofst[i]);
        lmt_ru->ru52[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
                                                        RTW89_RU52,
                                                        ntx,
                                                        ch + ofst[i]);
        lmt_ru->ru106[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
                                                         RTW89_RU106,
                                                         ntx,
                                                         ch + ofst[i]);
    }
}
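
/* For 160 MHz the eight 20 MHz subchannel centers sit at +/-2, +/-6,
 * +/-10 and +/-14 from the channel center, which is exactly the ofst[]
 * table iterated above; one loop fills ru26/ru52/ru106 instead of
 * unrolling 24 calls the way the narrower variants do.
 */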
static
void rtw89_phy_fill_txpwr_limit_ru(struct rtw89_dev *rtwdev,
                                   const struct rtw89_chan *chan,
                                   struct rtw89_txpwr_limit_ru *lmt_ru,
                                   u8 ntx)
{
    u8 band = chan->band_type;
    u8 ch = chan->channel;
    u8 bw = chan->band_width;

    memset(lmt_ru, 0, sizeof(*lmt_ru));

    switch (bw) {
    case RTW89_CHANNEL_WIDTH_20:
        rtw89_phy_fill_txpwr_limit_ru_20m(rtwdev, lmt_ru, band, ntx,
                                          ch);
        break;
    case RTW89_CHANNEL_WIDTH_40:
        rtw89_phy_fill_txpwr_limit_ru_40m(rtwdev, lmt_ru, band, ntx,
                                          ch);
        break;
    case RTW89_CHANNEL_WIDTH_80:
        rtw89_phy_fill_txpwr_limit_ru_80m(rtwdev, lmt_ru, band, ntx,
                                          ch);
        break;
    case RTW89_CHANNEL_WIDTH_160:
        rtw89_phy_fill_txpwr_limit_ru_160m(rtwdev, lmt_ru, band, ntx,
                                           ch);
        break;
    }
}

void rtw89_phy_set_txpwr_byrate(struct rtw89_dev *rtwdev,
                                const struct rtw89_chan *chan,
                                enum rtw89_phy_idx phy_idx)
{
    u8 max_nss_num = rtwdev->chip->rf_path_num;
    static const u8 rs[] = {
        RTW89_RS_CCK,
        RTW89_RS_OFDM,
        RTW89_RS_MCS,
        RTW89_RS_HEDCM,
    };
    struct rtw89_rate_desc cur = {};
    u8 band = chan->band_type;
    u8 ch = chan->channel;
    u32 addr, val;
    s8 v[4] = {};
    u8 i;

    rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
                "[TXPWR] set txpwr byrate with ch=%d\n", ch);

    BUILD_BUG_ON(rtw89_rs_idx_num[RTW89_RS_CCK] % 4);
    BUILD_BUG_ON(rtw89_rs_idx_num[RTW89_RS_OFDM] % 4);
    BUILD_BUG_ON(rtw89_rs_idx_num[RTW89_RS_MCS] % 4);
    BUILD_BUG_ON(rtw89_rs_idx_num[RTW89_RS_HEDCM] % 4);

    addr = R_AX_PWR_BY_RATE;
    for (cur.nss = 0; cur.nss < max_nss_num; cur.nss++) {
        for (i = 0; i < ARRAY_SIZE(rs); i++) {
            if (cur.nss >= rtw89_rs_nss_num[rs[i]])
                continue;

            cur.rs = rs[i];
            for (cur.idx = 0; cur.idx < rtw89_rs_idx_num[rs[i]];
                 cur.idx++) {
                v[cur.idx % 4] =
                    rtw89_phy_read_txpwr_byrate(rtwdev,
                                                band, 0,
                                                &cur);

                if ((cur.idx + 1) % 4)
                    continue;

                val = FIELD_PREP(GENMASK(7, 0), v[0]) |
                      FIELD_PREP(GENMASK(15, 8), v[1]) |
                      FIELD_PREP(GENMASK(23, 16), v[2]) |
                      FIELD_PREP(GENMASK(31, 24), v[3]);

                rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr,
                                        val);
                addr += 4;
            }
        }
    }
}
EXPORT_SYMBOL(rtw89_phy_set_txpwr_byrate);
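
/* Register writes are batched four rates at a time: the BUILD_BUG_ONs
 * above guarantee each rate section is a multiple of four entries, so
 * v[0..3] can always be packed into one 32-bit R_AX_PWR_BY_RATE word
 * (byte 0 holding the lowest rate index) before addr advances by 4.
 */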
void rtw89_phy_set_txpwr_offset(struct rtw89_dev *rtwdev,
                                const struct rtw89_chan *chan,
                                enum rtw89_phy_idx phy_idx)
{
    struct rtw89_rate_desc desc = {
        .nss = RTW89_NSS_1,
        .rs = RTW89_RS_OFFSET,
    };
    u8 band = chan->band_type;
    s8 v[RTW89_RATE_OFFSET_NUM_AX] = {};
    u32 val;

    rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr offset\n");

    for (desc.idx = 0; desc.idx < RTW89_RATE_OFFSET_NUM_AX; desc.idx++)
        v[desc.idx] = rtw89_phy_read_txpwr_byrate(rtwdev, band, 0, &desc);

    BUILD_BUG_ON(RTW89_RATE_OFFSET_NUM_AX != 5);
    val = FIELD_PREP(GENMASK(3, 0), v[0]) |
          FIELD_PREP(GENMASK(7, 4), v[1]) |
          FIELD_PREP(GENMASK(11, 8), v[2]) |
          FIELD_PREP(GENMASK(15, 12), v[3]) |
          FIELD_PREP(GENMASK(19, 16), v[4]);

    rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_AX_PWR_RATE_OFST_CTRL,
                                 GENMASK(19, 0), val);
}
EXPORT_SYMBOL(rtw89_phy_set_txpwr_offset);

void rtw89_phy_set_txpwr_limit(struct rtw89_dev *rtwdev,
                               const struct rtw89_chan *chan,
                               enum rtw89_phy_idx phy_idx)
{
    u8 max_ntx_num = rtwdev->chip->rf_path_num;
    struct rtw89_txpwr_limit lmt;
    u8 ch = chan->channel;
    u8 bw = chan->band_width;
    const s8 *ptr;
    u32 addr, val;
    u8 i, j;

    rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
                "[TXPWR] set txpwr limit with ch=%d bw=%d\n", ch, bw);

    BUILD_BUG_ON(sizeof(struct rtw89_txpwr_limit) !=
                 RTW89_TXPWR_LMT_PAGE_SIZE);

    addr = R_AX_PWR_LMT;
    for (i = 0; i < max_ntx_num; i++) {
        rtw89_phy_fill_txpwr_limit(rtwdev, chan, &lmt, i);

        ptr = (s8 *)&lmt;
        for (j = 0; j < RTW89_TXPWR_LMT_PAGE_SIZE;
             j += 4, addr += 4, ptr += 4) {
            val = FIELD_PREP(GENMASK(7, 0), ptr[0]) |
                  FIELD_PREP(GENMASK(15, 8), ptr[1]) |
                  FIELD_PREP(GENMASK(23, 16), ptr[2]) |
                  FIELD_PREP(GENMASK(31, 24), ptr[3]);

            rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
        }
    }
}
EXPORT_SYMBOL(rtw89_phy_set_txpwr_limit);

void rtw89_phy_set_txpwr_limit_ru(struct rtw89_dev *rtwdev,
                                  const struct rtw89_chan *chan,
                                  enum rtw89_phy_idx phy_idx)
{
    u8 max_ntx_num = rtwdev->chip->rf_path_num;
    struct rtw89_txpwr_limit_ru lmt_ru;
    u8 ch = chan->channel;
    u8 bw = chan->band_width;
    const s8 *ptr;
    u32 addr, val;
    u8 i, j;

    rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
                "[TXPWR] set txpwr limit ru with ch=%d bw=%d\n", ch, bw);

    BUILD_BUG_ON(sizeof(struct rtw89_txpwr_limit_ru) !=
                 RTW89_TXPWR_LMT_RU_PAGE_SIZE);

    addr = R_AX_PWR_RU_LMT;
    for (i = 0; i < max_ntx_num; i++) {
        rtw89_phy_fill_txpwr_limit_ru(rtwdev, chan, &lmt_ru, i);

        ptr = (s8 *)&lmt_ru;
        for (j = 0; j < RTW89_TXPWR_LMT_RU_PAGE_SIZE;
             j += 4, addr += 4, ptr += 4) {
            val = FIELD_PREP(GENMASK(7, 0), ptr[0]) |
                  FIELD_PREP(GENMASK(15, 8), ptr[1]) |
                  FIELD_PREP(GENMASK(23, 16), ptr[2]) |
                  FIELD_PREP(GENMASK(31, 24), ptr[3]);

            rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
        }
    }
}
EXPORT_SYMBOL(rtw89_phy_set_txpwr_limit_ru);
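
/* Both limit writers stream the fill structs into hardware verbatim:
 * the BUILD_BUG_ONs pin struct rtw89_txpwr_limit(_ru) to exactly one
 * register page, so walking the struct as raw s8 bytes and packing
 * four per write covers the whole register block without per-field
 * code.
 */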
struct rtw89_phy_iter_ra_data {
    struct rtw89_dev *rtwdev;
    struct sk_buff *c2h;
};

static void rtw89_phy_c2h_ra_rpt_iter(void *data, struct ieee80211_sta *sta)
{
    struct rtw89_phy_iter_ra_data *ra_data = (struct rtw89_phy_iter_ra_data *)data;
    struct rtw89_dev *rtwdev = ra_data->rtwdev;
    struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
    const struct rtw89_c2h_ra_rpt *c2h =
        (const struct rtw89_c2h_ra_rpt *)ra_data->c2h->data;
    struct rtw89_ra_report *ra_report = &rtwsta->ra_report;
    const struct rtw89_chip_info *chip = rtwdev->chip;
    bool format_v1 = chip->chip_gen == RTW89_CHIP_BE;
    u8 mode, rate, bw, giltf, mac_id;
    u16 legacy_bitrate;
    bool valid;
    u8 mcs = 0;
    u8 t;

    mac_id = le32_get_bits(c2h->w2, RTW89_C2H_RA_RPT_W2_MACID);
    if (mac_id != rtwsta->mac_id)
        return;

    rate = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_MCSNSS);
    bw = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_BW);
    giltf = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_GILTF);
    mode = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_MD_SEL);

    if (format_v1) {
        t = le32_get_bits(c2h->w2, RTW89_C2H_RA_RPT_W2_MCSNSS_B7);
        rate |= u8_encode_bits(t, BIT(7));
        t = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_BW_B2);
        bw |= u8_encode_bits(t, BIT(2));
        t = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_MD_SEL_B2);
        mode |= u8_encode_bits(t, BIT(2));
    }

    if (mode == RTW89_RA_RPT_MODE_LEGACY) {
        valid = rtw89_ra_report_to_bitrate(rtwdev, rate, &legacy_bitrate);
        if (!valid)
            return;
    }

    memset(&ra_report->txrate, 0, sizeof(ra_report->txrate));

    switch (mode) {
    case RTW89_RA_RPT_MODE_LEGACY:
        ra_report->txrate.legacy = legacy_bitrate;
        break;
    case RTW89_RA_RPT_MODE_HT:
        ra_report->txrate.flags |= RATE_INFO_FLAGS_MCS;
        if (RTW89_CHK_FW_FEATURE(OLD_HT_RA_FORMAT, &rtwdev->fw))
            rate = RTW89_MK_HT_RATE(FIELD_GET(RTW89_RA_RATE_MASK_NSS, rate),
                                    FIELD_GET(RTW89_RA_RATE_MASK_MCS, rate));
        else
            rate = FIELD_GET(RTW89_RA_RATE_MASK_HT_MCS, rate);
        ra_report->txrate.mcs = rate;
        if (giltf)
            ra_report->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
        mcs = ra_report->txrate.mcs & 0x07;
        break;
    case RTW89_RA_RPT_MODE_VHT:
        ra_report->txrate.flags |= RATE_INFO_FLAGS_VHT_MCS;
        ra_report->txrate.mcs = format_v1 ?
            u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS_V1) :
            u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS);
        ra_report->txrate.nss = format_v1 ?
            u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS_V1) + 1 :
            u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS) + 1;
        if (giltf)
            ra_report->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
        mcs = ra_report->txrate.mcs;
        break;
    case RTW89_RA_RPT_MODE_HE:
        ra_report->txrate.flags |= RATE_INFO_FLAGS_HE_MCS;
        ra_report->txrate.mcs = format_v1 ?
            u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS_V1) :
            u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS);
        ra_report->txrate.nss = format_v1 ?
            u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS_V1) + 1 :
            u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS) + 1;
        if (giltf == RTW89_GILTF_2XHE08 || giltf == RTW89_GILTF_1XHE08)
            ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_0_8;
        else if (giltf == RTW89_GILTF_2XHE16 || giltf == RTW89_GILTF_1XHE16)
            ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_1_6;
        else
            ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_3_2;
        mcs = ra_report->txrate.mcs;
        break;
    }

    ra_report->txrate.bw = rtw89_hw_to_rate_info_bw(bw);
    ra_report->bit_rate = cfg80211_calculate_bitrate(&ra_report->txrate);
    ra_report->hw_rate = format_v1 ?
        u16_encode_bits(mode, RTW89_HW_RATE_V1_MASK_MOD) |
        u16_encode_bits(rate, RTW89_HW_RATE_V1_MASK_VAL) :
        u16_encode_bits(mode, RTW89_HW_RATE_MASK_MOD) |
        u16_encode_bits(rate, RTW89_HW_RATE_MASK_VAL);
    ra_report->might_fallback_legacy = mcs <= 2;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
    sta->deflink.agg.max_rc_amsdu_len = get_max_amsdu_len(rtwdev, ra_report);
    rtwsta->max_agg_wait = sta->deflink.agg.max_rc_amsdu_len / 1500 - 1;
#else
    sta->max_rc_amsdu_len = get_max_amsdu_len(rtwdev, ra_report);
    rtwsta->max_agg_wait = sta->max_rc_amsdu_len / 1500 - 1;
#endif
}

static void
rtw89_phy_c2h_ra_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
    struct rtw89_phy_iter_ra_data ra_data;

    ra_data.rtwdev = rtwdev;
    ra_data.c2h = c2h;
    ieee80211_iterate_stations_atomic(rtwdev->hw,
                                      rtw89_phy_c2h_ra_rpt_iter,
                                      &ra_data);
}
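
/* RA reports arrive keyed by MACID, so the C2H payload is matched
 * against every known station through
 * ieee80211_iterate_stations_atomic(); only the station whose
 * rtwsta->mac_id matches decodes the rate fields into cfg80211
 * rate_info and the cached hw_rate.
 */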
static
void (* const rtw89_phy_c2h_ra_handler[])(struct rtw89_dev *rtwdev,
                                          struct sk_buff *c2h, u32 len) = {
    [RTW89_PHY_C2H_FUNC_STS_RPT] = rtw89_phy_c2h_ra_rpt,
    [RTW89_PHY_C2H_FUNC_MU_GPTBL_RPT] = NULL,
    [RTW89_PHY_C2H_FUNC_TXSTS] = NULL,
};

void rtw89_phy_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
                          u32 len, u8 class, u8 func)
{
    void (*handler)(struct rtw89_dev *rtwdev,
                    struct sk_buff *c2h, u32 len) = NULL;

    switch (class) {
    case RTW89_PHY_C2H_CLASS_RA:
        if (func < RTW89_PHY_C2H_FUNC_RA_MAX)
            handler = rtw89_phy_c2h_ra_handler[func];
        break;
    case RTW89_PHY_C2H_CLASS_DM:
        if (func == RTW89_PHY_C2H_DM_FUNC_LOWRT_RTY)
            return;
        fallthrough;
    default:
        rtw89_info(rtwdev, "c2h class %d not support\n", class);
        return;
    }
    if (!handler) {
        rtw89_info(rtwdev, "c2h class %d func %d not support\n", class,
                   func);
        return;
    }
    handler(rtwdev, skb, len);
}

static u8 rtw89_phy_cfo_get_xcap_reg(struct rtw89_dev *rtwdev, bool sc_xo)
{
    const struct rtw89_xtal_info *xtal = rtwdev->chip->xtal_info;
    u32 reg_mask;

    if (sc_xo)
        reg_mask = xtal->sc_xo_mask;
    else
        reg_mask = xtal->sc_xi_mask;

    return (u8)rtw89_read32_mask(rtwdev, xtal->xcap_reg, reg_mask);
}

static void rtw89_phy_cfo_set_xcap_reg(struct rtw89_dev *rtwdev, bool sc_xo,
                                       u8 val)
{
    const struct rtw89_xtal_info *xtal = rtwdev->chip->xtal_info;
    u32 reg_mask;

    if (sc_xo)
        reg_mask = xtal->sc_xo_mask;
    else
        reg_mask = xtal->sc_xi_mask;

    rtw89_write32_mask(rtwdev, xtal->xcap_reg, reg_mask, val);
}

static void rtw89_phy_cfo_set_crystal_cap(struct rtw89_dev *rtwdev,
                                          u8 crystal_cap, bool force)
{
    struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
    const struct rtw89_chip_info *chip = rtwdev->chip;
    u8 sc_xi_val, sc_xo_val;

    if (!force && cfo->crystal_cap == crystal_cap)
        return;
    crystal_cap = clamp_t(u8, crystal_cap, 0, 127);
    if (chip->chip_id == RTL8852A || chip->chip_id == RTL8851B) {
        rtw89_phy_cfo_set_xcap_reg(rtwdev, true, crystal_cap);
        rtw89_phy_cfo_set_xcap_reg(rtwdev, false, crystal_cap);
        sc_xo_val = rtw89_phy_cfo_get_xcap_reg(rtwdev, true);
        sc_xi_val = rtw89_phy_cfo_get_xcap_reg(rtwdev, false);
    } else {
        rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XO,
                                crystal_cap, XTAL_SC_XO_MASK);
        rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XI,
                                crystal_cap, XTAL_SC_XI_MASK);
        rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XO, &sc_xo_val);
        rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XI, &sc_xi_val);
    }
    cfo->crystal_cap = sc_xi_val;
    cfo->x_cap_ofst = (s8)((int)cfo->crystal_cap - cfo->def_x_cap);

    rtw89_debug(rtwdev, RTW89_DBG_CFO, "Set sc_xi=0x%x\n", sc_xi_val);
    rtw89_debug(rtwdev, RTW89_DBG_CFO, "Set sc_xo=0x%x\n", sc_xo_val);
    rtw89_debug(rtwdev, RTW89_DBG_CFO, "Get xcap_ofst=%d\n",
                cfo->x_cap_ofst);
    rtw89_debug(rtwdev, RTW89_DBG_CFO, "Set xcap OK\n");
}
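
/* The same cap code (clamped to 0-127) is written to both the XO and
 * XI banks, then read back; the read sc_xi value becomes the tracked
 * crystal_cap, so x_cap_ofst always reflects what the hardware
 * actually latched rather than what was requested.
 */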
static void rtw89_phy_cfo_reset(struct rtw89_dev *rtwdev)
{
    struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
    u8 cap;

    cfo->def_x_cap = cfo->crystal_cap_default & B_AX_XTAL_SC_MASK;
    cfo->is_adjust = false;
    if (cfo->crystal_cap == cfo->def_x_cap)
        return;
    cap = cfo->crystal_cap;
    cap += (cap > cfo->def_x_cap ? -1 : 1);
    rtw89_phy_cfo_set_crystal_cap(rtwdev, cap, false);
    rtw89_debug(rtwdev, RTW89_DBG_CFO,
                "(0x%x) approach to dflt_val=(0x%x)\n", cfo->crystal_cap,
                cfo->def_x_cap);
}

static void rtw89_dcfo_comp(struct rtw89_dev *rtwdev, s32 curr_cfo)
{
    const struct rtw89_reg_def *dcfo_comp = rtwdev->chip->dcfo_comp;
    bool is_linked = rtwdev->total_sta_assoc > 0;
    s32 cfo_avg_312;
    s32 dcfo_comp_val;
    int sign;

    if (!is_linked) {
        rtw89_debug(rtwdev, RTW89_DBG_CFO, "DCFO: is_linked=%d\n",
                    is_linked);
        return;
    }
    rtw89_debug(rtwdev, RTW89_DBG_CFO, "DCFO: curr_cfo=%d\n", curr_cfo);
    if (curr_cfo == 0)
        return;
    dcfo_comp_val = rtw89_phy_read32_mask(rtwdev, R_DCFO, B_DCFO);
    sign = curr_cfo > 0 ? 1 : -1;
    cfo_avg_312 = curr_cfo / 625 + sign * dcfo_comp_val;
    rtw89_debug(rtwdev, RTW89_DBG_CFO, "avg_cfo_312=%d step\n", cfo_avg_312);
    if (rtwdev->chip->chip_id == RTL8852A && rtwdev->hal.cv == CHIP_CBV)
        cfo_avg_312 = -cfo_avg_312;
    rtw89_phy_set_phy_regs(rtwdev, dcfo_comp->addr, dcfo_comp->mask,
                           cfo_avg_312);
}

static void rtw89_dcfo_comp_init(struct rtw89_dev *rtwdev)
{
    const struct rtw89_chip_info *chip = rtwdev->chip;

    rtw89_phy_set_phy_regs(rtwdev, R_DCFO_OPT, B_DCFO_OPT_EN, 1);
    rtw89_phy_set_phy_regs(rtwdev, R_DCFO_WEIGHT, B_DCFO_WEIGHT_MSK, 8);
    if (chip->cfo_hw_comp)
        rtw89_write32_mask(rtwdev, R_AX_PWR_UL_CTRL2,
                           B_AX_PWR_UL_CFO_MASK, 0x6);
    else
        rtw89_write32_clr(rtwdev, R_AX_PWR_UL_CTRL2, B_AX_PWR_UL_CFO_MASK);
}

static void rtw89_phy_cfo_init(struct rtw89_dev *rtwdev)
{
    struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
    struct rtw89_efuse *efuse = &rtwdev->efuse;

    cfo->crystal_cap_default = efuse->xtal_cap & B_AX_XTAL_SC_MASK;
    cfo->crystal_cap = cfo->crystal_cap_default;
    cfo->def_x_cap = cfo->crystal_cap;
    cfo->x_cap_ub = min_t(int, cfo->def_x_cap + CFO_BOUND, 0x7f);
    cfo->x_cap_lb = max_t(int, cfo->def_x_cap - CFO_BOUND, 0x1);
    cfo->is_adjust = false;
    cfo->divergence_lock_en = false;
    cfo->x_cap_ofst = 0;
    cfo->lock_cnt = 0;
    cfo->rtw89_multi_cfo_mode = RTW89_TP_BASED_AVG_MODE;
    cfo->apply_compensation = false;
    cfo->residual_cfo_acc = 0;
    rtw89_debug(rtwdev, RTW89_DBG_CFO, "Default xcap=%0x\n",
                cfo->crystal_cap_default);
    rtw89_phy_cfo_set_crystal_cap(rtwdev, cfo->crystal_cap_default, true);
    rtw89_phy_set_phy_regs(rtwdev, R_DCFO, B_DCFO, 1);
    rtw89_dcfo_comp_init(rtwdev);
    cfo->cfo_timer_ms = 2000;
    cfo->cfo_trig_by_timer_en = false;
    cfo->phy_cfo_trk_cnt = 0;
    cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL;
    cfo->cfo_ul_ofdma_acc_mode = RTW89_CFO_UL_OFDMA_ACC_ENABLE;
}

static void rtw89_phy_cfo_crystal_cap_adjust(struct rtw89_dev *rtwdev,
                                             s32 curr_cfo)
{
    struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
    s8 crystal_cap = cfo->crystal_cap;
    s32 cfo_abs = abs(curr_cfo);
    int sign;

    if (!cfo->is_adjust) {
        if (cfo_abs > CFO_TRK_ENABLE_TH)
            cfo->is_adjust = true;
    } else {
        if (cfo_abs < CFO_TRK_STOP_TH)
            cfo->is_adjust = false;
    }
    if (!cfo->is_adjust) {
        rtw89_debug(rtwdev, RTW89_DBG_CFO, "Stop CFO tracking\n");
        return;
    }
    sign = curr_cfo > 0 ? 1 : -1;
    if (cfo_abs > CFO_TRK_STOP_TH_4)
        crystal_cap += 7 * sign;
    else if (cfo_abs > CFO_TRK_STOP_TH_3)
        crystal_cap += 5 * sign;
    else if (cfo_abs > CFO_TRK_STOP_TH_2)
        crystal_cap += 3 * sign;
    else if (cfo_abs > CFO_TRK_STOP_TH_1)
        crystal_cap += 1 * sign;
    else
        return;
    rtw89_phy_cfo_set_crystal_cap(rtwdev, (u8)crystal_cap, false);
    rtw89_debug(rtwdev, RTW89_DBG_CFO,
                "X_cap{Curr,Default}={0x%x,0x%x}\n",
                cfo->crystal_cap, cfo->def_x_cap);
}
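
/* Adjustment is a thresholded staircase: the larger |curr_cfo| is
 * relative to the CFO_TRK_STOP_TH_* levels, the bigger the cap step
 * (1/3/5/7 codes) applied against the sign of the measured offset,
 * with hysteresis between the enable and stop thresholds to avoid
 * hunting around zero.
 */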
  2444. static s32 rtw89_phy_average_cfo_calc(struct rtw89_dev *rtwdev)
  2445. {
  2446. const struct rtw89_chip_info *chip = rtwdev->chip;
  2447. struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
  2448. s32 cfo_khz_all = 0;
  2449. s32 cfo_cnt_all = 0;
  2450. s32 cfo_all_avg = 0;
  2451. u8 i;
  2452. if (rtwdev->total_sta_assoc != 1)
  2453. return 0;
  2454. rtw89_debug(rtwdev, RTW89_DBG_CFO, "one_entry_only\n");
  2455. for (i = 0; i < CFO_TRACK_MAX_USER; i++) {
  2456. if (cfo->cfo_cnt[i] == 0)
  2457. continue;
  2458. cfo_khz_all += cfo->cfo_tail[i];
  2459. cfo_cnt_all += cfo->cfo_cnt[i];
  2460. cfo_all_avg = phy_div(cfo_khz_all, cfo_cnt_all);
  2461. cfo->pre_cfo_avg[i] = cfo->cfo_avg[i];
  2462. cfo->dcfo_avg = phy_div(cfo_khz_all << chip->dcfo_comp_sft,
  2463. cfo_cnt_all);
  2464. }
  2465. rtw89_debug(rtwdev, RTW89_DBG_CFO,
  2466. "CFO track for macid = %d\n", i);
  2467. rtw89_debug(rtwdev, RTW89_DBG_CFO,
  2468. "Total cfo=%dK, pkt_cnt=%d, avg_cfo=%dK\n",
  2469. cfo_khz_all, cfo_cnt_all, cfo_all_avg);
  2470. return cfo_all_avg;
  2471. }
static s32 rtw89_phy_multi_sta_cfo_calc(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	struct rtw89_traffic_stats *stats = &rtwdev->stats;
	s32 target_cfo = 0;
	s32 cfo_khz_all = 0;
	s32 cfo_khz_all_tp_wgt = 0;
	s32 cfo_avg = 0;
	s32 max_cfo_lb = BIT(31);
	s32 min_cfo_ub = GENMASK(30, 0);
	u16 cfo_cnt_all = 0;
	u8 active_entry_cnt = 0;
	u8 sta_cnt = 0;
	u32 tp_all = 0;
	u8 i;
	u8 cfo_tol = 0;

	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Multi entry cfo_trk\n");
	if (cfo->rtw89_multi_cfo_mode == RTW89_PKT_BASED_AVG_MODE) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Pkt based avg mode\n");
		for (i = 0; i < CFO_TRACK_MAX_USER; i++) {
			if (cfo->cfo_cnt[i] == 0)
				continue;
			cfo_khz_all += cfo->cfo_tail[i];
			cfo_cnt_all += cfo->cfo_cnt[i];
			cfo_avg = phy_div(cfo_khz_all, (s32)cfo_cnt_all);
			rtw89_debug(rtwdev, RTW89_DBG_CFO,
				    "Msta cfo=%d, pkt_cnt=%d, avg_cfo=%d\n",
				    cfo_khz_all, cfo_cnt_all, cfo_avg);
			target_cfo = cfo_avg;
		}
	} else if (cfo->rtw89_multi_cfo_mode == RTW89_ENTRY_BASED_AVG_MODE) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Entry based avg mode\n");
		for (i = 0; i < CFO_TRACK_MAX_USER; i++) {
			if (cfo->cfo_cnt[i] == 0)
				continue;
			cfo->cfo_avg[i] = phy_div(cfo->cfo_tail[i],
						  (s32)cfo->cfo_cnt[i]);
			cfo_khz_all += cfo->cfo_avg[i];
			rtw89_debug(rtwdev, RTW89_DBG_CFO,
				    "Macid=%d, cfo_avg=%d\n", i,
				    cfo->cfo_avg[i]);
		}
		sta_cnt = rtwdev->total_sta_assoc;
		cfo_avg = phy_div(cfo_khz_all, (s32)sta_cnt);
		rtw89_debug(rtwdev, RTW89_DBG_CFO,
			    "Msta cfo_acc=%d, ent_cnt=%d, avg_cfo=%d\n",
			    cfo_khz_all, sta_cnt, cfo_avg);
		target_cfo = cfo_avg;
	} else if (cfo->rtw89_multi_cfo_mode == RTW89_TP_BASED_AVG_MODE) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "TP based avg mode\n");
		cfo_tol = cfo->sta_cfo_tolerance;
		for (i = 0; i < CFO_TRACK_MAX_USER; i++) {
			sta_cnt++;
			if (cfo->cfo_cnt[i] != 0) {
				cfo->cfo_avg[i] = phy_div(cfo->cfo_tail[i],
							  (s32)cfo->cfo_cnt[i]);
				active_entry_cnt++;
			} else {
				cfo->cfo_avg[i] = cfo->pre_cfo_avg[i];
			}
			max_cfo_lb = max(cfo->cfo_avg[i] - cfo_tol, max_cfo_lb);
			min_cfo_ub = min(cfo->cfo_avg[i] + cfo_tol, min_cfo_ub);
			cfo_khz_all += cfo->cfo_avg[i];
			/* need tp for each entry */
			rtw89_debug(rtwdev, RTW89_DBG_CFO,
				    "[%d] cfo_avg=%d, tp=tbd\n",
				    i, cfo->cfo_avg[i]);
			if (sta_cnt >= rtwdev->total_sta_assoc)
				break;
		}
		tp_all = stats->rx_throughput; /* need tp for each entry */
		cfo_avg = phy_div(cfo_khz_all_tp_wgt, (s32)tp_all);
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Assoc sta cnt=%d\n",
			    sta_cnt);
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Active sta cnt=%d\n",
			    active_entry_cnt);
		rtw89_debug(rtwdev, RTW89_DBG_CFO,
			    "Msta cfo with tp_wgt=%d, avg_cfo=%d\n",
			    cfo_khz_all_tp_wgt, cfo_avg);
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "cfo_lb=%d,cfo_ub=%d\n",
			    max_cfo_lb, min_cfo_ub);
		if (max_cfo_lb <= min_cfo_ub) {
			rtw89_debug(rtwdev, RTW89_DBG_CFO,
				    "cfo win_size=%d\n",
				    min_cfo_ub - max_cfo_lb);
			target_cfo = clamp(cfo_avg, max_cfo_lb, min_cfo_ub);
		} else {
			rtw89_debug(rtwdev, RTW89_DBG_CFO,
				    "No intersection of cfo tolerance windows\n");
			target_cfo = phy_div(cfo_khz_all, (s32)sta_cnt);
		}
		for (i = 0; i < CFO_TRACK_MAX_USER; i++)
			cfo->pre_cfo_avg[i] = cfo->cfo_avg[i];
	}
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Target cfo=%d\n", target_cfo);
	return target_cfo;
}

static void rtw89_phy_cfo_statistics_reset(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;

	memset(&cfo->cfo_tail, 0, sizeof(cfo->cfo_tail));
	memset(&cfo->cfo_cnt, 0, sizeof(cfo->cfo_cnt));
	cfo->packet_count = 0;
	cfo->packet_count_pre = 0;
	cfo->cfo_avg_pre = 0;
}
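
/* One CFO-tracking iteration: average the collected samples (single-
 * or multi-STA), nudge the crystal cap toward the estimate, then apply
 * digital CFO compensation. When the crystal cap runs into x_cap_ub or
 * x_cap_lb, tracking is locked out and the CFO state reset for
 * CFO_PERIOD_CNT rounds so the loop cannot diverge further.
 */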
static void rtw89_phy_cfo_dm(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	s32 new_cfo = 0;
	bool x_cap_update = false;
	u8 pre_x_cap = cfo->crystal_cap;
	u8 dcfo_comp_sft = rtwdev->chip->dcfo_comp_sft;

	cfo->dcfo_avg = 0;
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "CFO:total_sta_assoc=%d\n",
		    rtwdev->total_sta_assoc);
	if (rtwdev->total_sta_assoc == 0) {
		rtw89_phy_cfo_reset(rtwdev);
		return;
	}
	if (cfo->packet_count == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Pkt cnt = 0\n");
		return;
	}
	if (cfo->packet_count == cfo->packet_count_pre) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Pkt cnt doesn't change\n");
		return;
	}
	if (rtwdev->total_sta_assoc == 1)
		new_cfo = rtw89_phy_average_cfo_calc(rtwdev);
	else
		new_cfo = rtw89_phy_multi_sta_cfo_calc(rtwdev);
	if (new_cfo == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "curr_cfo=0\n");
		return;
	}
	if (cfo->divergence_lock_en) {
		cfo->lock_cnt++;
		if (cfo->lock_cnt > CFO_PERIOD_CNT) {
			cfo->divergence_lock_en = false;
			cfo->lock_cnt = 0;
		} else {
			rtw89_phy_cfo_reset(rtwdev);
		}
		return;
	}
	if (cfo->crystal_cap >= cfo->x_cap_ub ||
	    cfo->crystal_cap <= cfo->x_cap_lb) {
		cfo->divergence_lock_en = true;
		rtw89_phy_cfo_reset(rtwdev);
		return;
	}

	rtw89_phy_cfo_crystal_cap_adjust(rtwdev, new_cfo);
	cfo->cfo_avg_pre = new_cfo;
	cfo->dcfo_avg_pre = cfo->dcfo_avg;
	x_cap_update = cfo->crystal_cap != pre_x_cap;
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Xcap_up=%d\n", x_cap_update);
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Xcap: D:%x C:%x->%x, ofst=%d\n",
		    cfo->def_x_cap, pre_x_cap, cfo->crystal_cap,
		    cfo->x_cap_ofst);
	if (x_cap_update) {
		if (cfo->dcfo_avg > 0)
			cfo->dcfo_avg -= CFO_SW_COMP_FINE_TUNE << dcfo_comp_sft;
		else
			cfo->dcfo_avg += CFO_SW_COMP_FINE_TUNE << dcfo_comp_sft;
	}
	rtw89_dcfo_comp(rtwdev, cfo->dcfo_avg);
	rtw89_phy_cfo_statistics_reset(rtwdev);
}
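
/* Delayed worker used while timer-triggered tracking is active; it
 * runs one tracking pass under rtwdev->mutex and re-queues itself
 * every cfo->cfo_timer_ms.
 */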
void rtw89_phy_cfo_track_work(struct work_struct *work)
{
	struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
						cfo_track_work.work);
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;

	mutex_lock(&rtwdev->mutex);
	if (!cfo->cfo_trig_by_timer_en)
		goto out;
	rtw89_leave_ps_mode(rtwdev);
	rtw89_phy_cfo_dm(rtwdev);
	ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->cfo_track_work,
				     msecs_to_jiffies(cfo->cfo_timer_ms));
out:
	mutex_unlock(&rtwdev->mutex);
}

static void rtw89_phy_cfo_start_work(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;

	ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->cfo_track_work,
				     msecs_to_jiffies(cfo->cfo_timer_ms));
}
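
/* Watchdog-driven CFO state machine:
 * NORMAL       -> ENHANCE  when TX throughput exceeds CFO_TP_UPPER;
 *                          the periodic worker takes over tracking.
 * ENHANCE      -> HOLD     after CFO_PERIOD_CNT rounds once UL-OFDMA
 *                          traffic is seen with accelerate mode on.
 * ENHANCE/HOLD -> NORMAL   when throughput drops below CFO_TP_LOWER;
 *                          the timer is disabled and the watchdog
 *                          itself runs the tracking pass again.
 */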
void rtw89_phy_cfo_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	struct rtw89_traffic_stats *stats = &rtwdev->stats;
	bool is_ul_ofdma = false, ofdma_acc_en = false;

	if (stats->rx_tf_periodic > CFO_TF_CNT_TH)
		is_ul_ofdma = true;
	if (cfo->cfo_ul_ofdma_acc_mode == RTW89_CFO_UL_OFDMA_ACC_ENABLE &&
	    is_ul_ofdma)
		ofdma_acc_en = true;

	switch (cfo->phy_cfo_status) {
	case RTW89_PHY_DCFO_STATE_NORMAL:
		if (stats->tx_throughput >= CFO_TP_UPPER) {
			cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_ENHANCE;
			cfo->cfo_trig_by_timer_en = true;
			cfo->cfo_timer_ms = CFO_COMP_PERIOD;
			rtw89_phy_cfo_start_work(rtwdev);
		}
		break;
	case RTW89_PHY_DCFO_STATE_ENHANCE:
		if (stats->tx_throughput <= CFO_TP_LOWER)
			cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL;
		else if (ofdma_acc_en &&
			 cfo->phy_cfo_trk_cnt >= CFO_PERIOD_CNT)
			cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_HOLD;
		else
			cfo->phy_cfo_trk_cnt++;
		if (cfo->phy_cfo_status == RTW89_PHY_DCFO_STATE_NORMAL) {
			cfo->phy_cfo_trk_cnt = 0;
			cfo->cfo_trig_by_timer_en = false;
		}
		break;
	case RTW89_PHY_DCFO_STATE_HOLD:
		if (stats->tx_throughput <= CFO_TP_LOWER) {
			cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL;
			cfo->phy_cfo_trk_cnt = 0;
			cfo->cfo_trig_by_timer_en = false;
		} else {
			cfo->phy_cfo_trk_cnt++;
		}
		break;
	default:
		cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL;
		cfo->phy_cfo_trk_cnt = 0;
		break;
	}
	rtw89_debug(rtwdev, RTW89_DBG_CFO,
		    "[CFO]WatchDog tp=%d,state=%d,timer_en=%d,trk_cnt=%d,thermal=%ld\n",
		    stats->tx_throughput, cfo->phy_cfo_status,
		    cfo->cfo_trig_by_timer_en, cfo->phy_cfo_trk_cnt,
		    ewma_thermal_read(&rtwdev->phystat.avg_thermal[0]));
	if (cfo->cfo_trig_by_timer_en)
		return;
	rtw89_phy_cfo_dm(rtwdev);
}
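
/* RX-path hook: fold one PPDU's reported CFO sample into the
 * per-macid accumulators consumed by the tracking pass above.
 */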
void rtw89_phy_cfo_parse(struct rtw89_dev *rtwdev, s16 cfo_val,
			 struct rtw89_rx_phy_ppdu *phy_ppdu)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	u8 macid = phy_ppdu->mac_id;

	if (macid >= CFO_TRACK_MAX_USER) {
		rtw89_warn(rtwdev, "mac_id %d is out of range\n", macid);
		return;
	}

	cfo->cfo_tail[macid] += cfo_val;
	cfo->cfo_cnt[macid]++;
	cfo->packet_count++;
}

void rtw89_phy_ul_tb_assoc(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
						       rtwvif->sub_entity_idx);
	struct rtw89_phy_ul_tb_info *ul_tb_info = &rtwdev->ul_tb_info;

	if (!chip->ul_tb_waveform_ctrl)
		return;

	rtwvif->def_tri_idx =
		rtw89_phy_read32_mask(rtwdev, R_DCFO_OPT, B_TXSHAPE_TRIANGULAR_CFG);

	if (chip->chip_id == RTL8852B && rtwdev->hal.cv > CHIP_CBV)
		rtwvif->dyn_tb_bedge_en = false;
	else if (chan->band_type >= RTW89_BAND_5G &&
		 chan->band_width >= RTW89_CHANNEL_WIDTH_40)
		rtwvif->dyn_tb_bedge_en = true;
	else
		rtwvif->dyn_tb_bedge_en = false;

	rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
		    "[ULTB] def_if_bandedge=%d, def_tri_idx=%d\n",
		    ul_tb_info->def_if_bandedge, rtwvif->def_tri_idx);
	rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
		    "[ULTB] dyn_tb_bedge_en=%d, dyn_tb_tri_en=%d\n",
		    rtwvif->dyn_tb_bedge_en, ul_tb_info->dyn_tb_tri_en);
}

struct rtw89_phy_ul_tb_check_data {
	bool valid;
	bool high_tf_client;
	bool low_tf_client;
	bool dyn_tb_bedge_en;
	u8 def_tri_idx;
};

struct rtw89_phy_power_diff {
	u32 q_00;
	u32 q_11;
	u32 q_matrix_en;
	u32 ultb_1t_norm_160;
	u32 ultb_2t_norm_160;
	u32 com1_norm_1sts;
	u32 com2_resp_1sts_path;
};
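
/* Program the Q-matrix and UL TB power registers whenever
 * rtwvif->pwr_diff_en changes state; table[0] holds the "disabled"
 * register values and table[1] the "enabled" ones, in struct
 * rtw89_phy_power_diff field order.
 */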
static void rtw89_phy_ofdma_power_diff(struct rtw89_dev *rtwdev,
				       struct rtw89_vif *rtwvif)
{
	static const struct rtw89_phy_power_diff table[2] = {
		{0x0, 0x0, 0x0, 0x0, 0xf4, 0x3, 0x3},
		{0xb50, 0xb50, 0x1, 0xc, 0x0, 0x1, 0x1},
	};
	const struct rtw89_phy_power_diff *param;
	u32 reg;

	if (!rtwdev->chip->ul_tb_pwr_diff)
		return;

	if (rtwvif->pwr_diff_en == rtwvif->pre_pwr_diff_en) {
		rtwvif->pwr_diff_en = false;
		return;
	}

	rtwvif->pre_pwr_diff_en = rtwvif->pwr_diff_en;
	param = &table[rtwvif->pwr_diff_en];

	rtw89_phy_write32_mask(rtwdev, R_Q_MATRIX_00, B_Q_MATRIX_00_REAL,
			       param->q_00);
	rtw89_phy_write32_mask(rtwdev, R_Q_MATRIX_11, B_Q_MATRIX_11_REAL,
			       param->q_11);
	rtw89_phy_write32_mask(rtwdev, R_CUSTOMIZE_Q_MATRIX,
			       B_CUSTOMIZE_Q_MATRIX_EN, param->q_matrix_en);

	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_1T, rtwvif->mac_idx);
	rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_1T_NORM_BW160,
			   param->ultb_1t_norm_160);

	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_2T, rtwvif->mac_idx);
	rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_2T_NORM_BW160,
			   param->ultb_2t_norm_160);

	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PATH_COM1, rtwvif->mac_idx);
	rtw89_write32_mask(rtwdev, reg, B_AX_PATH_COM1_NORM_1STS,
			   param->com1_norm_1sts);

	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PATH_COM2, rtwvif->mac_idx);
	rtw89_write32_mask(rtwdev, reg, B_AX_PATH_COM2_RESP_1STS_PATH,
			   param->com2_resp_1sts_path);
}

static
void rtw89_phy_ul_tb_ctrl_check(struct rtw89_dev *rtwdev,
				struct rtw89_vif *rtwvif,
				struct rtw89_phy_ul_tb_check_data *ul_tb_data)
{
	struct rtw89_traffic_stats *stats = &rtwdev->stats;
	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);

	if (rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION)
		return;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 0, 0)
	if (!vif->cfg.assoc)
		return;
#else
	if (!vif->bss_conf.assoc)
		return;
#endif

	if (rtwdev->chip->ul_tb_waveform_ctrl) {
		if (stats->rx_tf_periodic > UL_TB_TF_CNT_L2H_TH)
			ul_tb_data->high_tf_client = true;
		else if (stats->rx_tf_periodic < UL_TB_TF_CNT_H2L_TH)
			ul_tb_data->low_tf_client = true;

		ul_tb_data->valid = true;
		ul_tb_data->def_tri_idx = rtwvif->def_tri_idx;
		ul_tb_data->dyn_tb_bedge_en = rtwvif->dyn_tb_bedge_en;
	}

	rtw89_phy_ofdma_power_diff(rtwdev, rtwvif);
}

static void rtw89_phy_ul_tb_waveform_ctrl(struct rtw89_dev *rtwdev,
					  struct rtw89_phy_ul_tb_check_data *ul_tb_data)
{
	struct rtw89_phy_ul_tb_info *ul_tb_info = &rtwdev->ul_tb_info;

	if (!rtwdev->chip->ul_tb_waveform_ctrl)
		return;

	if (ul_tb_data->dyn_tb_bedge_en) {
		if (ul_tb_data->high_tf_client) {
			rtw89_phy_write32_mask(rtwdev, R_BANDEDGE, B_BANDEDGE_EN, 0);
			rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
				    "[ULTB] Turn off if_bandedge\n");
		} else if (ul_tb_data->low_tf_client) {
			rtw89_phy_write32_mask(rtwdev, R_BANDEDGE, B_BANDEDGE_EN,
					       ul_tb_info->def_if_bandedge);
			rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
				    "[ULTB] Set to default if_bandedge = %d\n",
				    ul_tb_info->def_if_bandedge);
		}
	}

	if (ul_tb_info->dyn_tb_tri_en) {
		if (ul_tb_data->high_tf_client) {
			rtw89_phy_write32_mask(rtwdev, R_DCFO_OPT,
					       B_TXSHAPE_TRIANGULAR_CFG, 0);
			rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
				    "[ULTB] Turn off Tx triangle\n");
		} else if (ul_tb_data->low_tf_client) {
			rtw89_phy_write32_mask(rtwdev, R_DCFO_OPT,
					       B_TXSHAPE_TRIANGULAR_CFG,
					       ul_tb_data->def_tri_idx);
			rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
				    "[ULTB] Set to default tx_shape_idx = %d\n",
				    ul_tb_data->def_tri_idx);
		}
	}
}

void rtw89_phy_ul_tb_ctrl_track(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_phy_ul_tb_check_data ul_tb_data = {};
	struct rtw89_vif *rtwvif;

	if (!chip->ul_tb_waveform_ctrl && !chip->ul_tb_pwr_diff)
		return;

	if (rtwdev->total_sta_assoc != 1)
		return;

	rtw89_for_each_rtwvif(rtwdev, rtwvif)
		rtw89_phy_ul_tb_ctrl_check(rtwdev, rtwvif, &ul_tb_data);

	if (!ul_tb_data.valid)
		return;

	rtw89_phy_ul_tb_waveform_ctrl(rtwdev, &ul_tb_data);
}

static void rtw89_phy_ul_tb_info_init(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_phy_ul_tb_info *ul_tb_info = &rtwdev->ul_tb_info;

	if (!chip->ul_tb_waveform_ctrl)
		return;

	ul_tb_info->dyn_tb_tri_en = true;
	ul_tb_info->def_if_bandedge =
		rtw89_phy_read32_mask(rtwdev, R_BANDEDGE, B_BANDEDGE_EN);
}
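
/* Antenna-diversity statistics: RSSI is averaged separately for CCK,
 * legacy OFDM and HT/VHT/HE PPDUs so that main/aux comparisons can be
 * made on whichever rate class dominates the received traffic.
 */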
static
void rtw89_phy_antdiv_sts_instance_reset(struct rtw89_antdiv_stats *antdiv_sts)
{
	ewma_rssi_init(&antdiv_sts->cck_rssi_avg);
	ewma_rssi_init(&antdiv_sts->ofdm_rssi_avg);
	ewma_rssi_init(&antdiv_sts->non_legacy_rssi_avg);
	antdiv_sts->pkt_cnt_cck = 0;
	antdiv_sts->pkt_cnt_ofdm = 0;
	antdiv_sts->pkt_cnt_non_legacy = 0;
	antdiv_sts->evm = 0;
}

static void rtw89_phy_antdiv_sts_instance_add(struct rtw89_dev *rtwdev,
					      struct rtw89_rx_phy_ppdu *phy_ppdu,
					      struct rtw89_antdiv_stats *stats)
{
	if (rtw89_get_data_rate_mode(rtwdev, phy_ppdu->rate) == DATA_RATE_MODE_NON_HT) {
		if (phy_ppdu->rate < RTW89_HW_RATE_OFDM6) {
			ewma_rssi_add(&stats->cck_rssi_avg, phy_ppdu->rssi_avg);
			stats->pkt_cnt_cck++;
		} else {
			ewma_rssi_add(&stats->ofdm_rssi_avg, phy_ppdu->rssi_avg);
			stats->pkt_cnt_ofdm++;
			stats->evm += phy_ppdu->ofdm.evm_min;
		}
	} else {
		ewma_rssi_add(&stats->non_legacy_rssi_avg, phy_ppdu->rssi_avg);
		stats->pkt_cnt_non_legacy++;
		stats->evm += phy_ppdu->ofdm.evm_min;
	}
}

static u8 rtw89_phy_antdiv_sts_instance_get_rssi(struct rtw89_antdiv_stats *stats)
{
	if (stats->pkt_cnt_non_legacy >= stats->pkt_cnt_cck &&
	    stats->pkt_cnt_non_legacy >= stats->pkt_cnt_ofdm)
		return ewma_rssi_read(&stats->non_legacy_rssi_avg);
	else if (stats->pkt_cnt_ofdm >= stats->pkt_cnt_cck &&
		 stats->pkt_cnt_ofdm >= stats->pkt_cnt_non_legacy)
		return ewma_rssi_read(&stats->ofdm_rssi_avg);
	else
		return ewma_rssi_read(&stats->cck_rssi_avg);
}

static u8 rtw89_phy_antdiv_sts_instance_get_evm(struct rtw89_antdiv_stats *stats)
{
	return phy_div(stats->evm, stats->pkt_cnt_non_legacy + stats->pkt_cnt_ofdm);
}

void rtw89_phy_antdiv_parse(struct rtw89_dev *rtwdev,
			    struct rtw89_rx_phy_ppdu *phy_ppdu)
{
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
	struct rtw89_hal *hal = &rtwdev->hal;

	if (!hal->ant_diversity || hal->ant_diversity_fixed)
		return;

	rtw89_phy_antdiv_sts_instance_add(rtwdev, phy_ppdu, &antdiv->target_stats);

	if (!antdiv->get_stats)
		return;

	if (hal->antenna_rx == RF_A)
		rtw89_phy_antdiv_sts_instance_add(rtwdev, phy_ppdu, &antdiv->main_stats);
	else if (hal->antenna_rx == RF_B)
		rtw89_phy_antdiv_sts_instance_add(rtwdev, phy_ppdu, &antdiv->aux_stats);
}

static void rtw89_phy_antdiv_reg_init(struct rtw89_dev *rtwdev)
{
	rtw89_phy_write32_idx(rtwdev, R_P0_TRSW, B_P0_ANT_TRAIN_EN,
			      0x0, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_TRSW, B_P0_TX_ANT_SEL,
			      0x0, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANT_SW, B_P0_TRSW_TX_EXTEND,
			      0x0, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANT_SW, B_P0_HW_ANTSW_DIS_BY_GNT_BT,
			      0x0, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_TRSW, B_P0_BT_FORCE_ANTIDX_EN,
			      0x0, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_RFSW_CTRL_ANT0_BASE, B_RFSW_CTRL_ANT_MAPPING,
			      0x0100, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_BTG_TRX,
			      0x1, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_HW_CTRL,
			      0x0, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_SW_2G,
			      0x0, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_SW_5G,
			      0x0, RTW89_PHY_0);
}

static void rtw89_phy_antdiv_sts_reset(struct rtw89_dev *rtwdev)
{
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;

	rtw89_phy_antdiv_sts_instance_reset(&antdiv->target_stats);
	rtw89_phy_antdiv_sts_instance_reset(&antdiv->main_stats);
	rtw89_phy_antdiv_sts_instance_reset(&antdiv->aux_stats);
}

static void rtw89_phy_antdiv_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
	struct rtw89_hal *hal = &rtwdev->hal;

	if (!hal->ant_diversity)
		return;

	antdiv->get_stats = false;
	antdiv->rssi_pre = 0;
	rtw89_phy_antdiv_sts_reset(rtwdev);
	rtw89_phy_antdiv_reg_init(rtwdev);
}

static void rtw89_phy_stat_thermal_update(struct rtw89_dev *rtwdev)
{
	struct rtw89_phy_stat *phystat = &rtwdev->phystat;
	int i;
	u8 th;

	for (i = 0; i < rtwdev->chip->rf_path_num; i++) {
		th = rtw89_chip_get_thermal(rtwdev, i);
		if (th)
			ewma_thermal_add(&phystat->avg_thermal[i], th);

		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
			    "path(%d) thermal cur=%u avg=%ld", i, th,
			    ewma_thermal_read(&phystat->avg_thermal[i]));
	}
}

struct rtw89_phy_iter_rssi_data {
	struct rtw89_dev *rtwdev;
	struct rtw89_phy_ch_info *ch_info;
	bool rssi_changed;
};

static void rtw89_phy_stat_rssi_update_iter(void *data,
					    struct ieee80211_sta *sta)
{
	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
	struct rtw89_phy_iter_rssi_data *rssi_data =
					(struct rtw89_phy_iter_rssi_data *)data;
	struct rtw89_phy_ch_info *ch_info = rssi_data->ch_info;
	unsigned long rssi_curr;

	rssi_curr = ewma_rssi_read(&rtwsta->avg_rssi);
	if (rssi_curr < ch_info->rssi_min) {
		ch_info->rssi_min = rssi_curr;
		ch_info->rssi_min_macid = rtwsta->mac_id;
	}

	if (rtwsta->prev_rssi == 0) {
		rtwsta->prev_rssi = rssi_curr;
	} else if (abs((int)rtwsta->prev_rssi - (int)rssi_curr) > (3 << RSSI_FACTOR)) {
		rtwsta->prev_rssi = rssi_curr;
		rssi_data->rssi_changed = true;
	}
}

static void rtw89_phy_stat_rssi_update(struct rtw89_dev *rtwdev)
{
	struct rtw89_phy_iter_rssi_data rssi_data = {0};

	rssi_data.rtwdev = rtwdev;
	rssi_data.ch_info = &rtwdev->ch_info;
	rssi_data.ch_info->rssi_min = U8_MAX;
	ieee80211_iterate_stations_atomic(rtwdev->hw,
					  rtw89_phy_stat_rssi_update_iter,
					  &rssi_data);
	if (rssi_data.rssi_changed)
		rtw89_btc_ntfy_wl_sta(rtwdev);
}

static void rtw89_phy_stat_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_phy_stat *phystat = &rtwdev->phystat;
	int i;

	for (i = 0; i < rtwdev->chip->rf_path_num; i++)
		ewma_thermal_init(&phystat->avg_thermal[i]);

	rtw89_phy_stat_thermal_update(rtwdev);

	memset(&phystat->cur_pkt_stat, 0, sizeof(phystat->cur_pkt_stat));
	memset(&phystat->last_pkt_stat, 0, sizeof(phystat->last_pkt_stat));
}

void rtw89_phy_stat_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_phy_stat *phystat = &rtwdev->phystat;

	rtw89_phy_stat_thermal_update(rtwdev);
	rtw89_phy_stat_rssi_update(rtwdev);

	phystat->last_pkt_stat = phystat->cur_pkt_stat;
	memset(&phystat->cur_pkt_stat, 0, sizeof(phystat->cur_pkt_stat));
}
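
/* CCX counters tick in units of CCX_US_BASE_RATIO << ccx_unit_idx
 * microseconds; the two helpers below convert between microseconds
 * and raw counter values.
 */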
static u16 rtw89_phy_ccx_us_to_idx(struct rtw89_dev *rtwdev, u32 time_us)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;

	return time_us >> (ilog2(CCX_US_BASE_RATIO) + env->ccx_unit_idx);
}

static u32 rtw89_phy_ccx_idx_to_us(struct rtw89_dev *rtwdev, u16 idx)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;

	return idx << (ilog2(CCX_US_BASE_RATIO) + env->ccx_unit_idx);
}

static void rtw89_phy_ccx_top_setting_init(struct rtw89_dev *rtwdev)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;

	env->ccx_manual_ctrl = false;
	env->ccx_ongoing = false;
	env->ccx_rac_lv = RTW89_RAC_RELEASE;
	env->ccx_period = 0;
	env->ccx_unit_idx = RTW89_CCX_32_US;

	rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->en_mask, 1);
	rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->trig_opt_mask, 1);
	rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 1);
	rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->edcca_opt_mask,
			       RTW89_CCX_EDCCA_BW20_0);
}

static u16 rtw89_phy_ccx_get_report(struct rtw89_dev *rtwdev, u16 report,
				    u16 score)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	u32 numer = 0;
	u16 ret = 0;

	numer = report * score + (env->ccx_period >> 1);
	if (env->ccx_period)
		ret = numer / env->ccx_period;

	return ret >= score ? score - 1 : ret;
}

static void rtw89_phy_ccx_ms_to_period_unit(struct rtw89_dev *rtwdev,
					    u16 time_ms, u32 *period,
					    u32 *unit_idx)
{
	u32 idx;
	u8 quotient;

	if (time_ms >= CCX_MAX_PERIOD)
		time_ms = CCX_MAX_PERIOD;

	quotient = CCX_MAX_PERIOD_UNIT * time_ms / CCX_MAX_PERIOD;

	if (quotient < 4)
		idx = RTW89_CCX_4_US;
	else if (quotient < 8)
		idx = RTW89_CCX_8_US;
	else if (quotient < 16)
		idx = RTW89_CCX_16_US;
	else
		idx = RTW89_CCX_32_US;

	*unit_idx = idx;
	*period = (time_ms * MS_TO_4US_RATIO) >> idx;

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "[Trigger Time] period:%d, unit_idx:%d\n",
		    *period, *unit_idx);
}

static void rtw89_phy_ccx_racing_release(struct rtw89_dev *rtwdev)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "lv:(%d)->(0)\n", env->ccx_rac_lv);

	env->ccx_ongoing = false;
	env->ccx_rac_lv = RTW89_RAC_RELEASE;
	env->ifs_clm_app = RTW89_IFS_CLM_BACKGROUND;
}
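
/* IFS-CLM measures how airtime between our own transmissions is
 * spent: TX time, EDCCA-busy time excluding CCA, CCK/OFDM false
 * alarms and CCAs, plus a histogram of idle-gap lengths over four
 * configurable ranges (T1..T4).
 */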
static bool rtw89_phy_ifs_clm_th_update_check(struct rtw89_dev *rtwdev,
					      struct rtw89_ccx_para_info *para)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	bool is_update = env->ifs_clm_app != para->ifs_clm_app;
	u8 i = 0;
	u16 *ifs_th_l = env->ifs_clm_th_l;
	u16 *ifs_th_h = env->ifs_clm_th_h;
	u32 ifs_th0_us = 0, ifs_th_times = 0;
	u32 ifs_th_h_us[RTW89_IFS_CLM_NUM] = {0};

	if (!is_update)
		goto ifs_update_finished;

	switch (para->ifs_clm_app) {
	case RTW89_IFS_CLM_INIT:
	case RTW89_IFS_CLM_BACKGROUND:
	case RTW89_IFS_CLM_ACS:
	case RTW89_IFS_CLM_DBG:
	case RTW89_IFS_CLM_DIG:
	case RTW89_IFS_CLM_TDMA_DIG:
		ifs_th0_us = IFS_CLM_TH0_UPPER;
		ifs_th_times = IFS_CLM_TH_MUL;
		break;
	case RTW89_IFS_CLM_DBG_MANUAL:
		ifs_th0_us = para->ifs_clm_manual_th0;
		ifs_th_times = para->ifs_clm_manual_th_times;
		break;
	default:
		break;
	}

	/* Set sampling threshold for 4 different regions, unit in idx_cnt.
	 * low[i] = high[i-1] + 1
	 * high[i] = high[i-1] * ifs_th_times
	 */
	ifs_th_l[IFS_CLM_TH_START_IDX] = 0;
	ifs_th_h_us[IFS_CLM_TH_START_IDX] = ifs_th0_us;
	ifs_th_h[IFS_CLM_TH_START_IDX] = rtw89_phy_ccx_us_to_idx(rtwdev,
								 ifs_th0_us);
	for (i = 1; i < RTW89_IFS_CLM_NUM; i++) {
		ifs_th_l[i] = ifs_th_h[i - 1] + 1;
		ifs_th_h_us[i] = ifs_th_h_us[i - 1] * ifs_th_times;
		ifs_th_h[i] = rtw89_phy_ccx_us_to_idx(rtwdev, ifs_th_h_us[i]);
	}

ifs_update_finished:
	if (!is_update)
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "No need to update IFS_TH\n");

	return is_update;
}

static void rtw89_phy_ifs_clm_set_th_reg(struct rtw89_dev *rtwdev)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;
	u8 i = 0;

	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_th_l_mask,
			       env->ifs_clm_th_l[0]);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_th_l_mask,
			       env->ifs_clm_th_l[1]);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_th_l_mask,
			       env->ifs_clm_th_l[2]);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_th_l_mask,
			       env->ifs_clm_th_l[3]);

	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_th_h_mask,
			       env->ifs_clm_th_h[0]);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_th_h_mask,
			       env->ifs_clm_th_h[1]);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_th_h_mask,
			       env->ifs_clm_th_h[2]);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_th_h_mask,
			       env->ifs_clm_th_h[3]);

	for (i = 0; i < RTW89_IFS_CLM_NUM; i++)
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "Update IFS_T%d_th{low, high} : {%d, %d}\n",
			    i + 1, env->ifs_clm_th_l[i], env->ifs_clm_th_h[i]);
}

static void rtw89_phy_ifs_clm_setting_init(struct rtw89_dev *rtwdev)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;
	struct rtw89_ccx_para_info para = {0};

	env->ifs_clm_app = RTW89_IFS_CLM_BACKGROUND;
	env->ifs_clm_mntr_time = 0;

	para.ifs_clm_app = RTW89_IFS_CLM_INIT;
	if (rtw89_phy_ifs_clm_th_update_check(rtwdev, &para))
		rtw89_phy_ifs_clm_set_th_reg(rtwdev);

	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_collect_en_mask, true);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_en_mask, true);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_en_mask, true);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_en_mask, true);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_en_mask, true);
}

static int rtw89_phy_ccx_racing_ctrl(struct rtw89_dev *rtwdev,
				     enum rtw89_env_racing_lv level)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	int ret = 0;

	if (level >= RTW89_RAC_MAX_NUM) {
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "[WARNING] Wrong LV=%d\n", level);
		return -EINVAL;
	}

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "ccx_ongoing=%d, level:(%d)->(%d)\n", env->ccx_ongoing,
		    env->ccx_rac_lv, level);

	if (env->ccx_ongoing) {
		if (level <= env->ccx_rac_lv)
			ret = -EINVAL;
		else
			env->ccx_ongoing = false;
	}

	if (ret == 0)
		env->ccx_rac_lv = level;

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "ccx racing success=%d\n",
		    !ret);

	return ret;
}

static void rtw89_phy_ccx_trigger(struct rtw89_dev *rtwdev)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;

	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_clm_cnt_clear_mask, 0);
	rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 0);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_clm_cnt_clear_mask, 1);
	rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 1);

	env->ccx_ongoing = true;
}

static void rtw89_phy_ifs_clm_get_utility(struct rtw89_dev *rtwdev)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	u8 i = 0;
	u32 res = 0;

	env->ifs_clm_tx_ratio =
		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_tx, PERCENT);
	env->ifs_clm_edcca_excl_cca_ratio =
		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_edcca_excl_cca,
					 PERCENT);
	env->ifs_clm_cck_fa_ratio =
		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_cckfa, PERCENT);
	env->ifs_clm_ofdm_fa_ratio =
		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_ofdmfa, PERCENT);
	env->ifs_clm_cck_cca_excl_fa_ratio =
		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_cckcca_excl_fa,
					 PERCENT);
	env->ifs_clm_ofdm_cca_excl_fa_ratio =
		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_ofdmcca_excl_fa,
					 PERCENT);
	env->ifs_clm_cck_fa_permil =
		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_cckfa, PERMIL);
	env->ifs_clm_ofdm_fa_permil =
		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_ofdmfa, PERMIL);

	for (i = 0; i < RTW89_IFS_CLM_NUM; i++) {
		if (env->ifs_clm_his[i] > ENV_MNTR_IFSCLM_HIS_MAX) {
			env->ifs_clm_ifs_avg[i] = ENV_MNTR_FAIL_DWORD;
		} else {
			env->ifs_clm_ifs_avg[i] =
				rtw89_phy_ccx_idx_to_us(rtwdev,
							env->ifs_clm_avg[i]);
		}

		res = rtw89_phy_ccx_idx_to_us(rtwdev, env->ifs_clm_cca[i]);
		res += env->ifs_clm_his[i] >> 1;
		if (env->ifs_clm_his[i])
			res /= env->ifs_clm_his[i];
		else
			res = 0;
		env->ifs_clm_cca_avg[i] = res;
	}

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM ratio {Tx, EDCCA_exclu_cca} = {%d, %d}\n",
		    env->ifs_clm_tx_ratio, env->ifs_clm_edcca_excl_cca_ratio);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM FA ratio {CCK, OFDM} = {%d, %d}\n",
		    env->ifs_clm_cck_fa_ratio, env->ifs_clm_ofdm_fa_ratio);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM FA permil {CCK, OFDM} = {%d, %d}\n",
		    env->ifs_clm_cck_fa_permil, env->ifs_clm_ofdm_fa_permil);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM CCA_exclu_FA ratio {CCK, OFDM} = {%d, %d}\n",
		    env->ifs_clm_cck_cca_excl_fa_ratio,
		    env->ifs_clm_ofdm_cca_excl_fa_ratio);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "Time:[his, ifs_avg(us), cca_avg(us)]\n");
	for (i = 0; i < RTW89_IFS_CLM_NUM; i++)
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "T%d:[%d, %d, %d]\n",
			    i + 1, env->ifs_clm_his[i], env->ifs_clm_ifs_avg[i],
			    env->ifs_clm_cca_avg[i]);
}
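
/* Read back one finished IFS-CLM measurement. Returns false when the
 * hardware counting-done flag is not set yet, leaving the previously
 * cached results untouched.
 */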
static bool rtw89_phy_ifs_clm_get_result(struct rtw89_dev *rtwdev)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;
	u8 i = 0;

	if (rtw89_phy_read32_mask(rtwdev, ccx->ifs_total_addr,
				  ccx->ifs_cnt_done_mask) == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "Get IFS_CLM report Fail\n");
		return false;
	}

	env->ifs_clm_tx =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_tx_cnt_addr,
				      ccx->ifs_clm_tx_cnt_msk);
	env->ifs_clm_edcca_excl_cca =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_tx_cnt_addr,
				      ccx->ifs_clm_edcca_excl_cca_fa_mask);
	env->ifs_clm_cckcca_excl_fa =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_cca_addr,
				      ccx->ifs_clm_cckcca_excl_fa_mask);
	env->ifs_clm_ofdmcca_excl_fa =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_cca_addr,
				      ccx->ifs_clm_ofdmcca_excl_fa_mask);
	env->ifs_clm_cckfa =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_fa_addr,
				      ccx->ifs_clm_cck_fa_mask);
	env->ifs_clm_ofdmfa =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_fa_addr,
				      ccx->ifs_clm_ofdm_fa_mask);

	env->ifs_clm_his[0] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr,
				      ccx->ifs_t1_his_mask);
	env->ifs_clm_his[1] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr,
				      ccx->ifs_t2_his_mask);
	env->ifs_clm_his[2] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr,
				      ccx->ifs_t3_his_mask);
	env->ifs_clm_his[3] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr,
				      ccx->ifs_t4_his_mask);

	env->ifs_clm_avg[0] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_l_addr,
				      ccx->ifs_t1_avg_mask);
	env->ifs_clm_avg[1] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_l_addr,
				      ccx->ifs_t2_avg_mask);
	env->ifs_clm_avg[2] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_h_addr,
				      ccx->ifs_t3_avg_mask);
	env->ifs_clm_avg[3] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_h_addr,
				      ccx->ifs_t4_avg_mask);

	env->ifs_clm_cca[0] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_l_addr,
				      ccx->ifs_t1_cca_mask);
	env->ifs_clm_cca[1] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_l_addr,
				      ccx->ifs_t2_cca_mask);
	env->ifs_clm_cca[2] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_h_addr,
				      ccx->ifs_t3_cca_mask);
	env->ifs_clm_cca[3] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_h_addr,
				      ccx->ifs_t4_cca_mask);

	env->ifs_clm_total_ifs =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_total_addr,
				      ccx->ifs_total_mask);

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "IFS-CLM total_ifs = %d\n",
		    env->ifs_clm_total_ifs);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "{Tx, EDCCA_exclu_cca} = {%d, %d}\n",
		    env->ifs_clm_tx, env->ifs_clm_edcca_excl_cca);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM FA{CCK, OFDM} = {%d, %d}\n",
		    env->ifs_clm_cckfa, env->ifs_clm_ofdmfa);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM CCA_exclu_FA{CCK, OFDM} = {%d, %d}\n",
		    env->ifs_clm_cckcca_excl_fa, env->ifs_clm_ofdmcca_excl_fa);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "Time:[his, avg, cca]\n");
	for (i = 0; i < RTW89_IFS_CLM_NUM; i++)
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "T%d:[%d, %d, %d]\n", i + 1, env->ifs_clm_his[i],
			    env->ifs_clm_avg[i], env->ifs_clm_cca[i]);

	rtw89_phy_ifs_clm_get_utility(rtwdev);

	return true;
}

static int rtw89_phy_ifs_clm_set(struct rtw89_dev *rtwdev,
				 struct rtw89_ccx_para_info *para)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;
	u32 period = 0;
	u32 unit_idx = 0;

	if (para->mntr_time == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "[WARN] MNTR_TIME is 0\n");
		return -EINVAL;
	}

	if (rtw89_phy_ccx_racing_ctrl(rtwdev, para->rac_lv))
		return -EINVAL;

	if (para->mntr_time != env->ifs_clm_mntr_time) {
		rtw89_phy_ccx_ms_to_period_unit(rtwdev, para->mntr_time,
						&period, &unit_idx);
		rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr,
				       ccx->ifs_clm_period_mask, period);
		rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr,
				       ccx->ifs_clm_cnt_unit_mask,
				       unit_idx);
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "Update IFS-CLM time ((%d)) -> ((%d))\n",
			    env->ifs_clm_mntr_time, para->mntr_time);

		env->ifs_clm_mntr_time = para->mntr_time;
		env->ccx_period = (u16)period;
		env->ccx_unit_idx = (u8)unit_idx;
	}

	if (rtw89_phy_ifs_clm_th_update_check(rtwdev, para)) {
		env->ifs_clm_app = para->ifs_clm_app;
		rtw89_phy_ifs_clm_set_th_reg(rtwdev);
	}

	return 0;
}

void rtw89_phy_env_monitor_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	struct rtw89_ccx_para_info para = {0};
	u8 chk_result = RTW89_PHY_ENV_MON_CCX_FAIL;

	env->ccx_watchdog_result = RTW89_PHY_ENV_MON_CCX_FAIL;
	if (env->ccx_manual_ctrl) {
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "CCX in manual ctrl\n");
		return;
	}

	/* only ifs_clm for now */
	if (rtw89_phy_ifs_clm_get_result(rtwdev))
		env->ccx_watchdog_result |= RTW89_PHY_ENV_MON_IFS_CLM;

	rtw89_phy_ccx_racing_release(rtwdev);
	para.mntr_time = 1900;
	para.rac_lv = RTW89_RAC_LV_1;
	para.ifs_clm_app = RTW89_IFS_CLM_BACKGROUND;

	if (rtw89_phy_ifs_clm_set(rtwdev, &para) == 0)
		chk_result |= RTW89_PHY_ENV_MON_IFS_CLM;
	if (chk_result)
		rtw89_phy_ccx_trigger(rtwdev);

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "get_result=0x%x, chk_result:0x%x\n",
		    env->ccx_watchdog_result, chk_result);
}
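
/* PHY-status IE bitmaps: one 32-bit enable word per PPDU type
 * ("ie page") selects which status IEs the hardware reports.
 * RTW89_RSVD_9 has no register page, so valid pages above it are
 * shifted down by one before the address calculation.
 */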
static bool rtw89_physts_ie_page_valid(enum rtw89_phy_status_bitmap *ie_page)
{
	if (*ie_page >= RTW89_PHYSTS_BITMAP_NUM ||
	    *ie_page == RTW89_RSVD_9)
		return false;
	else if (*ie_page > RTW89_RSVD_9)
		*ie_page -= 1;

	return true;
}

static u32 rtw89_phy_get_ie_bitmap_addr(enum rtw89_phy_status_bitmap ie_page)
{
	static const u8 ie_page_shift = 2;

	return R_PHY_STS_BITMAP_ADDR_START + (ie_page << ie_page_shift);
}

static u32 rtw89_physts_get_ie_bitmap(struct rtw89_dev *rtwdev,
				      enum rtw89_phy_status_bitmap ie_page)
{
	u32 addr;

	if (!rtw89_physts_ie_page_valid(&ie_page))
		return 0;

	addr = rtw89_phy_get_ie_bitmap_addr(ie_page);

	return rtw89_phy_read32(rtwdev, addr);
}

static void rtw89_physts_set_ie_bitmap(struct rtw89_dev *rtwdev,
				       enum rtw89_phy_status_bitmap ie_page,
				       u32 val)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 addr;

	if (!rtw89_physts_ie_page_valid(&ie_page))
		return;

	if (chip->chip_id == RTL8852A)
		val &= B_PHY_STS_BITMAP_MSK_52A;

	addr = rtw89_phy_get_ie_bitmap_addr(ie_page);
	rtw89_phy_write32(rtwdev, addr, val);
}

static void rtw89_physts_enable_ie_bitmap(struct rtw89_dev *rtwdev,
					  enum rtw89_phy_status_bitmap bitmap,
					  enum rtw89_phy_status_ie_type ie,
					  bool enable)
{
	u32 val = rtw89_physts_get_ie_bitmap(rtwdev, bitmap);

	if (enable)
		val |= BIT(ie);
	else
		val &= ~BIT(ie);

	rtw89_physts_set_ie_bitmap(rtwdev, bitmap, val);
}

static void rtw89_physts_enable_fail_report(struct rtw89_dev *rtwdev,
					    bool enable,
					    enum rtw89_phy_idx phy_idx)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	const struct rtw89_physts_regs *physts = phy->physts;

	if (enable) {
		rtw89_phy_write32_clr(rtwdev, physts->setting_addr,
				      physts->dis_trigger_fail_mask);
		rtw89_phy_write32_clr(rtwdev, physts->setting_addr,
				      physts->dis_trigger_brk_mask);
	} else {
		rtw89_phy_write32_set(rtwdev, physts->setting_addr,
				      physts->dis_trigger_fail_mask);
		rtw89_phy_write32_set(rtwdev, physts->setting_addr,
				      physts->dis_trigger_brk_mask);
	}
}

static void rtw89_physts_parsing_init(struct rtw89_dev *rtwdev)
{
	u8 i;

	rtw89_physts_enable_fail_report(rtwdev, false, RTW89_PHY_0);

	for (i = 0; i < RTW89_PHYSTS_BITMAP_NUM; i++) {
		if (i >= RTW89_CCK_PKT)
			rtw89_physts_enable_ie_bitmap(rtwdev, i,
						      RTW89_PHYSTS_IE09_FTR_0,
						      true);
		if ((i >= RTW89_CCK_BRK && i <= RTW89_VHT_MU) ||
		    (i >= RTW89_RSVD_9 && i <= RTW89_CCK_PKT))
			continue;
		rtw89_physts_enable_ie_bitmap(rtwdev, i,
					      RTW89_PHYSTS_IE24_OFDM_TD_PATH_A,
					      true);
	}
	rtw89_physts_enable_ie_bitmap(rtwdev, RTW89_VHT_PKT,
				      RTW89_PHYSTS_IE13_DL_MU_DEF, true);
	rtw89_physts_enable_ie_bitmap(rtwdev, RTW89_HE_PKT,
				      RTW89_PHYSTS_IE13_DL_MU_DEF, true);

	/* force IE01 for channel index, only channel field is valid */
	rtw89_physts_enable_ie_bitmap(rtwdev, RTW89_CCK_PKT,
				      RTW89_PHYSTS_IE01_CMN_OFDM, true);
}

static void rtw89_phy_dig_read_gain_table(struct rtw89_dev *rtwdev, int type)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_dig_info *dig = &rtwdev->dig;
	const struct rtw89_phy_dig_gain_cfg *cfg;
	const char *msg;
	u8 i;
	s8 gain_base;
	s8 *gain_arr;
	u32 tmp;

	switch (type) {
	case RTW89_DIG_GAIN_LNA_G:
		gain_arr = dig->lna_gain_g;
		gain_base = LNA0_GAIN;
		cfg = chip->dig_table->cfg_lna_g;
		msg = "lna_gain_g";
		break;
	case RTW89_DIG_GAIN_TIA_G:
		gain_arr = dig->tia_gain_g;
		gain_base = TIA0_GAIN_G;
		cfg = chip->dig_table->cfg_tia_g;
		msg = "tia_gain_g";
		break;
	case RTW89_DIG_GAIN_LNA_A:
		gain_arr = dig->lna_gain_a;
		gain_base = LNA0_GAIN;
		cfg = chip->dig_table->cfg_lna_a;
		msg = "lna_gain_a";
		break;
	case RTW89_DIG_GAIN_TIA_A:
		gain_arr = dig->tia_gain_a;
		gain_base = TIA0_GAIN_A;
		cfg = chip->dig_table->cfg_tia_a;
		msg = "tia_gain_a";
		break;
	default:
		return;
	}

	for (i = 0; i < cfg->size; i++) {
		tmp = rtw89_phy_read32_mask(rtwdev, cfg->table[i].addr,
					    cfg->table[i].mask);
		tmp >>= DIG_GAIN_SHIFT;
		gain_arr[i] = sign_extend32(tmp, U4_MAX_BIT) + gain_base;
		gain_base += DIG_GAIN;

		rtw89_debug(rtwdev, RTW89_DBG_DIG, "%s[%d]=%d\n",
			    msg, i, gain_arr[i]);
	}
}

static void rtw89_phy_dig_update_gain_para(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;
	u32 tmp;
	u8 i;

	if (!rtwdev->hal.support_igi)
		return;

	tmp = rtw89_phy_read32_mask(rtwdev, R_PATH0_IB_PKPW,
				    B_PATH0_IB_PKPW_MSK);
	dig->ib_pkpwr = sign_extend32(tmp >> DIG_GAIN_SHIFT, U8_MAX_BIT);
	dig->ib_pbk = rtw89_phy_read32_mask(rtwdev, R_PATH0_IB_PBK,
					    B_PATH0_IB_PBK_MSK);
	rtw89_debug(rtwdev, RTW89_DBG_DIG, "ib_pkpwr=%d, ib_pbk=%d\n",
		    dig->ib_pkpwr, dig->ib_pbk);

	for (i = RTW89_DIG_GAIN_LNA_G; i < RTW89_DIG_GAIN_MAX; i++)
		rtw89_phy_dig_read_gain_table(rtwdev, i);
}
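
/* DIG tuning tables: igi_rssi_th picks LNA/TIA gain steps by RSSI,
 * and the fa_th arrays are false-alarm thresholds (summed CCK + OFDM
 * permil counts) that map onto noisy levels 0..4, with a separate,
 * much higher set used while no STA is associated.
 */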
static const u8 rssi_nolink = 22;
static const u8 igi_rssi_th[IGI_RSSI_TH_NUM] = {68, 84, 90, 98, 104};
static const u16 fa_th_2g[FA_TH_NUM] = {22, 44, 66, 88};
static const u16 fa_th_5g[FA_TH_NUM] = {4, 8, 12, 16};
static const u16 fa_th_nolink[FA_TH_NUM] = {196, 352, 440, 528};

static void rtw89_phy_dig_update_rssi_info(struct rtw89_dev *rtwdev)
{
	struct rtw89_phy_ch_info *ch_info = &rtwdev->ch_info;
	struct rtw89_dig_info *dig = &rtwdev->dig;
	bool is_linked = rtwdev->total_sta_assoc > 0;

	if (is_linked) {
		dig->igi_rssi = ch_info->rssi_min >> 1;
	} else {
		rtw89_debug(rtwdev, RTW89_DBG_DIG, "RSSI update : NO Link\n");
		dig->igi_rssi = rssi_nolink;
	}
}

static void rtw89_phy_dig_update_para(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	bool is_linked = rtwdev->total_sta_assoc > 0;
	const u16 *fa_th_src = NULL;

	switch (chan->band_type) {
	case RTW89_BAND_2G:
		dig->lna_gain = dig->lna_gain_g;
		dig->tia_gain = dig->tia_gain_g;
		fa_th_src = is_linked ? fa_th_2g : fa_th_nolink;
		dig->force_gaincode_idx_en = false;
		dig->dyn_pd_th_en = true;
		break;
	case RTW89_BAND_5G:
	default:
		dig->lna_gain = dig->lna_gain_a;
		dig->tia_gain = dig->tia_gain_a;
		fa_th_src = is_linked ? fa_th_5g : fa_th_nolink;
		dig->force_gaincode_idx_en = true;
		dig->dyn_pd_th_en = true;
		break;
	}
	memcpy(dig->fa_th, fa_th_src, sizeof(dig->fa_th));
	memcpy(dig->igi_rssi_th, igi_rssi_th, sizeof(dig->igi_rssi_th));
}

static const u8 pd_low_th_offset = 20, dynamic_igi_min = 0x20;
static const u8 igi_max_performance_mode = 0x5a;
static const u8 dynamic_pd_threshold_max;

static void rtw89_phy_dig_para_reset(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;

	dig->cur_gaincode.lna_idx = LNA_IDX_MAX;
	dig->cur_gaincode.tia_idx = TIA_IDX_MAX;
	dig->cur_gaincode.rxb_idx = RXB_IDX_MAX;
	dig->force_gaincode.lna_idx = LNA_IDX_MAX;
	dig->force_gaincode.tia_idx = TIA_IDX_MAX;
	dig->force_gaincode.rxb_idx = RXB_IDX_MAX;

	dig->dyn_igi_max = igi_max_performance_mode;
	dig->dyn_igi_min = dynamic_igi_min;
	dig->dyn_pd_th_max = dynamic_pd_threshold_max;
	dig->pd_low_th_ofst = pd_low_th_offset;
	dig->is_linked_pre = false;
}

static void rtw89_phy_dig_init(struct rtw89_dev *rtwdev)
{
	rtw89_phy_dig_update_gain_para(rtwdev);
	rtw89_phy_dig_reset(rtwdev);
}

static u8 rtw89_phy_dig_lna_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;
	u8 lna_idx;

	if (rssi < dig->igi_rssi_th[0])
		lna_idx = RTW89_DIG_GAIN_LNA_IDX6;
	else if (rssi < dig->igi_rssi_th[1])
		lna_idx = RTW89_DIG_GAIN_LNA_IDX5;
	else if (rssi < dig->igi_rssi_th[2])
		lna_idx = RTW89_DIG_GAIN_LNA_IDX4;
	else if (rssi < dig->igi_rssi_th[3])
		lna_idx = RTW89_DIG_GAIN_LNA_IDX3;
	else if (rssi < dig->igi_rssi_th[4])
		lna_idx = RTW89_DIG_GAIN_LNA_IDX2;
	else
		lna_idx = RTW89_DIG_GAIN_LNA_IDX1;

	return lna_idx;
}

static u8 rtw89_phy_dig_tia_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;
	u8 tia_idx;

	if (rssi < dig->igi_rssi_th[0])
		tia_idx = RTW89_DIG_GAIN_TIA_IDX1;
	else
		tia_idx = RTW89_DIG_GAIN_TIA_IDX0;

	return tia_idx;
}
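
/* RXB index selection: aim for a constant in-band peak power by
 * backing off the gain already provided by the chosen LNA/TIA steps,
 *   rxb = IB_PBK_BASE + WB_RSSI_BASE + ib_pkpwr - ib_pbk
 *         - (rssi + lna_gain + tia_gain),
 * clamped to [RXB_IDX_MIN, RXB_IDX_MAX].
 */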
#define IB_PBK_BASE 110
#define WB_RSSI_BASE 10
static u8 rtw89_phy_dig_rxb_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi,
					struct rtw89_agc_gaincode_set *set)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;
	s8 lna_gain = dig->lna_gain[set->lna_idx];
	s8 tia_gain = dig->tia_gain[set->tia_idx];
	s32 wb_rssi = rssi + lna_gain + tia_gain;
	s32 rxb_idx_tmp = IB_PBK_BASE + WB_RSSI_BASE;
	u8 rxb_idx;

	rxb_idx_tmp += dig->ib_pkpwr - dig->ib_pbk - wb_rssi;
	rxb_idx = clamp_t(s32, rxb_idx_tmp, RXB_IDX_MIN, RXB_IDX_MAX);

	rtw89_debug(rtwdev, RTW89_DBG_DIG, "wb_rssi=%03d, rxb_idx_tmp=%03d\n",
		    wb_rssi, rxb_idx_tmp);

	return rxb_idx;
}

static void rtw89_phy_dig_gaincode_by_rssi(struct rtw89_dev *rtwdev, u8 rssi,
					   struct rtw89_agc_gaincode_set *set)
{
	set->lna_idx = rtw89_phy_dig_lna_idx_by_rssi(rtwdev, rssi);
	set->tia_idx = rtw89_phy_dig_tia_idx_by_rssi(rtwdev, rssi);
	set->rxb_idx = rtw89_phy_dig_rxb_idx_by_rssi(rtwdev, rssi, set);

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "final_rssi=%03d, (lna,tia,rxb)=(%d,%d,%02d)\n",
		    rssi, set->lna_idx, set->tia_idx, set->rxb_idx);
}

#define IGI_OFFSET_MAX 25
#define IGI_OFFSET_MUL 2
static void rtw89_phy_dig_igi_offset_by_env(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	enum rtw89_dig_noisy_level noisy_lv;
	u8 igi_offset = dig->fa_rssi_ofst;
	u16 fa_ratio = 0;

	fa_ratio = env->ifs_clm_cck_fa_permil + env->ifs_clm_ofdm_fa_permil;

	if (fa_ratio < dig->fa_th[0])
		noisy_lv = RTW89_DIG_NOISY_LEVEL0;
	else if (fa_ratio < dig->fa_th[1])
		noisy_lv = RTW89_DIG_NOISY_LEVEL1;
	else if (fa_ratio < dig->fa_th[2])
		noisy_lv = RTW89_DIG_NOISY_LEVEL2;
	else if (fa_ratio < dig->fa_th[3])
		noisy_lv = RTW89_DIG_NOISY_LEVEL3;
	else
		noisy_lv = RTW89_DIG_NOISY_LEVEL_MAX;

	if (noisy_lv == RTW89_DIG_NOISY_LEVEL0 && igi_offset < 2)
		igi_offset = 0;
	else
		igi_offset += noisy_lv * IGI_OFFSET_MUL;

	igi_offset = min_t(u8, igi_offset, IGI_OFFSET_MAX);
	dig->fa_rssi_ofst = igi_offset;

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "fa_th: [+6 (%d) +4 (%d) +2 (%d) 0 (%d) -2 ]\n",
		    dig->fa_th[3], dig->fa_th[2], dig->fa_th[1], dig->fa_th[0]);
	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "fa(CCK,OFDM,ALL)=(%d,%d,%d)%%, noisy_lv=%d, ofst=%d\n",
		    env->ifs_clm_cck_fa_permil, env->ifs_clm_ofdm_fa_permil,
		    env->ifs_clm_cck_fa_permil + env->ifs_clm_ofdm_fa_permil,
		    noisy_lv, igi_offset);
}

static void rtw89_phy_dig_set_lna_idx(struct rtw89_dev *rtwdev, u8 lna_idx)
{
	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;

	rtw89_phy_write32_mask(rtwdev, dig_regs->p0_lna_init.addr,
			       dig_regs->p0_lna_init.mask, lna_idx);
	rtw89_phy_write32_mask(rtwdev, dig_regs->p1_lna_init.addr,
			       dig_regs->p1_lna_init.mask, lna_idx);
}

static void rtw89_phy_dig_set_tia_idx(struct rtw89_dev *rtwdev, u8 tia_idx)
{
	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;

	rtw89_phy_write32_mask(rtwdev, dig_regs->p0_tia_init.addr,
			       dig_regs->p0_tia_init.mask, tia_idx);
	rtw89_phy_write32_mask(rtwdev, dig_regs->p1_tia_init.addr,
			       dig_regs->p1_tia_init.mask, tia_idx);
}

static void rtw89_phy_dig_set_rxb_idx(struct rtw89_dev *rtwdev, u8 rxb_idx)
{
	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;

	rtw89_phy_write32_mask(rtwdev, dig_regs->p0_rxb_init.addr,
			       dig_regs->p0_rxb_init.mask, rxb_idx);
	rtw89_phy_write32_mask(rtwdev, dig_regs->p1_rxb_init.addr,
			       dig_regs->p1_rxb_init.mask, rxb_idx);
}

static void rtw89_phy_dig_set_igi_cr(struct rtw89_dev *rtwdev,
				     const struct rtw89_agc_gaincode_set set)
{
	rtw89_phy_dig_set_lna_idx(rtwdev, set.lna_idx);
	rtw89_phy_dig_set_tia_idx(rtwdev, set.tia_idx);
	rtw89_phy_dig_set_rxb_idx(rtwdev, set.rxb_idx);

	rtw89_debug(rtwdev, RTW89_DBG_DIG, "Set (lna,tia,rxb)=((%d,%d,%02d))\n",
		    set.lna_idx, set.tia_idx, set.rxb_idx);
}

static void rtw89_phy_dig_sdagc_follow_pagc_config(struct rtw89_dev *rtwdev,
						   bool enable)
{
	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;

	rtw89_phy_write32_mask(rtwdev, dig_regs->p0_p20_pagcugc_en.addr,
			       dig_regs->p0_p20_pagcugc_en.mask, enable);
	rtw89_phy_write32_mask(rtwdev, dig_regs->p0_s20_pagcugc_en.addr,
			       dig_regs->p0_s20_pagcugc_en.mask, enable);
	rtw89_phy_write32_mask(rtwdev, dig_regs->p1_p20_pagcugc_en.addr,
			       dig_regs->p1_p20_pagcugc_en.mask, enable);
	rtw89_phy_write32_mask(rtwdev, dig_regs->p1_s20_pagcugc_en.addr,
			       dig_regs->p1_s20_pagcugc_en.mask, enable);

	rtw89_debug(rtwdev, RTW89_DBG_DIG, "sdagc_follow_pagc=%d\n", enable);
}

static void rtw89_phy_dig_config_igi(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;

	if (!rtwdev->hal.support_igi)
		return;

	if (dig->force_gaincode_idx_en) {
		rtw89_phy_dig_set_igi_cr(rtwdev, dig->force_gaincode);
		rtw89_debug(rtwdev, RTW89_DBG_DIG,
			    "Force gaincode index enabled.\n");
	} else {
		rtw89_phy_dig_gaincode_by_rssi(rtwdev, dig->igi_fa_rssi,
					       &dig->cur_gaincode);
		rtw89_phy_dig_set_igi_cr(rtwdev, dig->cur_gaincode);
	}
}
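
/* Dynamic packet-detection threshold: when enabled, raise the OFDM PD
 * lower bound toward the current RSSI minus a bandwidth-dependent
 * margin ("under_region") so marginal energy stops triggering CCA;
 * a CCK counterpart programs an absolute RSSI limit on chips with
 * CCK PD control.
 */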
static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev, u8 rssi,
				    bool enable)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
	enum rtw89_bandwidth cbw = chan->band_width;
	struct rtw89_dig_info *dig = &rtwdev->dig;
	u8 final_rssi = 0, under_region = dig->pd_low_th_ofst;
	u8 ofdm_cca_th;
	s8 cck_cca_th;
	u32 pd_val = 0;

	under_region += PD_TH_SB_FLTR_CMP_VAL;

	switch (cbw) {
	case RTW89_CHANNEL_WIDTH_40:
		under_region += PD_TH_BW40_CMP_VAL;
		break;
	case RTW89_CHANNEL_WIDTH_80:
		under_region += PD_TH_BW80_CMP_VAL;
		break;
	case RTW89_CHANNEL_WIDTH_160:
		under_region += PD_TH_BW160_CMP_VAL;
		break;
	case RTW89_CHANNEL_WIDTH_20:
		fallthrough;
	default:
		under_region += PD_TH_BW20_CMP_VAL;
		break;
	}

	dig->dyn_pd_th_max = dig->igi_rssi;

	final_rssi = min_t(u8, rssi, dig->igi_rssi);
	ofdm_cca_th = clamp_t(u8, final_rssi, PD_TH_MIN_RSSI + under_region,
			      PD_TH_MAX_RSSI + under_region);

	if (enable) {
		pd_val = (ofdm_cca_th - under_region - PD_TH_MIN_RSSI) >> 1;
		rtw89_debug(rtwdev, RTW89_DBG_DIG,
			    "igi=%d, ofdm_ccaTH=%d, backoff=%d, PD_low=%d\n",
			    final_rssi, ofdm_cca_th, under_region, pd_val);
	} else {
		rtw89_debug(rtwdev, RTW89_DBG_DIG,
			    "Dynamic PD th disabled, Set PD_low_bd=0\n");
	}

	rtw89_phy_write32_mask(rtwdev, dig_regs->seg0_pd_reg,
			       dig_regs->pd_lower_bound_mask, pd_val);
	rtw89_phy_write32_mask(rtwdev, dig_regs->seg0_pd_reg,
			       dig_regs->pd_spatial_reuse_en, enable);

	if (!rtwdev->hal.support_cckpd)
		return;

	cck_cca_th = max_t(s8, final_rssi - under_region, CCKPD_TH_MIN_RSSI);
	pd_val = (u32)(cck_cca_th - IGI_RSSI_MAX);

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "igi=%d, cck_ccaTH=%d, backoff=%d, cck_PD_low=((%d))dB\n",
		    final_rssi, cck_cca_th, under_region, pd_val);

	rtw89_phy_write32_mask(rtwdev, dig_regs->bmode_pd_reg,
			       dig_regs->bmode_cca_rssi_limit_en, enable);
	rtw89_phy_write32_mask(rtwdev, dig_regs->bmode_pd_lower_bound_reg,
			       dig_regs->bmode_rssi_nocca_low_th_mask, pd_val);
}

void rtw89_phy_dig_reset(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;

	dig->bypass_dig = false;
	rtw89_phy_dig_para_reset(rtwdev);
	rtw89_phy_dig_set_igi_cr(rtwdev, dig->force_gaincode);
	rtw89_phy_dig_dyn_pd_th(rtwdev, rssi_nolink, false);
	rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, false);
	rtw89_phy_dig_update_para(rtwdev);
}
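
/* Main DIG pass: the IGI floor follows the weakest associated STA
 * (igi_rssi - IGI_RSSI_MIN) and the environment monitor's false-alarm
 * offset lifts the effective level within
 * [dyn_igi_min, dyn_igi_min + IGI_OFFSET_MAX] before gain codes and
 * PD thresholds are written out.
 */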
#define IGI_RSSI_MIN 10
void rtw89_phy_dig(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;
	bool is_linked = rtwdev->total_sta_assoc > 0;

	if (unlikely(dig->bypass_dig)) {
		dig->bypass_dig = false;
		return;
	}

	if (!dig->is_linked_pre && is_linked) {
		rtw89_debug(rtwdev, RTW89_DBG_DIG, "First connected\n");
		rtw89_phy_dig_update_para(rtwdev);
	} else if (dig->is_linked_pre && !is_linked) {
		rtw89_debug(rtwdev, RTW89_DBG_DIG, "First disconnected\n");
		rtw89_phy_dig_update_para(rtwdev);
	}

	dig->is_linked_pre = is_linked;

	rtw89_phy_dig_igi_offset_by_env(rtwdev);
	rtw89_phy_dig_update_rssi_info(rtwdev);

	dig->dyn_igi_min = (dig->igi_rssi > IGI_RSSI_MIN) ?
			   dig->igi_rssi - IGI_RSSI_MIN : 0;
	dig->dyn_igi_max = dig->dyn_igi_min + IGI_OFFSET_MAX;
	dig->igi_fa_rssi = dig->dyn_igi_min + dig->fa_rssi_ofst;

	dig->igi_fa_rssi = clamp(dig->igi_fa_rssi, dig->dyn_igi_min,
				 dig->dyn_igi_max);

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "rssi=%03d, dyn(max,min)=(%d,%d), final_rssi=%d\n",
		    dig->igi_rssi, dig->dyn_igi_max, dig->dyn_igi_min,
		    dig->igi_fa_rssi);

	rtw89_phy_dig_config_igi(rtwdev);

	rtw89_phy_dig_dyn_pd_th(rtwdev, dig->igi_fa_rssi, dig->dyn_pd_th_en);

	if (dig->dyn_pd_th_en && dig->igi_fa_rssi > dig->dyn_pd_th_max)
		rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, true);
	else
		rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, false);
}
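
/* Station iterator for TX path diversity: compare the per-path EWMA
 * RSSI and, when one path leads by more than RTW89_TX_DIV_RSSI_RAW_TH,
 * switch the TX path and update the CMAC table plus RF-mode muxes.
 * Only the first non-TDLS station-role interface is considered per
 * iteration.
 */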
static void rtw89_phy_tx_path_div_sta_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
	struct rtw89_dev *rtwdev = rtwsta->rtwdev;
	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
	struct rtw89_hal *hal = &rtwdev->hal;
	bool *done = data;
	u8 rssi_a, rssi_b;
	u32 candidate;

	if (rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION || sta->tdls)
		return;

	if (*done)
		return;

	*done = true;

	rssi_a = ewma_rssi_read(&rtwsta->rssi[RF_PATH_A]);
	rssi_b = ewma_rssi_read(&rtwsta->rssi[RF_PATH_B]);

	if (rssi_a > rssi_b + RTW89_TX_DIV_RSSI_RAW_TH)
		candidate = RF_A;
	else if (rssi_b > rssi_a + RTW89_TX_DIV_RSSI_RAW_TH)
		candidate = RF_B;
	else
		return;

	if (hal->antenna_tx == candidate)
		return;

	hal->antenna_tx = candidate;
	rtw89_fw_h2c_txpath_cmac_tbl(rtwdev, rtwsta);

	if (hal->antenna_tx == RF_A) {
		rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE, B_P0_RFMODE_MUX, 0x12);
		rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE, B_P1_RFMODE_MUX, 0x11);
	} else if (hal->antenna_tx == RF_B) {
		rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE, B_P0_RFMODE_MUX, 0x11);
		rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE, B_P1_RFMODE_MUX, 0x12);
	}
}

void rtw89_phy_tx_path_div_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_hal *hal = &rtwdev->hal;
	bool done = false;

	if (!hal->tx_path_diversity)
		return;

	ieee80211_iterate_stations_atomic(rtwdev->hw,
					  rtw89_phy_tx_path_div_sta_iter,
					  &done);
}
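
/* Antenna diversity drives two physical antennas behind one RF path:
 * ANTDIV_MAIN/ANTDIV_AUX select which one the PHY treats as the default
 * vs. the alternate for both RX and TX.
 */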
#define ANTDIV_MAIN 0
#define ANTDIV_AUX 1
static void rtw89_phy_antdiv_set_ant(struct rtw89_dev *rtwdev)
{
	struct rtw89_hal *hal = &rtwdev->hal;
	u8 default_ant, optional_ant;

	if (!hal->ant_diversity || hal->antenna_tx == 0)
		return;

	if (hal->antenna_tx == RF_B) {
		default_ant = ANTDIV_AUX;
		optional_ant = ANTDIV_MAIN;
	} else {
		default_ant = ANTDIV_MAIN;
		optional_ant = ANTDIV_AUX;
	}

	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_CGCS_CTRL,
			      default_ant, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_RX_ORI,
			      default_ant, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_RX_ALT,
			      optional_ant, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_TX_ORI,
			      default_ant, RTW89_PHY_0);
}
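
/* Swap the HAL's notion of the active path; used to sample the other
 * antenna during training and to swap back when no change is decided.
 */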
static void rtw89_phy_swap_hal_antenna(struct rtw89_dev *rtwdev)
{
	struct rtw89_hal *hal = &rtwdev->hal;

	hal->antenna_rx = hal->antenna_rx == RF_A ? RF_B : RF_A;
	hal->antenna_tx = hal->antenna_rx;
}

static void rtw89_phy_antdiv_decision_state(struct rtw89_dev *rtwdev)
{
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
	struct rtw89_hal *hal = &rtwdev->hal;
	bool no_change = false;
	u8 main_rssi, aux_rssi;
	u8 main_evm, aux_evm;
	u32 candidate;

	antdiv->get_stats = false;
	antdiv->training_count = 0;

	main_rssi = rtw89_phy_antdiv_sts_instance_get_rssi(&antdiv->main_stats);
	main_evm = rtw89_phy_antdiv_sts_instance_get_evm(&antdiv->main_stats);
	aux_rssi = rtw89_phy_antdiv_sts_instance_get_rssi(&antdiv->aux_stats);
	aux_evm = rtw89_phy_antdiv_sts_instance_get_evm(&antdiv->aux_stats);

	if (main_evm > aux_evm + ANTDIV_EVM_DIFF_TH)
		candidate = RF_A;
	else if (aux_evm > main_evm + ANTDIV_EVM_DIFF_TH)
		candidate = RF_B;
	else if (main_rssi > aux_rssi + RTW89_TX_DIV_RSSI_RAW_TH)
		candidate = RF_A;
	else if (aux_rssi > main_rssi + RTW89_TX_DIV_RSSI_RAW_TH)
		candidate = RF_B;
	else
		no_change = true;

	if (no_change) {
		/* swap back from training antenna to original */
		rtw89_phy_swap_hal_antenna(rtwdev);
		return;
	}

	hal->antenna_tx = candidate;
	hal->antenna_rx = candidate;
}
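
/* Training alternates between collecting statistics (even counts) and
 * swapping to the other antenna (odd counts), re-arming the delayed
 * work with the matching interval at each step.
 */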
static void rtw89_phy_antdiv_training_state(struct rtw89_dev *rtwdev)
{
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
	u64 state_period;

	if (antdiv->training_count % 2 == 0) {
		if (antdiv->training_count == 0)
			rtw89_phy_antdiv_sts_reset(rtwdev);

		antdiv->get_stats = true;
		state_period = msecs_to_jiffies(ANTDIV_TRAINNING_INTVL);
	} else {
		antdiv->get_stats = false;
		state_period = msecs_to_jiffies(ANTDIV_DELAY);

		rtw89_phy_swap_hal_antenna(rtwdev);
		rtw89_phy_antdiv_set_ant(rtwdev);
	}

	antdiv->training_count++;
	ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->antdiv_work,
				     state_period);
}
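
/* Delayed-work handler: keep training until ANTDIV_TRAINNING_CNT
 * iterations have run, then decide and program the winning antenna.
 */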
void rtw89_phy_antdiv_work(struct work_struct *work)
{
	struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
						antdiv_work.work);
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;

	mutex_lock(&rtwdev->mutex);

	if (antdiv->training_count <= ANTDIV_TRAINNING_CNT) {
		rtw89_phy_antdiv_training_state(rtwdev);
	} else {
		rtw89_phy_antdiv_decision_state(rtwdev);
		rtw89_phy_antdiv_set_ant(rtwdev);
	}

	mutex_unlock(&rtwdev->mutex);
}

void rtw89_phy_antdiv_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
	struct rtw89_hal *hal = &rtwdev->hal;
	u8 rssi, rssi_pre;

	if (!hal->ant_diversity || hal->ant_diversity_fixed)
		return;

	rssi = rtw89_phy_antdiv_sts_instance_get_rssi(&antdiv->target_stats);
	rssi_pre = antdiv->rssi_pre;
	antdiv->rssi_pre = rssi;
	rtw89_phy_antdiv_sts_instance_reset(&antdiv->target_stats);

	if (abs((int)rssi - (int)rssi_pre) < ANTDIV_RSSI_DIFF_TH)
		return;

	antdiv->training_count = 0;
	ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->antdiv_work, 0);
}

static void rtw89_phy_env_monitor_init(struct rtw89_dev *rtwdev)
{
	rtw89_phy_ccx_top_setting_init(rtwdev);
	rtw89_phy_ifs_clm_setting_init(rtwdev);
}
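
/* One-time dynamic-mechanism init. The ordering appears deliberate:
 * antenna-diversity state is set up before the antenna is first
 * programmed, and RF calibration runs after the RF NCTL init.
 */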
void rtw89_phy_dm_init(struct rtw89_dev *rtwdev)
{
	rtw89_phy_stat_init(rtwdev);

	rtw89_chip_bb_sethw(rtwdev);

	rtw89_phy_env_monitor_init(rtwdev);
	rtw89_physts_parsing_init(rtwdev);
	rtw89_phy_dig_init(rtwdev);
	rtw89_phy_cfo_init(rtwdev);
	rtw89_phy_ul_tb_info_init(rtwdev);
	rtw89_phy_antdiv_init(rtwdev);
	rtw89_chip_rfe_gpio(rtwdev);
	rtw89_phy_antdiv_set_ant(rtwdev);

	rtw89_phy_init_rf_nctl(rtwdev);
	rtw89_chip_rfk_init(rtwdev);
	rtw89_chip_set_txpwr_ctrl(rtwdev);
	rtw89_chip_power_trim(rtwdev);
	rtw89_chip_cfg_txrx_path(rtwdev);
}
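
/* The LINUX_VERSION_CODE conditionals below are out-of-tree backport
 * glue: mac80211 moved assoc/aid into vif->cfg and bss_color into
 * bss_conf.he_bss_color across kernel releases, so which field is read
 * depends on the kernel being built against.
 */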
void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	enum rtw89_phy_idx phy_idx = RTW89_PHY_0;
	u8 bss_color;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 0, 0))
	if (!vif->bss_conf.he_support || !vif->cfg.assoc)
#else
	if (!vif->bss_conf.he_support || !vif->bss_conf.assoc)
#endif
		return;

#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 7, 0) && SUSE == 0
	bss_color = vif->bss_conf.bss_color;
#else
	bss_color = vif->bss_conf.he_bss_color.color;
#endif

	rtw89_phy_write32_idx(rtwdev, chip->bss_clr_map_reg, B_BSS_CLR_MAP_VLD0, 0x1,
			      phy_idx);
	rtw89_phy_write32_idx(rtwdev, chip->bss_clr_map_reg, B_BSS_CLR_MAP_TGT,
			      bss_color, phy_idx);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 0, 0))
	rtw89_phy_write32_idx(rtwdev, chip->bss_clr_map_reg, B_BSS_CLR_MAP_STAID,
			      vif->cfg.aid, phy_idx);
#else
	rtw89_phy_write32_idx(rtwdev, R_BSS_CLR_MAP, B_BSS_CLR_MAP_STAID,
			      vif->bss_conf.aid, phy_idx);
#endif
}
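
/* Table-driven RF-calibration writes: each rtw89_reg5_def carries a
 * flag selecting one of the handlers below, so a calibration table is
 * replayed as a flat sequence of RF/BB writes, sets, clears and delays.
 */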
static void
_rfk_write_rf(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	rtw89_write_rf(rtwdev, def->path, def->addr, def->mask, def->data);
}

static void
_rfk_write32_mask(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	rtw89_phy_write32_mask(rtwdev, def->addr, def->mask, def->data);
}

static void
_rfk_write32_set(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	rtw89_phy_write32_set(rtwdev, def->addr, def->mask);
}

static void
_rfk_write32_clr(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	rtw89_phy_write32_clr(rtwdev, def->addr, def->mask);
}

static void
_rfk_delay(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	udelay(def->data);
}

static void
(*_rfk_handler[])(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def) = {
	[RTW89_RFK_F_WRF] = _rfk_write_rf,
	[RTW89_RFK_F_WM] = _rfk_write32_mask,
	[RTW89_RFK_F_WS] = _rfk_write32_set,
	[RTW89_RFK_F_WC] = _rfk_write32_clr,
	[RTW89_RFK_F_DELAY] = _rfk_delay,
};

static_assert(ARRAY_SIZE(_rfk_handler) == RTW89_RFK_F_NUM);

void
rtw89_rfk_parser(struct rtw89_dev *rtwdev, const struct rtw89_rfk_tbl *tbl)
{
	const struct rtw89_reg5_def *p = tbl->defs;
	const struct rtw89_reg5_def *end = tbl->defs + tbl->size;

	for (; p < end; p++)
		_rfk_handler[p->flag](rtwdev, p);
}
EXPORT_SYMBOL(rtw89_rfk_parser);
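
/* TSSI fast-mode and band-edge tables: each entry names a one-byte
 * field inside the 0xD9xx MAC register block; the flat vs. level
 * variants presumably correspond to the two band-edge shaping
 * configurations selected by rtw89_tssi_bandedge_cfg.
 */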
#define RTW89_TSSI_FAST_MODE_NUM 4

static const struct rtw89_reg_def rtw89_tssi_fastmode_regs_flat[RTW89_TSSI_FAST_MODE_NUM] = {
	{0xD934, 0xff0000},
	{0xD934, 0xff000000},
	{0xD938, 0xff},
	{0xD934, 0xff00},
};

static const struct rtw89_reg_def rtw89_tssi_fastmode_regs_level[RTW89_TSSI_FAST_MODE_NUM] = {
	{0xD930, 0xff0000},
	{0xD930, 0xff000000},
	{0xD934, 0xff},
	{0xD930, 0xff00},
};

static
void rtw89_phy_tssi_ctrl_set_fast_mode_cfg(struct rtw89_dev *rtwdev,
					   enum rtw89_mac_idx mac_idx,
					   enum rtw89_tssi_bandedge_cfg bandedge_cfg,
					   u32 val)
{
	const struct rtw89_reg_def *regs;
	u32 reg;
	int i;

	if (bandedge_cfg == RTW89_TSSI_BANDEDGE_FLAT)
		regs = rtw89_tssi_fastmode_regs_flat;
	else
		regs = rtw89_tssi_fastmode_regs_level;

	for (i = 0; i < RTW89_TSSI_FAST_MODE_NUM; i++) {
		reg = rtw89_mac_reg_by_idx(rtwdev, regs[i].addr, mac_idx);
		rtw89_write32_mask(rtwdev, reg, regs[i].mask, val);
	}
}

static const struct rtw89_reg_def rtw89_tssi_bandedge_regs_flat[RTW89_TSSI_SBW_NUM] = {
	{0xD91C, 0xff000000},
	{0xD920, 0xff},
	{0xD920, 0xff00},
	{0xD920, 0xff0000},
	{0xD920, 0xff000000},
	{0xD924, 0xff},
	{0xD924, 0xff00},
	{0xD914, 0xff000000},
	{0xD918, 0xff},
	{0xD918, 0xff00},
	{0xD918, 0xff0000},
	{0xD918, 0xff000000},
	{0xD91C, 0xff},
	{0xD91C, 0xff00},
	{0xD91C, 0xff0000},
};

static const struct rtw89_reg_def rtw89_tssi_bandedge_regs_level[RTW89_TSSI_SBW_NUM] = {
	{0xD910, 0xff},
	{0xD910, 0xff00},
	{0xD910, 0xff0000},
	{0xD910, 0xff000000},
	{0xD914, 0xff},
	{0xD914, 0xff00},
	{0xD914, 0xff0000},
	{0xD908, 0xff},
	{0xD908, 0xff00},
	{0xD908, 0xff0000},
	{0xD908, 0xff000000},
	{0xD90C, 0xff},
	{0xD90C, 0xff00},
	{0xD90C, 0xff0000},
	{0xD90C, 0xff000000},
};

void rtw89_phy_tssi_ctrl_set_bandedge_cfg(struct rtw89_dev *rtwdev,
					  enum rtw89_mac_idx mac_idx,
					  enum rtw89_tssi_bandedge_cfg bandedge_cfg)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_reg_def *regs;
	const u32 *data;
	u32 reg;
	int i;

	if (bandedge_cfg >= RTW89_TSSI_CFG_NUM)
		return;

	if (bandedge_cfg == RTW89_TSSI_BANDEDGE_FLAT)
		regs = rtw89_tssi_bandedge_regs_flat;
	else
		regs = rtw89_tssi_bandedge_regs_level;

	data = chip->tssi_dbw_table->data[bandedge_cfg];

	for (i = 0; i < RTW89_TSSI_SBW_NUM; i++) {
		reg = rtw89_mac_reg_by_idx(rtwdev, regs[i].addr, mac_idx);
		rtw89_write32_mask(rtwdev, reg, regs[i].mask, data[i]);
	}

	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_BANDEDGE_CFG, mac_idx);
	rtw89_write32_mask(rtwdev, reg, B_AX_BANDEDGE_CFG_IDX_MASK, bandedge_cfg);

	rtw89_phy_tssi_ctrl_set_fast_mode_cfg(rtwdev, mac_idx, bandedge_cfg,
					      data[RTW89_TSSI_SBW20]);
}
EXPORT_SYMBOL(rtw89_phy_tssi_ctrl_set_bandedge_cfg);
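
/* Channel index codec: a channel is packed into one byte as a 4-bit
 * base index into rtw89_ch_base_table plus a 4-bit offset counted in
 * 2-channel steps. Example: 5 GHz channel 157 matches base 149
 * (index 5), offset (157 - 149) >> 1 = 4, giving chan_idx 0x54.
 */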
static
const u8 rtw89_ch_base_table[16] = {1, 0xff,
				    36, 100, 132, 149, 0xff,
				    1, 33, 65, 97, 129, 161, 193, 225, 0xff};
#define RTW89_CH_BASE_IDX_2G 0
#define RTW89_CH_BASE_IDX_5G_FIRST 2
#define RTW89_CH_BASE_IDX_5G_LAST 5
#define RTW89_CH_BASE_IDX_6G_FIRST 7
#define RTW89_CH_BASE_IDX_6G_LAST 14

#define RTW89_CH_BASE_IDX_MASK GENMASK(7, 4)
#define RTW89_CH_OFFSET_MASK GENMASK(3, 0)

u8 rtw89_encode_chan_idx(struct rtw89_dev *rtwdev, u8 central_ch, u8 band)
{
	u8 chan_idx;
	u8 last, first;
	u8 idx;

	switch (band) {
	case RTW89_BAND_2G:
		chan_idx = FIELD_PREP(RTW89_CH_BASE_IDX_MASK, RTW89_CH_BASE_IDX_2G) |
			   FIELD_PREP(RTW89_CH_OFFSET_MASK, central_ch);
		return chan_idx;
	case RTW89_BAND_5G:
		first = RTW89_CH_BASE_IDX_5G_FIRST;
		last = RTW89_CH_BASE_IDX_5G_LAST;
		break;
	case RTW89_BAND_6G:
		first = RTW89_CH_BASE_IDX_6G_FIRST;
		last = RTW89_CH_BASE_IDX_6G_LAST;
		break;
	default:
		rtw89_warn(rtwdev, "Unsupported band %d\n", band);
		return 0;
	}

	for (idx = last; idx >= first; idx--)
		if (central_ch >= rtw89_ch_base_table[idx])
			break;

	if (idx < first) {
		rtw89_warn(rtwdev, "Unknown band %d channel %d\n", band, central_ch);
		return 0;
	}

	chan_idx = FIELD_PREP(RTW89_CH_BASE_IDX_MASK, idx) |
		   FIELD_PREP(RTW89_CH_OFFSET_MASK,
			      (central_ch - rtw89_ch_base_table[idx]) >> 1);
	return chan_idx;
}
EXPORT_SYMBOL(rtw89_encode_chan_idx);
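
/* Inverse of rtw89_encode_chan_idx: e.g. 0x54 decodes back to base 149
 * + (4 << 1) = channel 157 on the 5 GHz band.
 */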
void rtw89_decode_chan_idx(struct rtw89_dev *rtwdev, u8 chan_idx,
			   u8 *ch, enum nl80211_band *band)
{
	u8 idx, offset;

	idx = FIELD_GET(RTW89_CH_BASE_IDX_MASK, chan_idx);
	offset = FIELD_GET(RTW89_CH_OFFSET_MASK, chan_idx);

	if (idx == RTW89_CH_BASE_IDX_2G) {
		*band = NL80211_BAND_2GHZ;
		*ch = offset;
		return;
	}

	*band = idx <= RTW89_CH_BASE_IDX_5G_LAST ? NL80211_BAND_5GHZ : NL80211_BAND_6GHZ;
	*ch = rtw89_ch_base_table[idx] + (offset << 1);
}
EXPORT_SYMBOL(rtw89_decode_chan_idx);
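
/* During scan, back up the EDCCA level register and raise all three
 * thresholds to EDCCA_DEFAULT (249), which appears to keep energy
 * detection from blocking off-channel activity; the backup is restored
 * once scanning ends.
 */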
#define EDCCA_DEFAULT 249
void rtw89_phy_config_edcca(struct rtw89_dev *rtwdev, bool scan)
{
	u32 reg = rtwdev->chip->edcca_lvl_reg;
	struct rtw89_hal *hal = &rtwdev->hal;
	u32 val;

	if (scan) {
		hal->edcca_bak = rtw89_phy_read32(rtwdev, reg);
		val = hal->edcca_bak;
		u32p_replace_bits(&val, EDCCA_DEFAULT, B_SEG0R_EDCCA_LVL_A_MSK);
		u32p_replace_bits(&val, EDCCA_DEFAULT, B_SEG0R_EDCCA_LVL_P_MSK);
		u32p_replace_bits(&val, EDCCA_DEFAULT, B_SEG0R_PPDU_LVL_MSK);
		rtw89_phy_write32(rtwdev, reg, val);
	} else {
		rtw89_phy_write32(rtwdev, reg, hal->edcca_bak);
	}
}
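
/* Register/field maps consumed by the generic CCX environment monitor
 * and PHY-status parser; this AX-generation layout is exported through
 * rtw89_phy_gen_ax below.
 */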
static const struct rtw89_ccx_regs rtw89_ccx_regs_ax = {
	.setting_addr = R_CCX,
	.edcca_opt_mask = B_CCX_EDCCA_OPT_MSK,
	.measurement_trig_mask = B_MEASUREMENT_TRIG_MSK,
	.trig_opt_mask = B_CCX_TRIG_OPT_MSK,
	.en_mask = B_CCX_EN_MSK,
	.ifs_cnt_addr = R_IFS_COUNTER,
	.ifs_clm_period_mask = B_IFS_CLM_PERIOD_MSK,
	.ifs_clm_cnt_unit_mask = B_IFS_CLM_COUNTER_UNIT_MSK,
	.ifs_clm_cnt_clear_mask = B_IFS_COUNTER_CLR_MSK,
	.ifs_collect_en_mask = B_IFS_COLLECT_EN,
	.ifs_t1_addr = R_IFS_T1,
	.ifs_t1_th_h_mask = B_IFS_T1_TH_HIGH_MSK,
	.ifs_t1_en_mask = B_IFS_T1_EN_MSK,
	.ifs_t1_th_l_mask = B_IFS_T1_TH_LOW_MSK,
	.ifs_t2_addr = R_IFS_T2,
	.ifs_t2_th_h_mask = B_IFS_T2_TH_HIGH_MSK,
	.ifs_t2_en_mask = B_IFS_T2_EN_MSK,
	.ifs_t2_th_l_mask = B_IFS_T2_TH_LOW_MSK,
	.ifs_t3_addr = R_IFS_T3,
	.ifs_t3_th_h_mask = B_IFS_T3_TH_HIGH_MSK,
	.ifs_t3_en_mask = B_IFS_T3_EN_MSK,
	.ifs_t3_th_l_mask = B_IFS_T3_TH_LOW_MSK,
	.ifs_t4_addr = R_IFS_T4,
	.ifs_t4_th_h_mask = B_IFS_T4_TH_HIGH_MSK,
	.ifs_t4_en_mask = B_IFS_T4_EN_MSK,
	.ifs_t4_th_l_mask = B_IFS_T4_TH_LOW_MSK,
	.ifs_clm_tx_cnt_addr = R_IFS_CLM_TX_CNT,
	.ifs_clm_edcca_excl_cca_fa_mask = B_IFS_CLM_EDCCA_EXCLUDE_CCA_FA_MSK,
	.ifs_clm_tx_cnt_msk = B_IFS_CLM_TX_CNT_MSK,
	.ifs_clm_cca_addr = R_IFS_CLM_CCA,
	.ifs_clm_ofdmcca_excl_fa_mask = B_IFS_CLM_OFDMCCA_EXCLUDE_FA_MSK,
	.ifs_clm_cckcca_excl_fa_mask = B_IFS_CLM_CCKCCA_EXCLUDE_FA_MSK,
	.ifs_clm_fa_addr = R_IFS_CLM_FA,
	.ifs_clm_ofdm_fa_mask = B_IFS_CLM_OFDM_FA_MSK,
	.ifs_clm_cck_fa_mask = B_IFS_CLM_CCK_FA_MSK,
	.ifs_his_addr = R_IFS_HIS,
	.ifs_t4_his_mask = B_IFS_T4_HIS_MSK,
	.ifs_t3_his_mask = B_IFS_T3_HIS_MSK,
	.ifs_t2_his_mask = B_IFS_T2_HIS_MSK,
	.ifs_t1_his_mask = B_IFS_T1_HIS_MSK,
	.ifs_avg_l_addr = R_IFS_AVG_L,
	.ifs_t2_avg_mask = B_IFS_T2_AVG_MSK,
	.ifs_t1_avg_mask = B_IFS_T1_AVG_MSK,
	.ifs_avg_h_addr = R_IFS_AVG_H,
	.ifs_t4_avg_mask = B_IFS_T4_AVG_MSK,
	.ifs_t3_avg_mask = B_IFS_T3_AVG_MSK,
	.ifs_cca_l_addr = R_IFS_CCA_L,
	.ifs_t2_cca_mask = B_IFS_T2_CCA_MSK,
	.ifs_t1_cca_mask = B_IFS_T1_CCA_MSK,
	.ifs_cca_h_addr = R_IFS_CCA_H,
	.ifs_t4_cca_mask = B_IFS_T4_CCA_MSK,
	.ifs_t3_cca_mask = B_IFS_T3_CCA_MSK,
	.ifs_total_addr = R_IFSCNT,
	.ifs_cnt_done_mask = B_IFSCNT_DONE_MSK,
	.ifs_total_mask = B_IFSCNT_TOTAL_CNT_MSK,
};

static const struct rtw89_physts_regs rtw89_physts_regs_ax = {
	.setting_addr = R_PLCP_HISTOGRAM,
	.dis_trigger_fail_mask = B_STS_DIS_TRIG_BY_FAIL,
	.dis_trigger_brk_mask = B_STS_DIS_TRIG_BY_BRK,
};

const struct rtw89_phy_gen_def rtw89_phy_gen_ax = {
	.cr_base = 0x10000,
	.ccx = &rtw89_ccx_regs_ax,
	.physts = &rtw89_physts_regs_ax,
};
EXPORT_SYMBOL(rtw89_phy_gen_ax);