rtw8852a_rfk.c

// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2019-2020 Realtek Corporation
 */

#include "coex.h"
#include "debug.h"
#include "mac.h"
#include "phy.h"
#include "reg.h"
#include "rtw8852a.h"
#include "rtw8852a_rfk.h"
#include "rtw8852a_rfk_table.h"
#include "rtw8852a_table.h"

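/* Map a PHY index to the RF path bitmap a calibration should cover:
 * both paths unless DBCC is enabled, in which case each PHY owns one path.
 */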
static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]dbcc_en: %x, PHY%d\n",
                    rtwdev->dbcc_en, phy_idx);

        if (!rtwdev->dbcc_en)
                return RF_AB;

        if (phy_idx == RTW89_PHY_0)
                return RF_A;
        else
                return RF_B;
}

static const u32 rtw8852a_backup_bb_regs[] = {0x2344, 0x58f0, 0x78f0};
static const u32 rtw8852a_backup_rf_regs[] = {0xef, 0xde, 0x0, 0x1e, 0x2, 0x85, 0x90, 0x5};

#define BACKUP_BB_REGS_NR ARRAY_SIZE(rtw8852a_backup_bb_regs)
#define BACKUP_RF_REGS_NR ARRAY_SIZE(rtw8852a_backup_rf_regs)

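/* The four helpers below snapshot and restore the BB/RF registers listed
 * above, so a calibration can clobber them and put them back afterwards.
 */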
static void _rfk_backup_bb_reg(struct rtw89_dev *rtwdev, u32 backup_bb_reg_val[])
{
        u32 i;

        for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
                backup_bb_reg_val[i] =
                        rtw89_phy_read32_mask(rtwdev, rtw8852a_backup_bb_regs[i],
                                              MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK,
                            "[IQK]backup bb reg : %x, value =%x\n",
                            rtw8852a_backup_bb_regs[i], backup_bb_reg_val[i]);
        }
}

static void _rfk_backup_rf_reg(struct rtw89_dev *rtwdev, u32 backup_rf_reg_val[],
                               u8 rf_path)
{
        u32 i;

        for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
                backup_rf_reg_val[i] =
                        rtw89_read_rf(rtwdev, rf_path,
                                      rtw8852a_backup_rf_regs[i], RFREG_MASK);
                rtw89_debug(rtwdev, RTW89_DBG_RFK,
                            "[IQK]backup rf S%d reg : %x, value =%x\n", rf_path,
                            rtw8852a_backup_rf_regs[i], backup_rf_reg_val[i]);
        }
}

static void _rfk_restore_bb_reg(struct rtw89_dev *rtwdev,
                                u32 backup_bb_reg_val[])
{
        u32 i;

        for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
                rtw89_phy_write32_mask(rtwdev, rtw8852a_backup_bb_regs[i],
                                       MASKDWORD, backup_bb_reg_val[i]);
                rtw89_debug(rtwdev, RTW89_DBG_RFK,
                            "[IQK]restore bb reg : %x, value =%x\n",
                            rtw8852a_backup_bb_regs[i], backup_bb_reg_val[i]);
        }
}

static void _rfk_restore_rf_reg(struct rtw89_dev *rtwdev,
                                u32 backup_rf_reg_val[], u8 rf_path)
{
        u32 i;

        for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
                rtw89_write_rf(rtwdev, rf_path, rtw8852a_backup_rf_regs[i],
                               RFREG_MASK, backup_rf_reg_val[i]);
                rtw89_debug(rtwdev, RTW89_DBG_RFK,
                            "[IQK]restore rf S%d reg: %x, value =%x\n", rf_path,
                            rtw8852a_backup_rf_regs[i], backup_rf_reg_val[i]);
        }
}

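/* Poll RR_MOD on every path selected in @kpath until its mode field leaves
 * value 0x2 (per the log message, until the path reaches RX mode), waiting
 * up to 5 ms per path.
 */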
static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath)
{
        u8 path;
        u32 rf_mode;
        int ret;

        for (path = 0; path < RF_PATH_MAX; path++) {
                if (!(kpath & BIT(path)))
                        continue;

                ret = read_poll_timeout_atomic(rtw89_read_rf, rf_mode, rf_mode != 2,
                                               2, 5000, false, rtwdev, path, 0x00,
                                               RR_MOD_MASK);
                rtw89_debug(rtwdev, RTW89_DBG_RFK,
                            "[RFK] Wait S%d to Rx mode!! (ret = %d)\n",
                            path, ret);
        }
}

static void _dack_dump(struct rtw89_dev *rtwdev)
{
        struct rtw89_dack_info *dack = &rtwdev->dack;
        u8 i;
        u8 t;

        rtw89_debug(rtwdev, RTW89_DBG_RFK,
                    "[DACK]S0 ADC_DCK ic = 0x%x, qc = 0x%x\n",
                    dack->addck_d[0][0], dack->addck_d[0][1]);
        rtw89_debug(rtwdev, RTW89_DBG_RFK,
                    "[DACK]S1 ADC_DCK ic = 0x%x, qc = 0x%x\n",
                    dack->addck_d[1][0], dack->addck_d[1][1]);
        rtw89_debug(rtwdev, RTW89_DBG_RFK,
                    "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n",
                    dack->dadck_d[0][0], dack->dadck_d[0][1]);
        rtw89_debug(rtwdev, RTW89_DBG_RFK,
                    "[DACK]S1 DAC_DCK ic = 0x%x, qc = 0x%x\n",
                    dack->dadck_d[1][0], dack->dadck_d[1][1]);
        rtw89_debug(rtwdev, RTW89_DBG_RFK,
                    "[DACK]S0 biask ic = 0x%x, qc = 0x%x\n",
                    dack->biask_d[0][0], dack->biask_d[0][1]);
        rtw89_debug(rtwdev, RTW89_DBG_RFK,
                    "[DACK]S1 biask ic = 0x%x, qc = 0x%x\n",
                    dack->biask_d[1][0], dack->biask_d[1][1]);

        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK ic:\n");
        for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
                t = dack->msbk_d[0][0][i];
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
        }
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK qc:\n");
        for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
                t = dack->msbk_d[0][1][i];
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
        }
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK ic:\n");
        for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
                t = dack->msbk_d[1][0][i];
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
        }
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK qc:\n");
        for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
                t = dack->msbk_d[1][1][i];
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
        }
}

static void _afe_init(struct rtw89_dev *rtwdev)
{
        rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_afe_init_defs_tbl);
}

static void _addck_backup(struct rtw89_dev *rtwdev)
{
        struct rtw89_dack_info *dack = &rtwdev->dack;

        rtw89_phy_write32_clr(rtwdev, R_S0_RXDC2, B_S0_RXDC2_SEL);
        dack->addck_d[0][0] = (u16)rtw89_phy_read32_mask(rtwdev, R_S0_ADDCK,
                                                         B_S0_ADDCK_Q);
        dack->addck_d[0][1] = (u16)rtw89_phy_read32_mask(rtwdev, R_S0_ADDCK,
                                                         B_S0_ADDCK_I);

        rtw89_phy_write32_clr(rtwdev, R_S1_RXDC2, B_S1_RXDC2_SEL);
        dack->addck_d[1][0] = (u16)rtw89_phy_read32_mask(rtwdev, R_S1_ADDCK,
                                                         B_S1_ADDCK_Q);
        dack->addck_d[1][1] = (u16)rtw89_phy_read32_mask(rtwdev, R_S1_ADDCK,
                                                         B_S1_ADDCK_I);
}

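/* Reload the backed-up ADC DC terms: the I value is written whole, while
 * the Q value is split with its upper bits in *_RXDC2 and its low six bits
 * in *_RXDC, after which the compensation enable bit is set.
 */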
static void _addck_reload(struct rtw89_dev *rtwdev)
{
        struct rtw89_dack_info *dack = &rtwdev->dack;

        rtw89_phy_write32_mask(rtwdev, R_S0_RXDC, B_S0_RXDC_I, dack->addck_d[0][0]);
        rtw89_phy_write32_mask(rtwdev, R_S0_RXDC2, B_S0_RXDC2_Q2,
                               (dack->addck_d[0][1] >> 6));
        rtw89_phy_write32_mask(rtwdev, R_S0_RXDC, B_S0_RXDC_Q,
                               (dack->addck_d[0][1] & 0x3f));
        rtw89_phy_write32_set(rtwdev, R_S0_RXDC2, B_S0_RXDC2_MEN);

        rtw89_phy_write32_mask(rtwdev, R_S1_RXDC, B_S1_RXDC_I, dack->addck_d[1][0]);
        rtw89_phy_write32_mask(rtwdev, R_S1_RXDC2, B_S1_RXDC2_Q2,
                               (dack->addck_d[1][1] >> 6));
        rtw89_phy_write32_mask(rtwdev, R_S1_RXDC, B_S1_RXDC_Q,
                               (dack->addck_d[1][1] & 0x3f));
        rtw89_phy_write32_set(rtwdev, R_S1_RXDC2, B_S1_RXDC2_EN);
}

static void _dack_backup_s0(struct rtw89_dev *rtwdev)
{
        struct rtw89_dack_info *dack = &rtwdev->dack;
        u8 i;

        rtw89_phy_write32_set(rtwdev, R_S0_DACKI, B_S0_DACKI_EN);
        rtw89_phy_write32_set(rtwdev, R_S0_DACKQ, B_S0_DACKQ_EN);
        rtw89_phy_write32_set(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG);

        for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
                rtw89_phy_write32_mask(rtwdev, R_S0_DACKI, B_S0_DACKI_AR, i);
                dack->msbk_d[0][0][i] =
                        (u8)rtw89_phy_read32_mask(rtwdev, R_S0_DACKI7, B_S0_DACKI7_K);
                rtw89_phy_write32_mask(rtwdev, R_S0_DACKQ, B_S0_DACKQ_AR, i);
                dack->msbk_d[0][1][i] =
                        (u8)rtw89_phy_read32_mask(rtwdev, R_S0_DACKQ7, B_S0_DACKQ7_K);
        }

        dack->biask_d[0][0] = (u16)rtw89_phy_read32_mask(rtwdev, R_S0_DACKI2,
                                                         B_S0_DACKI2_K);
        dack->biask_d[0][1] = (u16)rtw89_phy_read32_mask(rtwdev, R_S0_DACKQ2,
                                                         B_S0_DACKQ2_K);
        dack->dadck_d[0][0] = (u8)rtw89_phy_read32_mask(rtwdev, R_S0_DACKI8,
                                                        B_S0_DACKI8_K) - 8;
        dack->dadck_d[0][1] = (u8)rtw89_phy_read32_mask(rtwdev, R_S0_DACKQ8,
                                                        B_S0_DACKQ8_K) - 8;
}

static void _dack_backup_s1(struct rtw89_dev *rtwdev)
{
        struct rtw89_dack_info *dack = &rtwdev->dack;
        u8 i;

        rtw89_phy_write32_set(rtwdev, R_S1_DACKI, B_S1_DACKI_EN);
        rtw89_phy_write32_set(rtwdev, R_S1_DACKQ, B_S1_DACKQ_EN);
        rtw89_phy_write32_set(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON);

        for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
                rtw89_phy_write32_mask(rtwdev, R_S1_DACKI, B_S1_DACKI_AR, i);
                dack->msbk_d[1][0][i] =
                        (u8)rtw89_phy_read32_mask(rtwdev, R_S1_DACKI7, B_S1_DACKI_K);
                rtw89_phy_write32_mask(rtwdev, R_S1_DACKQ, B_S1_DACKQ_AR, i);
                dack->msbk_d[1][1][i] =
                        (u8)rtw89_phy_read32_mask(rtwdev, R_S1_DACKQ7, B_S1_DACKQ7_K);
        }

        dack->biask_d[1][0] =
                (u16)rtw89_phy_read32_mask(rtwdev, R_S1_DACKI2, B_S1_DACKI2_K);
        dack->biask_d[1][1] =
                (u16)rtw89_phy_read32_mask(rtwdev, R_S1_DACKQ2, B_S1_DACKQ2_K);
        dack->dadck_d[1][0] =
                (u8)rtw89_phy_read32_mask(rtwdev, R_S1_DACKI8, B_S1_DACKI8_K) - 8;
        dack->dadck_d[1][1] =
                (u8)rtw89_phy_read32_mask(rtwdev, R_S1_DACKQ8, B_S1_DACKQ8_K) - 8;
}

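/* Write one path/index set of DACK results back to hardware: the 16 MSBK
 * words are packed four per register at 0x5e14/0x5e18/0x5e1c/0x5e20 (plus
 * a per-index and per-path offset), and biask/dadck share 0x5e24.
 */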
static void _dack_reload_by_path(struct rtw89_dev *rtwdev,
                                 enum rtw89_rf_path path, u8 index)
{
        struct rtw89_dack_info *dack = &rtwdev->dack;
        u32 tmp = 0, tmp_offset, tmp_reg;
        u8 i;
        u32 idx_offset, path_offset;

        if (index == 0)
                idx_offset = 0;
        else
                idx_offset = 0x50;

        if (path == RF_PATH_A)
                path_offset = 0;
        else
                path_offset = 0x2000;

        tmp_offset = idx_offset + path_offset;
        /* msbk_d: 15/14/13/12 */
        tmp = 0x0;
        for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
                tmp |= dack->msbk_d[path][index][i + 12] << (i * 8);
        tmp_reg = 0x5e14 + tmp_offset;
        rtw89_phy_write32(rtwdev, tmp_reg, tmp);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
                    rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
        /* msbk_d: 11/10/9/8 */
        tmp = 0x0;
        for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
                tmp |= dack->msbk_d[path][index][i + 8] << (i * 8);
        tmp_reg = 0x5e18 + tmp_offset;
        rtw89_phy_write32(rtwdev, tmp_reg, tmp);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
                    rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
        /* msbk_d: 7/6/5/4 */
        tmp = 0x0;
        for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
                tmp |= dack->msbk_d[path][index][i + 4] << (i * 8);
        tmp_reg = 0x5e1c + tmp_offset;
        rtw89_phy_write32(rtwdev, tmp_reg, tmp);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
                    rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
        /* msbk_d: 3/2/1/0 */
        tmp = 0x0;
        for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
                tmp |= dack->msbk_d[path][index][i] << (i * 8);
        tmp_reg = 0x5e20 + tmp_offset;
        rtw89_phy_write32(rtwdev, tmp_reg, tmp);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
                    rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
        /* dadck_d/biask_d */
        tmp = 0x0;
        tmp = (dack->biask_d[path][index] << 22) |
              (dack->dadck_d[path][index] << 14);
        tmp_reg = 0x5e24 + tmp_offset;
        rtw89_phy_write32(rtwdev, tmp_reg, tmp);
}

static void _dack_reload(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
        u8 i;

        for (i = 0; i < 2; i++)
                _dack_reload_by_path(rtwdev, path, i);

        rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
                                 &rtw8852a_rfk_dack_reload_defs_a_tbl,
                                 &rtw8852a_rfk_dack_reload_defs_b_tbl);
}

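/* Estimate the residual RX DC by averaging ADDC_T_AVG debug-port samples;
 * each sample carries the real part in bits 23:12 and the imaginary part
 * in bits 11:0, both sign-extended from 12 bits.
 */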
#define ADDC_T_AVG 100

static void _check_addc(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
        s32 dc_re = 0, dc_im = 0;
        u32 tmp;
        u32 i;

        rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
                                 &rtw8852a_rfk_check_addc_defs_a_tbl,
                                 &rtw8852a_rfk_check_addc_defs_b_tbl);

        for (i = 0; i < ADDC_T_AVG; i++) {
                tmp = rtw89_phy_read32_mask(rtwdev, R_DBG32_D, MASKDWORD);
                dc_re += sign_extend32(FIELD_GET(0xfff000, tmp), 11);
                dc_im += sign_extend32(FIELD_GET(0xfff, tmp), 11);
        }

        dc_re /= ADDC_T_AVG;
        dc_im /= ADDC_T_AVG;

        rtw89_debug(rtwdev, RTW89_DBG_RFK,
                    "[DACK]S%d,dc_re = 0x%x,dc_im =0x%x\n", path, dc_re, dc_im);
}

static void _addck(struct rtw89_dev *rtwdev)
{
        struct rtw89_dack_info *dack = &rtwdev->dack;
        u32 val;
        int ret;

        /* S0 */
        rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_reset_defs_a_tbl);

        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]before S0 ADDCK\n");
        _check_addc(rtwdev, RF_PATH_A);

        rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_trigger_defs_a_tbl);

        ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
                                       false, rtwdev, 0x1e00, BIT(0));
        if (ret) {
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADDCK timeout\n");
                dack->addck_timeout[0] = true;
        }
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]ADDCK ret = %d\n", ret);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S0 ADDCK\n");
        _check_addc(rtwdev, RF_PATH_A);

        rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_restore_defs_a_tbl);

        /* S1 */
        rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_reset_defs_b_tbl);

        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]before S1 ADDCK\n");
        _check_addc(rtwdev, RF_PATH_B);

        rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_trigger_defs_b_tbl);

        ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
                                       false, rtwdev, 0x3e00, BIT(0));
        if (ret) {
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADDCK timeout\n");
                dack->addck_timeout[1] = true;
        }
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]ADDCK ret = %d\n", ret);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S1 ADDCK\n");
        _check_addc(rtwdev, RF_PATH_B);

        rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_restore_defs_b_tbl);
}

static void _check_dadc(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
        rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
                                 &rtw8852a_rfk_check_dadc_defs_f_a_tbl,
                                 &rtw8852a_rfk_check_dadc_defs_f_b_tbl);

        _check_addc(rtwdev, path);

        rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
                                 &rtw8852a_rfk_check_dadc_defs_r_a_tbl,
                                 &rtw8852a_rfk_check_dadc_defs_r_b_tbl);
}

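/* Per-path DACK: trigger the MSBK and then the DADCK stage, polling each
 * done bit with a 10 ms timeout, then verify, back up and reload the
 * results before releasing the debug mode bit.
 */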
static void _dack_s0(struct rtw89_dev *rtwdev)
{
        struct rtw89_dack_info *dack = &rtwdev->dack;
        u32 val;
        int ret;

        rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_f_a_tbl);

        ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
                                       false, rtwdev, 0x5e28, BIT(15));
        ret |= read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
                                        false, rtwdev, 0x5e78, BIT(15));
        if (ret) {
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK timeout\n");
                dack->msbk_timeout[0] = true;
        }
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);

        rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_m_a_tbl);

        ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
                                       false, rtwdev, 0x5e48, BIT(17));
        ret |= read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
                                        false, rtwdev, 0x5e98, BIT(17));
        if (ret) {
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DADACK timeout\n");
                dack->dadck_timeout[0] = true;
        }
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);

        rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_r_a_tbl);

        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S0 DADCK\n");
        _check_dadc(rtwdev, RF_PATH_A);

        _dack_backup_s0(rtwdev);
        _dack_reload(rtwdev, RF_PATH_A);

        rtw89_phy_write32_clr(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG);
}

static void _dack_s1(struct rtw89_dev *rtwdev)
{
        struct rtw89_dack_info *dack = &rtwdev->dack;
        u32 val;
        int ret;

        rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_f_b_tbl);

        ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
                                       false, rtwdev, 0x7e28, BIT(15));
        ret |= read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
                                        false, rtwdev, 0x7e78, BIT(15));
        if (ret) {
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK timeout\n");
                dack->msbk_timeout[1] = true;
        }
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);

        rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_m_b_tbl);

        ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
                                       false, rtwdev, 0x7e48, BIT(17));
        ret |= read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
                                        false, rtwdev, 0x7e98, BIT(17));
        if (ret) {
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 DADCK timeout\n");
                dack->dadck_timeout[1] = true;
        }
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);

        rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_r_b_tbl);

        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S1 DADCK\n");
        _check_dadc(rtwdev, RF_PATH_B);

        _dack_backup_s1(rtwdev);
        _dack_reload(rtwdev, RF_PATH_B);

        rtw89_phy_write32_clr(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON);
}

static void _dack(struct rtw89_dev *rtwdev)
{
        _dack_s0(rtwdev);
        _dack_s1(rtwdev);
}

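/* Top-level DAC calibration: save the RF mode registers, reset the AFE,
 * run ADDCK (ADC DC) and then DACK (DAC MSBK/bias/DC), dump and reload the
 * results, and restore the original RF state. BT coex is notified before
 * and after each one-shot.
 */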
static void _dac_cal(struct rtw89_dev *rtwdev, bool force)
{
        struct rtw89_dack_info *dack = &rtwdev->dack;
        u32 rf0_0, rf1_0;
        u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, RF_AB);

        dack->dack_done = false;
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK b\n");
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK start!!!\n");
        rf0_0 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK);
        rf1_0 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK);
        _afe_init(rtwdev);
        rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x0);
        rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x0);
        rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x30001);
        rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, 0x30001);
        rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_START);
        _addck(rtwdev);
        rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_STOP);
        _addck_backup(rtwdev);
        _addck_reload(rtwdev);
        rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x40001);
        rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, 0x40001);
        rtw89_write_rf(rtwdev, RF_PATH_A, RR_MODOPT, RFREG_MASK, 0x0);
        rtw89_write_rf(rtwdev, RF_PATH_B, RR_MODOPT, RFREG_MASK, 0x0);
        rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_START);
        _dack(rtwdev);
        rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_STOP);
        _dack_dump(rtwdev);
        dack->dack_done = true;
        rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, rf0_0);
        rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, rf1_0);
        rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x1);
        rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x1);
        dack->dack_cnt++;
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK finish!!!\n");
}

#define RTW8852A_NCTL_VER 0xd
#define RTW8852A_IQK_VER 0x2a
#define RTW8852A_IQK_SS 2
#define RTW8852A_IQK_THR_REK 8
#define RTW8852A_IQK_CFIR_GROUP_NR 4

enum rtw8852a_iqk_type {
        ID_TXAGC,
        ID_FLOK_COARSE,
        ID_FLOK_FINE,
        ID_TXK,
        ID_RXAGC,
        ID_RXK,
        ID_NBTXK,
        ID_NBRXK,
};

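/* The _iqk_read_*() helpers below are verbose debug dumps: they page
 * through the KIP report windows and print FFT bins, X/Y/M results, SRAM
 * contents and TX/RX CFIR coefficients after a one-shot calibration.
 */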
static void _iqk_read_fft_dbcc0(struct rtw89_dev *rtwdev, u8 path)
{
        u8 i = 0x0;
        u32 fft[6] = {0x0};

        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
        rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00160000);
        fft[0] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
        rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00170000);
        fft[1] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
        rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00180000);
        fft[2] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
        rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00190000);
        fft[3] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
        rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x001a0000);
        fft[4] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
        rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x001b0000);
        fft[5] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);

        for (i = 0; i < 6; i++)
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x,fft[%x]= %x\n",
                            path, i, fft[i]);
}

static void _iqk_read_xym_dbcc0(struct rtw89_dev *rtwdev, u8 path)
{
        u8 i = 0x0;
        u32 tmp = 0x0;

        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
        rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, B_NCTL_CFG_SPAGE, path);
        rtw89_phy_write32_mask(rtwdev, R_IQK_DIF, B_IQK_DIF_TRX, 0x1);

        for (i = 0x0; i < 0x18; i++) {
                rtw89_phy_write32_mask(rtwdev, R_NCTL_N2, MASKDWORD, 0x000000c0 + i);
                rtw89_phy_write32_clr(rtwdev, R_NCTL_N2, MASKDWORD);
                tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x8%lx38 = %x\n",
                            path, BIT(path), tmp);
                udelay(1);
        }

        rtw89_phy_write32_clr(rtwdev, R_IQK_DIF, B_IQK_DIF_TRX);
        rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD, 0x40000000);
        rtw89_phy_write32_mask(rtwdev, R_NCTL_N2, MASKDWORD, 0x80010100);
        udelay(1);
}

static void _iqk_read_txcfir_dbcc0(struct rtw89_dev *rtwdev, u8 path,
                                   u8 group)
{
        static const u32 base_addrs[RTW8852A_IQK_SS][RTW8852A_IQK_CFIR_GROUP_NR] = {
                {0x8f20, 0x8f54, 0x8f88, 0x8fbc},
                {0x9320, 0x9354, 0x9388, 0x93bc},
        };
        u8 idx = 0x0;
        u32 tmp = 0x0;
        u32 base_addr;

        if (path >= RTW8852A_IQK_SS) {
                rtw89_warn(rtwdev, "cfir path %d out of range\n", path);
                return;
        }
        if (group >= RTW8852A_IQK_CFIR_GROUP_NR) {
                rtw89_warn(rtwdev, "cfir group %d out of range\n", group);
                return;
        }

        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
        rtw89_phy_write32_mask(rtwdev, R_W_COEF + (path << 8), MASKDWORD, 0x00000001);

        base_addr = base_addrs[path][group];
        for (idx = 0; idx < 0x0d; idx++) {
                tmp = rtw89_phy_read32_mask(rtwdev, base_addr + (idx << 2), MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK,
                            "[IQK] %x = %x\n",
                            base_addr + (idx << 2), tmp);
        }

        if (path == 0x0) {
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]\n");
                tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P0C0, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8f50 = %x\n", tmp);
                tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P0C1, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8f84 = %x\n", tmp);
                tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P0C2, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8fb8 = %x\n", tmp);
                tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P0C3, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8fec = %x\n", tmp);
        } else {
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]\n");
                tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P1C0, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x9350 = %x\n", tmp);
                tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P1C1, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x9384 = %x\n", tmp);
                tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P1C2, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x93b8 = %x\n", tmp);
                tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P1C3, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x93ec = %x\n", tmp);
        }

        rtw89_phy_write32_clr(rtwdev, R_W_COEF + (path << 8), MASKDWORD);
        rtw89_phy_write32_mask(rtwdev, R_KIP_RPT + (path << 8), B_KIP_RPT_SEL, 0xc);
        udelay(1);
        tmp = rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), MASKDWORD);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x8%lxfc = %x\n", path,
                    BIT(path), tmp);
}

static void _iqk_read_rxcfir_dbcc0(struct rtw89_dev *rtwdev, u8 path,
                                   u8 group)
{
        static const u32 base_addrs[RTW8852A_IQK_SS][RTW8852A_IQK_CFIR_GROUP_NR] = {
                {0x8d00, 0x8d44, 0x8d88, 0x8dcc},
                {0x9100, 0x9144, 0x9188, 0x91cc},
        };
        u8 idx = 0x0;
        u32 tmp = 0x0;
        u32 base_addr;

        if (path >= RTW8852A_IQK_SS) {
                rtw89_warn(rtwdev, "cfir path %d out of range\n", path);
                return;
        }
        if (group >= RTW8852A_IQK_CFIR_GROUP_NR) {
                rtw89_warn(rtwdev, "cfir group %d out of range\n", group);
                return;
        }

        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
        rtw89_phy_write32_mask(rtwdev, R_W_COEF + (path << 8), MASKDWORD, 0x00000001);

        base_addr = base_addrs[path][group];
        for (idx = 0; idx < 0x10; idx++) {
                tmp = rtw89_phy_read32_mask(rtwdev, base_addr + (idx << 2), MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK,
                            "[IQK]%x = %x\n",
                            base_addr + (idx << 2), tmp);
        }

        if (path == 0x0) {
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]\n");
                tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P0C0, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8d40 = %x\n", tmp);
                tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P0C1, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8d84 = %x\n", tmp);
                tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P0C2, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8dc8 = %x\n", tmp);
                tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P0C3, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8e0c = %x\n", tmp);
        } else {
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]\n");
                tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P1C0, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x9140 = %x\n", tmp);
                tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P1C1, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x9184 = %x\n", tmp);
                tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P1C2, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x91c8 = %x\n", tmp);
                tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P1C3, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x920c = %x\n", tmp);
        }

        rtw89_phy_write32_clr(rtwdev, R_W_COEF + (path << 8), MASKDWORD);
        rtw89_phy_write32_mask(rtwdev, R_KIP_RPT + (path << 8), B_KIP_RPT_SEL, 0xd);
        tmp = rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), MASKDWORD);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x8%lxfc = %x\n", path,
                    BIT(path), tmp);
}

static void _iqk_sram(struct rtw89_dev *rtwdev, u8 path)
{
        u32 tmp = 0x0;
        u32 i = 0x0;

        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
        rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00020000);
        rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX2, MASKDWORD, 0x00000080);
        rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 0x00010000);
        rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x009);

        for (i = 0; i <= 0x9f; i++) {
                rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 0x00010000 + i);
                tmp = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]0x%x\n", tmp);
        }

        for (i = 0; i <= 0x9f; i++) {
                rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 0x00010000 + i);
                tmp = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCQ);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]0x%x\n", tmp);
        }

        rtw89_phy_write32_clr(rtwdev, R_SRAM_IQRX2, MASKDWORD);
        rtw89_phy_write32_clr(rtwdev, R_SRAM_IQRX, MASKDWORD);
}

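/* Prepare a path for RX IQK: reset the analog front end and ADC FIFO,
 * switch the RF mode to RXK2 and select the band-specific RX-K input,
 * mirror the channel word into RR_RSV4, then power-cycle the RXK PLL and
 * give it time to settle.
 */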
static void _iqk_rxk_setting(struct rtw89_dev *rtwdev, u8 path)
{
        struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
        u32 tmp = 0x0;

        rtw89_phy_write32_set(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG);
        rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x3);
        rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa041);
        udelay(1);
        rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H2, 0x3);
        rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x0);
        udelay(1);
        rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x1);
        rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H2, 0x0);
        udelay(1);
        rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST, 0x0303);
        rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST, 0x0000);

        switch (iqk_info->iqk_band[path]) {
        case RTW89_BAND_2G:
                rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RXK2);
                rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x1);
                break;
        case RTW89_BAND_5G:
                rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RXK2);
                rtw89_write_rf(rtwdev, path, RR_WLSEL, RR_WLSEL_AG, 0x5);
                rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x1);
                break;
        default:
                break;
        }

        tmp = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
        rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, tmp);
        rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_OFF, 0x13);
        rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
        rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x1);
        fsleep(128);
}

static bool _iqk_check_cal(struct rtw89_dev *rtwdev, u8 path, u8 ktype)
{
        u32 tmp;
        u32 val;
        int ret;

        ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55, 1, 8200,
                                       false, rtwdev, 0xbff8, MASKBYTE0);
        if (ret)
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]IQK timeout!!!\n");
        rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, MASKBYTE0);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, ret=%d\n", path, ret);
        tmp = rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD);
        rtw89_debug(rtwdev, RTW89_DBG_RFK,
                    "[IQK]S%x, type= %x, 0x8008 = 0x%x\n", path, ktype, tmp);

        return false;
}

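/* Fire a single NCTL calibration command and wait for it to finish.
 * iqk_cmd encodes the calibration type in its low bits, the target path
 * at bit (4 + path), and, for wideband TXK/RXK, the bandwidth selector in
 * bits 11:8. Returns the fail flag reported by _iqk_check_cal().
 */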
static bool _iqk_one_shot(struct rtw89_dev *rtwdev,
                          enum rtw89_phy_idx phy_idx, u8 path, u8 ktype)
{
        struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
        bool fail = false;
        u32 iqk_cmd = 0x0;
        u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy_idx, path);
        u32 addr_rfc_ctl = 0x0;

        if (path == RF_PATH_A)
                addr_rfc_ctl = 0x5864;
        else
                addr_rfc_ctl = 0x7864;

        rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);
        switch (ktype) {
        case ID_TXAGC:
                iqk_cmd = 0x008 | (1 << (4 + path)) | (path << 1);
                break;
        case ID_FLOK_COARSE:
                rtw89_phy_write32_set(rtwdev, addr_rfc_ctl, 0x20000000);
                rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x009);
                iqk_cmd = 0x108 | (1 << (4 + path));
                break;
        case ID_FLOK_FINE:
                rtw89_phy_write32_set(rtwdev, addr_rfc_ctl, 0x20000000);
                rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x009);
                iqk_cmd = 0x208 | (1 << (4 + path));
                break;
        case ID_TXK:
                rtw89_phy_write32_clr(rtwdev, addr_rfc_ctl, 0x20000000);
                rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x025);
                iqk_cmd = 0x008 | (1 << (path + 4)) |
                          (((0x8 + iqk_info->iqk_bw[path]) & 0xf) << 8);
                break;
        case ID_RXAGC:
                iqk_cmd = 0x508 | (1 << (4 + path)) | (path << 1);
                break;
        case ID_RXK:
                rtw89_phy_write32_set(rtwdev, addr_rfc_ctl, 0x20000000);
                rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011);
                iqk_cmd = 0x008 | (1 << (path + 4)) |
                          (((0xb + iqk_info->iqk_bw[path]) & 0xf) << 8);
                break;
        case ID_NBTXK:
                rtw89_phy_write32_clr(rtwdev, addr_rfc_ctl, 0x20000000);
                rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x025);
                iqk_cmd = 0x308 | (1 << (4 + path));
                break;
        case ID_NBRXK:
                rtw89_phy_write32_set(rtwdev, addr_rfc_ctl, 0x20000000);
                rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011);
                iqk_cmd = 0x608 | (1 << (4 + path));
                break;
        default:
                return false;
        }

        rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, iqk_cmd + 1);
        rtw89_phy_write32_set(rtwdev, R_DPK_CTL, B_DPK_CTL_EN);
        udelay(1);
        fail = _iqk_check_cal(rtwdev, path, ktype);
        if (iqk_info->iqk_xym_en)
                _iqk_read_xym_dbcc0(rtwdev, path);
        if (iqk_info->iqk_fft_en)
                _iqk_read_fft_dbcc0(rtwdev, path);
        if (iqk_info->iqk_sram_en)
                _iqk_sram(rtwdev, path);
        if (iqk_info->iqk_cfir_en) {
                if (ktype == ID_TXK) {
                        _iqk_read_txcfir_dbcc0(rtwdev, path, 0x0);
                        _iqk_read_txcfir_dbcc0(rtwdev, path, 0x1);
                        _iqk_read_txcfir_dbcc0(rtwdev, path, 0x2);
                        _iqk_read_txcfir_dbcc0(rtwdev, path, 0x3);
                } else {
                        _iqk_read_rxcfir_dbcc0(rtwdev, path, 0x0);
                        _iqk_read_rxcfir_dbcc0(rtwdev, path, 0x1);
                        _iqk_read_rxcfir_dbcc0(rtwdev, path, 0x2);
                        _iqk_read_rxcfir_dbcc0(rtwdev, path, 0x3);
                }
        }
        rtw89_phy_write32_clr(rtwdev, addr_rfc_ctl, 0x20000000);
        rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);

        return fail;
}

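/* Wideband RX IQK: sweep four RX gain/attenuation groups for the current
 * band, run a one-shot RXK per group, and latch each group's fail bit
 * into R_IQKINF.
 */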
static bool _rxk_group_sel(struct rtw89_dev *rtwdev,
                           enum rtw89_phy_idx phy_idx, u8 path)
{
        struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
        static const u32 rxgn_a[4] = {0x18C, 0x1A0, 0x28C, 0x2A0};
        static const u32 attc2_a[4] = {0x0, 0x0, 0x07, 0x30};
        static const u32 attc1_a[4] = {0x7, 0x5, 0x1, 0x1};
        static const u32 rxgn_g[4] = {0x1CC, 0x1E0, 0x2CC, 0x2E0};
        static const u32 attc2_g[4] = {0x0, 0x15, 0x3, 0x1a};
        static const u32 attc1_g[4] = {0x1, 0x0, 0x1, 0x0};
        u8 gp = 0x0;
        bool fail = false;
        u32 rf0 = 0x0;

        for (gp = 0; gp < 0x4; gp++) {
                switch (iqk_info->iqk_band[path]) {
                case RTW89_BAND_2G:
                        rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, rxgn_g[gp]);
                        rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C2G, attc2_g[gp]);
                        rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C1G, attc1_g[gp]);
                        break;
                case RTW89_BAND_5G:
                        rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, rxgn_a[gp]);
                        rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_C2, attc2_a[gp]);
                        rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_C1, attc1_a[gp]);
                        break;
                default:
                        break;
                }
                rtw89_phy_write32_set(rtwdev, R_IQK_CFG, B_IQK_CFG_SET);
                rf0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
                rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI,
                                       rf0 | iqk_info->syn1to2);
                rtw89_phy_write32_mask(rtwdev, R_IQK_COM, MASKDWORD, 0x40010100);
                rtw89_phy_write32_clr(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_RXCFIR);
                rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL);
                rtw89_phy_write32_clr(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3);
                rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, gp);
                rtw89_phy_write32_mask(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_EN, 0x1);
                rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
                fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK);
                rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(16 + gp + path * 4), fail);
        }

        switch (iqk_info->iqk_band[path]) {
        case RTW89_BAND_2G:
                rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x0);
                rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
                break;
        case RTW89_BAND_5G:
                rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x0);
                rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
                rtw89_write_rf(rtwdev, path, RR_WLSEL, RR_WLSEL_AG, 0x0);
                break;
        default:
                break;
        }

        iqk_info->nb_rxcfir[path] = 0x40000000;
        rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
                               B_IQK_RES_RXCFIR, 0x5);
        iqk_info->is_wb_rxiqk[path] = true;
        return false;
}

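/* Narrowband RX IQK: one fixed gain entry instead of the four-group sweep.
 * On success the RXCFIR readback (with bit 1 set) becomes the narrowband
 * coefficient; on failure a default 0x40000002 is used.
 */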
static bool _iqk_nbrxk(struct rtw89_dev *rtwdev,
                       enum rtw89_phy_idx phy_idx, u8 path)
{
        struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
        u8 group = 0x0;
        u32 rf0 = 0x0, tmp = 0x0;
        u32 idxrxgain_a = 0x1a0;
        u32 idxattc2_a = 0x00;
        u32 idxattc1_a = 0x5;
        u32 idxrxgain_g = 0x1E0;
        u32 idxattc2_g = 0x15;
        u32 idxattc1_g = 0x0;
        bool fail = false;

        switch (iqk_info->iqk_band[path]) {
        case RTW89_BAND_2G:
                rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, idxrxgain_g);
                rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C2G, idxattc2_g);
                rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C1G, idxattc1_g);
                break;
        case RTW89_BAND_5G:
                rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, idxrxgain_a);
                rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_C2, idxattc2_a);
                rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_C1, idxattc1_a);
                break;
        default:
                break;
        }

        rtw89_phy_write32_set(rtwdev, R_IQK_CFG, B_IQK_CFG_SET);
        rf0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
        rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI,
                               rf0 | iqk_info->syn1to2);
        rtw89_phy_write32_mask(rtwdev, R_IQK_COM, MASKDWORD, 0x40010100);
        rtw89_phy_write32_clr(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_RXCFIR);
        rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL);
        rtw89_phy_write32_clr(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3);
        rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
                               B_CFIR_LUT_GP, group);
        rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_EN);
        rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
        fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK);

        switch (iqk_info->iqk_band[path]) {
        case RTW89_BAND_2G:
                rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x0);
                rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
                break;
        case RTW89_BAND_5G:
                rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x0);
                rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
                rtw89_write_rf(rtwdev, path, RR_WLSEL, RR_WLSEL_AG, 0x0);
                break;
        default:
                break;
        }

        if (!fail) {
                tmp = rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD);
                iqk_info->nb_rxcfir[path] = tmp | 0x2;
        } else {
                iqk_info->nb_rxcfir[path] = 0x40000002;
        }
        return fail;
}

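/* RX clock setup for IQK: 80 MHz channels get a different CFIR system
 * preset and ADC clock select than 20/40 MHz ones.
 */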
static void _iqk_rxclk_setting(struct rtw89_dev *rtwdev, u8 path)
{
        struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

        if (iqk_info->iqk_bw[path] == RTW89_CHANNEL_WIDTH_80) {
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
                rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8),
                                       MASKDWORD, 0x4d000a08);
                rtw89_phy_write32_mask(rtwdev, R_P0_RXCK + (path << 13),
                                       B_P0_RXCK_VAL, 0x2);
                rtw89_phy_write32_set(rtwdev, R_P0_RXCK + (path << 13), B_P0_RXCK_ON);
                rtw89_phy_write32_set(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_ON);
                rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_VAL, 0x1);
        } else {
                rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8),
                                       MASKDWORD, 0x44000a08);
                rtw89_phy_write32_mask(rtwdev, R_P0_RXCK + (path << 13),
                                       B_P0_RXCK_VAL, 0x1);
                rtw89_phy_write32_set(rtwdev, R_P0_RXCK + (path << 13), B_P0_RXCK_ON);
                rtw89_phy_write32_set(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_ON);
                rtw89_phy_write32_clr(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_VAL);
        }
}

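/* Wideband TX IQK: sweep four TX gain groups (plus attenuator/mixer
 * settings on 2G), run a one-shot TXK per group, and latch each group's
 * fail bit into R_IQKINF.
 */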
static bool _txk_group_sel(struct rtw89_dev *rtwdev,
			   enum rtw89_phy_idx phy_idx, u8 path)
{
	static const u32 a_txgain[4] = {0xE466, 0x646D, 0xE4E2, 0x64ED};
	static const u32 g_txgain[4] = {0x60e8, 0x60f0, 0x61e8, 0x61ED};
	static const u32 a_itqt[4] = {0x12, 0x12, 0x12, 0x1b};
	static const u32 g_itqt[4] = {0x09, 0x12, 0x12, 0x12};
	static const u32 g_attsmxr[4] = {0x0, 0x1, 0x1, 0x1};
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool fail = false;
	u8 gp = 0x0;
	u32 tmp = 0x0;

	for (gp = 0x0; gp < 0x4; gp++) {
		switch (iqk_info->iqk_band[path]) {
		case RTW89_BAND_2G:
			rtw89_phy_write32_mask(rtwdev, R_RFGAIN_BND + (path << 8),
					       B_RFGAIN_BND, 0x08);
			rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL,
				       g_txgain[gp]);
			rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1,
				       g_attsmxr[gp]);
			rtw89_write_rf(rtwdev, path, RR_TXG2, RR_TXG2_ATT0,
				       g_attsmxr[gp]);
			rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
					       MASKDWORD, g_itqt[gp]);
			break;
		case RTW89_BAND_5G:
			rtw89_phy_write32_mask(rtwdev, R_RFGAIN_BND + (path << 8),
					       B_RFGAIN_BND, 0x04);
			rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL,
				       a_txgain[gp]);
			rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
					       MASKDWORD, a_itqt[gp]);
			break;
		default:
			break;
		}
		rtw89_phy_write32_clr(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_TXCFIR);
		rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL);
		rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_GP, gp);
		rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
		fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK);
		rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(8 + gp + path * 4), fail);
	}

	iqk_info->nb_txcfir[path] = 0x40000000;
	rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
			       B_IQK_RES_TXCFIR, 0x5);
	iqk_info->is_wb_txiqk[path] = true;
	tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x8%lx38 = 0x%x\n", path,
		    BIT(path), tmp);
	return false;
}

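/* Narrowband TX IQK: single one-shot at a fixed gain group (gp = 2);
 * cache the TX CFIR readback (with BIT(1) set) on success, or the
 * 0x40000002 default on failure.
 */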
static bool _iqk_nbtxk(struct rtw89_dev *rtwdev,
		       enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u8 group = 0x2;
	u32 a_mode_txgain = 0x64e2;
	u32 g_mode_txgain = 0x61e8;
	u32 attsmxr = 0x1;
	u32 itqt = 0x12;
	u32 tmp = 0x0;
	bool fail = false;

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_phy_write32_mask(rtwdev, R_RFGAIN_BND + (path << 8),
				       B_RFGAIN_BND, 0x08);
		rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL, g_mode_txgain);
		rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1, attsmxr);
		rtw89_write_rf(rtwdev, path, RR_TXG2, RR_TXG2_ATT0, attsmxr);
		break;
	case RTW89_BAND_5G:
		rtw89_phy_write32_mask(rtwdev, R_RFGAIN_BND + (path << 8),
				       B_RFGAIN_BND, 0x04);
		rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL, a_mode_txgain);
		break;
	default:
		break;
	}

	rtw89_phy_write32_clr(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_TXCFIR);
	rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL);
	rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, group);
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, itqt);
	rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
	fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);
	if (!fail) {
		tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
		iqk_info->nb_txcfir[path] = tmp | 0x2;
	} else {
		iqk_info->nb_txcfir[path] = 0x40000002;
	}
	tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x8%lx38 = 0x%x\n", path,
		    BIT(path), tmp);
	return fail;
}

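/* Load one LOK bias candidate into the RF LUT; table address 0x0 is
 * used for 2 GHz and 0x1 for 5 GHz.
 */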
static void _lok_res_table(struct rtw89_dev *rtwdev, u8 path, u8 ibias)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, ibias = %x\n", path, ibias);
	rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x2);
	if (iqk_info->iqk_band[path] == RTW89_BAND_2G)
		rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, 0x0);
	else
		rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, 0x1);
	rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, ibias);
	rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x0);
}

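/* A fine-LOK result is rejected when either TX LO compensation code
 * read back from RR_TXMO falls outside [0x2, 0x1d], i.e. sits near a
 * rail of the code range.
 */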
static bool _lok_finetune_check(struct rtw89_dev *rtwdev, u8 path)
{
	bool is_fail = false;
	u32 tmp = 0x0;
	u32 core_i = 0x0;
	u32 core_q = 0x0;

	tmp = rtw89_read_rf(rtwdev, path, RR_TXMO, RFREG_MASK);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK][FineLOK] S%x, 0x58 = 0x%x\n",
		    path, tmp);
	core_i = FIELD_GET(RR_TXMO_COI, tmp);
	core_q = FIELD_GET(RR_TXMO_COQ, tmp);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, i = 0x%x\n", path, core_i);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, q = 0x%x\n", path, core_q);

	if (core_i < 0x2 || core_i > 0x1d || core_q < 0x2 || core_q > 0x1d)
		is_fail = true;

	return is_fail;
}

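/* LO leakage calibration: coarse one-shot, short settle, then fine
 * one-shot, followed by a range check on the resulting codes.
 */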
static bool _iqk_lok(struct rtw89_dev *rtwdev,
		     enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u32 rf0 = 0x0;
	u8 itqt = 0x12;
	bool fail = false;
	bool tmp = false;

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL, 0xe5e0);
		itqt = 0x09;
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL, 0xe4e0);
		itqt = 0x12;
		break;
	default:
		break;
	}

	rtw89_phy_write32_set(rtwdev, R_IQK_CFG, B_IQK_CFG_SET);
	rf0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF1, B_IQK_DIF1_TXPI,
			       rf0 | iqk_info->syn1to2);
	rtw89_phy_write32_clr(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_TXCFIR);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, 0x0);
	rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_EN);
	rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, itqt);
	tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_COARSE);
	iqk_info->lok_cor_fail[0][path] = tmp;
	fsleep(10);
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, itqt);
	tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_FINE);
	iqk_info->lok_fin_fail[0][path] = tmp;
	fail = _lok_finetune_check(rtwdev, path);
	return fail;
}

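/* Per-band RF setup for TX IQK/LOK: reset the ADC FIFO, program
 * attenuation/bias and the LOK LUT, then switch RR_MOD into the TX
 * calibration mode (0x403e0, synthesizer chosen via syn1to2).
 */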
static void _iqk_txk_setting(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

	rtw89_phy_write32_set(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x1f);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x13);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0001);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0041);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST, 0x0303);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST, 0x0000);

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_XALNA2, RR_XALNA2_SW, 0x00);
		rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_POW, 0x3f);
		rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT2, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TXG2, RR_TXG2_ATT0, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EN, 0x0);
		rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
		rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_LOK, 0x0);
		rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_MASK, 0x000);
		rtw89_write_rf(rtwdev, path, RR_RSV2, RFREG_MASK, 0x80200);
		rtw89_write_rf(rtwdev, path, RR_DTXLOK, RFREG_MASK, 0x80200);
		rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
			       0x403e0 | iqk_info->syn1to2);
		udelay(1);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_XGLNA2, RR_XGLNA2_SW, 0x00);
		rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_POW, 0x3f);
		rtw89_write_rf(rtwdev, path, RR_BIASA, RR_BIASA_A, 0x7);
		rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EN, 0x0);
		rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
		rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_LOK, 0x0);
		rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_MASK, 0x100);
		rtw89_write_rf(rtwdev, path, RR_RSV2, RFREG_MASK, 0x80200);
		rtw89_write_rf(rtwdev, path, RR_DTXLOK, RFREG_MASK, 0x80200);
		rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, 0x1);
		rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, 0x0);
		rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
			       0x403e0 | iqk_info->syn1to2);
		udelay(1);
		break;
	default:
		break;
	}
}

static void _iqk_txclk_setting(struct rtw89_dev *rtwdev, u8 path)
{
	rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD, 0xce000a08);
}

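/* Publish the per-path results: mirror LOK/TXK/RXK fail flags into
 * R_IQKINF, snapshot the result registers and track the fail count.
 */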
static void _iqk_info_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
			  u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u32 tmp = 0x0;
	bool flag = false;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_thermal = %lu\n", path,
		    ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]));
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_LOK_COR_fail= %d\n", path,
		    iqk_info->lok_cor_fail[0][path]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_LOK_FIN_fail= %d\n", path,
		    iqk_info->lok_fin_fail[0][path]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_TXIQK_fail = %d\n", path,
		    iqk_info->iqk_tx_fail[0][path]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_RXIQK_fail= %d,\n", path,
		    iqk_info->iqk_rx_fail[0][path]);

	flag = iqk_info->lok_cor_fail[0][path];
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(0) << (path * 4), flag);
	flag = iqk_info->lok_fin_fail[0][path];
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(1) << (path * 4), flag);
	flag = iqk_info->iqk_tx_fail[0][path];
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(2) << (path * 4), flag);
	flag = iqk_info->iqk_rx_fail[0][path];
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(3) << (path * 4), flag);

	tmp = rtw89_phy_read32_mask(rtwdev, R_IQK_RES + (path << 8), MASKDWORD);
	iqk_info->bp_iqkenable[path] = tmp;
	tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
	iqk_info->bp_txkresult[path] = tmp;
	tmp = rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD);
	iqk_info->bp_rxkresult[path] = tmp;

	rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_KCNT,
			       (u8)iqk_info->iqk_times);

	tmp = rtw89_phy_read32_mask(rtwdev, R_IQKINF, 0x0000000f << (path * 4));
	if (tmp != 0x0)
		iqk_info->iqk_fail_cnt++;
	rtw89_phy_write32_mask(rtwdev, R_IQKINF2, 0x00ff0000 << (path * 4),
			       iqk_info->iqk_fail_cnt);
}

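/* IQK flow for one path: up to three LOK attempts with increasing
 * bias, TX IQK (narrowband or wideband), then RX clock/RF setup and
 * RX IQK. DBCC and 2 GHz operation always use the narrowband RX
 * variant.
 */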
static
void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool lok_is_fail = false;
	u8 ibias = 0x1;
	u8 i = 0;

	_iqk_txclk_setting(rtwdev, path);

	for (i = 0; i < 3; i++) {
		_lok_res_table(rtwdev, path, ibias++);
		_iqk_txk_setting(rtwdev, path);
		lok_is_fail = _iqk_lok(rtwdev, phy_idx, path);
		if (!lok_is_fail)
			break;
	}

	if (iqk_info->is_nbiqk)
		iqk_info->iqk_tx_fail[0][path] = _iqk_nbtxk(rtwdev, phy_idx, path);
	else
		iqk_info->iqk_tx_fail[0][path] = _txk_group_sel(rtwdev, phy_idx, path);

	_iqk_rxclk_setting(rtwdev, path);
	_iqk_rxk_setting(rtwdev, path);
	if (iqk_info->is_nbiqk || rtwdev->dbcc_en || iqk_info->iqk_band[path] == RTW89_BAND_2G)
		iqk_info->iqk_rx_fail[0][path] = _iqk_nbrxk(rtwdev, phy_idx, path);
	else
		iqk_info->iqk_rx_fail[0][path] = _rxk_group_sel(rtwdev, phy_idx, path);

	_iqk_info_iqk(rtwdev, phy_idx, path);
}

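/* Snapshot band/bandwidth/channel for this path and choose a result
 * table slot: the first empty MCC slot if any, otherwise the next
 * index in rotation.
 */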
static void _iqk_get_ch_info(struct rtw89_dev *rtwdev,
			     enum rtw89_phy_idx phy, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	u32 reg_rf18 = 0x0, reg_35c = 0x0;
	u8 idx = 0;
	bool get_empty_table = false;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
	for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) {
		if (iqk_info->iqk_mcc_ch[idx][path] == 0) {
			get_empty_table = true;
			break;
		}
	}
	if (!get_empty_table) {
		idx = iqk_info->iqk_table_idx[path] + 1;
		if (idx > RTW89_IQK_CHS_NR - 1)
			idx = 0;
	}

	reg_rf18 = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]cfg ch = %d\n", reg_rf18);
	reg_35c = rtw89_phy_read32_mask(rtwdev, 0x35c, 0x00000c00);

	iqk_info->iqk_band[path] = chan->band_type;
	iqk_info->iqk_bw[path] = chan->band_width;
	iqk_info->iqk_ch[path] = chan->channel;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]iqk_info->iqk_band[%x] = 0x%x\n", path,
		    iqk_info->iqk_band[path]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_info->iqk_bw[%x] = 0x%x\n",
		    path, iqk_info->iqk_bw[path]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_info->iqk_ch[%x] = 0x%x\n",
		    path, iqk_info->iqk_ch[path]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]S%d (PHY%d): / DBCC %s/ %s/ CH%d/ %s\n", path, phy,
		    rtwdev->dbcc_en ? "on" : "off",
		    iqk_info->iqk_band[path] == 0 ? "2G" :
		    iqk_info->iqk_band[path] == 1 ? "5G" : "6G",
		    iqk_info->iqk_ch[path],
		    iqk_info->iqk_bw[path] == 0 ? "20M" :
		    iqk_info->iqk_bw[path] == 1 ? "40M" : "80M");

	if (reg_35c == 0x01)
		iqk_info->syn1to2 = 0x1;
	else
		iqk_info->syn1to2 = 0x0;

	rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_VER, RTW8852A_IQK_VER);
	rtw89_phy_write32_mask(rtwdev, R_IQKCH, 0x000f << (path * 16),
			       (u8)iqk_info->iqk_band[path]);
	rtw89_phy_write32_mask(rtwdev, R_IQKCH, 0x00f0 << (path * 16),
			       (u8)iqk_info->iqk_bw[path]);
	rtw89_phy_write32_mask(rtwdev, R_IQKCH, 0xff00 << (path * 16),
			       (u8)iqk_info->iqk_ch[path]);
	rtw89_phy_write32_mask(rtwdev, R_IQKINF2, 0x000000ff, RTW8852A_NCTL_VER);
}

static void _iqk_start_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
			   u8 path)
{
	_iqk_by_path(rtwdev, phy_idx, path);
}

static void _iqk_restore(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

	rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD,
			       iqk_info->nb_txcfir[path]);
	rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD,
			       iqk_info->nb_rxcfir[path]);
	rtw89_phy_write32_clr(rtwdev, R_NCTL_RPT, MASKDWORD);
	rtw89_phy_write32_clr(rtwdev, R_MDPK_RX_DCK, MASKDWORD);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000);
	rtw89_phy_write32_clr(rtwdev, R_KPATH_CFG, MASKDWORD);
	rtw89_phy_write32_clr(rtwdev, R_GAPK, B_GAPK_ADR);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD, 0x10010000);
	rtw89_phy_write32_clr(rtwdev, R_KIP + (path << 8), B_KIP_RFGAIN);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_MAP + (path << 8), MASKDWORD, 0xe4e4e4e4);
	rtw89_phy_write32_clr(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL);
	rtw89_phy_write32_clr(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW);
	rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), MASKDWORD, 0x00000002);
	rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0);
	rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_POW, 0x0);
	rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0);
	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
	rtw89_write_rf(rtwdev, path, RR_TXRSV, RR_TXRSV_GAPK, 0x0);
	rtw89_write_rf(rtwdev, path, RR_BIAS, RR_BIAS_GAPK, 0x0);
	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1);
}

static void _iqk_afebb_restore(struct rtw89_dev *rtwdev,
			       enum rtw89_phy_idx phy_idx, u8 path)
{
	const struct rtw89_rfk_tbl *tbl;

	switch (_kpath(rtwdev, phy_idx)) {
	case RF_A:
		tbl = &rtw8852a_rfk_iqk_restore_defs_dbcc_path0_tbl;
		break;
	case RF_B:
		tbl = &rtw8852a_rfk_iqk_restore_defs_dbcc_path1_tbl;
		break;
	default:
		tbl = &rtw8852a_rfk_iqk_restore_defs_nondbcc_path01_tbl;
		break;
	}

	rtw89_rfk_parser(rtwdev, tbl);
}

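/* Preset KIP/NCTL before IQK: select the coefficient table (path
 * number under DBCC, saved table index otherwise) and take the RF out
 * of reset.
 */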
static void _iqk_preset(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u8 idx = iqk_info->iqk_table_idx[path];

	if (rtwdev->dbcc_en) {
		rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8),
				       B_COEF_SEL_IQC, path & 0x1);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_G2, path & 0x1);
	} else {
		rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8),
				       B_COEF_SEL_IQC, idx);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_G2, idx);
	}

	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080);
	rtw89_phy_write32_clr(rtwdev, R_NCTL_RW, MASKDWORD);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x81ff010a);
	rtw89_phy_write32_mask(rtwdev, R_KPATH_CFG, MASKDWORD, 0x00200000);
	rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, MASKDWORD, 0x80000000);
	rtw89_phy_write32_clr(rtwdev, R_LOAD_COEF + (path << 8), MASKDWORD);
}

static void _iqk_macbb_setting(struct rtw89_dev *rtwdev,
			       enum rtw89_phy_idx phy_idx, u8 path)
{
	const struct rtw89_rfk_tbl *tbl;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===> %s\n", __func__);

	switch (_kpath(rtwdev, phy_idx)) {
	case RF_A:
		tbl = &rtw8852a_rfk_iqk_set_defs_dbcc_path0_tbl;
		break;
	case RF_B:
		tbl = &rtw8852a_rfk_iqk_set_defs_dbcc_path1_tbl;
		break;
	default:
		tbl = &rtw8852a_rfk_iqk_set_defs_nondbcc_path01_tbl;
		break;
	}

	rtw89_rfk_parser(rtwdev, tbl);
}

static void _iqk_dbcc(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u8 phy_idx = 0x0;

	iqk_info->iqk_times++;

	if (path == 0x0)
		phy_idx = RTW89_PHY_0;
	else
		phy_idx = RTW89_PHY_1;

	_iqk_get_ch_info(rtwdev, phy_idx, path);
	_iqk_macbb_setting(rtwdev, phy_idx, path);
	_iqk_preset(rtwdev, path);
	_iqk_start_iqk(rtwdev, phy_idx, path);
	_iqk_restore(rtwdev, path);
	_iqk_afebb_restore(rtwdev, phy_idx, path);
}

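/* RC calibration: kick RCK through RR_RCKC, poll RF reg 0x1c BIT(3)
 * for completion (20 us budget), then latch the returned CA code and
 * set the ADC offset.
 */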
static void _rck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
	u32 rf_reg5, rck_val = 0;
	u32 val;
	int ret;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] ====== S%d RCK ======\n", path);

	rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);

	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF0x00 = 0x%x\n",
		    rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK));

	/* RCK trigger */
	rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, 0x00240);

	ret = read_poll_timeout_atomic(rtw89_read_rf, val, val, 2, 20,
				       false, rtwdev, path, 0x1c, BIT(3));
	if (ret)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RCK timeout\n");

	rck_val = rtw89_read_rf(rtwdev, path, RR_RCKC, RR_RCKC_CA);
	rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, rck_val);

	/* RCK_ADC_OFFSET */
	rtw89_write_rf(rtwdev, path, RR_RCKO, RR_RCKO_OFF, 0x4);

	rtw89_write_rf(rtwdev, path, RR_RFC, RR_RFC_CKEN, 0x1);
	rtw89_write_rf(rtwdev, path, RR_RFC, RR_RFC_CKEN, 0x0);

	rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[RCK] RF 0x1b / 0x1c / 0x1d = 0x%x / 0x%x / 0x%x\n",
		    rtw89_read_rf(rtwdev, path, RR_RCKC, RFREG_MASK),
		    rtw89_read_rf(rtwdev, path, RR_RCKS, RFREG_MASK),
		    rtw89_read_rf(rtwdev, path, RR_RCKO, RFREG_MASK));
}

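/* One-time IQK bookkeeping: clear R_IQKINF and zero every per-channel,
 * per-path fail flag and table index on first initialization.
 */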
static void _iqk_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u8 ch, path;

	rtw89_phy_write32_clr(rtwdev, R_IQKINF, MASKDWORD);
	if (iqk_info->is_iqk_init)
		return;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
	iqk_info->is_iqk_init = true;
	iqk_info->is_nbiqk = false;
	iqk_info->iqk_fft_en = false;
	iqk_info->iqk_sram_en = false;
	iqk_info->iqk_cfir_en = false;
	iqk_info->iqk_xym_en = false;
	iqk_info->iqk_times = 0x0;

	for (ch = 0; ch < RTW89_IQK_CHS_NR; ch++) {
		iqk_info->iqk_channel[ch] = 0x0;
		for (path = 0; path < RTW8852A_IQK_SS; path++) {
			iqk_info->lok_cor_fail[ch][path] = false;
			iqk_info->lok_fin_fail[ch][path] = false;
			iqk_info->iqk_tx_fail[ch][path] = false;
			iqk_info->iqk_rx_fail[ch][path] = false;
			iqk_info->iqk_mcc_ch[ch][path] = 0x0;
			iqk_info->iqk_table_idx[path] = 0x0;
		}
	}
}

static void _doiqk(struct rtw89_dev *rtwdev, bool force,
		   enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u32 backup_bb_val[BACKUP_BB_REGS_NR];
	u32 backup_rf_val[RTW8852A_IQK_SS][BACKUP_RF_REGS_NR];
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]==========IQK start!!!!!==========\n");
	iqk_info->iqk_times++;
	iqk_info->version = RTW8852A_IQK_VER;
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);

	_iqk_get_ch_info(rtwdev, phy_idx, path);
	_rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]);
	_rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
	_iqk_macbb_setting(rtwdev, phy_idx, path);
	_iqk_preset(rtwdev, path);
	_iqk_start_iqk(rtwdev, phy_idx, path);
	_iqk_restore(rtwdev, path);
	_iqk_afebb_restore(rtwdev, phy_idx, path);
	_rfk_restore_bb_reg(rtwdev, &backup_bb_val[0]);
	_rfk_restore_rf_reg(rtwdev, &backup_rf_val[path][0], path);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}

static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force)
{
	switch (_kpath(rtwdev, phy_idx)) {
	case RF_A:
		_doiqk(rtwdev, force, phy_idx, RF_PATH_A);
		break;
	case RF_B:
		_doiqk(rtwdev, force, phy_idx, RF_PATH_B);
		break;
	case RF_AB:
		_doiqk(rtwdev, force, phy_idx, RF_PATH_A);
		_doiqk(rtwdev, force, phy_idx, RF_PATH_B);
		break;
	default:
		break;
	}
}

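/* RX DC offset calibration (RX DCK). The AFE variant temporarily
 * reroutes the ADC/RX clocks and restores them afterwards; the
 * calibration itself is triggered by toggling RR_DCK_LV with a 600 us
 * wait.
 */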
#define RXDCK_VER_8852A 0xe

static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			enum rtw89_rf_path path, bool is_afe)
{
	u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy, path);
	u32 ori_val;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[RX_DCK] ==== S%d RX DCK (by %s)====\n",
		    path, is_afe ? "AFE" : "RFC");

	ori_val = rtw89_phy_read32_mask(rtwdev, R_P0_RXCK + (path << 13), MASKDWORD);

	if (is_afe) {
		rtw89_phy_write32_set(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG);
		rtw89_phy_write32_set(rtwdev, R_P0_RXCK + (path << 13), B_P0_RXCK_ON);
		rtw89_phy_write32_mask(rtwdev, R_P0_RXCK + (path << 13),
				       B_P0_RXCK_VAL, 0x3);
		rtw89_phy_write32_set(rtwdev, R_S0_RXDC2 + (path << 13), B_S0_RXDC2_MEN);
		rtw89_phy_write32_mask(rtwdev, R_S0_RXDC2 + (path << 13),
				       B_S0_RXDC2_AVG, 0x3);
		rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0x3);
		rtw89_phy_write32_clr(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK);
		rtw89_phy_write32_clr(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST);
		rtw89_phy_write32_set(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST);
		rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_CRXBB, 0x1);
	}

	rtw89_write_rf(rtwdev, path, RR_DCK2, RR_DCK2_CYCLE, 0x3f);
	rtw89_write_rf(rtwdev, path, RR_DCK1, RR_DCK1_SEL, is_afe);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_ONESHOT_START);

	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0);
	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x1);
	fsleep(600);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_ONESHOT_STOP);

	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0);

	if (is_afe) {
		rtw89_phy_write32_clr(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG);
		rtw89_phy_write32_mask(rtwdev, R_P0_RXCK + (path << 13),
				       MASKDWORD, ori_val);
	}
}

static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		    bool is_afe)
{
	u8 path, kpath, dck_tune;
	u32 rf_reg5;
	u32 addr;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[RX_DCK] ****** RXDCK Start (Ver: 0x%x, Cv: %d) ******\n",
		    RXDCK_VER_8852A, rtwdev->hal.cv);

	kpath = _kpath(rtwdev, phy);

	for (path = 0; path < 2; path++) {
		if (!(kpath & BIT(path)))
			continue;

		rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);
		dck_tune = (u8)rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_FINE);

		if (rtwdev->is_tssi_mode[path]) {
			addr = 0x5818 + (path << 13);
			/* TSSI pause */
			rtw89_phy_write32_set(rtwdev, addr, BIT(30));
		}

		rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
		rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, 0x0);
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
		_set_rx_dck(rtwdev, phy, path, is_afe);
		rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, dck_tune);
		rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);

		if (rtwdev->is_tssi_mode[path]) {
			addr = 0x5818 + (path << 13);
			/* TSSI resume */
			rtw89_phy_write32_clr(rtwdev, addr, BIT(30));
		}
	}
}

#define RTW8852A_RF_REL_VERSION 34
#define RTW8852A_DPK_VER 0x10
#define RTW8852A_DPK_TH_AVG_NUM 4
#define RTW8852A_DPK_RF_PATH 2
#define RTW8852A_DPK_KIP_REG_NUM 2

enum rtw8852a_dpk_id {
	LBK_RXIQK = 0x06,
	SYNC = 0x10,
	MDPK_IDL = 0x11,
	MDPK_MPA = 0x12,
	GAIN_LOSS = 0x13,
	GAIN_CAL = 0x14,
};

static void _rf_direct_cntrl(struct rtw89_dev *rtwdev,
			     enum rtw89_rf_path path, bool is_bybb)
{
	if (is_bybb)
		rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1);
	else
		rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
}

static void _dpk_onoff(struct rtw89_dev *rtwdev,
		       enum rtw89_rf_path path, bool off);

static void _dpk_bkup_kip(struct rtw89_dev *rtwdev, u32 *reg,
			  u32 reg_bkup[][RTW8852A_DPK_KIP_REG_NUM],
			  u8 path)
{
	u8 i;

	for (i = 0; i < RTW8852A_DPK_KIP_REG_NUM; i++) {
		reg_bkup[path][i] = rtw89_phy_read32_mask(rtwdev,
							  reg[i] + (path << 8),
							  MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Backup 0x%x = %x\n",
			    reg[i] + (path << 8), reg_bkup[path][i]);
	}
}

static void _dpk_reload_kip(struct rtw89_dev *rtwdev, u32 *reg,
			    u32 reg_bkup[][RTW8852A_DPK_KIP_REG_NUM], u8 path)
{
	u8 i;

	for (i = 0; i < RTW8852A_DPK_KIP_REG_NUM; i++) {
		rtw89_phy_write32_mask(rtwdev, reg[i] + (path << 8),
				       MASKDWORD, reg_bkup[path][i]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Reload 0x%x = %x\n",
			    reg[i] + (path << 8), reg_bkup[path][i]);
	}
}

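/* Send one DPK sub-command to the NCTL engine and poll 0xbff8 for the
 * 0x55 completion marker (20 ms budget); returns 1 on timeout.
 */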
static u8 _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			enum rtw89_rf_path path, enum rtw8852a_dpk_id id)
{
	u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy, path);
	u16 dpk_cmd = 0x0;
	u32 val;
	int ret;

	dpk_cmd = (u16)((id << 8) | (0x19 + (path << 4)));

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_ONESHOT_START);

	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, dpk_cmd);
	rtw89_phy_write32_set(rtwdev, R_DPK_CTL, B_DPK_CTL_EN);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
				       10, 20000, false, rtwdev, 0xbff8, MASKBYTE0);

	rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, MASKBYTE0);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_ONESHOT_STOP);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] one-shot for %s = 0x%x (ret=%d)\n",
		    id == 0x06 ? "LBK_RXIQK" :
		    id == 0x10 ? "SYNC" :
		    id == 0x11 ? "MDPK_IDL" :
		    id == 0x12 ? "MDPK_MPA" :
		    id == 0x13 ? "GAIN_LOSS" : "PWR_CAL",
		    dpk_cmd, ret);

	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] one-shot over 20ms!!!!\n");
		return 1;
	}

	return 0;
}

static void _dpk_rx_dck(struct rtw89_dev *rtwdev,
			enum rtw89_phy_idx phy,
			enum rtw89_rf_path path)
{
	rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_EN_TIA_IDA, 0x3);
	_set_rx_dck(rtwdev, phy, path, false);
}

static void _dpk_information(struct rtw89_dev *rtwdev,
			     enum rtw89_phy_idx phy,
			     enum rtw89_rf_path path)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	u8 kidx = dpk->cur_idx[path];

	dpk->bp[path][kidx].band = chan->band_type;
	dpk->bp[path][kidx].ch = chan->channel;
	dpk->bp[path][kidx].bw = chan->band_width;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] S%d[%d] (PHY%d): TSSI %s/ DBCC %s/ %s/ CH%d/ %s\n",
		    path, dpk->cur_idx[path], phy,
		    rtwdev->is_tssi_mode[path] ? "on" : "off",
		    rtwdev->dbcc_en ? "on" : "off",
		    dpk->bp[path][kidx].band == 0 ? "2G" :
		    dpk->bp[path][kidx].band == 1 ? "5G" : "6G",
		    dpk->bp[path][kidx].ch,
		    dpk->bp[path][kidx].bw == 0 ? "20M" :
		    dpk->bp[path][kidx].bw == 1 ? "40M" : "80M");
}

static void _dpk_bb_afe_setting(struct rtw89_dev *rtwdev,
				enum rtw89_phy_idx phy,
				enum rtw89_rf_path path, u8 kpath)
{
	switch (kpath) {
	case RF_A:
		rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_sf_defs_a_tbl);

		if (rtw89_phy_read32_mask(rtwdev, R_2P4G_BAND, B_2P4G_BAND_SEL) == 0x0)
			rtw89_phy_write32_set(rtwdev, R_RXCCA, B_RXCCA_DIS);

		rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_sr_defs_a_tbl);
		break;
	case RF_B:
		rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_sf_defs_b_tbl);

		if (rtw89_phy_read32_mask(rtwdev, R_2P4G_BAND, B_2P4G_BAND_SEL) == 0x1)
			rtw89_phy_write32_set(rtwdev, R_RXCCA, B_RXCCA_DIS);

		rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_sr_defs_b_tbl);
		break;
	case RF_AB:
		rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_s_defs_ab_tbl);
		break;
	default:
		break;
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] Set BB/AFE for PHY%d (kpath=%d)\n", phy, kpath);
}

static void _dpk_bb_afe_restore(struct rtw89_dev *rtwdev,
				enum rtw89_phy_idx phy,
				enum rtw89_rf_path path, u8 kpath)
{
	switch (kpath) {
	case RF_A:
		rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_r_defs_a_tbl);
		break;
	case RF_B:
		rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_r_defs_b_tbl);
		break;
	case RF_AB:
		rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_r_defs_ab_tbl);
		break;
	default:
		break;
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] Restore BB/AFE for PHY%d (kpath=%d)\n", phy, kpath);
}

static void _dpk_tssi_pause(struct rtw89_dev *rtwdev,
			    enum rtw89_rf_path path, bool is_pause)
{
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13),
			       B_P0_TSSI_TRK_EN, is_pause);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d TSSI %s\n", path,
		    is_pause ? "pause" : "resume");
}

static void _dpk_kip_setting(struct rtw89_dev *rtwdev,
			     enum rtw89_rf_path path, u8 kidx)
{
	rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080);
	rtw89_phy_write32_mask(rtwdev, R_KIP_CLK, MASKDWORD, 0x00093f3f);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x807f030a);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD, 0xce000a08);
	rtw89_phy_write32_mask(rtwdev, R_DPK_CFG, B_DPK_CFG_IDX, 0x2);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, B_NCTL_CFG_SPAGE, path); /* subpage_id */
	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0 + (path << 8) + (kidx << 2),
			       MASKDWORD, 0x003f2e2e);
	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
			       MASKDWORD, 0x005b5b5b);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] KIP setting for S%d[%d]!!\n",
		    path, kidx);
}

static void _dpk_kip_restore(struct rtw89_dev *rtwdev,
			     enum rtw89_rf_path path)
{
	rtw89_phy_write32_clr(rtwdev, R_NCTL_RPT, MASKDWORD);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD, 0x10010000);
	rtw89_phy_write32_clr(rtwdev, R_KIP_CLK, MASKDWORD);

	if (rtwdev->hal.cv > CHIP_CBV)
		rtw89_phy_write32_mask(rtwdev, R_DPD_COM + (path << 8), BIT(15), 0x1);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d restore KIP\n", path);
}

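/* Loopback RX IQK used by DPK: power the RX PLL, pick an attenuation
 * step from the current RXBB gain, then run the LBK_RXIQK one-shot and
 * restore the RF mode.
 */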
static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev,
			   enum rtw89_phy_idx phy,
			   enum rtw89_rf_path path)
{
	u8 cur_rxbb;

	cur_rxbb = (u8)rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB);

	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_lbk_rxiqk_defs_f_tbl);

	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
	rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x1);
	rtw89_write_rf(rtwdev, path, RR_RXPOW, RR_RXPOW_IQK, 0x2);
	rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK,
		       rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK));
	rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_OFF, 0x13);
	rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
	rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x1);

	fsleep(70);

	rtw89_write_rf(rtwdev, path, RR_RXIQGEN, RR_RXIQGEN_ATTL, 0x1f);

	if (cur_rxbb <= 0xa)
		rtw89_write_rf(rtwdev, path, RR_RXIQGEN, RR_RXIQGEN_ATTH, 0x3);
	else if (cur_rxbb <= 0x10 && cur_rxbb >= 0xb)
		rtw89_write_rf(rtwdev, path, RR_RXIQGEN, RR_RXIQGEN_ATTH, 0x1);
	else
		rtw89_write_rf(rtwdev, path, RR_RXIQGEN, RR_RXIQGEN_ATTH, 0x0);

	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11);

	_dpk_one_shot(rtwdev, phy, path, LBK_RXIQK);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d LBK RXIQC = 0x%x\n", path,
		    rtw89_phy_read32_mask(rtwdev, R_RXIQC, MASKDWORD));

	rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x0);
	rtw89_write_rf(rtwdev, path, RR_RXPOW, RR_RXPOW_IQK, 0x0);
	rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0); /* POW IQKPLL */
	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_DPK);

	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_lbk_rxiqk_defs_r_tbl);
}

static void _dpk_get_thermal(struct rtw89_dev *rtwdev, u8 kidx,
			     enum rtw89_rf_path path)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	dpk->bp[path][kidx].ther_dpk =
		ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] thermal@DPK = 0x%x\n",
		    dpk->bp[path][kidx].ther_dpk);
}

static u8 _dpk_set_tx_pwr(struct rtw89_dev *rtwdev, u8 gain,
			  enum rtw89_rf_path path)
{
	u8 txagc_ori = 0x38;

	rtw89_write_rf(rtwdev, path, RR_MODOPT, RFREG_MASK, txagc_ori);

	return txagc_ori;
}

static void _dpk_rf_setting(struct rtw89_dev *rtwdev, u8 gain,
			    enum rtw89_rf_path path, u8 kidx)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	if (dpk->bp[path][kidx].band == RTW89_BAND_2G) {
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DPK, 0x280b);
		rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_ATTC, 0x0);
		rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_ATTR, 0x4);
		rtw89_write_rf(rtwdev, path, RR_MIXER, RR_MIXER_GN, 0x0);
	} else {
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DPK, 0x282e);
		rtw89_write_rf(rtwdev, path, RR_BIASA2, RR_BIASA2_LB, 0x7);
		rtw89_write_rf(rtwdev, path, RR_TXATANK, RR_TXATANK_LBSW, 0x3);
		rtw89_write_rf(rtwdev, path, RR_RXA, RR_RXA_DPK, 0x3);
	}

	rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_BW, 0x1);
	rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_TXBB, dpk->bp[path][kidx].bw + 1);
	rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_RXBB, 0x0);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] RF 0x0/0x1/0x1a = 0x%x/ 0x%x/ 0x%x\n",
		    rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK),
		    rtw89_read_rf(rtwdev, path, RR_MODOPT, RFREG_MASK),
		    rtw89_read_rf(rtwdev, path, RR_BTC, RFREG_MASK));
}

static void _dpk_manual_txcfir(struct rtw89_dev *rtwdev,
			       enum rtw89_rf_path path, bool is_manual)
{
	u8 tmp_pad, tmp_txbb;

	if (is_manual) {
		rtw89_phy_write32_mask(rtwdev, R_KIP + (path << 8), B_KIP_RFGAIN, 0x1);
		tmp_pad = (u8)rtw89_read_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_PAD);
		rtw89_phy_write32_mask(rtwdev, R_RFGAIN + (path << 8),
				       B_RFGAIN_PAD, tmp_pad);

		tmp_txbb = (u8)rtw89_read_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_BB);
		rtw89_phy_write32_mask(rtwdev, R_RFGAIN + (path << 8),
				       B_RFGAIN_TXBB, tmp_txbb);

		rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8),
				       B_LOAD_COEF_CFIR, 0x1);
		rtw89_phy_write32_clr(rtwdev, R_LOAD_COEF + (path << 8),
				      B_LOAD_COEF_CFIR);

		rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), BIT(1), 0x1);

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] PAD_man / TXBB_man = 0x%x / 0x%x\n", tmp_pad,
			    tmp_txbb);
	} else {
		rtw89_phy_write32_clr(rtwdev, R_KIP + (path << 8), B_KIP_RFGAIN);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] disable manual switch TXCFIR\n");
	}
}

static void _dpk_bypass_rxcfir(struct rtw89_dev *rtwdev,
			       enum rtw89_rf_path path, bool is_bypass)
{
	if (is_bypass) {
		rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
				       B_RXIQC_BYPASS2, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
				       B_RXIQC_BYPASS, 0x1);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] Bypass RXIQC (0x8%d3c = 0x%x)\n", 1 + path,
			    rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8),
						  MASKDWORD));
	} else {
		rtw89_phy_write32_clr(rtwdev, R_RXIQC + (path << 8), B_RXIQC_BYPASS2);
		rtw89_phy_write32_clr(rtwdev, R_RXIQC + (path << 8), B_RXIQC_BYPASS);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] restore 0x8%d3c = 0x%x\n", 1 + path,
			    rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8),
						  MASKDWORD));
	}
}

static
void _dpk_tpg_sel(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80)
		rtw89_phy_write32_clr(rtwdev, R_TPG_MOD, B_TPG_MOD_F);
	else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40)
		rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x2);
	else
		rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x1);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] TPG_Select for %s\n",
		    dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80 ? "80M" :
		    dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40 ? "40M" : "20M");
}

static void _dpk_table_select(struct rtw89_dev *rtwdev,
			      enum rtw89_rf_path path, u8 kidx, u8 gain)
{
	u8 val;

	val = 0x80 + kidx * 0x20 + gain * 0x10;
	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0 + (path << 8), MASKBYTE3, val);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] table select for Kidx[%d], Gain[%d] (0x%x)\n", kidx,
		    gain, val);
}

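/* SYNC sanity check: read the correlator index/value and the DC I/Q
 * estimates; fail when |DC| exceeds 200 on either rail or the
 * correlation value drops below 170.
 */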
static bool _dpk_sync_check(struct rtw89_dev *rtwdev,
			    enum rtw89_rf_path path)
{
#define DPK_SYNC_TH_DC_I 200
#define DPK_SYNC_TH_DC_Q 200
#define DPK_SYNC_TH_CORR 170
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u16 dc_i, dc_q;
	u8 corr_val, corr_idx;

	rtw89_phy_write32_clr(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL);

	corr_idx = (u8)rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORI);
	corr_val = (u8)rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORV);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] S%d Corr_idx / Corr_val = %d / %d\n", path, corr_idx,
		    corr_val);

	dpk->corr_idx[path][0] = corr_idx;
	dpk->corr_val[path][0] = corr_val;

	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x9);

	dc_i = (u16)rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);
	dc_q = (u16)rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCQ);

	dc_i = abs(sign_extend32(dc_i, 11));
	dc_q = abs(sign_extend32(dc_q, 11));

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d DC I/Q, = %d / %d\n",
		    path, dc_i, dc_q);

	dpk->dc_i[path][0] = dc_i;
	dpk->dc_q[path][0] = dc_q;

	if (dc_i > DPK_SYNC_TH_DC_I || dc_q > DPK_SYNC_TH_DC_Q ||
	    corr_val < DPK_SYNC_TH_CORR)
		return true;
	else
		return false;
}

static bool _dpk_sync(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		      enum rtw89_rf_path path, u8 kidx)
{
	_dpk_tpg_sel(rtwdev, path, kidx);
	_dpk_one_shot(rtwdev, phy, path, SYNC);

	return _dpk_sync_check(rtwdev, path); /* 1 = fail */
}

static u16 _dpk_dgain_read(struct rtw89_dev *rtwdev)
{
	u16 dgain = 0x0;

	rtw89_phy_write32_clr(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL);
	rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_SYNERR);

	dgain = (u16)rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain = 0x%x (%d)\n", dgain,
		    dgain);

	return dgain;
}

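/* Map the measured digital gain onto an RXBB gain offset; the
 * thresholds are vendor-tuned constants and the step granularity is
 * not documented here.
 */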
static s8 _dpk_dgain_mapping(struct rtw89_dev *rtwdev, u16 dgain)
{
	s8 offset;

	if (dgain >= 0x783)
		offset = 0x6;
	else if (dgain <= 0x782 && dgain >= 0x551)
		offset = 0x3;
	else if (dgain <= 0x550 && dgain >= 0x3c4)
		offset = 0x0;
	else if (dgain <= 0x3c3 && dgain >= 0x2aa)
		offset = -3;
	else if (dgain <= 0x2a9 && dgain >= 0x1e3)
		offset = -6;
	else if (dgain <= 0x1e2 && dgain >= 0x156)
		offset = -9;
	else if (dgain <= 0x155)
		offset = -12;
	else
		offset = 0x0;

	return offset;
}

static u8 _dpk_gainloss_read(struct rtw89_dev *rtwdev)
{
	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x6);
	rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x1);

	return rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_GL);
}

static void _dpk_gainloss(struct rtw89_dev *rtwdev,
			  enum rtw89_phy_idx phy, enum rtw89_rf_path path,
			  u8 kidx)
{
	_dpk_table_select(rtwdev, path, kidx, 1);
	_dpk_one_shot(rtwdev, phy, path, GAIN_LOSS);
}

#define DPK_TXAGC_LOWER 0x2e
#define DPK_TXAGC_UPPER 0x3f
#define DPK_TXAGC_INVAL 0xff

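/* Apply a gain-loss offset to the current TXAGC, clamped to
 * [DPK_TXAGC_LOWER, DPK_TXAGC_UPPER], and program it via RR_MODOPT.
 */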
static u8 _dpk_set_offset(struct rtw89_dev *rtwdev,
			  enum rtw89_rf_path path, s8 gain_offset)
{
	u8 txagc;

	txagc = (u8)rtw89_read_rf(rtwdev, path, RR_MODOPT, RFREG_MASK);

	if (txagc - gain_offset < DPK_TXAGC_LOWER)
		txagc = DPK_TXAGC_LOWER;
	else if (txagc - gain_offset > DPK_TXAGC_UPPER)
		txagc = DPK_TXAGC_UPPER;
	else
		txagc = txagc - gain_offset;

	rtw89_write_rf(rtwdev, path, RR_MODOPT, RFREG_MASK, txagc);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] tmp_txagc (GL=%d) = 0x%x\n",
		    gain_offset, txagc);

	return txagc;
}

enum dpk_agc_step {
	DPK_AGC_STEP_SYNC_DGAIN,
	DPK_AGC_STEP_GAIN_ADJ,
	DPK_AGC_STEP_GAIN_LOSS_IDX,
	DPK_AGC_STEP_GL_GT_CRITERION,
	DPK_AGC_STEP_GL_LT_CRITERION,
	DPK_AGC_STEP_SET_TX_GAIN,
};

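/* Read the PA scan report. With is_check, compare the power of report
 * entry 0x00 against entry 0x1f and flag a drop beyond 8/5; otherwise
 * dump all 32 entries. The final comparison is only meaningful on the
 * is_check path, since the accumulators stay zero otherwise.
 */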
static u8 _dpk_pas_read(struct rtw89_dev *rtwdev, bool is_check)
{
	u32 val1_i = 0, val1_q = 0, val2_i = 0, val2_q = 0;
	u8 i;

	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_pas_read_defs_tbl);

	if (is_check) {
		rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x00);
		val1_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
		val1_i = abs(sign_extend32(val1_i, 11));
		val1_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
		val1_q = abs(sign_extend32(val1_q, 11));
		rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x1f);
		val2_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
		val2_i = abs(sign_extend32(val2_i, 11));
		val2_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
		val2_q = abs(sign_extend32(val2_q, 11));
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_delta = 0x%x\n",
			    phy_div(val1_i * val1_i + val1_q * val1_q,
				    val2_i * val2_i + val2_q * val2_q));
	} else {
		for (i = 0; i < 32; i++) {
			rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, i);
			rtw89_debug(rtwdev, RTW89_DBG_RFK,
				    "[DPK] PAS_Read[%02d]= 0x%08x\n", i,
				    rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD));
		}
	}

	if ((val1_i * val1_i + val1_q * val1_q) >=
	    ((val2_i * val2_i + val2_q * val2_q) * 8 / 5))
		return 1;
	else
		return 0;
}

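/* DPK AGC state machine: alternate SYNC/dgain measurement, RXBB gain
 * adjustment and gain-loss reads, stepping TXAGC up or down until the
 * gain-loss index is acceptable or DPK_AGC_ADJ_LMT iterations elapse.
 * Returns the final TXAGC, or DPK_TXAGC_INVAL if SYNC failed.
 */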
static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		   enum rtw89_rf_path path, u8 kidx, u8 init_txagc,
		   bool loss_only)
{
#define DPK_AGC_ADJ_LMT 6
#define DPK_DGAIN_UPPER 1922
#define DPK_DGAIN_LOWER 342
#define DPK_RXBB_UPPER 0x1f
#define DPK_RXBB_LOWER 0
#define DPK_GL_CRIT 7
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	u8 tmp_txagc, tmp_rxbb = 0, tmp_gl_idx = 0;
	u8 agc_cnt = 0;
	bool limited_rxbb = false;
	s8 offset = 0;
	u16 dgain = 0;
	u8 step = DPK_AGC_STEP_SYNC_DGAIN;
	bool goout = false;

	tmp_txagc = init_txagc;

	do {
		switch (step) {
		case DPK_AGC_STEP_SYNC_DGAIN:
			if (_dpk_sync(rtwdev, phy, path, kidx)) {
				tmp_txagc = DPK_TXAGC_INVAL;
				goout = true;
				break;
			}

			dgain = _dpk_dgain_read(rtwdev);

			if (loss_only || limited_rxbb)
				step = DPK_AGC_STEP_GAIN_LOSS_IDX;
			else
				step = DPK_AGC_STEP_GAIN_ADJ;
			break;

		case DPK_AGC_STEP_GAIN_ADJ:
			tmp_rxbb = (u8)rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB);
			offset = _dpk_dgain_mapping(rtwdev, dgain);

			if (tmp_rxbb + offset > DPK_RXBB_UPPER) {
				tmp_rxbb = DPK_RXBB_UPPER;
				limited_rxbb = true;
			} else if (tmp_rxbb + offset < DPK_RXBB_LOWER) {
				tmp_rxbb = DPK_RXBB_LOWER;
				limited_rxbb = true;
			} else {
				tmp_rxbb = tmp_rxbb + offset;
			}

			rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB, tmp_rxbb);
			rtw89_debug(rtwdev, RTW89_DBG_RFK,
				    "[DPK] Adjust RXBB (%d) = 0x%x\n", offset,
				    tmp_rxbb);
			if (offset != 0 || agc_cnt == 0) {
				if (chan->band_width < RTW89_CHANNEL_WIDTH_80)
					_dpk_bypass_rxcfir(rtwdev, path, true);
				else
					_dpk_lbk_rxiqk(rtwdev, phy, path);
			}

			if (dgain > DPK_DGAIN_UPPER || dgain < DPK_DGAIN_LOWER)
				step = DPK_AGC_STEP_SYNC_DGAIN;
			else
				step = DPK_AGC_STEP_GAIN_LOSS_IDX;

			agc_cnt++;
			break;

		case DPK_AGC_STEP_GAIN_LOSS_IDX:
			_dpk_gainloss(rtwdev, phy, path, kidx);
			tmp_gl_idx = _dpk_gainloss_read(rtwdev);

			if ((tmp_gl_idx == 0 && _dpk_pas_read(rtwdev, true)) ||
			    tmp_gl_idx > DPK_GL_CRIT)
				step = DPK_AGC_STEP_GL_GT_CRITERION;
			else if (tmp_gl_idx == 0)
				step = DPK_AGC_STEP_GL_LT_CRITERION;
			else
				step = DPK_AGC_STEP_SET_TX_GAIN;
			break;

		case DPK_AGC_STEP_GL_GT_CRITERION:
			if (tmp_txagc == DPK_TXAGC_LOWER) {
				goout = true;
				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[DPK] Txagc@lower bound!!\n");
			} else {
				tmp_txagc = _dpk_set_offset(rtwdev, path, 3);
			}
			step = DPK_AGC_STEP_GAIN_LOSS_IDX;
			agc_cnt++;
			break;

		case DPK_AGC_STEP_GL_LT_CRITERION:
			if (tmp_txagc == DPK_TXAGC_UPPER) {
				goout = true;
				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[DPK] Txagc@upper bound!!\n");
			} else {
				tmp_txagc = _dpk_set_offset(rtwdev, path, -2);
			}
			step = DPK_AGC_STEP_GAIN_LOSS_IDX;
			agc_cnt++;
			break;

		case DPK_AGC_STEP_SET_TX_GAIN:
			tmp_txagc = _dpk_set_offset(rtwdev, path, tmp_gl_idx);
			goout = true;
			agc_cnt++;
			break;

		default:
			goout = true;
			break;
		}
	} while (!goout && (agc_cnt < DPK_AGC_ADJ_LMT));

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] Txagc / RXBB for DPK = 0x%x / 0x%x\n", tmp_txagc,
		    tmp_rxbb);

	return tmp_txagc;
}

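/* Program the MDPD model order (0/1/2) for IDL; order 0 additionally
 * sets B_LDL_NORM_PN to 0x3 and forces manual MDPK sync.
 */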
static void _dpk_set_mdpd_para(struct rtw89_dev *rtwdev, u8 order)
{
	switch (order) {
	case 0:
		rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
		rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x3);
		rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN, 0x1);
		break;
	case 1:
		rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
		rtw89_phy_write32_clr(rtwdev, R_LDL_NORM, B_LDL_NORM_PN);
		rtw89_phy_write32_clr(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN);
		break;
	case 2:
		rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
		rtw89_phy_write32_clr(rtwdev, R_LDL_NORM, B_LDL_NORM_PN);
		rtw89_phy_write32_clr(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN);
		break;
	default:
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] Wrong MDPD order!!(0x%x)\n", order);
		break;
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] Set MDPD order to 0x%x for IDL\n", order);
}

static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			 enum rtw89_rf_path path, u8 kidx, u8 gain)
{
	_dpk_set_mdpd_para(rtwdev, 0x0);
	_dpk_table_select(rtwdev, path, kidx, 1);
	_dpk_one_shot(rtwdev, phy, path, MDPK_IDL);
}

static void _dpk_fill_result(struct rtw89_dev *rtwdev,
			     enum rtw89_rf_path path, u8 kidx, u8 gain,
			     u8 txagc)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u16 pwsf = 0x78;
	u8 gs = 0x5b;

	rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), B_COEF_SEL_MDPD, kidx);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] Fill txagc/ pwsf/ gs = 0x%x/ 0x%x/ 0x%x\n", txagc,
		    pwsf, gs);

	dpk->bp[path][kidx].txagc_dpk = txagc;
	rtw89_phy_write32_mask(rtwdev, R_TXAGC_RFK + (path << 8),
			       0x3F << ((gain << 3) + (kidx << 4)), txagc);

	dpk->bp[path][kidx].pwsf = pwsf;
	rtw89_phy_write32_mask(rtwdev, R_DPD_BND + (path << 8) + (kidx << 2),
			       0x1FF << (gain << 4), pwsf);

	rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x1);
	rtw89_phy_write32_clr(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD);

	dpk->bp[path][kidx].gs = gs;
	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
			       MASKDWORD, 0x065b5b5b);

	rtw89_phy_write32_clr(rtwdev, R_DPD_V1 + (path << 8), MASKDWORD);

	rtw89_phy_write32_clr(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_SEL);
}

static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			      enum rtw89_rf_path path)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	bool is_reload = false;
	u8 idx, cur_band, cur_ch;

	cur_band = chan->band_type;
	cur_ch = chan->channel;

	for (idx = 0; idx < RTW89_DPK_BKUP_NUM; idx++) {
		if (cur_band != dpk->bp[path][idx].band ||
		    cur_ch != dpk->bp[path][idx].ch)
			continue;

		rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8),
				       B_COEF_SEL_MDPD, idx);
		dpk->cur_idx[path] = idx;
		is_reload = true;
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] reload S%d[%d] success\n", path, idx);
	}

	return is_reload;
}

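/* Per-path DPK sequence: direct RF control, TX power and RF setup,
 * RX DCK, KIP setup, manual TXCFIR, AGC loop, thermal capture, IDL
 * model programming and result fill-in. Returns true if the AGC ended
 * with an invalid TXAGC.
 */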
static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		      enum rtw89_rf_path path, u8 gain)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u8 txagc = 0, kidx = dpk->cur_idx[path];
	bool is_fail = false;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] ========= S%d[%d] DPK Start =========\n", path,
		    kidx);

	_rf_direct_cntrl(rtwdev, path, false);
	txagc = _dpk_set_tx_pwr(rtwdev, gain, path);
	_dpk_rf_setting(rtwdev, gain, path, kidx);
	_dpk_rx_dck(rtwdev, phy, path);

	_dpk_kip_setting(rtwdev, path, kidx);
	_dpk_manual_txcfir(rtwdev, path, true);
	txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false);
	if (txagc == DPK_TXAGC_INVAL)
		is_fail = true;
	_dpk_get_thermal(rtwdev, kidx, path);

	_dpk_idl_mpa(rtwdev, phy, path, kidx, gain);
	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
	_dpk_fill_result(rtwdev, path, kidx, gain, txagc);
	_dpk_manual_txcfir(rtwdev, path, false);

	if (!is_fail)
		dpk->bp[path][kidx].path_ok = true;
	else
		dpk->bp[path][kidx].path_ok = false;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s\n", path, kidx,
		    is_fail ? "Check" : "Success");

	return is_fail;
}

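/* Top-level DPK scheduler: optionally reload cached results per path,
 * back up BB/RF/KIP state, run _dpk_main() on every requested path and
 * restore everything (including TSSI pause/resume) afterwards.
 */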
static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
			    enum rtw89_phy_idx phy, u8 kpath)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u32 backup_bb_val[BACKUP_BB_REGS_NR];
	u32 backup_rf_val[RTW8852A_DPK_RF_PATH][BACKUP_RF_REGS_NR];
	u32 kip_bkup[RTW8852A_DPK_RF_PATH][RTW8852A_DPK_KIP_REG_NUM] = {{0}};
	u32 kip_reg[] = {R_RXIQC, R_IQK_RES};
	u8 path;
	bool is_fail = true, reloaded[RTW8852A_DPK_RF_PATH] = {false};

	if (dpk->is_dpk_reload_en) {
		for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) {
			if (!(kpath & BIT(path)))
				continue;

			reloaded[path] = _dpk_reload_check(rtwdev, phy, path);
			if (!reloaded[path] && dpk->bp[path][0].ch != 0)
				dpk->cur_idx[path] = !dpk->cur_idx[path];
			else
				_dpk_onoff(rtwdev, path, false);
		}
	} else {
		for (path = 0; path < RTW8852A_DPK_RF_PATH; path++)
			dpk->cur_idx[path] = 0;
	}

	if ((kpath == RF_A && reloaded[RF_PATH_A]) ||
	    (kpath == RF_B && reloaded[RF_PATH_B]) ||
	    (kpath == RF_AB && reloaded[RF_PATH_A] && reloaded[RF_PATH_B]))
		return;

	_rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]);

	for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) {
		if (!(kpath & BIT(path)) || reloaded[path])
			continue;
		if (rtwdev->is_tssi_mode[path])
			_dpk_tssi_pause(rtwdev, path, true);
		_dpk_bkup_kip(rtwdev, kip_reg, kip_bkup, path);
		_rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
		_dpk_information(rtwdev, phy, path);
	}

	_dpk_bb_afe_setting(rtwdev, phy, path, kpath);

	for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) {
		if (!(kpath & BIT(path)) || reloaded[path])
			continue;

		is_fail = _dpk_main(rtwdev, phy, path, 1);
		_dpk_onoff(rtwdev, path, is_fail);
	}

	_dpk_bb_afe_restore(rtwdev, phy, path, kpath);
	_rfk_restore_bb_reg(rtwdev, &backup_bb_val[0]);

	for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) {
		if (!(kpath & BIT(path)) || reloaded[path])
			continue;

		_dpk_kip_restore(rtwdev, path);
		_dpk_reload_kip(rtwdev, kip_reg, kip_bkup, path);
		_rfk_restore_rf_reg(rtwdev, &backup_rf_val[path][0], path);
		if (rtwdev->is_tssi_mode[path])
			_dpk_tssi_pause(rtwdev, path, false);
	}
}
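
/* DPK is skipped altogether when an external PA is fitted for the current
 * band, presumably because the predistortion model is only valid for the
 * internal PA.
 */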
static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	struct rtw89_fem_info *fem = &rtwdev->fem;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);

	if (fem->epa_2g && chan->band_type == RTW89_BAND_2G) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] Skip DPK due to 2G_ext_PA exist!!\n");
		return true;
	} else if (fem->epa_5g && chan->band_type == RTW89_BAND_5G) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] Skip DPK due to 5G_ext_PA exist!!\n");
		return true;
	}

	return false;
}

static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	u8 path, kpath;

	kpath = _kpath(rtwdev, phy);

	for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) {
		if (kpath & BIT(path))
			_dpk_onoff(rtwdev, path, true);
	}
}

static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force)
{
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] ****** DPK Start (Ver: 0x%x, Cv: %d, RF_para: %d) ******\n",
		    RTW8852A_DPK_VER, rtwdev->hal.cv,
		    RTW8852A_RF_REL_VERSION);

	if (_dpk_bypass_check(rtwdev, phy))
		_dpk_force_bypass(rtwdev, phy);
	else
		_dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy));
}
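
/* Gate the DPD output for @path: the low bit written to MASKBYTE3 of
 * R_DPD_CH0A enables predistortion only if DPK is globally enabled, not
 * being forced off, and the last calibration of this slot succeeded.
 */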
static void _dpk_onoff(struct rtw89_dev *rtwdev,
		       enum rtw89_rf_path path, bool off)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u8 val, kidx = dpk->cur_idx[path];

	val = dpk->is_dpk_enable && !off && dpk->bp[path][kidx].path_ok;

	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
			       MASKBYTE3, 0x6 | val);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s !!!\n", path,
		    kidx, dpk->is_dpk_enable && !off ? "enable" : "disable");
}
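
/* Periodic DPK tracking: compensate the stored power scaling factor (pwsf)
 * for thermal drift since calibration. The thermal delta is scaled per
 * band (x1.5 on 2 GHz, x2.5 on 5 GHz) and, in TSSI mode, combined with the
 * current txagc offsets before the two 9-bit pwsf fields are written back.
 */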
static void _dpk_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	u8 path, kidx;
	u8 trk_idx = 0, txagc_rf = 0;
	s8 txagc_bb = 0, txagc_bb_tp = 0, ini_diff = 0, txagc_ofst = 0;
	u16 pwsf[2];
	u8 cur_ther;
	s8 delta_ther[2] = {0};

	for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) {
		kidx = dpk->cur_idx[path];

		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
			    "[DPK_TRK] ================[S%d[%d] (CH %d)]================\n",
			    path, kidx, dpk->bp[path][kidx].ch);

		cur_ther = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
			    "[DPK_TRK] thermal now = %d\n", cur_ther);

		if (dpk->bp[path][kidx].ch != 0 && cur_ther != 0)
			delta_ther[path] = dpk->bp[path][kidx].ther_dpk - cur_ther;

		if (dpk->bp[path][kidx].band == RTW89_BAND_2G)
			delta_ther[path] = delta_ther[path] * 3 / 2;
		else
			delta_ther[path] = delta_ther[path] * 5 / 2;

		txagc_rf = (u8)rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13),
						     RR_MODOPT_M_TXPWR);

		if (rtwdev->is_tssi_mode[path]) {
			trk_idx = (u8)rtw89_read_rf(rtwdev, path, RR_TXA, RR_TXA_TRK);

			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] txagc_RF / track_idx = 0x%x / %d\n",
				    txagc_rf, trk_idx);

			txagc_bb =
				(s8)rtw89_phy_read32_mask(rtwdev,
							  R_TXAGC_BB + (path << 13),
							  MASKBYTE2);
			txagc_bb_tp =
				(u8)rtw89_phy_read32_mask(rtwdev,
							  R_TXAGC_TP + (path << 13),
							  B_TXAGC_TP);

			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] txagc_bb_tp / txagc_bb = 0x%x / 0x%x\n",
				    txagc_bb_tp, txagc_bb);

			txagc_ofst =
				(s8)rtw89_phy_read32_mask(rtwdev,
							  R_TXAGC_BB + (path << 13),
							  MASKBYTE3);

			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] txagc_offset / delta_ther = %d / %d\n",
				    txagc_ofst, delta_ther[path]);

			if (rtw89_phy_read32_mask(rtwdev, R_DPD_COM + (path << 8),
						  BIT(15)) == 0x1)
				txagc_ofst = 0;

			if (txagc_rf != 0 && cur_ther != 0)
				ini_diff = txagc_ofst + delta_ther[path];

			if (rtw89_phy_read32_mask(rtwdev, R_P0_TXDPD + (path << 13),
						  B_P0_TXDPD) == 0x0) {
				pwsf[0] = dpk->bp[path][kidx].pwsf + txagc_bb_tp -
					  txagc_bb + ini_diff +
					  tssi_info->extra_ofst[path];
				pwsf[1] = dpk->bp[path][kidx].pwsf + txagc_bb_tp -
					  txagc_bb + ini_diff +
					  tssi_info->extra_ofst[path];
			} else {
				pwsf[0] = dpk->bp[path][kidx].pwsf + ini_diff +
					  tssi_info->extra_ofst[path];
				pwsf[1] = dpk->bp[path][kidx].pwsf + ini_diff +
					  tssi_info->extra_ofst[path];
			}
		} else {
			pwsf[0] = (dpk->bp[path][kidx].pwsf + delta_ther[path]) & 0x1ff;
			pwsf[1] = (dpk->bp[path][kidx].pwsf + delta_ther[path]) & 0x1ff;
		}

		if (rtw89_phy_read32_mask(rtwdev, R_DPK_TRK, B_DPK_TRK_DIS) == 0x0 &&
		    txagc_rf != 0) {
			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] New pwsf[0] / pwsf[1] = 0x%x / 0x%x\n",
				    pwsf[0], pwsf[1]);

			rtw89_phy_write32_mask(rtwdev, R_DPD_BND + (path << 8) + (kidx << 2),
					       0x000001FF, pwsf[0]);
			rtw89_phy_write32_mask(rtwdev, R_DPD_BND + (path << 8) + (kidx << 2),
					       0x01FF0000, pwsf[1]);
		}
	}
}

static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			     enum rtw89_rf_path path)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	enum rtw89_band band = chan->band_type;

	if (band == RTW89_BAND_2G)
		rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXG, 0x1);
	else
		rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXA, 0x1);
}

static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	enum rtw89_band band = chan->band_type;

	rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_sys_defs_tbl);
	rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
				 &rtw8852a_tssi_sys_defs_2g_tbl,
				 &rtw8852a_tssi_sys_defs_5g_tbl);
}

static void _tssi_ini_txpwr_ctrl_bb(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				    enum rtw89_rf_path path)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	enum rtw89_band band = chan->band_type;

	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_tssi_txpwr_ctrl_bb_defs_a_tbl,
				 &rtw8852a_tssi_txpwr_ctrl_bb_defs_b_tbl);
	rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
				 &rtw8852a_tssi_txpwr_ctrl_bb_defs_2g_tbl,
				 &rtw8852a_tssi_txpwr_ctrl_bb_defs_5g_tbl);
}

static void _tssi_ini_txpwr_ctrl_bb_he_tb(struct rtw89_dev *rtwdev,
					  enum rtw89_phy_idx phy,
					  enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_a_tbl,
				 &rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_b_tbl);
}

static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			  enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_tssi_dck_defs_a_tbl,
				 &rtw8852a_tssi_dck_defs_b_tbl);
}
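
/* Program the 64-entry thermal offset table for @path. Entries 0..31 take
 * the negated "down" swing deltas and entries 63..32 the "up" deltas,
 * packed four s8 values per 32-bit write by the local __get_val() helper.
 * A thermal reading of 0xff means no valid calibration value, in which
 * case the table is zeroed and the thermal meter defaults to 32.
 */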
static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				 enum rtw89_rf_path path)
{
#define __get_val(ptr, idx)			\
({						\
	s8 *__ptr = (ptr);			\
	u8 __idx = (idx), __i, __v;		\
	u32 __val = 0;				\
	for (__i = 0; __i < 4; __i++) {		\
		__v = (__ptr[__idx + __i]);	\
		__val |= (__v << (8 * __i));	\
	}					\
	__val;					\
})
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	u8 ch = chan->channel;
	u8 subband = chan->subband_type;
	const s8 *thm_up_a = NULL;
	const s8 *thm_down_a = NULL;
	const s8 *thm_up_b = NULL;
	const s8 *thm_down_b = NULL;
	u8 thermal = 0xff;
	s8 thm_ofst[64] = {0};
	u32 tmp = 0;
	u8 i, j;

	switch (subband) {
	default:
	case RTW89_CH_2G:
		thm_up_a = rtw89_8852a_trk_cfg.delta_swingidx_2ga_p;
		thm_down_a = rtw89_8852a_trk_cfg.delta_swingidx_2ga_n;
		thm_up_b = rtw89_8852a_trk_cfg.delta_swingidx_2gb_p;
		thm_down_b = rtw89_8852a_trk_cfg.delta_swingidx_2gb_n;
		break;
	case RTW89_CH_5G_BAND_1:
		thm_up_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_p[0];
		thm_down_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_n[0];
		thm_up_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_p[0];
		thm_down_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_n[0];
		break;
	case RTW89_CH_5G_BAND_3:
		thm_up_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_p[1];
		thm_down_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_n[1];
		thm_up_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_p[1];
		thm_down_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_n[1];
		break;
	case RTW89_CH_5G_BAND_4:
		thm_up_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_p[2];
		thm_down_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_n[2];
		thm_up_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_p[2];
		thm_down_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_n[2];
		break;
	}

	if (path == RF_PATH_A) {
		thermal = tssi_info->thermal[RF_PATH_A];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] ch=%d thermal_pathA=0x%x\n", ch, thermal);

		rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_DIS, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_TRK, 0x1);

		if (thermal == 0xff) {
			rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, 32);
			rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, 32);

			for (i = 0; i < 64; i += 4) {
				rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, 0x0);

				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    0x5c00 + i, 0x0);
			}
		} else {
			rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, thermal);
			rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL,
					       thermal);

			i = 0;
			for (j = 0; j < 32; j++)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      -thm_down_a[i++] :
					      -thm_down_a[DELTA_SWINGIDX_SIZE - 1];

			i = 1;
			for (j = 63; j >= 32; j--)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      thm_up_a[i++] :
					      thm_up_a[DELTA_SWINGIDX_SIZE - 1];

			for (i = 0; i < 64; i += 4) {
				tmp = __get_val(thm_ofst, i);
				rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, tmp);

				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    0x5c00 + i, tmp);
			}
		}

		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x0);
	} else {
		thermal = tssi_info->thermal[RF_PATH_B];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] ch=%d thermal_pathB=0x%x\n", ch, thermal);

		rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_DIS, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_TRK, 0x1);

		if (thermal == 0xff) {
			rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, 32);
			rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL, 32);

			for (i = 0; i < 64; i += 4) {
				rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, 0x0);

				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    0x7c00 + i, 0x0);
			}
		} else {
			rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, thermal);
			rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL,
					       thermal);

			i = 0;
			for (j = 0; j < 32; j++)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      -thm_down_b[i++] :
					      -thm_down_b[DELTA_SWINGIDX_SIZE - 1];

			i = 1;
			for (j = 63; j >= 32; j--)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      thm_up_b[i++] :
					      thm_up_b[DELTA_SWINGIDX_SIZE - 1];

			for (i = 0; i < 64; i += 4) {
				tmp = __get_val(thm_ofst, i);
				rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, tmp);

				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    0x7c00 + i, tmp);
			}
		}

		rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x0);
	}
#undef __get_val
}

static void _tssi_set_dac_gain_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				   enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_tssi_dac_gain_tbl_defs_a_tbl,
				 &rtw8852a_tssi_dac_gain_tbl_defs_b_tbl);
}

static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_tssi_slope_cal_org_defs_a_tbl,
				 &rtw8852a_tssi_slope_cal_org_defs_b_tbl);
}

static void _tssi_set_rf_gap_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				 enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_tssi_rf_gap_tbl_defs_a_tbl,
				 &rtw8852a_tssi_rf_gap_tbl_defs_b_tbl);
}

static void _tssi_set_slope(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			    enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_tssi_slope_defs_a_tbl,
				 &rtw8852a_tssi_slope_defs_b_tbl);
}

static void _tssi_set_track(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			    enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_tssi_track_defs_a_tbl,
				 &rtw8852a_tssi_track_defs_b_tbl);
}

static void _tssi_set_txagc_offset_mv_avg(struct rtw89_dev *rtwdev,
					  enum rtw89_phy_idx phy,
					  enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_tssi_txagc_ofst_mv_avg_defs_a_tbl,
				 &rtw8852a_tssi_txagc_ofst_mv_avg_defs_b_tbl);
}

static void _tssi_pak(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		      enum rtw89_rf_path path)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	u8 subband = chan->subband_type;

	switch (subband) {
	default:
	case RTW89_CH_2G:
		rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
					 &rtw8852a_tssi_pak_defs_a_2g_tbl,
					 &rtw8852a_tssi_pak_defs_b_2g_tbl);
		break;
	case RTW89_CH_5G_BAND_1:
		rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
					 &rtw8852a_tssi_pak_defs_a_5g_1_tbl,
					 &rtw8852a_tssi_pak_defs_b_5g_1_tbl);
		break;
	case RTW89_CH_5G_BAND_3:
		rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
					 &rtw8852a_tssi_pak_defs_a_5g_3_tbl,
					 &rtw8852a_tssi_pak_defs_b_5g_3_tbl);
		break;
	case RTW89_CH_5G_BAND_4:
		rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
					 &rtw8852a_tssi_pak_defs_a_5g_4_tbl,
					 &rtw8852a_tssi_pak_defs_b_5g_4_tbl);
		break;
	}
}

static void _tssi_enable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	u8 i;

	for (i = 0; i < RF_PATH_NUM_8852A; i++) {
		_tssi_set_track(rtwdev, phy, i);
		_tssi_set_txagc_offset_mv_avg(rtwdev, phy, i);

		rtw89_rfk_parser_by_cond(rtwdev, i == RF_PATH_A,
					 &rtw8852a_tssi_enable_defs_a_tbl,
					 &rtw8852a_tssi_enable_defs_b_tbl);

		tssi_info->base_thermal[i] =
			ewma_thermal_read(&rtwdev->phystat.avg_thermal[i]);
		rtwdev->is_tssi_mode[i] = true;
	}
}

static void _tssi_disable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_disable_defs_tbl);
	rtwdev->is_tssi_mode[RF_PATH_A] = false;
	rtwdev->is_tssi_mode[RF_PATH_B] = false;
}

static u32 _tssi_get_cck_group(struct rtw89_dev *rtwdev, u8 ch)
{
	switch (ch) {
	case 1 ... 2:
		return 0;
	case 3 ... 5:
		return 1;
	case 6 ... 8:
		return 2;
	case 9 ... 11:
		return 3;
	case 12 ... 13:
		return 4;
	case 14:
		return 5;
	}

	return 0;
}
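
/* Channels that fall between two TSSI calibration groups are tagged with
 * TSSI_EXTRA_GROUP_BIT; for such channels the DE value is taken as the
 * average of the two neighbouring groups (IDX1 and IDX2 below).
 */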
#define TSSI_EXTRA_GROUP_BIT (BIT(31))
#define TSSI_EXTRA_GROUP(idx) (TSSI_EXTRA_GROUP_BIT | (idx))
#define IS_TSSI_EXTRA_GROUP(group) ((group) & TSSI_EXTRA_GROUP_BIT)
#define TSSI_EXTRA_GET_GROUP_IDX1(group) ((group) & ~TSSI_EXTRA_GROUP_BIT)
#define TSSI_EXTRA_GET_GROUP_IDX2(group) (TSSI_EXTRA_GET_GROUP_IDX1(group) + 1)

static u32 _tssi_get_ofdm_group(struct rtw89_dev *rtwdev, u8 ch)
{
	switch (ch) {
	case 1 ... 2:
		return 0;
	case 3 ... 5:
		return 1;
	case 6 ... 8:
		return 2;
	case 9 ... 11:
		return 3;
	case 12 ... 14:
		return 4;
	case 36 ... 40:
		return 5;
	case 41 ... 43:
		return TSSI_EXTRA_GROUP(5);
	case 44 ... 48:
		return 6;
	case 49 ... 51:
		return TSSI_EXTRA_GROUP(6);
	case 52 ... 56:
		return 7;
	case 57 ... 59:
		return TSSI_EXTRA_GROUP(7);
	case 60 ... 64:
		return 8;
	case 100 ... 104:
		return 9;
	case 105 ... 107:
		return TSSI_EXTRA_GROUP(9);
	case 108 ... 112:
		return 10;
	case 113 ... 115:
		return TSSI_EXTRA_GROUP(10);
	case 116 ... 120:
		return 11;
	case 121 ... 123:
		return TSSI_EXTRA_GROUP(11);
	case 124 ... 128:
		return 12;
	case 129 ... 131:
		return TSSI_EXTRA_GROUP(12);
	case 132 ... 136:
		return 13;
	case 137 ... 139:
		return TSSI_EXTRA_GROUP(13);
	case 140 ... 144:
		return 14;
	case 149 ... 153:
		return 15;
	case 154 ... 156:
		return TSSI_EXTRA_GROUP(15);
	case 157 ... 161:
		return 16;
	case 162 ... 164:
		return TSSI_EXTRA_GROUP(16);
	case 165 ... 169:
		return 17;
	case 170 ... 172:
		return TSSI_EXTRA_GROUP(17);
	case 173 ... 177:
		return 18;
	}

	return 0;
}

static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch)
{
	switch (ch) {
	case 1 ... 8:
		return 0;
	case 9 ... 14:
		return 1;
	case 36 ... 48:
		return 2;
	case 52 ... 64:
		return 3;
	case 100 ... 112:
		return 4;
	case 116 ... 128:
		return 5;
	case 132 ... 144:
		return 6;
	case 149 ... 177:
		return 7;
	}

	return 0;
}
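
/* Look up the per-group OFDM/MCS DE compensation value for the current
 * channel from efuse-derived data; channels in an "extra" group get the
 * mean of the two adjacent groups.
 */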
static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			    enum rtw89_rf_path path)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	u8 ch = chan->channel;
	u32 gidx, gidx_1st, gidx_2nd;
	s8 de_1st = 0;
	s8 de_2nd = 0;
	s8 val;

	gidx = _tssi_get_ofdm_group(rtwdev, ch);

	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
		    "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n",
		    path, gidx);

	if (IS_TSSI_EXTRA_GROUP(gidx)) {
		gidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(gidx);
		gidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(gidx);
		de_1st = tssi_info->tssi_mcs[path][gidx_1st];
		de_2nd = tssi_info->tssi_mcs[path][gidx_2nd];
		val = (de_1st + de_2nd) / 2;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
			    path, val, de_1st, de_2nd);
	} else {
		val = tssi_info->tssi_mcs[path][gidx];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
	}

	return val;
}

static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev,
				 enum rtw89_phy_idx phy,
				 enum rtw89_rf_path path)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	u8 ch = chan->channel;
	u32 tgidx, tgidx_1st, tgidx_2nd;
	s8 tde_1st = 0;
	s8 tde_2nd = 0;
	s8 val;

	tgidx = _tssi_get_trim_group(rtwdev, ch);

	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
		    "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
		    path, tgidx);

	if (IS_TSSI_EXTRA_GROUP(tgidx)) {
		tgidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
		tgidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
		tde_1st = tssi_info->tssi_trim[path][tgidx_1st];
		tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd];
		val = (tde_1st + tde_2nd) / 2;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
			    path, val, tde_1st, tde_2nd);
	} else {
		val = tssi_info->tssi_trim[path][tgidx];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
			    path, val);
	}

	return val;
}
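
/* Push the efuse-derived DE values (group DE plus trim offset) into the
 * per-path CCK and MCS compensation registers, bits [21:12] of each; the
 * register address arrays below are path A / path B pairs.
 */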
static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
				  enum rtw89_phy_idx phy)
{
#define __DE_MASK 0x003ff000
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	static const u32 r_cck_long[RF_PATH_NUM_8852A] = {0x5858, 0x7858};
	static const u32 r_cck_short[RF_PATH_NUM_8852A] = {0x5860, 0x7860};
	static const u32 r_mcs_20m[RF_PATH_NUM_8852A] = {0x5838, 0x7838};
	static const u32 r_mcs_40m[RF_PATH_NUM_8852A] = {0x5840, 0x7840};
	static const u32 r_mcs_80m[RF_PATH_NUM_8852A] = {0x5848, 0x7848};
	static const u32 r_mcs_80m_80m[RF_PATH_NUM_8852A] = {0x5850, 0x7850};
	static const u32 r_mcs_5m[RF_PATH_NUM_8852A] = {0x5828, 0x7828};
	static const u32 r_mcs_10m[RF_PATH_NUM_8852A] = {0x5830, 0x7830};
	u8 ch = chan->channel;
	u8 i, gidx;
	s8 ofdm_de;
	s8 trim_de;
	s32 val;

	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n",
		    phy, ch);

	for (i = 0; i < RF_PATH_NUM_8852A; i++) {
		gidx = _tssi_get_cck_group(rtwdev, ch);
		trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
		val = tssi_info->tssi_cck[i][gidx] + trim_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d cck[%d]=0x%x trim=0x%x\n",
			    i, gidx, tssi_info->tssi_cck[i][gidx], trim_de);

		rtw89_phy_write32_mask(rtwdev, r_cck_long[i], __DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, r_cck_short[i], __DE_MASK, val);

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] Set TSSI CCK DE 0x%x[21:12]=0x%x\n",
			    r_cck_long[i],
			    rtw89_phy_read32_mask(rtwdev, r_cck_long[i],
						  __DE_MASK));

		ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i);
		trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
		val = ofdm_de + trim_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs=0x%x trim=0x%x\n",
			    i, ofdm_de, trim_de);

		rtw89_phy_write32_mask(rtwdev, r_mcs_20m[i], __DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, r_mcs_40m[i], __DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, r_mcs_80m[i], __DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, r_mcs_80m_80m[i], __DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, r_mcs_5m[i], __DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, r_mcs_10m[i], __DE_MASK, val);

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] Set TSSI MCS DE 0x%x[21:12]=0x%x\n",
			    r_mcs_20m[i],
			    rtw89_phy_read32_mask(rtwdev, r_mcs_20m[i],
						  __DE_MASK));
	}
#undef __DE_MASK
}
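
/* TSSI thermal tracking: turn the thermal delta since _tssi_enable() into
 * a gain offset (delta x 1.5), split it into a signed integer part
 * (clamped to [-16, 15]) and a 3-bit fractional part, and apply them via
 * the DPD offset and TX gain scale registers.
 */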
static void _tssi_track(struct rtw89_dev *rtwdev)
{
	static const u32 tx_gain_scale_table[] = {
		0x400, 0x40e, 0x41d, 0x427, 0x43c, 0x44c, 0x45c, 0x46c,
		0x400, 0x39d, 0x3ab, 0x3b8, 0x3c6, 0x3d4, 0x3e2, 0x3f1
	};
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	u8 path;
	u8 cur_ther;
	s32 delta_ther = 0, gain_offset_int, gain_offset_float;
	s8 gain_offset;

	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRK] %s:\n",
		    __func__);

	if (!rtwdev->is_tssi_mode[RF_PATH_A])
		return;
	if (!rtwdev->is_tssi_mode[RF_PATH_B])
		return;

	for (path = RF_PATH_A; path < RF_PATH_NUM_8852A; path++) {
		if (!tssi_info->tssi_tracking_check[path]) {
			rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRK] return!!!\n");
			continue;
		}

		cur_ther = (u8)rtw89_phy_read32_mask(rtwdev,
						     R_TSSI_THER + (path << 13),
						     B_TSSI_THER);

		if (cur_ther == 0 || tssi_info->base_thermal[path] == 0)
			continue;

		delta_ther = cur_ther - tssi_info->base_thermal[path];

		gain_offset = (s8)delta_ther * 15 / 10;

		tssi_info->extra_ofst[path] = gain_offset;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRK] base_thermal=%d gain_offset=0x%x path=%d\n",
			    tssi_info->base_thermal[path], gain_offset, path);

		gain_offset_int = gain_offset >> 3;
		gain_offset_float = gain_offset & 7;

		if (gain_offset_int > 15)
			gain_offset_int = 15;
		else if (gain_offset_int < -16)
			gain_offset_int = -16;

		rtw89_phy_write32_mask(rtwdev, R_DPD_OFT_EN + (path << 13),
				       B_DPD_OFT_EN, 0x1);

		rtw89_phy_write32_mask(rtwdev, R_TXGAIN_SCALE + (path << 13),
				       B_TXGAIN_SCALE_EN, 0x1);

		rtw89_phy_write32_mask(rtwdev, R_DPD_OFT_ADDR + (path << 13),
				       B_DPD_OFT_ADDR, gain_offset_int);

		rtw89_phy_write32_mask(rtwdev, R_TXGAIN_SCALE + (path << 13),
				       B_TXGAIN_SCALE_OFT,
				       tx_gain_scale_table[gain_offset_float]);
	}
}
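
/* Decide whether TSSI tracking stays armed at the current TX power limit.
 * The limit is converted to units of 0.01 dBm (the limit itself is in
 * 0.25 dB steps); above 18 dBm on the 2 GHz subband tracking is kept on,
 * otherwise the tracking tables are reloaded and the offsets cleared.
 */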
static void _tssi_high_power(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	u8 ch = chan->channel, ch_tmp;
	u8 bw = chan->band_width;
	u8 band = chan->band_type;
	u8 subband = chan->subband_type;
	s8 power;
	s32 xdbm;

	if (bw == RTW89_CHANNEL_WIDTH_40)
		ch_tmp = ch - 2;
	else if (bw == RTW89_CHANNEL_WIDTH_80)
		ch_tmp = ch - 6;
	else
		ch_tmp = ch;

	power = rtw89_phy_read_txpwr_limit(rtwdev, band, bw, RTW89_1TX,
					   RTW89_RS_MCS, RTW89_NONBF, ch_tmp);

	xdbm = power * 100 / 4;

	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d xdbm=%d\n",
		    __func__, phy, xdbm);

	if (xdbm > 1800 && subband == RTW89_CH_2G) {
		tssi_info->tssi_tracking_check[RF_PATH_A] = true;
		tssi_info->tssi_tracking_check[RF_PATH_B] = true;
	} else {
		rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_tracking_defs_tbl);
		tssi_info->extra_ofst[RF_PATH_A] = 0;
		tssi_info->extra_ofst[RF_PATH_B] = 0;
		tssi_info->tssi_tracking_check[RF_PATH_A] = false;
		tssi_info->tssi_tracking_check[RF_PATH_B] = false;
	}
}

static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			u8 path, s16 pwr_dbm, u8 enable)
{
	rtw8852a_bb_set_plcp_tx(rtwdev);
	rtw8852a_bb_cfg_tx_path(rtwdev, path);
	rtw8852a_bb_set_power(rtwdev, pwr_dbm, phy);
	rtw8852a_bb_set_pmac_pkt_tx(rtwdev, enable, 20, 5000, 0, phy);
}
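
/* Transmit a short PMAC packet burst near the regulatory power limit so
 * the hardware latches a usable default txagc offset per path; the value
 * is read back from R_TXAGC_BB/R_TXAGC_BB_S1 and cached for the restore
 * done in rtw8852a_tssi_default_txagc().
 */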
static void _tssi_pre_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	const struct rtw89_chip_info *mac_reg = rtwdev->chip;
	u8 ch = chan->channel, ch_tmp;
	u8 bw = chan->band_width;
	u8 band = chan->band_type;
	u32 tx_en;
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy, 0);
	s8 power;
	s16 xdbm;
	u32 i, tx_counter = 0;

	if (bw == RTW89_CHANNEL_WIDTH_40)
		ch_tmp = ch - 2;
	else if (bw == RTW89_CHANNEL_WIDTH_80)
		ch_tmp = ch - 6;
	else
		ch_tmp = ch;

	power = rtw89_phy_read_txpwr_limit(rtwdev, band, RTW89_CHANNEL_WIDTH_20,
					   RTW89_1TX, RTW89_RS_OFDM,
					   RTW89_NONBF, ch_tmp);

	xdbm = (power * 100) >> mac_reg->txpwr_factor_mac;

	if (xdbm > 1800)
		xdbm = 68;
	else
		xdbm = power * 2;

	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
		    "[TSSI] %s: phy=%d org_power=%d xdbm=%d\n",
		    __func__, phy, power, xdbm);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
	rtw89_chip_stop_sch_tx(rtwdev, phy, &tx_en, RTW89_SCH_TX_SEL_ALL);
	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy));
	tx_counter = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);

	_tssi_hw_tx(rtwdev, phy, RF_PATH_AB, xdbm, true);
	mdelay(15);
	_tssi_hw_tx(rtwdev, phy, RF_PATH_AB, xdbm, false);

	tx_counter = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD) -
		     tx_counter;

	if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, MASKHWORD) != 0xc000 &&
	    rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, MASKHWORD) != 0x0) {
		for (i = 0; i < 6; i++) {
			tssi_info->default_txagc_offset[RF_PATH_A] =
				rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB,
						      MASKBYTE3);

			if (tssi_info->default_txagc_offset[RF_PATH_A] != 0x0)
				break;
		}
	}

	if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, MASKHWORD) != 0xc000 &&
	    rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, MASKHWORD) != 0x0) {
		for (i = 0; i < 6; i++) {
			tssi_info->default_txagc_offset[RF_PATH_B] =
				rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1,
						      MASKBYTE3);

			if (tssi_info->default_txagc_offset[RF_PATH_B] != 0x0)
				break;
		}
	}

	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
		    "[TSSI] %s: tx counter=%d\n",
		    __func__, tx_counter);

	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
		    "[TSSI] Backup R_TXAGC_BB=0x%x R_TXAGC_BB_S1=0x%x\n",
		    tssi_info->default_txagc_offset[RF_PATH_A],
		    tssi_info->default_txagc_offset[RF_PATH_B]);

	rtw8852a_bb_tx_mode_switch(rtwdev, phy, 0);

	rtw89_chip_resume_sch_tx(rtwdev, phy, tx_en);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
}

void rtw8852a_rck(struct rtw89_dev *rtwdev)
{
	u8 path;

	for (path = 0; path < 2; path++)
		_rck(rtwdev, path);
}

void rtw8852a_dack(struct rtw89_dev *rtwdev)
{
	u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START);
	_dac_cal(rtwdev, false);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP);
}

void rtw8852a_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
	u32 tx_en;
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
	rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));

	_iqk_init(rtwdev);
	if (rtwdev->dbcc_en)
		_iqk_dbcc(rtwdev, phy_idx);
	else
		_iqk(rtwdev, phy_idx, false);

	rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
}

void rtw8852a_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
		     bool is_afe)
{
	u32 tx_en;
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START);
	rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));

	_rx_dck(rtwdev, phy_idx, is_afe);

	rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP);
}

void rtw8852a_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
	u32 tx_en;
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
	rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));

	rtwdev->dpk.is_dpk_enable = true;
	rtwdev->dpk.is_dpk_reload_en = false;
	_dpk(rtwdev, phy_idx, false);

	rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
}

void rtw8852a_dpk_track(struct rtw89_dev *rtwdev)
{
	_dpk_track(rtwdev);
}
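
/* Full TSSI bring-up for one PHY: disable TSSI, program every per-path
 * block in sequence, then re-enable it and apply the efuse DE values, the
 * high-power tracking policy and the pre-TX txagc capture.
 */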
void rtw8852a_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	u8 i;

	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n",
		    __func__, phy);

	_tssi_disable(rtwdev, phy);

	for (i = RF_PATH_A; i < RF_PATH_NUM_8852A; i++) {
		_tssi_rf_setting(rtwdev, phy, i);
		_tssi_set_sys(rtwdev, phy);
		_tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i);
		_tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i);
		_tssi_set_dck(rtwdev, phy, i);
		_tssi_set_tmeter_tbl(rtwdev, phy, i);
		_tssi_set_dac_gain_tbl(rtwdev, phy, i);
		_tssi_slope_cal_org(rtwdev, phy, i);
		_tssi_set_rf_gap_tbl(rtwdev, phy, i);
		_tssi_set_slope(rtwdev, phy, i);
		_tssi_pak(rtwdev, phy, i);
	}

	_tssi_enable(rtwdev, phy);
	_tssi_set_efuse_to_de(rtwdev, phy);
	_tssi_high_power(rtwdev, phy);
	_tssi_pre_tx(rtwdev, phy);
}

void rtw8852a_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	u8 i;

	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n",
		    __func__, phy);

	if (!rtwdev->is_tssi_mode[RF_PATH_A])
		return;
	if (!rtwdev->is_tssi_mode[RF_PATH_B])
		return;

	_tssi_disable(rtwdev, phy);

	for (i = RF_PATH_A; i < RF_PATH_NUM_8852A; i++) {
		_tssi_rf_setting(rtwdev, phy, i);
		_tssi_set_sys(rtwdev, phy);
		_tssi_set_tmeter_tbl(rtwdev, phy, i);
		_tssi_pak(rtwdev, phy, i);
	}

	_tssi_enable(rtwdev, phy);
	_tssi_set_efuse_to_de(rtwdev, phy);
}

void rtw8852a_tssi_track(struct rtw89_dev *rtwdev)
{
	_tssi_track(rtwdev);
}

static
void _rtw8852a_tssi_avg_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B])
		return;

	/* disable */
	rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_disable_defs_tbl);

	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_AVG, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_AVG, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, B_P1_TSSI_AVG, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_AVG, 0x0);

	/* enable */
	rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_enable_defs_ab_tbl);
}

static
void _rtw8852a_tssi_set_avg(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B])
		return;

	/* disable */
	rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_disable_defs_tbl);

	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_AVG, 0x4);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_AVG, 0x2);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, B_P1_TSSI_AVG, 0x4);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_AVG, 0x2);

	/* enable */
	rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_enable_defs_ab_tbl);
}

static void rtw8852a_tssi_set_avg(struct rtw89_dev *rtwdev,
				  enum rtw89_phy_idx phy, bool enable)
{
	if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B])
		return;

	if (enable) {
		/* SCAN_START */
		_rtw8852a_tssi_avg_scan(rtwdev, phy);
	} else {
		/* SCAN_END */
		_rtw8852a_tssi_set_avg(rtwdev, phy);
	}
}
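
/* On scan start, snapshot a non-zero default txagc offset per path
 * (readings of 0xc000 or 0x0 are treated as invalid); on scan end, write
 * the snapshot back and pulse the OFT_EN bits so it takes effect.
 */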
static void rtw8852a_tssi_default_txagc(struct rtw89_dev *rtwdev,
					enum rtw89_phy_idx phy, bool enable)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	u8 i;

	if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B])
		return;

	if (enable) {
		if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, B_TXAGC_BB_OFT) != 0xc000 &&
		    rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, B_TXAGC_BB_OFT) != 0x0) {
			for (i = 0; i < 6; i++) {
				tssi_info->default_txagc_offset[RF_PATH_A] =
					rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB,
							      B_TXAGC_BB);
				if (tssi_info->default_txagc_offset[RF_PATH_A])
					break;
			}
		}

		if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, B_TXAGC_BB_S1_OFT) != 0xc000 &&
		    rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, B_TXAGC_BB_S1_OFT) != 0x0) {
			for (i = 0; i < 6; i++) {
				tssi_info->default_txagc_offset[RF_PATH_B] =
					rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1,
							      B_TXAGC_BB_S1);
				if (tssi_info->default_txagc_offset[RF_PATH_B])
					break;
			}
		}
	} else {
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT,
				       tssi_info->default_txagc_offset[RF_PATH_A]);
		rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT,
				       tssi_info->default_txagc_offset[RF_PATH_B]);

		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x1);

		rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x1);
	}
}

void rtw8852a_wifi_scan_notify(struct rtw89_dev *rtwdev,
			       bool scan_start, enum rtw89_phy_idx phy_idx)
{
	if (scan_start) {
		rtw8852a_tssi_default_txagc(rtwdev, phy_idx, true);
		rtw8852a_tssi_set_avg(rtwdev, phy_idx, true);
	} else {
		rtw8852a_tssi_default_txagc(rtwdev, phy_idx, false);
		rtw8852a_tssi_set_avg(rtwdev, phy_idx, false);
	}
}