rtw8852c_rfk.c

// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2019-2022 Realtek Corporation
 */

#include "chan.h"
#include "coex.h"
#include "debug.h"
#include "phy.h"
#include "reg.h"
#include "rtw8852c.h"
#include "rtw8852c_rfk.h"
#include "rtw8852c_rfk_table.h"
#include "rtw8852c_table.h"

struct rxck_def {
	u32 ctl;
	u32 en;
	u32 bw0;
	u32 bw1;
	u32 mul;
	u32 lp;
};

#define _TSSI_DE_MASK GENMASK(21, 12)
static const u32 _tssi_de_cck_long[RF_PATH_NUM_8852C] = {0x5858, 0x7858};
static const u32 _tssi_de_cck_short[RF_PATH_NUM_8852C] = {0x5860, 0x7860};
static const u32 _tssi_de_mcs_20m[RF_PATH_NUM_8852C] = {0x5838, 0x7838};
static const u32 _tssi_de_mcs_40m[RF_PATH_NUM_8852C] = {0x5840, 0x7840};
static const u32 _tssi_de_mcs_80m[RF_PATH_NUM_8852C] = {0x5848, 0x7848};
static const u32 _tssi_de_mcs_80m_80m[RF_PATH_NUM_8852C] = {0x5850, 0x7850};
static const u32 _tssi_de_mcs_5m[RF_PATH_NUM_8852C] = {0x5828, 0x7828};
static const u32 _tssi_de_mcs_10m[RF_PATH_NUM_8852C] = {0x5830, 0x7830};

static const u32 rtw8852c_backup_bb_regs[] = {
	0x8120, 0xc0d4, 0xc0d8, 0xc0e8, 0x8220, 0xc1d4, 0xc1d8, 0xc1e8
};

static const u32 rtw8852c_backup_rf_regs[] = {
	0xdf, 0x5f, 0x8f, 0x97, 0xa3, 0x5, 0x10005
};

#define BACKUP_BB_REGS_NR ARRAY_SIZE(rtw8852c_backup_bb_regs)
#define BACKUP_RF_REGS_NR ARRAY_SIZE(rtw8852c_backup_rf_regs)

#define RXK_GROUP_NR 4
static const u32 _rxk_a6_idxrxgain[RXK_GROUP_NR] = {0x190, 0x196, 0x290, 0x316};
static const u32 _rxk_a6_idxattc2[RXK_GROUP_NR] = {0x00, 0x0, 0x00, 0x00};
static const u32 _rxk_a_idxrxgain[RXK_GROUP_NR] = {0x190, 0x198, 0x310, 0x318};
static const u32 _rxk_a_idxattc2[RXK_GROUP_NR] = {0x00, 0x00, 0x00, 0x00};
static const u32 _rxk_g_idxrxgain[RXK_GROUP_NR] = {0x252, 0x26c, 0x350, 0x360};
static const u32 _rxk_g_idxattc2[RXK_GROUP_NR] = {0x00, 0x07, 0x00, 0x3};

#define TXK_GROUP_NR 3
static const u32 _txk_a6_power_range[TXK_GROUP_NR] = {0x0, 0x0, 0x0};
static const u32 _txk_a6_track_range[TXK_GROUP_NR] = {0x6, 0x7, 0x7};
static const u32 _txk_a6_gain_bb[TXK_GROUP_NR] = {0x12, 0x09, 0x0e};
static const u32 _txk_a6_itqt[TXK_GROUP_NR] = {0x12, 0x12, 0x12};
static const u32 _txk_a_power_range[TXK_GROUP_NR] = {0x0, 0x0, 0x0};
static const u32 _txk_a_track_range[TXK_GROUP_NR] = {0x5, 0x6, 0x7};
static const u32 _txk_a_gain_bb[TXK_GROUP_NR] = {0x12, 0x09, 0x0e};
static const u32 _txk_a_itqt[TXK_GROUP_NR] = {0x12, 0x12, 0x12};
static const u32 _txk_g_power_range[TXK_GROUP_NR] = {0x0, 0x0, 0x0};
static const u32 _txk_g_track_range[TXK_GROUP_NR] = {0x5, 0x6, 0x6};
static const u32 _txk_g_gain_bb[TXK_GROUP_NR] = {0x0e, 0x0a, 0x0e};
static const u32 _txk_g_itqt[TXK_GROUP_NR] = {0x12, 0x12, 0x12};

static const u32 dpk_par_regs[RTW89_DPK_RF_PATH][4] = {
	{0x8190, 0x8194, 0x8198, 0x81a4},
	{0x81a8, 0x81c4, 0x81c8, 0x81e8},
};

static const u8 _dck_addr_bs[RF_PATH_NUM_8852C] = {0x0, 0x10};
static const u8 _dck_addr[RF_PATH_NUM_8852C] = {0xc, 0x1c};

static const struct rxck_def _ck480M = {0x8, 0x2, 0x3, 0xf, 0x0, 0x9};
static const struct rxck_def _ck960M = {0x8, 0x2, 0x2, 0x8, 0x0, 0x9};
static const struct rxck_def _ck1920M = {0x8, 0x0, 0x2, 0x4, 0x6, 0x9};
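
/* Map a PHY index to the RF path mask it calibrates: a single PHY owns
 * both paths (RF_AB) unless DBCC is enabled, in which case PHY0 drives
 * path A and PHY1 drives path B.
 */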
static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]dbcc_en: %x, PHY%d\n",
		    rtwdev->dbcc_en, phy_idx);

	if (!rtwdev->dbcc_en)
		return RF_AB;

	if (phy_idx == RTW89_PHY_0)
		return RF_A;
	else
		return RF_B;
}

static void _rfk_backup_bb_reg(struct rtw89_dev *rtwdev, u32 backup_bb_reg_val[])
{
	u32 i;

	for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
		backup_bb_reg_val[i] =
			rtw89_phy_read32_mask(rtwdev, rtw8852c_backup_bb_regs[i],
					      MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK]backup bb reg : %x, value =%x\n",
			    rtw8852c_backup_bb_regs[i], backup_bb_reg_val[i]);
	}
}

static void _rfk_backup_rf_reg(struct rtw89_dev *rtwdev, u32 backup_rf_reg_val[],
			       u8 rf_path)
{
	u32 i;

	for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
		backup_rf_reg_val[i] =
			rtw89_read_rf(rtwdev, rf_path,
				      rtw8852c_backup_rf_regs[i], RFREG_MASK);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK]backup rf S%d reg : %x, value =%x\n", rf_path,
			    rtw8852c_backup_rf_regs[i], backup_rf_reg_val[i]);
	}
}

static void _rfk_restore_bb_reg(struct rtw89_dev *rtwdev, u32 backup_bb_reg_val[])
{
	u32 i;

	for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
		rtw89_phy_write32_mask(rtwdev, rtw8852c_backup_bb_regs[i],
				       MASKDWORD, backup_bb_reg_val[i]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK]restore bb reg : %x, value =%x\n",
			    rtw8852c_backup_bb_regs[i], backup_bb_reg_val[i]);
	}
}

static void _rfk_restore_rf_reg(struct rtw89_dev *rtwdev, u32 backup_rf_reg_val[],
				u8 rf_path)
{
	u32 i;

	for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
		rtw89_write_rf(rtwdev, rf_path, rtw8852c_backup_rf_regs[i],
			       RFREG_MASK, backup_rf_reg_val[i]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK]restore rf S%d reg: %x, value =%x\n", rf_path,
			    rtw8852c_backup_rf_regs[i], backup_rf_reg_val[i]);
	}
}
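
/* Poll each selected path until its RF mode register leaves TX (0x2,
 * RF_TX in the rf_mode enum defined later in this file), so calibration
 * does not start while a transmission is still in flight.
 */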
static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath)
{
	u8 path;
	u32 rf_mode;
	int ret;

	for (path = 0; path < RF_PATH_MAX; path++) {
		if (!(kpath & BIT(path)))
			continue;

		ret = read_poll_timeout_atomic(rtw89_read_rf, rf_mode, rf_mode != 2,
					       2, 5000, false, rtwdev, path, 0x00,
					       RR_MOD_MASK);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RFK] Wait S%d to Rx mode!! (ret = %d)\n",
			    path, ret);
	}
}

static void _dack_dump(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u8 i;
	u8 t;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DACK]S0 ADC_DCK ic = 0x%x, qc = 0x%x\n",
		    dack->addck_d[0][0], dack->addck_d[0][1]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DACK]S1 ADC_DCK ic = 0x%x, qc = 0x%x\n",
		    dack->addck_d[1][0], dack->addck_d[1][1]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n",
		    dack->dadck_d[0][0], dack->dadck_d[0][1]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DACK]S1 DAC_DCK ic = 0x%x, qc = 0x%x\n",
		    dack->dadck_d[1][0], dack->dadck_d[1][1]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DACK]S0 biask ic = 0x%x, qc = 0x%x\n",
		    dack->biask_d[0][0], dack->biask_d[0][1]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DACK]S1 biask ic = 0x%x, qc = 0x%x\n",
		    dack->biask_d[1][0], dack->biask_d[1][1]);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK ic:\n");
	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
		t = dack->msbk_d[0][0][i];
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK qc:\n");
	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
		t = dack->msbk_d[0][1][i];
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK ic:\n");
	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
		t = dack->msbk_d[1][0][i];
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK qc:\n");
	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
		t = dack->msbk_d[1][1][i];
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
	}
}

static void _addck_backup(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;

	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x0);
	dack->addck_d[0][0] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0,
						    B_ADDCKR0_A0);
	dack->addck_d[0][1] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0,
						    B_ADDCKR0_A1);

	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x0);
	dack->addck_d[1][0] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR1,
						    B_ADDCKR1_A0);
	dack->addck_d[1][1] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR1,
						    B_ADDCKR1_A1);
}

static void _addck_reload(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;

	rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL1,
			       dack->addck_d[0][0]);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL0,
			       dack->addck_d[0][1]);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RLS, 0x3);

	rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RL1,
			       dack->addck_d[1][0]);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RL0,
			       dack->addck_d[1][1]);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RLS, 0x3);
}

static void _dack_backup_s0(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u8 i;

	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);

	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
		rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_V, i);
		dack->msbk_d[0][0][i] = rtw89_phy_read32_mask(rtwdev,
							      R_DACK_S0P2,
							      B_DACK_S0M0);
		rtw89_phy_write32_mask(rtwdev, R_DCOF8, B_DCOF8_V, i);
		dack->msbk_d[0][1][i] = rtw89_phy_read32_mask(rtwdev,
							      R_DACK_S0P3,
							      B_DACK_S0M1);
	}

	dack->biask_d[0][0] = rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS00,
						    B_DACK_BIAS00);
	dack->biask_d[0][1] = rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS01,
						    B_DACK_BIAS01);
	dack->dadck_d[0][0] = rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK00,
						    B_DACK_DADCK00);
	dack->dadck_d[0][1] = rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK01,
						    B_DACK_DADCK01);
}

static void _dack_backup_s1(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u8 i;

	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);

	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
		rtw89_phy_write32_mask(rtwdev, R_DACK10, B_DACK10, i);
		dack->msbk_d[1][0][i] = rtw89_phy_read32_mask(rtwdev,
							      R_DACK10S,
							      B_DACK10S);
		rtw89_phy_write32_mask(rtwdev, R_DACK11, B_DACK11, i);
		dack->msbk_d[1][1][i] = rtw89_phy_read32_mask(rtwdev,
							      R_DACK11S,
							      B_DACK11S);
	}

	dack->biask_d[1][0] = rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS10,
						    B_DACK_BIAS10);
	dack->biask_d[1][1] = rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS11,
						    B_DACK_BIAS11);
	dack->dadck_d[1][0] = rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK10,
						    B_DACK_DADCK10);
	dack->dadck_d[1][1] = rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK11,
						    B_DACK_DADCK11);
}
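
/* Reload one path/index set of DACK results. The sixteen MSBK codes are
 * repacked four per 32-bit word (codes 15..12 first, 3..0 last), then the
 * bias and DAC DCK codes are merged into a fifth word and BIT(0) is set,
 * apparently to latch the reload.
 */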
static void _dack_reload_by_path(struct rtw89_dev *rtwdev,
				 enum rtw89_rf_path path, u8 index)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u32 idx_offset, path_offset;
	u32 val32, offset, addr;
	u8 i;

	idx_offset = (index == 0 ? 0 : 0x14);
	path_offset = (path == RF_PATH_A ? 0 : 0x28);
	offset = idx_offset + path_offset;

	rtw89_rfk_parser(rtwdev, &rtw8852c_dack_reload_defs_tbl);

	/* msbk_d: 15/14/13/12 */
	val32 = 0x0;
	for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
		val32 |= dack->msbk_d[path][index][i + 12] << (i * 8);
	addr = 0xc200 + offset;
	rtw89_phy_write32(rtwdev, addr, val32);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", addr,
		    rtw89_phy_read32_mask(rtwdev, addr, MASKDWORD));

	/* msbk_d: 11/10/9/8 */
	val32 = 0x0;
	for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
		val32 |= dack->msbk_d[path][index][i + 8] << (i * 8);
	addr = 0xc204 + offset;
	rtw89_phy_write32(rtwdev, addr, val32);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", addr,
		    rtw89_phy_read32_mask(rtwdev, addr, MASKDWORD));

	/* msbk_d: 7/6/5/4 */
	val32 = 0x0;
	for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
		val32 |= dack->msbk_d[path][index][i + 4] << (i * 8);
	addr = 0xc208 + offset;
	rtw89_phy_write32(rtwdev, addr, val32);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", addr,
		    rtw89_phy_read32_mask(rtwdev, addr, MASKDWORD));

	/* msbk_d: 3/2/1/0 */
	val32 = 0x0;
	for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
		val32 |= dack->msbk_d[path][index][i] << (i * 8);
	addr = 0xc20c + offset;
	rtw89_phy_write32(rtwdev, addr, val32);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", addr,
		    rtw89_phy_read32_mask(rtwdev, addr, MASKDWORD));

	/* dadak_d/biask_d */
	val32 = (dack->biask_d[path][index] << 22) |
		(dack->dadck_d[path][index] << 14);
	addr = 0xc210 + offset;
	rtw89_phy_write32(rtwdev, addr, val32);
	rtw89_phy_write32_set(rtwdev, addr, BIT(0));
}

static void _dack_reload(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
	u8 i;

	for (i = 0; i < 2; i++)
		_dack_reload_by_path(rtwdev, path, i);
}
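
/* ADC DC offset calibration (the "ADC_DCK" of the dump above): toggle
 * reset/enable per path, fire the one-shot, then poll the done flag
 * (BIT(0) of 0xc0fc/0xc1fc) for up to 10 ms before releasing reset.
 */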
static void _addck(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u32 val;
	int ret;

	/* S0 */
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_RST, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_EN, 0x0);
	fsleep(1);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x1);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
				       1, 10000, false, rtwdev, 0xc0fc, BIT(0));
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADDCK timeout\n");
		dack->addck_timeout[0] = true;
	}

	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_RST, 0x0);

	/* S1 */
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_RST, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_EN, 0x0);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x1);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
				       1, 10000, false, rtwdev, 0xc1fc, BIT(0));
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADDCK timeout\n");
		dack->addck_timeout[1] = true;
	}

	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_RST, 0x0);
}
static void _dack_reset(struct rtw89_dev *rtwdev, u8 path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852c_dack_reset_defs_a_tbl,
				 &rtw8852c_dack_reset_defs_b_tbl);
}

enum adc_ck {
	ADC_NA = 0,
	ADC_480M = 1,
	ADC_960M = 2,
	ADC_1920M = 3,
};

enum dac_ck {
	DAC_40M = 0,
	DAC_80M = 1,
	DAC_120M = 2,
	DAC_160M = 3,
	DAC_240M = 4,
	DAC_320M = 5,
	DAC_480M = 6,
	DAC_960M = 7,
};

enum rf_mode {
	RF_SHUT_DOWN = 0x0,
	RF_STANDBY = 0x1,
	RF_TX = 0x2,
	RF_RX = 0x3,
	RF_TXIQK = 0x4,
	RF_DPK = 0x5,
	RF_RXK1 = 0x6,
	RF_RXK2 = 0x7,
};

static void rtw8852c_txck_force(struct rtw89_dev *rtwdev, u8 path, bool force,
				enum dac_ck ck)
{
	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_ON, 0x0);

	if (!force)
		return;

	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_VAL, ck);
	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_ON, 0x1);
}
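
/* Force the RX ADC clock of one path to a given rate and, when forcing,
 * program the matching channel-filter/clock parameters from the rxck_def
 * tables above; unlisted rates fall back to the 1920M set.
 */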
static void rtw8852c_rxck_force(struct rtw89_dev *rtwdev, u8 path, bool force,
				enum adc_ck ck)
{
	const struct rxck_def *def;

	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_ON, 0x0);

	if (!force)
		return;

	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_VAL, ck);
	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_ON, 0x1);

	switch (ck) {
	case ADC_480M:
		def = &_ck480M;
		break;
	case ADC_960M:
		def = &_ck960M;
		break;
	case ADC_1920M:
	default:
		def = &_ck1920M;
		break;
	}

	rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_CTL, def->ctl);
	rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_EN, def->en);
	rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, def->bw0);
	rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, def->bw1);
	rtw89_phy_write32_mask(rtwdev, R_DRCK | (path << 8), B_DRCK_MUL, def->mul);
	rtw89_phy_write32_mask(rtwdev, R_ADCMOD | (path << 8), B_ADCMOD_LP, def->lp);
}
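
/* DACK is done only when all four per-stage OK flags of the polled path
 * read back non-zero; any zero flag keeps the caller's poll loop spinning.
 */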
static bool _check_dack_done(struct rtw89_dev *rtwdev, bool s0)
{
	if (s0) {
		if (rtw89_phy_read32_mask(rtwdev, R_DACK_S0P0, B_DACK_S0P0_OK) == 0 ||
		    rtw89_phy_read32_mask(rtwdev, R_DACK_S0P1, B_DACK_S0P1_OK) == 0 ||
		    rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0P2_OK) == 0 ||
		    rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0P3_OK) == 0)
			return false;
	} else {
		if (rtw89_phy_read32_mask(rtwdev, R_DACK_S1P0, B_DACK_S1P0_OK) == 0 ||
		    rtw89_phy_read32_mask(rtwdev, R_DACK_S1P1, B_DACK_S1P1_OK) == 0 ||
		    rtw89_phy_read32_mask(rtwdev, R_DACK_S1P2, B_DACK_S1P2_OK) == 0 ||
		    rtw89_phy_read32_mask(rtwdev, R_DACK_S1P3, B_DACK_S1P3_OK) == 0)
			return false;
	}

	return true;
}

static void _dack_s0(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	bool done;
	int ret;

	rtw8852c_txck_force(rtwdev, RF_PATH_A, true, DAC_160M);
	rtw89_rfk_parser(rtwdev, &rtw8852c_dack_defs_s0_tbl);

	_dack_reset(rtwdev, RF_PATH_A);

	rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_S, 0x1);
	ret = read_poll_timeout_atomic(_check_dack_done, done, done,
				       1, 10000, false, rtwdev, true);
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DACK timeout\n");
		dack->msbk_timeout[0] = true;
	}

	rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_S, 0x0);
	rtw8852c_txck_force(rtwdev, RF_PATH_A, false, DAC_960M);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S0 DADCK\n");

	_dack_backup_s0(rtwdev);
	_dack_reload(rtwdev, RF_PATH_A);
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0);
}
static void _dack_s1(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	bool done;
	int ret;

	rtw8852c_txck_force(rtwdev, RF_PATH_B, true, DAC_160M);
	rtw89_rfk_parser(rtwdev, &rtw8852c_dack_defs_s1_tbl);

	_dack_reset(rtwdev, RF_PATH_B);

	rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_EN, 0x1);
	ret = read_poll_timeout_atomic(_check_dack_done, done, done,
				       1, 10000, false, rtwdev, false);
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 DACK timeout\n");
		dack->msbk_timeout[1] = true;
	}

	rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_EN, 0x0);
	rtw8852c_txck_force(rtwdev, RF_PATH_B, false, DAC_960M);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S1 DADCK\n");

	_dack_backup_s1(rtwdev);
	_dack_reload(rtwdev, RF_PATH_B);
	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0);
}
static void _dack(struct rtw89_dev *rtwdev)
{
	_dack_s0(rtwdev);
	_dack_s1(rtwdev);
}

static void _drck(struct rtw89_dev *rtwdev)
{
	u32 val;
	int ret;

	rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_EN, 0x1);
	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
				       1, 10000, false, rtwdev, 0xc0c8, BIT(3));
	if (ret)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DRCK timeout\n");

	rtw89_rfk_parser(rtwdev, &rtw8852c_drck_defs_tbl);

	val = rtw89_phy_read32_mask(rtwdev, R_DRCK_RES, B_DRCK_RES);
	rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_IDLE, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_VAL, val);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0xc0c4 = 0x%x\n",
		    rtw89_phy_read32_mask(rtwdev, R_DRCK, MASKDWORD));
}
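
/* Top-level DAC calibration: save both paths' RF mode, run DRCK and ADDCK
 * (each bracketed by BTC one-shot notifications), reload the ADDCK results,
 * run the per-path DACK, then restore the saved RF state.
 */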
static void _dac_cal(struct rtw89_dev *rtwdev, bool force)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u32 rf0_0, rf1_0;
	u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, RF_AB);

	dack->dack_done = false;
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK b\n");
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK start!!!\n");
	rf0_0 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK);
	rf1_0 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK);
	_drck(rtwdev);

	rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x337e1);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, 0x337e1);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_START);
	_addck(rtwdev);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_STOP);

	_addck_backup(rtwdev);
	_addck_reload(rtwdev);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MODOPT, RFREG_MASK, 0x0);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_MODOPT, RFREG_MASK, 0x0);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_START);
	_dack(rtwdev);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_STOP);

	_dack_dump(rtwdev);
	dack->dack_done = true;
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, rf0_0);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, rf1_0);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x1);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x1);
	dack->dack_cnt++;
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK finish!!!\n");
}
#define RTW8852C_NCTL_VER 0xd
#define RTW8852C_IQK_VER 0x2a
#define RTW8852C_IQK_SS 2
#define RTW8852C_IQK_THR_REK 8
#define RTW8852C_IQK_CFIR_GROUP_NR 4

enum rtw8852c_iqk_type {
	ID_TXAGC,
	ID_G_FLOK_COARSE,
	ID_A_FLOK_COARSE,
	ID_G_FLOK_FINE,
	ID_A_FLOK_FINE,
	ID_FLOK_VBUFFER,
	ID_TXK,
	ID_RXAGC,
	ID_RXK,
	ID_NBTXK,
	ID_NBRXK,
};

static void rtw8852c_disable_rxagc(struct rtw89_dev *rtwdev, u8 path, u8 en_rxgac)
{
	if (path == RF_PATH_A)
		rtw89_phy_write32_mask(rtwdev, R_P0_AGC_CTL, B_P0_AGC_EN, en_rxgac);
	else
		rtw89_phy_write32_mask(rtwdev, R_P1_AGC_CTL, B_P1_AGC_EN, en_rxgac);
}
static void _iqk_rxk_setting(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

	if (path == RF_PATH_A)
		rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0101);
	else
		rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0202);

	switch (iqk_info->iqk_bw[path]) {
	case RTW89_CHANNEL_WIDTH_20:
	case RTW89_CHANNEL_WIDTH_40:
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_DPD_GDIS, 0x1);
		rtw8852c_rxck_force(rtwdev, path, true, ADC_480M);
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_ACK_VAL, 0x0);
		rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_CKT, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG, 0x1);
		break;
	case RTW89_CHANNEL_WIDTH_80:
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_DPD_GDIS, 0x1);
		rtw8852c_rxck_force(rtwdev, path, true, ADC_960M);
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_ACK_VAL, 0x1);
		rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_CKT, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG, 0x1);
		break;
	case RTW89_CHANNEL_WIDTH_160:
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_DPD_GDIS, 0x1);
		rtw8852c_rxck_force(rtwdev, path, true, ADC_1920M);
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_ACK_VAL, 0x2);
		rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_CKT, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG, 0x1);
		break;
	default:
		break;
	}

	rtw89_rfk_parser(rtwdev, &rtw8852c_iqk_rxk_cfg_defs_tbl);

	if (path == RF_PATH_A)
		rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x1101);
	else
		rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x2202);
}
static bool _iqk_check_cal(struct rtw89_dev *rtwdev, u8 path, u8 ktype)
{
	u32 tmp;
	u32 val;
	int ret;

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
				       1, 8200, false, rtwdev, 0xbff8, MASKBYTE0);
	if (ret)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]IQK timeout!!!\n");

	rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, MASKBYTE0);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, ret=%d\n", path, ret);
	tmp = rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]S%x, type= %x, 0x8008 = 0x%x\n", path, ktype, tmp);

	return false;
}
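
/* Fire a single NCTL calibration command. The command word encodes the
 * calibration type in its low bits and the target path via BIT(4 + path);
 * TXK/RXK additionally fold the bandwidth into bits 8 and up. The +1 on
 * the final write presumably sets the one-shot start bit.
 */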
static bool _iqk_one_shot(struct rtw89_dev *rtwdev,
			  enum rtw89_phy_idx phy_idx, u8 path, u8 ktype)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u32 addr_rfc_ctl = R_UPD_CLK + (path << 13);
	u32 iqk_cmd;
	bool fail;

	switch (ktype) {
	case ID_TXAGC:
		iqk_cmd = 0x008 | (1 << (4 + path)) | (path << 1);
		break;
	case ID_A_FLOK_COARSE:
		rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
		iqk_cmd = 0x008 | (1 << (4 + path));
		break;
	case ID_G_FLOK_COARSE:
		rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
		iqk_cmd = 0x108 | (1 << (4 + path));
		break;
	case ID_A_FLOK_FINE:
		rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
		iqk_cmd = 0x508 | (1 << (4 + path));
		break;
	case ID_G_FLOK_FINE:
		rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
		iqk_cmd = 0x208 | (1 << (4 + path));
		break;
	case ID_FLOK_VBUFFER:
		rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
		iqk_cmd = 0x308 | (1 << (4 + path));
		break;
	case ID_TXK:
		rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x0);
		iqk_cmd = 0x008 | (1 << (4 + path)) | ((0x8 + iqk_info->iqk_bw[path]) << 8);
		break;
	case ID_RXAGC:
		iqk_cmd = 0x508 | (1 << (4 + path)) | (path << 1);
		break;
	case ID_RXK:
		rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
		iqk_cmd = 0x008 | (1 << (4 + path)) | ((0xc + iqk_info->iqk_bw[path]) << 8);
		break;
	case ID_NBTXK:
		rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x0);
		iqk_cmd = 0x408 | (1 << (4 + path));
		break;
	case ID_NBRXK:
		rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
		iqk_cmd = 0x608 | (1 << (4 + path));
		break;
	default:
		return false;
	}

	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, iqk_cmd + 1);
	fsleep(15);
	fail = _iqk_check_cal(rtwdev, path, ktype);
	rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x0);

	return fail;
}
static bool _rxk_group_sel(struct rtw89_dev *rtwdev,
			   enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool fail;
	u32 tmp;
	u32 bkrf0;
	u8 gp;

	bkrf0 = rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_NBW);
	if (path == RF_PATH_B) {
		rtw89_write_rf(rtwdev, RF_PATH_B, RR_IQKPLL, RR_IQKPLL_MOD, 0x3);
		tmp = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CHTR, RR_CHTR_MOD);
		rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV4, RR_RSV4_AGH, tmp);
		tmp = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CHTR, RR_CHTR_TXRX);
		rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV4, RR_RSV4_PLLCH, tmp);
	}

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
	default:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0);
		rtw89_write_rf(rtwdev, path, RR_RXG, RR_RXG_IQKMOD, 0x9);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0);
		rtw89_write_rf(rtwdev, path, RR_RXAE, RR_RXAE_IQKMOD, 0x8);
		break;
	case RTW89_BAND_6G:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0);
		rtw89_write_rf(rtwdev, path, RR_RXAE, RR_RXAE_IQKMOD, 0x9);
		break;
	}

	fsleep(10);

	for (gp = 0; gp < RXK_GROUP_NR; gp++) {
		switch (iqk_info->iqk_band[path]) {
		case RTW89_BAND_2G:
		default:
			rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG,
				       _rxk_g_idxrxgain[gp]);
			rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_VOBUF,
				       _rxk_g_idxattc2[gp]);
			break;
		case RTW89_BAND_5G:
			rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG,
				       _rxk_a_idxrxgain[gp]);
			rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_IATT,
				       _rxk_a_idxattc2[gp]);
			break;
		case RTW89_BAND_6G:
			rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG,
				       _rxk_a6_idxrxgain[gp]);
			rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_IATT,
				       _rxk_a6_idxattc2[gp]);
			break;
		}

		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_SEL, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_SET, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_GP_V1, gp);
		fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK);
	}

	if (path == RF_PATH_B)
		rtw89_write_rf(rtwdev, path, RR_IQKPLL, RR_IQKPLL_MOD, 0x0);
	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, bkrf0);

	if (fail) {
		iqk_info->nb_rxcfir[path] = 0x40000002;
		iqk_info->is_wb_rxiqk[path] = false;
	} else {
		iqk_info->nb_rxcfir[path] = 0x40000000;
		iqk_info->is_wb_rxiqk[path] = true;
	}

	return false;
}
static bool _iqk_nbrxk(struct rtw89_dev *rtwdev,
		       enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool fail;
	u32 tmp;
	u32 bkrf0;
	u8 gp = 0x2;

	bkrf0 = rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_NBW);
	if (path == RF_PATH_B) {
		rtw89_write_rf(rtwdev, RF_PATH_B, RR_IQKPLL, RR_IQKPLL_MOD, 0x3);
		tmp = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CHTR, RR_CHTR_MOD);
		rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV4, RR_RSV4_AGH, tmp);
		tmp = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CHTR, RR_CHTR_TXRX);
		rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV4, RR_RSV4_PLLCH, tmp);
	}

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
	default:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0);
		rtw89_write_rf(rtwdev, path, RR_RXG, RR_RXG_IQKMOD, 0x9);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0);
		rtw89_write_rf(rtwdev, path, RR_RXAE, RR_RXAE_IQKMOD, 0x8);
		break;
	case RTW89_BAND_6G:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0);
		rtw89_write_rf(rtwdev, path, RR_RXAE, RR_RXAE_IQKMOD, 0x9);
		break;
	}

	fsleep(10);

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
	default:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, _rxk_g_idxrxgain[gp]);
		rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_VOBUF, _rxk_g_idxattc2[gp]);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, _rxk_a_idxrxgain[gp]);
		rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_IATT, _rxk_a_idxattc2[gp]);
		break;
	case RTW89_BAND_6G:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, _rxk_a6_idxrxgain[gp]);
		rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_IATT, _rxk_a6_idxattc2[gp]);
		break;
	}

	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SET, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP_V1, gp);
	fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK);

	if (path == RF_PATH_B)
		rtw89_write_rf(rtwdev, path, RR_IQKPLL, RR_IQKPLL_MOD, 0x0);
	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, bkrf0);

	if (!fail)
		iqk_info->nb_rxcfir[path] =
			rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8),
					      MASKDWORD) | 0x2;
	else
		iqk_info->nb_rxcfir[path] = 0x40000002;

	iqk_info->is_wb_rxiqk[path] = false;

	return fail;
}
static bool _txk_group_sel(struct rtw89_dev *rtwdev,
			   enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool fail;
	u8 gp;

	for (gp = 0; gp < TXK_GROUP_NR; gp++) {
		switch (iqk_info->iqk_band[path]) {
		case RTW89_BAND_2G:
			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
				       _txk_g_power_range[gp]);
			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
				       _txk_g_track_range[gp]);
			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
				       _txk_g_gain_bb[gp]);
			rtw89_phy_write32_mask(rtwdev,
					       R_KIP_IQP + (path << 8),
					       MASKDWORD, _txk_g_itqt[gp]);
			break;
		case RTW89_BAND_5G:
			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
				       _txk_a_power_range[gp]);
			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
				       _txk_a_track_range[gp]);
			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
				       _txk_a_gain_bb[gp]);
			rtw89_phy_write32_mask(rtwdev,
					       R_KIP_IQP + (path << 8),
					       MASKDWORD, _txk_a_itqt[gp]);
			break;
		case RTW89_BAND_6G:
			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
				       _txk_a6_power_range[gp]);
			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
				       _txk_a6_track_range[gp]);
			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
				       _txk_a6_gain_bb[gp]);
			rtw89_phy_write32_mask(rtwdev,
					       R_KIP_IQP + (path << 8),
					       MASKDWORD, _txk_a6_itqt[gp]);
			break;
		default:
			break;
		}

		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_SEL, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_SET, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_G2, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_GP, gp + 1);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x00b);
		rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
		fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK);
	}

	if (fail) {
		iqk_info->nb_txcfir[path] = 0x40000002;
		iqk_info->is_wb_txiqk[path] = false;
	} else {
		iqk_info->nb_txcfir[path] = 0x40000000;
		iqk_info->is_wb_txiqk[path] = true;
	}

	return fail;
}
static bool _iqk_nbtxk(struct rtw89_dev *rtwdev,
		       enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool fail;
	u8 gp = 0x2;

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, _txk_g_power_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, _txk_g_track_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, _txk_g_gain_bb[gp]);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       MASKDWORD, _txk_g_itqt[gp]);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, _txk_a_power_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, _txk_a_track_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, _txk_a_gain_bb[gp]);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       MASKDWORD, _txk_a_itqt[gp]);
		break;
	case RTW89_BAND_6G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, _txk_a6_power_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, _txk_a6_track_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, _txk_a6_gain_bb[gp]);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       MASKDWORD, _txk_a6_itqt[gp]);
		break;
	default:
		break;
	}

	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SET, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G2, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, gp + 1);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x00b);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
	fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);
	if (!fail)
		iqk_info->nb_txcfir[path] =
			rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8),
					      MASKDWORD) | 0x2;
	else
		iqk_info->nb_txcfir[path] = 0x40000002;

	iqk_info->is_wb_txiqk[path] = false;

	return fail;
}
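
/* Read back the LOK core and vbuffer I/Q DAC codes and treat any code at
 * the edge of its range (outside 0x2..0x1d for core, 0x2..0x3d for
 * vbuffer) as a failed fine-tune.
 */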
static bool _lok_finetune_check(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u8 idx = rfk_mcc->table_idx;
	bool is_fail1, is_fail2;
	u32 val;
	u32 core_i;
	u32 core_q;
	u32 vbuff_i;
	u32 vbuff_q;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);

	val = rtw89_read_rf(rtwdev, path, RR_TXMO, RFREG_MASK);
	core_i = FIELD_GET(RR_TXMO_COI, val);
	core_q = FIELD_GET(RR_TXMO_COQ, val);
	if (core_i < 0x2 || core_i > 0x1d || core_q < 0x2 || core_q > 0x1d)
		is_fail1 = true;
	else
		is_fail1 = false;

	iqk_info->lok_idac[idx][path] = val;

	val = rtw89_read_rf(rtwdev, path, RR_LOKVB, RFREG_MASK);
	vbuff_i = FIELD_GET(RR_LOKVB_COI, val);
	vbuff_q = FIELD_GET(RR_LOKVB_COQ, val);
	if (vbuff_i < 0x2 || vbuff_i > 0x3d || vbuff_q < 0x2 || vbuff_q > 0x3d)
		is_fail2 = true;
	else
		is_fail2 = false;

	iqk_info->lok_vbuf[idx][path] = val;

	return is_fail1 || is_fail2;
}
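
/*
 * LO leakage calibration. The sequence is: coarse LOK at a small RF
 * gain, a vbuffer pass at a large gain, fine LOK at the small gain
 * again, then a final vbuffer pass, after which the resulting DAC
 * codes are range-checked by _lok_finetune_check(). All three bands
 * currently program the same gain (0x6/0x12) and IQ-swap (0x9/0x1b)
 * values; only the one-shot command ID differs between 2 GHz and
 * 5/6 GHz.
 */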
static bool _iqk_lok(struct rtw89_dev *rtwdev,
		     enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u8 tmp_id = 0x0;
	bool fail = false;
	bool tmp = false;

	/* Step 0: Init RF gain & tone idx = 8.25MHz */
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, IQK_DF4_TXT_8_25MHZ);

	/* Step 1 START: _lok_coarse_fine_wi_swap */
	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       B_KIP_IQP_IQSW, 0x9);
		tmp_id = ID_G_FLOK_COARSE;
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       B_KIP_IQP_IQSW, 0x9);
		tmp_id = ID_A_FLOK_COARSE;
		break;
	case RTW89_BAND_6G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       B_KIP_IQP_IQSW, 0x9);
		tmp_id = ID_A_FLOK_COARSE;
		break;
	default:
		break;
	}
	tmp = _iqk_one_shot(rtwdev, phy_idx, path, tmp_id);
	iqk_info->lok_cor_fail[0][path] = tmp;

	/* Step 2 */
	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       B_KIP_IQP_IQSW, 0x1b);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       B_KIP_IQP_IQSW, 0x1b);
		break;
	case RTW89_BAND_6G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       B_KIP_IQP_IQSW, 0x1b);
		break;
	default:
		break;
	}
	tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_VBUFFER);

	/* Step 3 */
	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       B_KIP_IQP_IQSW, 0x9);
		tmp_id = ID_G_FLOK_FINE;
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       B_KIP_IQP_IQSW, 0x9);
		tmp_id = ID_A_FLOK_FINE;
		break;
	case RTW89_BAND_6G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       B_KIP_IQP_IQSW, 0x9);
		tmp_id = ID_A_FLOK_FINE;
		break;
	default:
		break;
	}
	tmp = _iqk_one_shot(rtwdev, phy_idx, path, tmp_id);
	iqk_info->lok_fin_fail[0][path] = tmp;

	/* Step 4 large rf gain */
	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
	default:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       B_KIP_IQP_IQSW, 0x1b);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       B_KIP_IQP_IQSW, 0x1b);
		break;
	case RTW89_BAND_6G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       B_KIP_IQP_IQSW, 0x1b);
		break;
	}
	tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_VBUFFER);
	fail = _lok_finetune_check(rtwdev, path);

	return fail;
}
static void _iqk_txk_setting(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
	default:
		rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT2, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXG2, RR_TXG2_ATT0, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TXA2, RR_TXA2_LDO, 0xf);
		rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0);
		rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
		rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
			       0x403e0 | iqk_info->syn1to2);
		fsleep(10);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x6);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_TXATANK, RR_TXATANK_LBSW2, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXAS, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TXA2, RR_TXA2_LDO, 0xf);
		rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0);
		rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
		rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
			       0x403e0 | iqk_info->syn1to2);
		fsleep(10);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x6);
		break;
	case RTW89_BAND_6G:
		rtw89_write_rf(rtwdev, path, RR_TXATANK, RR_TXATANK_LBSW2, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXAS, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TXA2, RR_TXA2_LDO, 0xf);
		rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0);
		rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
		rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
			       0x403e0 | iqk_info->syn1to2);
		fsleep(10);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x6);
		break;
	}
}
static void _iqk_info_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
			  u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u32 tmp;
	bool flag;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_thermal = %lu\n", path,
		    ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]));
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_LOK_COR_fail = %d\n", path,
		    iqk_info->lok_cor_fail[0][path]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_LOK_FIN_fail = %d\n", path,
		    iqk_info->lok_fin_fail[0][path]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_TXIQK_fail = %d\n", path,
		    iqk_info->iqk_tx_fail[0][path]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_RXIQK_fail = %d\n", path,
		    iqk_info->iqk_rx_fail[0][path]);

	flag = iqk_info->lok_cor_fail[0][path];
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FCOR << (path * 4), flag);
	flag = iqk_info->lok_fin_fail[0][path];
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FFIN << (path * 4), flag);
	flag = iqk_info->iqk_tx_fail[0][path];
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FTX << (path * 4), flag);
	flag = iqk_info->iqk_rx_fail[0][path];
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_F_RX << (path * 4), flag);

	tmp = rtw89_phy_read32_mask(rtwdev, R_IQK_RES + (path << 8), MASKDWORD);
	iqk_info->bp_iqkenable[path] = tmp;
	tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
	iqk_info->bp_txkresult[path] = tmp;
	tmp = rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD);
	iqk_info->bp_rxkresult[path] = tmp;

	rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_KCNT,
			       iqk_info->iqk_times);

	tmp = rtw89_phy_read32_mask(rtwdev, R_IQKINF, B_IQKINF_FAIL << (path * 4));
	if (tmp != 0x0)
		iqk_info->iqk_fail_cnt++;

	rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_FCNT << (path * 4),
			       iqk_info->iqk_fail_cnt);
}
static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

	_iqk_txk_setting(rtwdev, path);
	iqk_info->lok_fail[path] = _iqk_lok(rtwdev, phy_idx, path);

	if (iqk_info->is_nbiqk)
		iqk_info->iqk_tx_fail[0][path] = _iqk_nbtxk(rtwdev, phy_idx, path);
	else
		iqk_info->iqk_tx_fail[0][path] = _txk_group_sel(rtwdev, phy_idx, path);

	_iqk_rxk_setting(rtwdev, path);
	if (iqk_info->is_nbiqk)
		iqk_info->iqk_rx_fail[0][path] = _iqk_nbrxk(rtwdev, phy_idx, path);
	else
		iqk_info->iqk_rx_fail[0][path] = _rxk_group_sel(rtwdev, phy_idx, path);

	_iqk_info_iqk(rtwdev, phy_idx, path);
}
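
/*
 * Snapshot band/bandwidth/channel for this calibration run and
 * publish them (together with the IQK and NCTL version tags) to the
 * IQK info registers. syn1to2 is 0x1 without DBCC and 0x3 with it,
 * and is later OR-ed into the RR_MOD write in _iqk_txk_setting().
 */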
static void _iqk_get_ch_info(struct rtw89_dev *rtwdev,
			     enum rtw89_phy_idx phy, u8 path)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);

	iqk_info->iqk_band[path] = chan->band_type;
	iqk_info->iqk_bw[path] = chan->band_width;
	iqk_info->iqk_ch[path] = chan->channel;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]iqk_info->iqk_band[%x] = 0x%x\n", path,
		    iqk_info->iqk_band[path]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_info->iqk_bw[%x] = 0x%x\n",
		    path, iqk_info->iqk_bw[path]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_info->iqk_ch[%x] = 0x%x\n",
		    path, iqk_info->iqk_ch[path]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]S%d (PHY%d): / DBCC %s/ %s/ CH%d/ %s\n", path, phy,
		    rtwdev->dbcc_en ? "on" : "off",
		    iqk_info->iqk_band[path] == 0 ? "2G" :
		    iqk_info->iqk_band[path] == 1 ? "5G" : "6G",
		    iqk_info->iqk_ch[path],
		    iqk_info->iqk_bw[path] == 0 ? "20M" :
		    iqk_info->iqk_bw[path] == 1 ? "40M" : "80M");

	if (!rtwdev->dbcc_en)
		iqk_info->syn1to2 = 0x1;
	else
		iqk_info->syn1to2 = 0x3;

	rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_VER, RTW8852C_IQK_VER);
	rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_BAND << (path * 16),
			       iqk_info->iqk_band[path]);
	rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_BW << (path * 16),
			       iqk_info->iqk_bw[path]);
	rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_CH << (path * 16),
			       iqk_info->iqk_ch[path]);
	rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_NCTLV, RTW8852C_NCTL_VER);
}
static void _iqk_start_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
			   u8 path)
{
	_iqk_by_path(rtwdev, phy_idx, path);
}
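
/*
 * Restore after per-path IQK: program the narrowband TX/RX CFIR
 * results back, kick one last NCTL command (0x1219/0x1229 for path
 * A/B) and verify it reaches state 0x12, then release the KIP
 * overrides and return the RF path to RX mode.
 */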
static void _iqk_restore(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool fail;

	rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD,
			       iqk_info->nb_txcfir[path]);
	rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD,
			       iqk_info->nb_rxcfir[path]);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD,
			       0x00001219 + (path << 4));
	fsleep(200);
	fail = _iqk_check_cal(rtwdev, path, 0x12);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] restore fail = %x\n", fail);

	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000000);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000);

	rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0);
	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1);
}
static void _iqk_afebb_restore(struct rtw89_dev *rtwdev,
			       enum rtw89_phy_idx phy_idx, u8 path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852c_iqk_afebb_restore_defs_a_tbl,
				 &rtw8852c_iqk_afebb_restore_defs_b_tbl);

	rtw8852c_disable_rxagc(rtwdev, path, 0x1);
}

static void _iqk_preset(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
	u8 idx = rfk_mcc->table_idx;

	rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), B_COEF_SEL_IQC, idx);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3, idx);
	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x81ff010a);
}
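
/*
 * MAC/BB/AFE preparation for IQK: hold the ADC FIFOs in reset, switch
 * the BB clocking into debug mode, force the DAC to 960 MHz and the
 * ADC to 1.92 GHz, then release the FIFO resets. This is undone by
 * _iqk_afebb_restore() after the calibration completes.
 */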
static void _iqk_macbb_setting(struct rtw89_dev *rtwdev,
			       enum rtw89_phy_idx phy_idx, u8 path)
{
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===> %s\n", __func__);

	/* 01_BB_AFE_for DPK_S0_20210820 */
	rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A0 << path, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A2 << path, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x0);

	/* disable rx agc */
	rtw8852c_disable_rxagc(rtwdev, path, 0x0);

	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), MASKDWORD, 0xf801fffd);
	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), B_DPD_DIS, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), B_DAC_VAL, 0x1);
	rtw8852c_txck_force(rtwdev, path, true, DAC_960M);
	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), B_DPD_GDIS, 0x1);
	rtw8852c_rxck_force(rtwdev, path, true, ADC_1920M);
	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), B_ACK_VAL, 0x2);

	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW | (path << 13), B_P0_NRBW_DBG, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x1f);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x13);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0001);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0041);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x1);
}
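
/*
 * RF RC calibration: trigger RCK with 0x00240, poll the done bit
 * (RF 0x1c[3]) for up to 20 us, then latch the returned CA code back
 * into RR_RCKC. The original RR_RSV1 value is saved and restored
 * around the sequence.
 */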
static void _rck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
	u32 rf_reg5, rck_val = 0;
	u32 val;
	int ret;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] ====== S%d RCK ======\n", path);

	rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);

	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF0x00 = 0x%x\n",
		    rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK));

	/* RCK trigger */
	rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, 0x00240);

	ret = read_poll_timeout_atomic(rtw89_read_rf, val, val, 2, 20,
				       false, rtwdev, path, 0x1c, BIT(3));
	if (ret)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RCK timeout\n");

	rck_val = rtw89_read_rf(rtwdev, path, RR_RCKC, RR_RCKC_CA);
	rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, rck_val);
	rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[RCK] RF 0x1b / 0x1c = 0x%x / 0x%x\n",
		    rtw89_read_rf(rtwdev, path, RR_RCKC, RFREG_MASK),
		    rtw89_read_rf(rtwdev, path, RR_RCKS, RFREG_MASK));
}
static void _iqk_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u8 ch, path;

	rtw89_phy_write32_clr(rtwdev, R_IQKINF, MASKDWORD);
	if (iqk_info->is_iqk_init)
		return;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
	iqk_info->is_iqk_init = true;
	iqk_info->is_nbiqk = false;
	iqk_info->iqk_fft_en = false;
	iqk_info->iqk_sram_en = false;
	iqk_info->iqk_cfir_en = false;
	iqk_info->iqk_xym_en = false;
	iqk_info->iqk_times = 0x0;

	for (ch = 0; ch < RTW89_IQK_CHS_NR; ch++) {
		iqk_info->iqk_channel[ch] = 0x0;
		for (path = 0; path < RTW8852C_IQK_SS; path++) {
			iqk_info->lok_cor_fail[ch][path] = false;
			iqk_info->lok_fin_fail[ch][path] = false;
			iqk_info->iqk_tx_fail[ch][path] = false;
			iqk_info->iqk_rx_fail[ch][path] = false;
			iqk_info->iqk_mcc_ch[ch][path] = 0x0;
			iqk_info->iqk_table_idx[path] = 0x0;
		}
	}
}
static void _doiqk(struct rtw89_dev *rtwdev, bool force,
		   enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u32 backup_bb_val[BACKUP_BB_REGS_NR];
	u32 backup_rf_val[RTW8852C_IQK_SS][BACKUP_RF_REGS_NR];
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]==========IQK start!!!!!==========\n");
	iqk_info->iqk_times++;
	iqk_info->version = RTW8852C_IQK_VER;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);
	_iqk_get_ch_info(rtwdev, phy_idx, path);
	_rfk_backup_bb_reg(rtwdev, backup_bb_val);
	_rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path);
	_iqk_macbb_setting(rtwdev, phy_idx, path);
	_iqk_preset(rtwdev, path);
	_iqk_start_iqk(rtwdev, phy_idx, path);
	_iqk_restore(rtwdev, path);
	_iqk_afebb_restore(rtwdev, phy_idx, path);
	_rfk_restore_bb_reg(rtwdev, backup_bb_val);
	_rfk_restore_rf_reg(rtwdev, backup_rf_val[path], path);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}
static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force)
{
	switch (_kpath(rtwdev, phy_idx)) {
	case RF_A:
		_doiqk(rtwdev, force, phy_idx, RF_PATH_A);
		break;
	case RF_B:
		_doiqk(rtwdev, force, phy_idx, RF_PATH_B);
		break;
	case RF_AB:
		_doiqk(rtwdev, force, phy_idx, RF_PATH_A);
		_doiqk(rtwdev, force, phy_idx, RF_PATH_B);
		break;
	default:
		break;
	}
}
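
/*
 * Manually rewrite one RX DCK LUT entry. val_i/val_q are 7-bit codes
 * taken from the backup table; the LUT field is only 6 bits wide, so
 * the LSB is dropped before encoding. The LUT word write is issued
 * twice back-to-back, which mirrors the vendor calibration flow and
 * is kept as-is.
 */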
static void _rx_dck_value_rewrite(struct rtw89_dev *rtwdev, u8 path, u8 addr,
				  u8 val_i, u8 val_q)
{
	u32 ofst_val;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[RX_DCK] rewrite val_i = 0x%x, val_q = 0x%x\n", val_i, val_q);

	/* val_i and val_q are 7 bits, and target is 6 bits. */
	ofst_val = u32_encode_bits(val_q >> 1, RR_LUTWD0_MB) |
		   u32_encode_bits(val_i >> 1, RR_LUTWD0_LB);

	rtw89_write_rf(rtwdev, path, RR_LUTPLL, RR_CAL_RW, 0x1);
	rtw89_write_rf(rtwdev, path, RR_RFC, RR_WCAL, 0x1);
	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, 0x1);
	rtw89_write_rf(rtwdev, path, RR_LUTWA, MASKBYTE0, addr);
	rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, ofst_val);
	rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, ofst_val);
	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, 0x0);
	rtw89_write_rf(rtwdev, path, RR_RFC, RR_WCAL, 0x0);
	rtw89_write_rf(rtwdev, path, RR_LUTPLL, RR_CAL_RW, 0x0);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RX_DCK] Final val_i = 0x%x, val_q = 0x%x\n",
		    u32_get_bits(ofst_val, RR_LUTWD0_LB) << 1,
		    u32_get_bits(ofst_val, RR_LUTWD0_MB) << 1);
}
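
/*
 * Compare the backed-up TIA DC offsets (read via _dck_addr_bs) with
 * the values currently read back at each gain address (_dck_addr),
 * for both the even and odd LUT entries. A delta above 10 codes on
 * any I/Q pair means the DCK result drifted and a re-calibration is
 * needed.
 */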
static bool _rx_dck_rek_check(struct rtw89_dev *rtwdev, u8 path)
{
	u8 i_even_bs, q_even_bs;
	u8 i_odd_bs, q_odd_bs;
	u8 i_even, q_even;
	u8 i_odd, q_odd;
	const u8 th = 10;
	u8 i;

	for (i = 0; i < RF_PATH_NUM_8852C; i++) {
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DCK, _dck_addr_bs[i]);
		i_even_bs = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_TIA);
		q_even_bs = rtw89_read_rf(rtwdev, path, RR_DCK1, RR_DCK1_TIA);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RX_DCK] Gain[0x%x] i_even_bs/ q_even_bs = 0x%x/ 0x%x\n",
			    _dck_addr_bs[i], i_even_bs, q_even_bs);

		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DCK, _dck_addr[i]);
		i_even = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_TIA);
		q_even = rtw89_read_rf(rtwdev, path, RR_DCK1, RR_DCK1_TIA);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RX_DCK] Gain[0x%x] i_even/ q_even = 0x%x/ 0x%x\n",
			    _dck_addr[i], i_even, q_even);

		if (abs(i_even_bs - i_even) > th || abs(q_even_bs - q_even) > th)
			return true;

		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DCK, _dck_addr_bs[i] + 1);
		i_odd_bs = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_TIA);
		q_odd_bs = rtw89_read_rf(rtwdev, path, RR_DCK1, RR_DCK1_TIA);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RX_DCK] Gain[0x%x] i_odd_bs/ q_odd_bs = 0x%x/ 0x%x\n",
			    _dck_addr_bs[i] + 1, i_odd_bs, q_odd_bs);

		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DCK, _dck_addr[i] + 1);
		i_odd = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_TIA);
		q_odd = rtw89_read_rf(rtwdev, path, RR_DCK1, RR_DCK1_TIA);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RX_DCK] Gain[0x%x] i_odd/ q_odd = 0x%x/ 0x%x\n",
			    _dck_addr[i] + 1, i_odd, q_odd);

		if (abs(i_odd_bs - i_odd) > th || abs(q_odd_bs - q_odd) > th)
			return true;
	}

	return false;
}
static void _rx_dck_fix_if_need(struct rtw89_dev *rtwdev, u8 path, u8 addr,
				u8 val_i_bs, u8 val_q_bs, u8 val_i, u8 val_q)
{
	const u8 th = 10;

	if ((abs(val_i_bs - val_i) < th) && (abs(val_q_bs - val_q) <= th)) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RX_DCK] offset check PASS!!\n");
		return;
	}

	if (abs(val_i_bs - val_i) > th) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RX_DCK] val_i over TH (0x%x / 0x%x)\n", val_i_bs, val_i);
		val_i = val_i_bs;
	}

	if (abs(val_q_bs - val_q) > th) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RX_DCK] val_q over TH (0x%x / 0x%x)\n", val_q_bs, val_q);
		val_q = val_q_bs;
	}

	_rx_dck_value_rewrite(rtwdev, path, addr, val_i, val_q);
}
static void _rx_dck_recover(struct rtw89_dev *rtwdev, u8 path)
{
	u8 i_even_bs, q_even_bs;
	u8 i_odd_bs, q_odd_bs;
	u8 i_even, q_even;
	u8 i_odd, q_odd;
	u8 i;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RX_DCK] ===> recovery\n");

	for (i = 0; i < RF_PATH_NUM_8852C; i++) {
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DCK, _dck_addr_bs[i]);
		i_even_bs = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_TIA);
		q_even_bs = rtw89_read_rf(rtwdev, path, RR_DCK1, RR_DCK1_TIA);

		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DCK, _dck_addr_bs[i] + 1);
		i_odd_bs = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_TIA);
		q_odd_bs = rtw89_read_rf(rtwdev, path, RR_DCK1, RR_DCK1_TIA);

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RX_DCK] Gain[0x%x] i_even_bs/ q_even_bs = 0x%x/ 0x%x\n",
			    _dck_addr_bs[i], i_even_bs, q_even_bs);

		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DCK, _dck_addr[i]);
		i_even = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_TIA);
		q_even = rtw89_read_rf(rtwdev, path, RR_DCK1, RR_DCK1_TIA);

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RX_DCK] Gain[0x%x] i_even/ q_even = 0x%x/ 0x%x\n",
			    _dck_addr[i], i_even, q_even);

		_rx_dck_fix_if_need(rtwdev, path, _dck_addr[i],
				    i_even_bs, q_even_bs, i_even, q_even);

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RX_DCK] Gain[0x%x] i_odd_bs/ q_odd_bs = 0x%x/ 0x%x\n",
			    _dck_addr_bs[i] + 1, i_odd_bs, q_odd_bs);

		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DCK, _dck_addr[i] + 1);
		i_odd = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_TIA);
		q_odd = rtw89_read_rf(rtwdev, path, RR_DCK1, RR_DCK1_TIA);

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RX_DCK] Gain[0x%x] i_odd/ q_odd = 0x%x/ 0x%x\n",
			    _dck_addr[i] + 1, i_odd, q_odd);

		_rx_dck_fix_if_need(rtwdev, path, _dck_addr[i] + 1,
				    i_odd_bs, q_odd_bs, i_odd, q_odd);
	}
}
static void _rx_dck_toggle(struct rtw89_dev *rtwdev, u8 path)
{
	int ret;
	u32 val;

	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0);
	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x1);

	ret = read_poll_timeout_atomic(rtw89_read_rf, val, val,
				       2, 2000, false, rtwdev, path,
				       RR_DCK1, RR_DCK1_DONE);
	if (ret)
		rtw89_warn(rtwdev, "[RX_DCK] S%d RXDCK timeout\n", path);
	else
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RX_DCK] S%d RXDCK finish\n", path);

	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0);
}
static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 path,
			bool is_afe)
{
	u8 res;

	rtw89_write_rf(rtwdev, path, RR_DCK1, RR_DCK1_CLR, 0x0);

	_rx_dck_toggle(rtwdev, path);
	if (rtw89_read_rf(rtwdev, path, RR_DCKC, RR_DCKC_CHK) == 0)
		return;
	res = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_DONE);
	if (res > 1) {
		rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_IDAC, res);
		_rx_dck_toggle(rtwdev, path);
		rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_IDAC, 0x1);
	}
}
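
/*
 * Pick a "far" channel to run RX DCK on, presumably to keep the
 * calibration away from the operating channel. 5G: ch 36-64 map to
 * 100; ch 100-144 move up by 32, or by 33 when +32 would overshoot
 * the 144/149 raster gap (e.g. ch 120 -> 153); ch 149-177 move down
 * by 33. 6G channels shift by +/-32. Other bands keep the current
 * channel.
 */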
static
u8 _rx_dck_channel_calc(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan)
{
	u8 target_ch = 0;

	if (chan->band_type == RTW89_BAND_5G) {
		if (chan->channel >= 36 && chan->channel <= 64) {
			target_ch = 100;
		} else if (chan->channel >= 100 && chan->channel <= 144) {
			target_ch = chan->channel + 32;
			if (target_ch > 144)
				target_ch = chan->channel + 33;
		} else if (chan->channel >= 149 && chan->channel <= 177) {
			target_ch = chan->channel - 33;
		}
	} else if (chan->band_type == RTW89_BAND_6G) {
		if (chan->channel >= 1 && chan->channel <= 125)
			target_ch = chan->channel + 32;
		else
			target_ch = chan->channel - 32;
	} else {
		target_ch = chan->channel;
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[RX_DCK] cur_ch / target_ch = %d / %d\n",
		    chan->channel, target_ch);

	return target_ch;
}
#define RTW8852C_RF_REL_VERSION 34
#define RTW8852C_DPK_VER 0xf
#define RTW8852C_DPK_TH_AVG_NUM 4
#define RTW8852C_DPK_RF_PATH 2
#define RTW8852C_DPK_KIP_REG_NUM 7
#define RTW8852C_DPK_RXSRAM_DBG 0

enum rtw8852c_dpk_id {
	LBK_RXIQK	= 0x06,
	SYNC		= 0x10,
	MDPK_IDL	= 0x11,
	MDPK_MPA	= 0x12,
	GAIN_LOSS	= 0x13,
	GAIN_CAL	= 0x14,
	DPK_RXAGC	= 0x15,
	KIP_PRESET	= 0x16,
	KIP_RESTORE	= 0x17,
	DPK_TXAGC	= 0x19,
	D_KIP_PRESET	= 0x28,
	D_TXAGC		= 0x29,
	D_RXAGC		= 0x2a,
	D_SYNC		= 0x2b,
	D_GAIN_LOSS	= 0x2c,
	D_MDPK_IDL	= 0x2d,
	D_GAIN_NORM	= 0x2f,
	D_KIP_THERMAL	= 0x30,
	D_KIP_RESTORE	= 0x31
};

#define DPK_TXAGC_LOWER 0x2e
#define DPK_TXAGC_UPPER 0x3f
#define DPK_TXAGC_INVAL 0xff

enum dpk_agc_step {
	DPK_AGC_STEP_SYNC_DGAIN,
	DPK_AGC_STEP_GAIN_LOSS_IDX,
	DPK_AGC_STEP_GL_GT_CRITERION,
	DPK_AGC_STEP_GL_LT_CRITERION,
	DPK_AGC_STEP_SET_TX_GAIN,
};

enum dpk_pas_result {
	DPK_PAS_NOR,
	DPK_PAS_GT,
	DPK_PAS_LT,
};
static void _rf_direct_cntrl(struct rtw89_dev *rtwdev,
			     enum rtw89_rf_path path, bool is_bybb)
{
	if (is_bybb)
		rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1);
	else
		rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
}

static void _dpk_onoff(struct rtw89_dev *rtwdev,
		       enum rtw89_rf_path path, bool off);

static void _dpk_bkup_kip(struct rtw89_dev *rtwdev, const u32 reg[],
			  u32 reg_bkup[][RTW8852C_DPK_KIP_REG_NUM], u8 path)
{
	u8 i;

	for (i = 0; i < RTW8852C_DPK_KIP_REG_NUM; i++) {
		reg_bkup[path][i] =
			rtw89_phy_read32_mask(rtwdev, reg[i] + (path << 8), MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Backup 0x%x = %x\n",
			    reg[i] + (path << 8), reg_bkup[path][i]);
	}
}

static void _dpk_reload_kip(struct rtw89_dev *rtwdev, const u32 reg[],
			    u32 reg_bkup[][RTW8852C_DPK_KIP_REG_NUM], u8 path)
{
	u8 i;

	for (i = 0; i < RTW8852C_DPK_KIP_REG_NUM; i++) {
		rtw89_phy_write32_mask(rtwdev, reg[i] + (path << 8),
				       MASKDWORD, reg_bkup[path][i]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Reload 0x%x = %x\n",
			    reg[i] + (path << 8), reg_bkup[path][i]);
	}
}
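
/*
 * Issue one KIP one-shot command and poll 0xbff8 for the 0x55 done
 * marker (up to 20 ms). The command word packs the DPK stage ID in
 * the high byte and a path selector in the low byte (0x19 for path A,
 * 0x2b for path B): e.g. SYNC (0x10) on path B encodes as 0x102b.
 */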
static u8 _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			enum rtw89_rf_path path, enum rtw8852c_dpk_id id)
{
	u16 dpk_cmd;
	u32 val;
	int ret;

	dpk_cmd = (u16)((id << 8) | (0x19 + path * 0x12));
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, dpk_cmd);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
				       10, 20000, false, rtwdev, 0xbff8, MASKBYTE0);
	udelay(10);
	rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, MASKBYTE0);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] one-shot for %s = 0x%x (ret=%d)\n",
		    id == 0x06 ? "LBK_RXIQK" :
		    id == 0x10 ? "SYNC" :
		    id == 0x11 ? "MDPK_IDL" :
		    id == 0x12 ? "MDPK_MPA" :
		    id == 0x13 ? "GAIN_LOSS" : "PWR_CAL",
		    dpk_cmd, ret);

	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] one-shot over 20ms!!!!\n");
		return 1;
	}

	return 0;
}
static void _dpk_information(struct rtw89_dev *rtwdev,
			     enum rtw89_phy_idx phy,
			     enum rtw89_rf_path path)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u8 kidx = dpk->cur_idx[path];

	dpk->bp[path][kidx].band = chan->band_type;
	dpk->bp[path][kidx].ch = chan->channel;
	dpk->bp[path][kidx].bw = chan->band_width;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] S%d[%d] (PHY%d): TSSI %s/ DBCC %s/ %s/ CH%d/ %s\n",
		    path, dpk->cur_idx[path], phy,
		    rtwdev->is_tssi_mode[path] ? "on" : "off",
		    rtwdev->dbcc_en ? "on" : "off",
		    dpk->bp[path][kidx].band == 0 ? "2G" :
		    dpk->bp[path][kidx].band == 1 ? "5G" : "6G",
		    dpk->bp[path][kidx].ch,
		    dpk->bp[path][kidx].bw == 0 ? "20M" :
		    dpk->bp[path][kidx].bw == 1 ? "40M" : "80M");
}

static void _dpk_bb_afe_setting(struct rtw89_dev *rtwdev,
				enum rtw89_phy_idx phy,
				enum rtw89_rf_path path, u8 kpath)
{
	/*1. Keep ADC_fifo reset*/
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A0 << path, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A2 << path, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x0);

	/*2. BB for IQK DBG mode*/
	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), MASKDWORD, 0xd801dffd);

	/*3. Set DAC clk*/
	rtw8852c_txck_force(rtwdev, path, true, DAC_960M);

	/*4. Set ADC clk*/
	rtw8852c_rxck_force(rtwdev, path, true, ADC_1920M);
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13),
			       B_P0_NRBW_DBG, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, MASKBYTE3, 0x1f);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, MASKBYTE3, 0x13);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, MASKHWORD, 0x0001);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, MASKHWORD, 0x0041);

	/*5. ADDA fifo rst*/
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x1);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d BB/AFE setting\n", path);
}
static void _dpk_bb_afe_restore(struct rtw89_dev *rtwdev, u8 path)
{
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13),
			       B_P0_NRBW_DBG, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A0 << path, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A2 << path, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), MASKDWORD, 0x00000000);
	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK + (path << 13), B_P0_TXCK_ALL, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A0 << path, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A2 << path, 0x0);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d BB/AFE restore\n", path);
}

static void _dpk_tssi_pause(struct rtw89_dev *rtwdev,
			    enum rtw89_rf_path path, bool is_pause)
{
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13),
			       B_P0_TSSI_TRK_EN, is_pause);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d TSSI %s\n", path,
		    is_pause ? "pause" : "resume");
}

static void _dpk_kip_control_rfc(struct rtw89_dev *rtwdev, u8 path, bool ctrl_by_kip)
{
	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_IQK_RFC_ON, ctrl_by_kip);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] RFC is controlled by %s\n",
		    ctrl_by_kip ? "KIP" : "BB");
}

static void _dpk_txpwr_bb_force(struct rtw89_dev *rtwdev, u8 path, bool force)
{
	rtw89_phy_write32_mask(rtwdev, R_TXPWRB + (path << 13), B_TXPWRB_ON, force);
	rtw89_phy_write32_mask(rtwdev, R_TXPWRB_H + (path << 13), B_TXPWRB_RDY, force);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d txpwr_bb_force %s\n",
		    path, force ? "on" : "off");
}

static void _dpk_kip_restore(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			     enum rtw89_rf_path path)
{
	_dpk_one_shot(rtwdev, phy, path, D_KIP_RESTORE);
	_dpk_kip_control_rfc(rtwdev, path, false);
	_dpk_txpwr_bb_force(rtwdev, path, false);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d restore KIP\n", path);
}
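
/*
 * Loopback RX IQK used by DPK: temporarily opens the TX-to-RX
 * loopback, forces the RXBB gain code to 0x1f, runs LBK_RXIQK with a
 * Q.2-coded 9.25 MHz RX tone, then restores the saved RR_TXIG, RXBB
 * and KIP IQ-path switch settings.
 */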
static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev,
			   enum rtw89_phy_idx phy,
			   enum rtw89_rf_path path)
{
#define RX_TONE_IDX 0x00250025 /* Q.2 9.25MHz */
	u8 cur_rxbb;
	u32 rf_11, reg_81cc;

	rtw89_phy_write32_mask(rtwdev, R_DPD_V1 + (path << 8), B_DPD_LBK, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x1);

	_dpk_kip_control_rfc(rtwdev, path, false);

	cur_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB);
	rf_11 = rtw89_read_rf(rtwdev, path, RR_TXIG, RFREG_MASK);
	reg_81cc = rtw89_phy_read32_mask(rtwdev, R_KIP_IQP + (path << 8),
					 B_KIP_IQP_SW);

	rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
	rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x3);
	rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0xd);
	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB, 0x1f);

	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x12);
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_SW, 0x3);

	_dpk_kip_control_rfc(rtwdev, path, true);

	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, MASKDWORD, RX_TONE_IDX);

	_dpk_one_shot(rtwdev, phy, path, LBK_RXIQK);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d LBK RXIQC = 0x%x\n", path,
		    rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD));

	_dpk_kip_control_rfc(rtwdev, path, false);

	rtw89_write_rf(rtwdev, path, RR_TXIG, RFREG_MASK, rf_11);
	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB, cur_rxbb);
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_SW, reg_81cc);
	rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_KPATH_CFG, B_KPATH_CFG_ED, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_DI, 0x1);

	_dpk_kip_control_rfc(rtwdev, path, true);
}
static void _dpk_rf_setting(struct rtw89_dev *rtwdev, u8 gain,
			    enum rtw89_rf_path path, u8 kidx)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	if (dpk->bp[path][kidx].band == RTW89_BAND_2G) {
		rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
			       0x50121 | BIT(rtwdev->dbcc_en));
		rtw89_write_rf(rtwdev, path, RR_MOD_V1, RR_MOD_MASK, RF_DPK);
		rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_ATTC, 0x2);
		rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_ATTR, 0x4);
		rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1);

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] RF 0x0/0x83/0x9e/0x1a/0xdf/0x1001a = 0x%x/ 0x%x/ 0x%x/ 0x%x/ 0x%x/ 0x%x\n",
			    rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK),
			    rtw89_read_rf(rtwdev, path, RR_RXBB, RFREG_MASK),
			    rtw89_read_rf(rtwdev, path, RR_TIA, RFREG_MASK),
			    rtw89_read_rf(rtwdev, path, RR_BTC, RFREG_MASK),
			    rtw89_read_rf(rtwdev, path, RR_LUTDBG, RFREG_MASK),
			    rtw89_read_rf(rtwdev, path, 0x1001a, RFREG_MASK));
	} else {
		rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
			       0x50101 | BIT(rtwdev->dbcc_en));
		rtw89_write_rf(rtwdev, path, RR_MOD_V1, RR_MOD_MASK, RF_DPK);

		if (dpk->bp[path][kidx].band == RTW89_BAND_6G && dpk->bp[path][kidx].ch >= 161)
			rtw89_write_rf(rtwdev, path, RR_IQGEN, RR_IQGEN_BIAS, 0x8);

		rtw89_write_rf(rtwdev, path, RR_LOGEN, RR_LOGEN_RPT, 0xd);
		rtw89_write_rf(rtwdev, path, RR_TXAC, RR_TXAC_IQG, 0x8);

		rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_ATT, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT2, 0x3);
		rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1);

		if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_160)
			rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_EBW, 0x0);
	}
}

static void _dpk_tpg_sel(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_160) {
		rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x3);
		rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0x0180ff30);
	} else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80) {
		rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0xffe0fa00);
	} else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40) {
		rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x2);
		rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0xff4009e0);
	} else {
		rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0xf9f007d0);
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] TPG_Select for %s\n",
		    dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_160 ? "160M" :
		    dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80 ? "80M" :
		    dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40 ? "40M" : "20M");
}
static bool _dpk_sync_check(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
{
#define DPK_SYNC_TH_DC_I 200
#define DPK_SYNC_TH_DC_Q 200
#define DPK_SYNC_TH_CORR 170
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u16 dc_i, dc_q;
	u8 corr_val, corr_idx, rxbb;
	u8 rxbb_ov;

	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0);

	corr_idx = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORI);
	corr_val = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORV);
	dpk->corr_idx[path][kidx] = corr_idx;
	dpk->corr_val[path][kidx] = corr_val;

	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x9);

	dc_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);
	dc_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCQ);

	dc_i = abs(sign_extend32(dc_i, 11));
	dc_q = abs(sign_extend32(dc_q, 11));

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] S%d Corr_idx/ Corr_val /DC I/Q, = %d / %d / %d / %d\n",
		    path, corr_idx, corr_val, dc_i, dc_q);

	dpk->dc_i[path][kidx] = dc_i;
	dpk->dc_q[path][kidx] = dc_q;

	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x8);
	rxbb = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_RXBB);

	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x31);
	rxbb_ov = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_RXOV);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] S%d RXBB/ RXAGC_done /RXBB_ovlmt = %d / %d / %d\n",
		    path, rxbb,
		    rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DONE),
		    rxbb_ov);

	if (dc_i > DPK_SYNC_TH_DC_I || dc_q > DPK_SYNC_TH_DC_Q ||
	    corr_val < DPK_SYNC_TH_CORR)
		return true;
	else
		return false;
}
static u16 _dpk_dgain_read(struct rtw89_dev *rtwdev)
{
	u16 dgain = 0x0;

	rtw89_phy_write32_clr(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL);

	dgain = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain = 0x%x (%d)\n", dgain, dgain);

	return dgain;
}

static u8 _dpk_gainloss_read(struct rtw89_dev *rtwdev)
{
	u8 result;

	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x6);
	rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x1);
	result = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_GL);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] tmp GL = %d\n", result);

	return result;
}

static void _dpk_kset_query(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT + (path << 8), B_KIP_RPT_SEL, 0x10);
	dpk->cur_k_set =
		rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), 0xE0000000) - 1;
}

static void _dpk_kip_set_txagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			       enum rtw89_rf_path path, u8 dbm, bool set_from_bb)
{
	if (set_from_bb) {
		dbm = clamp_t(u8, dbm, 7, 24);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] set S%d txagc to %ddBm\n", path, dbm);
		rtw89_phy_write32_mask(rtwdev, R_TXPWRB + (path << 13), B_TXPWRB_VAL, dbm << 2);
	}
	_dpk_one_shot(rtwdev, phy, path, D_TXAGC);
	_dpk_kset_query(rtwdev, path);
}

static u8 _dpk_gainloss(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			enum rtw89_rf_path path, u8 kidx)
{
	_dpk_one_shot(rtwdev, phy, path, D_GAIN_LOSS);
	_dpk_kip_set_txagc(rtwdev, phy, path, 0xff, false);

	rtw89_phy_write32_mask(rtwdev, R_DPK_GL + (path << 8), B_DPK_GL_A1, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_DPK_GL + (path << 8), B_DPK_GL_A0, 0x0);

	return _dpk_gainloss_read(rtwdev);
}
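
/*
 * Read the PA sample buffer and compare the power of the first
 * (index 0x00) and last (index 0x1f) samples. The return value is
 * only meaningful when is_check is true: "greater" is flagged when
 * the first sample's power reaches 8/5 of the last, "less" when it
 * falls below it. With is_check false the 32 raw samples are only
 * dumped for debug and the comparison operates on zeroed values.
 */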
static enum dpk_pas_result _dpk_pas_read(struct rtw89_dev *rtwdev, bool is_check)
{
	u32 val1_i = 0, val1_q = 0, val2_i = 0, val2_q = 0;
	u32 val1_sqrt_sum, val2_sqrt_sum;
	u8 i;

	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKBYTE2, 0x06);
	rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE2, 0x08);

	if (is_check) {
		rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x00);
		val1_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
		val1_i = abs(sign_extend32(val1_i, 11));
		val1_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
		val1_q = abs(sign_extend32(val1_q, 11));

		rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x1f);
		val2_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
		val2_i = abs(sign_extend32(val2_i, 11));
		val2_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
		val2_q = abs(sign_extend32(val2_q, 11));

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_delta = 0x%x\n",
			    phy_div(val1_i * val1_i + val1_q * val1_q,
				    val2_i * val2_i + val2_q * val2_q));
	} else {
		for (i = 0; i < 32; i++) {
			rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, i);
			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_Read[%02d]= 0x%08x\n", i,
				    rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD));
		}
	}

	val1_sqrt_sum = val1_i * val1_i + val1_q * val1_q;
	val2_sqrt_sum = val2_i * val2_i + val2_q * val2_q;

	if (val1_sqrt_sum < val2_sqrt_sum)
		return DPK_PAS_LT;
	else if (val1_sqrt_sum >= val2_sqrt_sum * 8 / 5)
		return DPK_PAS_GT;
	else
		return DPK_PAS_NOR;
}
static bool _dpk_kip_set_rxagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			       enum rtw89_rf_path path, u8 kidx)
{
	_dpk_kip_control_rfc(rtwdev, path, false);
	rtw89_phy_write32_mask(rtwdev, R_KIP_MOD, B_KIP_MOD,
			       rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK));
	_dpk_kip_control_rfc(rtwdev, path, true);

	_dpk_one_shot(rtwdev, phy, path, D_RXAGC);

	return _dpk_sync_check(rtwdev, path, kidx);
}

static void _dpk_read_rxsram(struct rtw89_dev *rtwdev)
{
	u32 addr;

	rtw89_rfk_parser(rtwdev, &rtw8852c_read_rxsram_pre_defs_tbl);

	for (addr = 0; addr < 0x200; addr++) {
		rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 0x00010000 | addr);

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] RXSRAM[%03d] = 0x%07x\n", addr,
			    rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD));
	}

	rtw89_rfk_parser(rtwdev, &rtw8852c_read_rxsram_post_defs_tbl);
}

static void _dpk_bypass_rxiqc(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
	rtw89_phy_write32_mask(rtwdev, R_DPD_V1 + (path << 8), B_DPD_LBK, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD, 0x40000002);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Bypass RXIQC\n");
}
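
/*
 * DPK AGC state machine. Starting from init_xdbm: sync and read the
 * digital gain (re-sync once if dgain is outside 0x556..0x5fc), then
 * measure gain loss. If the PA-sample or gain-loss criteria indicate
 * too much drive, back TX power off by 3 dB (floor 7 dBm); with
 * headroom, step up by 2 dB (ceiling 24 dBm); otherwise fold the
 * gain-loss index into the RXBB gain (capped at 0x1f) and finish.
 * Bounded by six AGC rounds plus a hard iteration limit.
 */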
static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		   enum rtw89_rf_path path, u8 kidx, u8 init_xdbm, u8 loss_only)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u8 step = DPK_AGC_STEP_SYNC_DGAIN;
	u8 tmp_dbm = init_xdbm, tmp_gl_idx = 0;
	u8 tmp_rxbb;
	u8 goout = 0, agc_cnt = 0;
	enum dpk_pas_result pas;
	u16 dgain = 0;
	bool is_fail = false;
	int limit = 200;

	do {
		switch (step) {
		case DPK_AGC_STEP_SYNC_DGAIN:
			is_fail = _dpk_kip_set_rxagc(rtwdev, phy, path, kidx);

			if (RTW8852C_DPK_RXSRAM_DBG)
				_dpk_read_rxsram(rtwdev);

			if (is_fail) {
				goout = 1;
				break;
			}

			dgain = _dpk_dgain_read(rtwdev);

			if (dgain > 0x5fc || dgain < 0x556) {
				_dpk_one_shot(rtwdev, phy, path, D_SYNC);
				dgain = _dpk_dgain_read(rtwdev);
			}

			if (agc_cnt == 0) {
				if (dpk->bp[path][kidx].band == RTW89_BAND_2G)
					_dpk_bypass_rxiqc(rtwdev, path);
				else
					_dpk_lbk_rxiqk(rtwdev, phy, path);
			}

			step = DPK_AGC_STEP_GAIN_LOSS_IDX;
			break;

		case DPK_AGC_STEP_GAIN_LOSS_IDX:
			tmp_gl_idx = _dpk_gainloss(rtwdev, phy, path, kidx);
			pas = _dpk_pas_read(rtwdev, true);

			if (pas == DPK_PAS_LT && tmp_gl_idx > 0)
				step = DPK_AGC_STEP_GL_LT_CRITERION;
			else if (pas == DPK_PAS_GT && tmp_gl_idx == 0)
				step = DPK_AGC_STEP_GL_GT_CRITERION;
			else if (tmp_gl_idx >= 7)
				step = DPK_AGC_STEP_GL_GT_CRITERION;
			else if (tmp_gl_idx == 0)
				step = DPK_AGC_STEP_GL_LT_CRITERION;
			else
				step = DPK_AGC_STEP_SET_TX_GAIN;
			break;

		case DPK_AGC_STEP_GL_GT_CRITERION:
			if (tmp_dbm <= 7) {
				goout = 1;
				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[DPK] Txagc@lower bound!!\n");
			} else {
				tmp_dbm = max_t(u8, tmp_dbm - 3, 7);
				_dpk_kip_set_txagc(rtwdev, phy, path, tmp_dbm, true);
			}
			step = DPK_AGC_STEP_SYNC_DGAIN;
			agc_cnt++;
			break;

		case DPK_AGC_STEP_GL_LT_CRITERION:
			if (tmp_dbm >= 24) {
				goout = 1;
				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[DPK] Txagc@upper bound!!\n");
			} else {
				tmp_dbm = min_t(u8, tmp_dbm + 2, 24);
				_dpk_kip_set_txagc(rtwdev, phy, path, tmp_dbm, true);
			}
			step = DPK_AGC_STEP_SYNC_DGAIN;
			agc_cnt++;
			break;

		case DPK_AGC_STEP_SET_TX_GAIN:
			_dpk_kip_control_rfc(rtwdev, path, false);
			tmp_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB);
			if (tmp_rxbb + tmp_gl_idx > 0x1f)
				tmp_rxbb = 0x1f;
			else
				tmp_rxbb = tmp_rxbb + tmp_gl_idx;

			rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB, tmp_rxbb);
			rtw89_debug(rtwdev, RTW89_DBG_RFK,
				    "[DPK] Adjust RXBB (%+d) = 0x%x\n",
				    tmp_gl_idx, tmp_rxbb);
			_dpk_kip_control_rfc(rtwdev, path, true);
			goout = 1;
			break;
		default:
			goout = 1;
			break;
		}
	} while (!goout && agc_cnt < 6 && --limit > 0);

	if (limit <= 0)
		rtw89_warn(rtwdev, "[DPK] exceed loop limit\n");

	return is_fail;
}
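
/*
 * Select the MDPD basis-order register table. The order index 0..3
 * picks one of the order0..order3 tables, corresponding to the
 * (5,3,1), (5,3,0), (5,0,0) and (7,3,1) term configurations named in
 * the debug print below.
 */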
static void _dpk_set_mdpd_para(struct rtw89_dev *rtwdev, u8 order)
{
	static const struct rtw89_rfk_tbl *order_tbls[] = {
		&rtw8852c_dpk_mdpd_order0_defs_tbl,
		&rtw8852c_dpk_mdpd_order1_defs_tbl,
		&rtw8852c_dpk_mdpd_order2_defs_tbl,
		&rtw8852c_dpk_mdpd_order3_defs_tbl,
	};

	if (order >= ARRAY_SIZE(order_tbls)) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Wrong MDPD order!!(0x%x)\n", order);
		return;
	}

	rtw89_rfk_parser(rtwdev, order_tbls[order]);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Set %s for IDL\n",
		    order == 0x0 ? "(5,3,1)" :
		    order == 0x1 ? "(5,3,0)" :
		    order == 0x2 ? "(5,0,0)" : "(7,3,1)");
}
static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			 enum rtw89_rf_path path, u8 kidx)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u8 cnt;
	u8 ov_flag;
	u32 dpk_sync;

	rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_MA, 0x1);

	if (rtw89_phy_read32_mask(rtwdev, R_DPK_MPA, B_DPK_MPA_T2) == 0x1)
		_dpk_set_mdpd_para(rtwdev, 0x2);
	else if (rtw89_phy_read32_mask(rtwdev, R_DPK_MPA, B_DPK_MPA_T1) == 0x1)
		_dpk_set_mdpd_para(rtwdev, 0x1);
	else if (rtw89_phy_read32_mask(rtwdev, R_DPK_MPA, B_DPK_MPA_T0) == 0x1)
		_dpk_set_mdpd_para(rtwdev, 0x0);
	else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_5 ||
		 dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_10 ||
		 dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_20)
		_dpk_set_mdpd_para(rtwdev, 0x2);
	else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40 ||
		 dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80)
		_dpk_set_mdpd_para(rtwdev, 0x1);
	else
		_dpk_set_mdpd_para(rtwdev, 0x0);

	rtw89_phy_write32_mask(rtwdev, R_DPK_IDL, B_DPK_IDL, 0x0);
	fsleep(1000);

	_dpk_one_shot(rtwdev, phy, path, D_MDPK_IDL);
	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0);
	dpk_sync = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] dpk_sync = 0x%x\n", dpk_sync);

	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0xf);
	ov_flag = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_SYNERR);
	for (cnt = 0; cnt < 5 && ov_flag == 0x1; cnt++) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] ReK due to MDPK ov!!!\n");
		_dpk_one_shot(rtwdev, phy, path, D_MDPK_IDL);
		rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0xf);
		ov_flag = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_SYNERR);
	}

	if (ov_flag) {
		_dpk_set_mdpd_para(rtwdev, 0x2);
		_dpk_one_shot(rtwdev, phy, path, D_MDPK_IDL);
	}
}
static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			      enum rtw89_rf_path path)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	bool is_reload = false;
	u8 idx, cur_band, cur_ch;

	cur_band = chan->band_type;
	cur_ch = chan->channel;

	for (idx = 0; idx < RTW89_DPK_BKUP_NUM; idx++) {
		if (cur_band != dpk->bp[path][idx].band ||
		    cur_ch != dpk->bp[path][idx].ch)
			continue;

		rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8),
				       B_COEF_SEL_MDPD, idx);
		dpk->cur_idx[path] = idx;
		is_reload = true;
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] reload S%d[%d] success\n", path, idx);
	}

	return is_reload;
}

static void _dpk_kip_pwr_clk_onoff(struct rtw89_dev *rtwdev, bool turn_on)
{
	rtw89_rfk_parser(rtwdev, turn_on ? &rtw8852c_dpk_kip_pwr_clk_on_defs_tbl :
					   &rtw8852c_dpk_kip_pwr_clk_off_defs_tbl);
}

static void _dpk_kip_preset_8852c(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				  enum rtw89_rf_path path, u8 kidx)
{
	rtw89_phy_write32_mask(rtwdev, R_KIP_MOD, B_KIP_MOD,
			       rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK));

	if (rtwdev->hal.cv == CHIP_CAV)
		rtw89_phy_write32_mask(rtwdev,
				       R_DPD_CH0A + (path << 8) + (kidx << 2),
				       B_DPD_SEL, 0x01);
	else
		rtw89_phy_write32_mask(rtwdev,
				       R_DPD_CH0A + (path << 8) + (kidx << 2),
				       B_DPD_SEL, 0x0c);

	_dpk_kip_control_rfc(rtwdev, path, true);
	rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), B_COEF_SEL_MDPD, kidx);

	_dpk_one_shot(rtwdev, phy, path, D_KIP_PRESET);
}
  2167. static void _dpk_para_query(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
  2168. {
  2169. #define _DPK_PARA_TXAGC GENMASK(15, 10)
  2170. #define _DPK_PARA_THER GENMASK(31, 26)
  2171. struct rtw89_dpk_info *dpk = &rtwdev->dpk;
  2172. u32 para;
  2173. para = rtw89_phy_read32_mask(rtwdev, dpk_par_regs[kidx][dpk->cur_k_set] + (path << 8),
  2174. MASKDWORD);
  2175. dpk->bp[path][kidx].txagc_dpk = FIELD_GET(_DPK_PARA_TXAGC, para);
  2176. dpk->bp[path][kidx].ther_dpk = FIELD_GET(_DPK_PARA_THER, para);
  2177. rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] thermal/ txagc_RF (K%d) = 0x%x/ 0x%x\n",
  2178. dpk->cur_k_set, dpk->bp[path][kidx].ther_dpk, dpk->bp[path][kidx].txagc_dpk);
  2179. }
  2180. static void _dpk_gain_normalize_8852c(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
  2181. enum rtw89_rf_path path, u8 kidx, bool is_execute)
  2182. {
  2183. struct rtw89_dpk_info *dpk = &rtwdev->dpk;
  2184. if (is_execute) {
  2185. rtw89_phy_write32_mask(rtwdev, R_DPK_GN + (path << 8), B_DPK_GN_AG, 0x200);
  2186. rtw89_phy_write32_mask(rtwdev, R_DPK_GN + (path << 8), B_DPK_GN_EN, 0x3);
  2187. _dpk_one_shot(rtwdev, phy, path, D_GAIN_NORM);
  2188. } else {
  2189. rtw89_phy_write32_mask(rtwdev, dpk_par_regs[kidx][dpk->cur_k_set] + (path << 8),
  2190. 0x0000007F, 0x5b);
  2191. }
  2192. dpk->bp[path][kidx].gs =
  2193. rtw89_phy_read32_mask(rtwdev, dpk_par_regs[kidx][dpk->cur_k_set] + (path << 8),
  2194. 0x0000007F);
  2195. }
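/* Map the MDPD order read back from B_LDL_NORM_OP onto the encoding
 * B_DPD_ORDER expects; values outside 0..3 fall through to 0xff.
 */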
static u8 _dpk_order_convert(struct rtw89_dev *rtwdev)
{
	u32 val32 = rtw89_phy_read32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP);
	u8 val;

	switch (val32) {
	case 0:
		val = 0x6;
		break;
	case 1:
		val = 0x2;
		break;
	case 2:
		val = 0x0;
		break;
	case 3:
		val = 0x7;
		break;
	default:
		val = 0xff;
		break;
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] convert MDPD order to 0x%x\n", val);

	return val;
}

static void _dpk_on(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		    enum rtw89_rf_path path, u8 kidx)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
			       B_DPD_ORDER, _dpk_order_convert(rtwdev));

	dpk->bp[path][kidx].mdpd_en = BIT(dpk->cur_k_set);
	dpk->bp[path][kidx].path_ok = true;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] path_ok = 0x%x\n",
		    path, kidx, dpk->bp[path][kidx].mdpd_en);

	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
			       B_DPD_MEN, dpk->bp[path][kidx].mdpd_en);

	_dpk_gain_normalize_8852c(rtwdev, phy, path, kidx, false);
}

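/* Per-path DPK sequence, as composed from the calls it makes: take RF
 * control away from the BB, apply the DPK RF and KIP presets, force BB
 * TX power, run the AGC search, then the MDPK IDL stage, and latch the
 * result. Returns true when the AGC search failed.
 */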
static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		      enum rtw89_rf_path path, u8 gain)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u8 kidx = dpk->cur_idx[path];
	u8 init_xdbm = 15;
	bool is_fail;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] ========= S%d[%d] DPK Start =========\n", path, kidx);
	_dpk_kip_control_rfc(rtwdev, path, false);
	_rf_direct_cntrl(rtwdev, path, false);
	rtw89_write_rf(rtwdev, path, RR_BBDC, RFREG_MASK, 0x03ffd);
	_dpk_rf_setting(rtwdev, gain, path, kidx);
	_set_rx_dck(rtwdev, phy, path, false);
	_dpk_kip_pwr_clk_onoff(rtwdev, true);
	_dpk_kip_preset_8852c(rtwdev, phy, path, kidx);
	_dpk_txpwr_bb_force(rtwdev, path, true);
	_dpk_kip_set_txagc(rtwdev, phy, path, init_xdbm, true);
	_dpk_tpg_sel(rtwdev, path, kidx);

	is_fail = _dpk_agc(rtwdev, phy, path, kidx, init_xdbm, false);
	if (is_fail)
		goto _error;

	_dpk_idl_mpa(rtwdev, phy, path, kidx);
	_dpk_para_query(rtwdev, path, kidx);
	_dpk_on(rtwdev, phy, path, kidx);

_error:
	_dpk_kip_control_rfc(rtwdev, path, false);
	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RF_RX);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d]_K%d %s\n", path, kidx,
		    dpk->cur_k_set, is_fail ? "need Check" : "is Success");

	return is_fail;
}

static void _dpk_init(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u8 kidx = dpk->cur_idx[path];

	dpk->bp[path][kidx].path_ok = false;
}

static void _dpk_drf_direct_cntrl(struct rtw89_dev *rtwdev, u8 path, bool is_bybb)
{
	if (is_bybb)
		rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x1);
	else
		rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0);
}

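/* Full DPK pass over all paths: back up KIP and RF registers, run
 * _dpk_main() per path, then restore the backups. TSSI tracking is
 * paused on each path for the duration of the calibration.
 */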
static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
			    enum rtw89_phy_idx phy, u8 kpath)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	static const u32 kip_reg[] = {0x813c, 0x8124, 0x8120, 0xc0c4, 0xc0e8, 0xc0d4, 0xc0d8};
	u32 backup_rf_val[RTW8852C_DPK_RF_PATH][BACKUP_RF_REGS_NR];
	u32 kip_bkup[RTW8852C_DPK_RF_PATH][RTW8852C_DPK_KIP_REG_NUM] = {};
	u8 path;
	bool is_fail = true, reloaded[RTW8852C_DPK_RF_PATH] = {false};

	static_assert(ARRAY_SIZE(kip_reg) == RTW8852C_DPK_KIP_REG_NUM);

	if (dpk->is_dpk_reload_en) {
		for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
			if (!(kpath & BIT(path)))
				continue;

			reloaded[path] = _dpk_reload_check(rtwdev, phy, path);
			if (!reloaded[path] && dpk->bp[path][0].ch != 0)
				dpk->cur_idx[path] = !dpk->cur_idx[path];
			else
				_dpk_onoff(rtwdev, path, false);
		}
	} else {
		for (path = 0; path < RTW8852C_DPK_RF_PATH; path++)
			dpk->cur_idx[path] = 0;
	}

	for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] ========= S%d[%d] DPK Init =========\n",
			    path, dpk->cur_idx[path]);
		_dpk_bkup_kip(rtwdev, kip_reg, kip_bkup, path);
		_rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path);
		_dpk_information(rtwdev, phy, path);
		_dpk_init(rtwdev, path);

		if (rtwdev->is_tssi_mode[path])
			_dpk_tssi_pause(rtwdev, path, true);
	}

	for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] ========= S%d[%d] DPK Start =========\n",
			    path, dpk->cur_idx[path]);
		rtw8852c_disable_rxagc(rtwdev, path, 0x0);
		_dpk_drf_direct_cntrl(rtwdev, path, false);
		_dpk_bb_afe_setting(rtwdev, phy, path, kpath);
		is_fail = _dpk_main(rtwdev, phy, path, 1);
		_dpk_onoff(rtwdev, path, is_fail);
	}

	for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] ========= S%d[%d] DPK Restore =========\n",
			    path, dpk->cur_idx[path]);
		_dpk_kip_restore(rtwdev, phy, path);
		_dpk_reload_kip(rtwdev, kip_reg, kip_bkup, path);
		_rfk_restore_rf_reg(rtwdev, backup_rf_val[path], path);
		_dpk_bb_afe_restore(rtwdev, path);
		rtw8852c_disable_rxagc(rtwdev, path, 0x1);

		if (rtwdev->is_tssi_mode[path])
			_dpk_tssi_pause(rtwdev, path, false);
	}

	_dpk_kip_pwr_clk_onoff(rtwdev, false);
}

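/* DPK is bypassed on CAV silicon outside 2 GHz and on any band that is
 * served by an external PA, presumably because the trained predistortion
 * model would not match the external front end.
 */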
static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	struct rtw89_fem_info *fem = &rtwdev->fem;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	u8 band = chan->band_type;

	if (rtwdev->hal.cv == CHIP_CAV && band != RTW89_BAND_2G) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK due to CAV & not 2G!!\n");
		return true;
	} else if (fem->epa_2g && band == RTW89_BAND_2G) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK due to 2G_ext_PA exist!!\n");
		return true;
	} else if (fem->epa_5g && band == RTW89_BAND_5G) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK due to 5G_ext_PA exist!!\n");
		return true;
	} else if (fem->epa_6g && band == RTW89_BAND_6G) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK due to 6G_ext_PA exist!!\n");
		return true;
	}

	return false;
}

static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	u8 path, kpath;

	kpath = _kpath(rtwdev, phy);

	for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
		if (kpath & BIT(path))
			_dpk_onoff(rtwdev, path, true);
	}
}

static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force)
{
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] ****** DPK Start (Ver: 0x%x, Cv: %d, RF_para: %d) ******\n",
		    RTW8852C_DPK_VER, rtwdev->hal.cv,
		    RTW8852C_RF_REL_VERSION);

	if (_dpk_bypass_check(rtwdev, phy))
		_dpk_force_bypass(rtwdev, phy);
	else
		_dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy));

	if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_DCKC, RR_DCKC_CHK) == 0x1)
		rtw8852c_rx_dck(rtwdev, phy, false);
}

static void _dpk_onoff(struct rtw89_dev *rtwdev,
		       enum rtw89_rf_path path, bool off)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u8 val, kidx = dpk->cur_idx[path];

	val = dpk->is_dpk_enable && !off && dpk->bp[path][kidx].path_ok ?
	      dpk->bp[path][kidx].mdpd_en : 0;

	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
			       B_DPD_MEN, val);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s !!!\n", path,
		    kidx, dpk->is_dpk_enable && !off ? "enable" : "disable");
}

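/* DPK thermal tracking: compare the EWMA thermal reading against the
 * value captured at calibration time and, on CAV silicon with a valid
 * TX AGC, adjust the power scaling word by half the thermal delta.
 */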
static void _dpk_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u8 path, kidx;
	u8 txagc_rf = 0;
	s8 txagc_bb = 0, txagc_bb_tp = 0, txagc_ofst = 0;
	u8 cur_ther;
	s8 delta_ther = 0;
	s16 pwsf_tssi_ofst;

	for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
		kidx = dpk->cur_idx[path];
		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
			    "[DPK_TRK] ================[S%d[%d] (CH %d)]================\n",
			    path, kidx, dpk->bp[path][kidx].ch);

		txagc_rf =
			rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13), 0x0000003f);
		txagc_bb =
			rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13), MASKBYTE2);
		txagc_bb_tp =
			rtw89_phy_read32_mask(rtwdev, R_TXAGC_BTP + (path << 13), B_TXAGC_BTP);

		/* report from KIP */
		rtw89_phy_write32_mask(rtwdev, R_KIP_RPT + (path << 8), B_KIP_RPT_SEL, 0xf);
		cur_ther =
			rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), B_RPT_PER_TH);
		txagc_ofst =
			rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), B_RPT_PER_OF);
		pwsf_tssi_ofst =
			rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), B_RPT_PER_TSSI);
		pwsf_tssi_ofst = sign_extend32(pwsf_tssi_ofst, 12);

		cur_ther = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
			    "[DPK_TRK] thermal now = %d\n", cur_ther);

		if (dpk->bp[path][kidx].ch != 0 && cur_ther != 0)
			delta_ther = dpk->bp[path][kidx].ther_dpk - cur_ther;

		delta_ther = delta_ther * 1 / 2;

		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
			    "[DPK_TRK] extra delta_ther = %d (0x%x / 0x%x@k)\n",
			    delta_ther, cur_ther, dpk->bp[path][kidx].ther_dpk);
		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
			    "[DPK_TRK] delta_txagc = %d (0x%x / 0x%x@k)\n",
			    txagc_rf - dpk->bp[path][kidx].txagc_dpk, txagc_rf,
			    dpk->bp[path][kidx].txagc_dpk);
		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
			    "[DPK_TRK] txagc_offset / pwsf_tssi_ofst = 0x%x / %+d\n",
			    txagc_ofst, pwsf_tssi_ofst);
		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
			    "[DPK_TRK] txagc_bb_tp / txagc_bb = 0x%x / 0x%x\n",
			    txagc_bb_tp, txagc_bb);

		if (rtw89_phy_read32_mask(rtwdev, R_DPK_WR, B_DPK_WR_ST) == 0x0 &&
		    txagc_rf != 0 && rtwdev->hal.cv == CHIP_CAV) {
			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] New pwsf = 0x%x\n", 0x78 - delta_ther);

			rtw89_phy_write32_mask(rtwdev, R_DPD_BND + (path << 8) + (kidx << 2),
					       0x07FC0000, 0x78 - delta_ther);
		}
	}
}

static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			  enum rtw89_rf_path path)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	enum rtw89_bandwidth bw = chan->band_width;
	enum rtw89_band band = chan->band_type;
	u32 clk = 0x0;

	rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_sys_defs_tbl);

	switch (bw) {
	case RTW89_CHANNEL_WIDTH_80:
		clk = 0x1;
		break;
	case RTW89_CHANNEL_WIDTH_80_80:
	case RTW89_CHANNEL_WIDTH_160:
		clk = 0x2;
		break;
	default:
		break;
	}

	if (path == RF_PATH_A) {
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ADC_CLK,
				       B_P0_TSSI_ADC_CLK, clk);
		rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
					 &rtw8852c_tssi_sys_defs_2g_a_tbl,
					 &rtw8852c_tssi_sys_defs_5g_a_tbl);
	} else {
		rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_ADC_CLK,
				       B_P1_TSSI_ADC_CLK, clk);
		rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
					 &rtw8852c_tssi_sys_defs_2g_b_tbl,
					 &rtw8852c_tssi_sys_defs_5g_b_tbl);
	}
}

static void _tssi_ini_txpwr_ctrl_bb(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				    enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852c_tssi_txpwr_ctrl_bb_defs_a_tbl,
				 &rtw8852c_tssi_txpwr_ctrl_bb_defs_b_tbl);
}

static void _tssi_ini_txpwr_ctrl_bb_he_tb(struct rtw89_dev *rtwdev,
					  enum rtw89_phy_idx phy,
					  enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852c_tssi_txpwr_ctrl_bb_he_tb_defs_a_tbl,
				 &rtw8852c_tssi_txpwr_ctrl_bb_he_tb_defs_b_tbl);
}

static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			  enum rtw89_rf_path path)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	enum rtw89_band band = chan->band_type;

	if (path == RF_PATH_A) {
		rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_dck_defs_a_tbl);
		rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
					 &rtw8852c_tssi_dck_defs_2g_a_tbl,
					 &rtw8852c_tssi_dck_defs_5g_a_tbl);
	} else {
		rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_dck_defs_b_tbl);
		rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
					 &rtw8852c_tssi_dck_defs_2g_b_tbl,
					 &rtw8852c_tssi_dck_defs_5g_b_tbl);
	}
}

static void _tssi_set_bbgain_split(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				   enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852c_tssi_set_bbgain_split_a_tbl,
				 &rtw8852c_tssi_set_bbgain_split_b_tbl);
}

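/* Build the 64-entry thermal offset table for one path: entries 0..31
 * take negated "down" swing deltas, entries 63..32 the "up" deltas,
 * both clamped at the last table element. A thermal reading of 0xff
 * (no valid sample) zeroes the table and uses a mid-scale base of 32.
 */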
static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				 enum rtw89_rf_path path)
{
#define RTW8852C_TSSI_GET_VAL(ptr, idx)			\
({							\
	s8 *__ptr = (ptr);				\
	u8 __idx = (idx), __i, __v;			\
	u32 __val = 0;					\
	for (__i = 0; __i < 4; __i++) {			\
		__v = (__ptr[__idx + __i]);		\
		__val |= (__v << (8 * __i));		\
	}						\
	__val;						\
})
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	u8 ch = chan->channel;
	u8 subband = chan->subband_type;
	const s8 *thm_up_a = NULL;
	const s8 *thm_down_a = NULL;
	const s8 *thm_up_b = NULL;
	const s8 *thm_down_b = NULL;
	u8 thermal = 0xff;
	s8 thm_ofst[64] = {0};
	u32 tmp = 0;
	u8 i, j;

	switch (subband) {
	default:
	case RTW89_CH_2G:
		thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_2ga_p;
		thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_2ga_n;
		thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_2gb_p;
		thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_2gb_n;
		break;
	case RTW89_CH_5G_BAND_1:
		thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[0];
		thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[0];
		thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[0];
		thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[0];
		break;
	case RTW89_CH_5G_BAND_3:
		thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[1];
		thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[1];
		thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[1];
		thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[1];
		break;
	case RTW89_CH_5G_BAND_4:
		thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[2];
		thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[2];
		thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[2];
		thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[2];
		break;
	case RTW89_CH_6G_BAND_IDX0:
	case RTW89_CH_6G_BAND_IDX1:
		thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[0];
		thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[0];
		thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[0];
		thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[0];
		break;
	case RTW89_CH_6G_BAND_IDX2:
	case RTW89_CH_6G_BAND_IDX3:
		thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[1];
		thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[1];
		thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[1];
		thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[1];
		break;
	case RTW89_CH_6G_BAND_IDX4:
	case RTW89_CH_6G_BAND_IDX5:
		thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[2];
		thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[2];
		thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[2];
		thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[2];
		break;
	case RTW89_CH_6G_BAND_IDX6:
	case RTW89_CH_6G_BAND_IDX7:
		thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[3];
		thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[3];
		thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[3];
		thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[3];
		break;
	}

	if (path == RF_PATH_A) {
		thermal = tssi_info->thermal[RF_PATH_A];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] ch=%d thermal_pathA=0x%x\n", ch, thermal);

		rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_DIS, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_TRK, 0x1);

		if (thermal == 0xff) {
			rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, 32);
			rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, 32);

			for (i = 0; i < 64; i += 4) {
				rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, 0x0);

				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    0x5c00 + i, 0x0);
			}
		} else {
			rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, thermal);
			rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL,
					       thermal);

			i = 0;
			for (j = 0; j < 32; j++)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      -thm_down_a[i++] :
					      -thm_down_a[DELTA_SWINGIDX_SIZE - 1];

			i = 1;
			for (j = 63; j >= 32; j--)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      thm_up_a[i++] :
					      thm_up_a[DELTA_SWINGIDX_SIZE - 1];

			for (i = 0; i < 64; i += 4) {
				tmp = RTW8852C_TSSI_GET_VAL(thm_ofst, i);
				rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, tmp);

				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    0x5c00 + i, tmp);
			}
		}
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x0);
	} else {
		thermal = tssi_info->thermal[RF_PATH_B];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] ch=%d thermal_pathB=0x%x\n", ch, thermal);

		rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_DIS, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_TRK, 0x1);

		if (thermal == 0xff) {
			rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, 32);
			rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL, 32);

			for (i = 0; i < 64; i += 4) {
				rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, 0x0);

				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    0x7c00 + i, 0x0);
			}
		} else {
			rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, thermal);
			rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL,
					       thermal);

			i = 0;
			for (j = 0; j < 32; j++)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      -thm_down_b[i++] :
					      -thm_down_b[DELTA_SWINGIDX_SIZE - 1];

			i = 1;
			for (j = 63; j >= 32; j--)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      thm_up_b[i++] :
					      thm_up_b[DELTA_SWINGIDX_SIZE - 1];

			for (i = 0; i < 64; i += 4) {
				tmp = RTW8852C_TSSI_GET_VAL(thm_ofst, i);
				rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, tmp);

				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    0x7c00 + i, tmp);
			}
		}
		rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x0);
	}
#undef RTW8852C_TSSI_GET_VAL
}

static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				enum rtw89_rf_path path)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	enum rtw89_band band = chan->band_type;

	if (path == RF_PATH_A) {
		rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
					 &rtw8852c_tssi_slope_cal_org_defs_2g_a_tbl,
					 &rtw8852c_tssi_slope_cal_org_defs_5g_a_tbl);
	} else {
		rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
					 &rtw8852c_tssi_slope_cal_org_defs_2g_b_tbl,
					 &rtw8852c_tssi_slope_cal_org_defs_5g_b_tbl);
	}
}

static void _tssi_set_aligk_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				    enum rtw89_rf_path path)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	enum rtw89_band band = chan->band_type;
	const struct rtw89_rfk_tbl *tbl;

	if (path == RF_PATH_A) {
		if (band == RTW89_BAND_2G)
			tbl = &rtw8852c_tssi_set_aligk_default_defs_2g_a_tbl;
		else if (band == RTW89_BAND_6G)
			tbl = &rtw8852c_tssi_set_aligk_default_defs_6g_a_tbl;
		else
			tbl = &rtw8852c_tssi_set_aligk_default_defs_5g_a_tbl;
	} else {
		if (band == RTW89_BAND_2G)
			tbl = &rtw8852c_tssi_set_aligk_default_defs_2g_b_tbl;
		else if (band == RTW89_BAND_6G)
			tbl = &rtw8852c_tssi_set_aligk_default_defs_6g_b_tbl;
		else
			tbl = &rtw8852c_tssi_set_aligk_default_defs_5g_b_tbl;
	}

	rtw89_rfk_parser(rtwdev, tbl);
}

static void _tssi_set_slope(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			    enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852c_tssi_slope_defs_a_tbl,
				 &rtw8852c_tssi_slope_defs_b_tbl);
}

static void _tssi_run_slope(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			    enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852c_tssi_run_slope_defs_a_tbl,
				 &rtw8852c_tssi_run_slope_defs_b_tbl);
}

static void _tssi_set_track(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			    enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852c_tssi_track_defs_a_tbl,
				 &rtw8852c_tssi_track_defs_b_tbl);
}

static void _tssi_set_txagc_offset_mv_avg(struct rtw89_dev *rtwdev,
					  enum rtw89_phy_idx phy,
					  enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852c_tssi_txagc_ofst_mv_avg_defs_a_tbl,
				 &rtw8852c_tssi_txagc_ofst_mv_avg_defs_b_tbl);
}

static void _tssi_enable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C;

	if (rtwdev->dbcc_en) {
		if (phy == RTW89_PHY_0) {
			path = RF_PATH_A;
			path_max = RF_PATH_B;
		} else if (phy == RTW89_PHY_1) {
			path = RF_PATH_B;
			path_max = RF_PATH_NUM_8852C;
		}
	}

	for (i = path; i < path_max; i++) {
		_tssi_set_track(rtwdev, phy, i);
		_tssi_set_txagc_offset_mv_avg(rtwdev, phy, i);

		rtw89_rfk_parser_by_cond(rtwdev, i == RF_PATH_A,
					 &rtw8852c_tssi_enable_defs_a_tbl,
					 &rtw8852c_tssi_enable_defs_b_tbl);

		tssi_info->base_thermal[i] =
			ewma_thermal_read(&rtwdev->phystat.avg_thermal[i]);
		rtwdev->is_tssi_mode[i] = true;
	}
}

static void _tssi_disable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C;

	if (rtwdev->dbcc_en) {
		if (phy == RTW89_PHY_0) {
			path = RF_PATH_A;
			path_max = RF_PATH_B;
		} else if (phy == RTW89_PHY_1) {
			path = RF_PATH_B;
			path_max = RF_PATH_NUM_8852C;
		}
	}

	for (i = path; i < path_max; i++) {
		if (i == RF_PATH_A) {
			rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_disable_defs_a_tbl);
			rtwdev->is_tssi_mode[RF_PATH_A] = false;
		} else if (i == RF_PATH_B) {
			rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_disable_defs_b_tbl);
			rtwdev->is_tssi_mode[RF_PATH_B] = false;
		}
	}
}

static u32 _tssi_get_cck_group(struct rtw89_dev *rtwdev, u8 ch)
{
	switch (ch) {
	case 1 ... 2:
		return 0;
	case 3 ... 5:
		return 1;
	case 6 ... 8:
		return 2;
	case 9 ... 11:
		return 3;
	case 12 ... 13:
		return 4;
	case 14:
		return 5;
	}

	return 0;
}

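/* Channels that sit between two calibrated groups are tagged as an
 * "extra" group via bit 31; their de value is later computed as the
 * average of the two neighbouring groups (e.g. 5 GHz channels 41..43
 * map to TSSI_EXTRA_GROUP(5), averaging groups 5 and 6).
 */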
#define TSSI_EXTRA_GROUP_BIT (BIT(31))
#define TSSI_EXTRA_GROUP(idx) (TSSI_EXTRA_GROUP_BIT | (idx))
#define IS_TSSI_EXTRA_GROUP(group) ((group) & TSSI_EXTRA_GROUP_BIT)
#define TSSI_EXTRA_GET_GROUP_IDX1(group) ((group) & ~TSSI_EXTRA_GROUP_BIT)
#define TSSI_EXTRA_GET_GROUP_IDX2(group) (TSSI_EXTRA_GET_GROUP_IDX1(group) + 1)

static u32 _tssi_get_ofdm_group(struct rtw89_dev *rtwdev, u8 ch)
{
	switch (ch) {
	case 1 ... 2:
		return 0;
	case 3 ... 5:
		return 1;
	case 6 ... 8:
		return 2;
	case 9 ... 11:
		return 3;
	case 12 ... 14:
		return 4;
	case 36 ... 40:
		return 5;
	case 41 ... 43:
		return TSSI_EXTRA_GROUP(5);
	case 44 ... 48:
		return 6;
	case 49 ... 51:
		return TSSI_EXTRA_GROUP(6);
	case 52 ... 56:
		return 7;
	case 57 ... 59:
		return TSSI_EXTRA_GROUP(7);
	case 60 ... 64:
		return 8;
	case 100 ... 104:
		return 9;
	case 105 ... 107:
		return TSSI_EXTRA_GROUP(9);
	case 108 ... 112:
		return 10;
	case 113 ... 115:
		return TSSI_EXTRA_GROUP(10);
	case 116 ... 120:
		return 11;
	case 121 ... 123:
		return TSSI_EXTRA_GROUP(11);
	case 124 ... 128:
		return 12;
	case 129 ... 131:
		return TSSI_EXTRA_GROUP(12);
	case 132 ... 136:
		return 13;
	case 137 ... 139:
		return TSSI_EXTRA_GROUP(13);
	case 140 ... 144:
		return 14;
	case 149 ... 153:
		return 15;
	case 154 ... 156:
		return TSSI_EXTRA_GROUP(15);
	case 157 ... 161:
		return 16;
	case 162 ... 164:
		return TSSI_EXTRA_GROUP(16);
	case 165 ... 169:
		return 17;
	case 170 ... 172:
		return TSSI_EXTRA_GROUP(17);
	case 173 ... 177:
		return 18;
	}

	return 0;
}

static u32 _tssi_get_6g_ofdm_group(struct rtw89_dev *rtwdev, u8 ch)
{
	switch (ch) {
	case 1 ... 5:
		return 0;
	case 6 ... 8:
		return TSSI_EXTRA_GROUP(0);
	case 9 ... 13:
		return 1;
	case 14 ... 16:
		return TSSI_EXTRA_GROUP(1);
	case 17 ... 21:
		return 2;
	case 22 ... 24:
		return TSSI_EXTRA_GROUP(2);
	case 25 ... 29:
		return 3;
	case 33 ... 37:
		return 4;
	case 38 ... 40:
		return TSSI_EXTRA_GROUP(4);
	case 41 ... 45:
		return 5;
	case 46 ... 48:
		return TSSI_EXTRA_GROUP(5);
	case 49 ... 53:
		return 6;
	case 54 ... 56:
		return TSSI_EXTRA_GROUP(6);
	case 57 ... 61:
		return 7;
	case 65 ... 69:
		return 8;
	case 70 ... 72:
		return TSSI_EXTRA_GROUP(8);
	case 73 ... 77:
		return 9;
	case 78 ... 80:
		return TSSI_EXTRA_GROUP(9);
	case 81 ... 85:
		return 10;
	case 86 ... 88:
		return TSSI_EXTRA_GROUP(10);
	case 89 ... 93:
		return 11;
	case 97 ... 101:
		return 12;
	case 102 ... 104:
		return TSSI_EXTRA_GROUP(12);
	case 105 ... 109:
		return 13;
	case 110 ... 112:
		return TSSI_EXTRA_GROUP(13);
	case 113 ... 117:
		return 14;
	case 118 ... 120:
		return TSSI_EXTRA_GROUP(14);
	case 121 ... 125:
		return 15;
	case 129 ... 133:
		return 16;
	case 134 ... 136:
		return TSSI_EXTRA_GROUP(16);
	case 137 ... 141:
		return 17;
	case 142 ... 144:
		return TSSI_EXTRA_GROUP(17);
	case 145 ... 149:
		return 18;
	case 150 ... 152:
		return TSSI_EXTRA_GROUP(18);
	case 153 ... 157:
		return 19;
	case 161 ... 165:
		return 20;
	case 166 ... 168:
		return TSSI_EXTRA_GROUP(20);
	case 169 ... 173:
		return 21;
	case 174 ... 176:
		return TSSI_EXTRA_GROUP(21);
	case 177 ... 181:
		return 22;
	case 182 ... 184:
		return TSSI_EXTRA_GROUP(22);
	case 185 ... 189:
		return 23;
	case 193 ... 197:
		return 24;
	case 198 ... 200:
		return TSSI_EXTRA_GROUP(24);
	case 201 ... 205:
		return 25;
	case 206 ... 208:
		return TSSI_EXTRA_GROUP(25);
	case 209 ... 213:
		return 26;
	case 214 ... 216:
		return TSSI_EXTRA_GROUP(26);
	case 217 ... 221:
		return 27;
	case 225 ... 229:
		return 28;
	case 230 ... 232:
		return TSSI_EXTRA_GROUP(28);
	case 233 ... 237:
		return 29;
	case 238 ... 240:
		return TSSI_EXTRA_GROUP(29);
	case 241 ... 245:
		return 30;
	case 246 ... 248:
		return TSSI_EXTRA_GROUP(30);
	case 249 ... 253:
		return 31;
	}

	return 0;
}

static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch)
{
	switch (ch) {
	case 1 ... 8:
		return 0;
	case 9 ... 14:
		return 1;
	case 36 ... 48:
		return 2;
	case 49 ... 51:
		return TSSI_EXTRA_GROUP(2);
	case 52 ... 64:
		return 3;
	case 100 ... 112:
		return 4;
	case 113 ... 115:
		return TSSI_EXTRA_GROUP(4);
	case 116 ... 128:
		return 5;
	case 132 ... 144:
		return 6;
	case 149 ... 177:
		return 7;
	}

	return 0;
}

static u32 _tssi_get_6g_trim_group(struct rtw89_dev *rtwdev, u8 ch)
{
	switch (ch) {
	case 1 ... 13:
		return 0;
	case 14 ... 16:
		return TSSI_EXTRA_GROUP(0);
	case 17 ... 29:
		return 1;
	case 33 ... 45:
		return 2;
	case 46 ... 48:
		return TSSI_EXTRA_GROUP(2);
	case 49 ... 61:
		return 3;
	case 65 ... 77:
		return 4;
	case 78 ... 80:
		return TSSI_EXTRA_GROUP(4);
	case 81 ... 93:
		return 5;
	case 97 ... 109:
		return 6;
	case 110 ... 112:
		return TSSI_EXTRA_GROUP(6);
	case 113 ... 125:
		return 7;
	case 129 ... 141:
		return 8;
	case 142 ... 144:
		return TSSI_EXTRA_GROUP(8);
	case 145 ... 157:
		return 9;
	case 161 ... 173:
		return 10;
	case 174 ... 176:
		return TSSI_EXTRA_GROUP(10);
	case 177 ... 189:
		return 11;
	case 193 ... 205:
		return 12;
	case 206 ... 208:
		return TSSI_EXTRA_GROUP(12);
	case 209 ... 221:
		return 13;
	case 225 ... 237:
		return 14;
	case 238 ... 240:
		return TSSI_EXTRA_GROUP(14);
	case 241 ... 253:
		return 15;
	}

	return 0;
}

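/* Look up the per-group TSSI de (offset) for the current channel; an
 * "extra" group resolves to the mean of the two adjacent group values.
 */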
static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			    enum rtw89_rf_path path)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	enum rtw89_band band = chan->band_type;
	u8 ch = chan->channel;
	u32 gidx, gidx_1st, gidx_2nd;
	s8 de_1st;
	s8 de_2nd;
	s8 val;

	if (band == RTW89_BAND_2G || band == RTW89_BAND_5G) {
		gidx = _tssi_get_ofdm_group(rtwdev, ch);

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n",
			    path, gidx);

		if (IS_TSSI_EXTRA_GROUP(gidx)) {
			gidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(gidx);
			gidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(gidx);
			de_1st = tssi_info->tssi_mcs[path][gidx_1st];
			de_2nd = tssi_info->tssi_mcs[path][gidx_2nd];
			val = (de_1st + de_2nd) / 2;

			rtw89_debug(rtwdev, RTW89_DBG_TSSI,
				    "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
				    path, val, de_1st, de_2nd);
		} else {
			val = tssi_info->tssi_mcs[path][gidx];

			rtw89_debug(rtwdev, RTW89_DBG_TSSI,
				    "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
		}
	} else {
		gidx = _tssi_get_6g_ofdm_group(rtwdev, ch);

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n",
			    path, gidx);

		if (IS_TSSI_EXTRA_GROUP(gidx)) {
			gidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(gidx);
			gidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(gidx);
			de_1st = tssi_info->tssi_6g_mcs[path][gidx_1st];
			de_2nd = tssi_info->tssi_6g_mcs[path][gidx_2nd];
			val = (de_1st + de_2nd) / 2;

			rtw89_debug(rtwdev, RTW89_DBG_TSSI,
				    "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
				    path, val, de_1st, de_2nd);
		} else {
			val = tssi_info->tssi_6g_mcs[path][gidx];

			rtw89_debug(rtwdev, RTW89_DBG_TSSI,
				    "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
		}
	}

	return val;
}

static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev,
				 enum rtw89_phy_idx phy,
				 enum rtw89_rf_path path)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	enum rtw89_band band = chan->band_type;
	u8 ch = chan->channel;
	u32 tgidx, tgidx_1st, tgidx_2nd;
	s8 tde_1st = 0;
	s8 tde_2nd = 0;
	s8 val;

	if (band == RTW89_BAND_2G || band == RTW89_BAND_5G) {
		tgidx = _tssi_get_trim_group(rtwdev, ch);

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
			    path, tgidx);

		if (IS_TSSI_EXTRA_GROUP(tgidx)) {
			tgidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
			tgidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
			tde_1st = tssi_info->tssi_trim[path][tgidx_1st];
			tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd];
			val = (tde_1st + tde_2nd) / 2;

			rtw89_debug(rtwdev, RTW89_DBG_TSSI,
				    "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
				    path, val, tde_1st, tde_2nd);
		} else {
			val = tssi_info->tssi_trim[path][tgidx];

			rtw89_debug(rtwdev, RTW89_DBG_TSSI,
				    "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
				    path, val);
		}
	} else {
		tgidx = _tssi_get_6g_trim_group(rtwdev, ch);

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
			    path, tgidx);

		if (IS_TSSI_EXTRA_GROUP(tgidx)) {
			tgidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
			tgidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
			tde_1st = tssi_info->tssi_trim_6g[path][tgidx_1st];
			tde_2nd = tssi_info->tssi_trim_6g[path][tgidx_2nd];
			val = (tde_1st + tde_2nd) / 2;

			rtw89_debug(rtwdev, RTW89_DBG_TSSI,
				    "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
				    path, val, tde_1st, tde_2nd);
		} else {
			val = tssi_info->tssi_trim_6g[path][tgidx];

			rtw89_debug(rtwdev, RTW89_DBG_TSSI,
				    "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
				    path, val);
		}
	}

	return val;
}

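/* Combine the efuse-programmed TSSI de values with the trim offsets and
 * write the sums into the CCK and MCS de registers of every path owned
 * by this PHY.
 */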
static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
				  enum rtw89_phy_idx phy)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	u8 ch = chan->channel;
	u8 gidx;
	s8 ofdm_de;
	s8 trim_de;
	s32 val;
	u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C;

	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n",
		    phy, ch);

	if (rtwdev->dbcc_en) {
		if (phy == RTW89_PHY_0) {
			path = RF_PATH_A;
			path_max = RF_PATH_B;
		} else if (phy == RTW89_PHY_1) {
			path = RF_PATH_B;
			path_max = RF_PATH_NUM_8852C;
		}
	}

	for (i = path; i < path_max; i++) {
		gidx = _tssi_get_cck_group(rtwdev, ch);
		trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
		val = tssi_info->tssi_cck[i][gidx] + trim_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d cck[%d]=0x%x trim=0x%x\n",
			    i, gidx, tssi_info->tssi_cck[i][gidx], trim_de);

		rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_long[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_short[i], _TSSI_DE_MASK, val);

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] Set TSSI CCK DE 0x%x[21:12]=0x%x\n",
			    _tssi_de_cck_long[i],
			    rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i],
						  _TSSI_DE_MASK));

		ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i);
		trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
		val = ofdm_de + trim_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs=0x%x trim=0x%x\n",
			    i, ofdm_de, trim_de);

		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_20m[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_40m[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m_80m[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_5m[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_10m[i], _TSSI_DE_MASK, val);

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] Set TSSI MCS DE 0x%x[21:12]=0x%x\n",
			    _tssi_de_mcs_20m[i],
			    rtw89_phy_read32_mask(rtwdev, _tssi_de_mcs_20m[i],
						  _TSSI_DE_MASK));
	}
}

static void rtw8852c_tssi_cont_en(struct rtw89_dev *rtwdev, bool en,
				  enum rtw89_rf_path path)
{
	static const u32 tssi_trk[2] = {0x5818, 0x7818};
	static const u32 tssi_en[2] = {0x5820, 0x7820};

	if (en) {
		rtw89_phy_write32_mask(rtwdev, tssi_trk[path], BIT(30), 0x0);
		rtw89_phy_write32_mask(rtwdev, tssi_en[path], BIT(31), 0x0);
		if (rtwdev->dbcc_en && path == RF_PATH_B)
			_tssi_set_efuse_to_de(rtwdev, RTW89_PHY_1);
		else
			_tssi_set_efuse_to_de(rtwdev, RTW89_PHY_0);
	} else {
		rtw89_phy_write32_mask(rtwdev, tssi_trk[path], BIT(30), 0x1);
		rtw89_phy_write32_mask(rtwdev, tssi_en[path], BIT(31), 0x1);
	}
}

void rtw8852c_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, u8 phy_idx)
{
	if (!rtwdev->dbcc_en) {
		rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_A);
		rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_B);
	} else {
		if (phy_idx == RTW89_PHY_0)
			rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_A);
		else
			rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_B);
	}
}

static void _bw_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
			enum rtw89_bandwidth bw, bool is_dav)
{
	u32 rf_reg18;
	u32 reg_reg18_addr;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===>%s\n", __func__);
	if (is_dav)
		reg_reg18_addr = RR_CFGCH;
	else
		reg_reg18_addr = RR_CFGCH_V1;

	rf_reg18 = rtw89_read_rf(rtwdev, path, reg_reg18_addr, RFREG_MASK);
	rf_reg18 &= ~RR_CFGCH_BW;

	switch (bw) {
	case RTW89_CHANNEL_WIDTH_5:
	case RTW89_CHANNEL_WIDTH_10:
	case RTW89_CHANNEL_WIDTH_20:
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_20M);
		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, 0x3);
		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, 0xf);
		break;
	case RTW89_CHANNEL_WIDTH_40:
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_40M);
		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, 0x3);
		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, 0xf);
		break;
	case RTW89_CHANNEL_WIDTH_80:
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_80M);
		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, 0x2);
		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, 0xd);
		break;
	case RTW89_CHANNEL_WIDTH_160:
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_160M);
		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, 0xb);
		break;
	default:
		break;
	}

	rtw89_write_rf(rtwdev, path, reg_reg18_addr, RFREG_MASK, rf_reg18);
}

static void _ctrl_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		     enum rtw89_bandwidth bw)
{
	bool is_dav;
	u8 kpath, path;
	u32 tmp = 0;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===>%s\n", __func__);
	kpath = _kpath(rtwdev, phy);

	for (path = 0; path < 2; path++) {
		if (!(kpath & BIT(path)))
			continue;

		is_dav = true;
		_bw_setting(rtwdev, path, bw, is_dav);
		is_dav = false;
		_bw_setting(rtwdev, path, bw, is_dav);
		if (rtwdev->dbcc_en)
			continue;

		if (path == RF_PATH_B && rtwdev->hal.cv == CHIP_CAV) {
			rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x0);
			tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
			rtw89_write_rf(rtwdev, RF_PATH_B, RR_APK, RR_APK_MOD, 0x3);
			rtw89_write_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK, tmp);
			fsleep(100);
			rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x1);
		}
	}
}

static void _ch_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
			u8 central_ch, enum rtw89_band band, bool is_dav)
{
	u32 rf_reg18;
	u32 reg_reg18_addr;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===>%s\n", __func__);
	if (is_dav)
		reg_reg18_addr = 0x18;
	else
		reg_reg18_addr = 0x10018;

	rf_reg18 = rtw89_read_rf(rtwdev, path, reg_reg18_addr, RFREG_MASK);
	rf_reg18 &= ~(RR_CFGCH_BAND1 | RR_CFGCH_BAND0 | RR_CFGCH_CH);
	rf_reg18 |= FIELD_PREP(RR_CFGCH_CH, central_ch);

	switch (band) {
	case RTW89_BAND_2G:
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_2G);
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_2G);
		break;
	case RTW89_BAND_5G:
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_5G);
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_5G);
		break;
	case RTW89_BAND_6G:
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_6G);
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_6G);
		break;
	default:
		break;
	}

	rtw89_write_rf(rtwdev, path, reg_reg18_addr, RFREG_MASK, rf_reg18);
	fsleep(100);
}

static void _ctrl_ch(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		     u8 central_ch, enum rtw89_band band)
{
	u8 kpath, path;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===>%s\n", __func__);
	if (band != RTW89_BAND_6G) {
		if ((central_ch > 14 && central_ch < 36) ||
		    (central_ch > 64 && central_ch < 100) ||
		    (central_ch > 144 && central_ch < 149) || central_ch > 177)
			return;
	} else {
		if (central_ch > 253 || central_ch == 2)
			return;
	}

	kpath = _kpath(rtwdev, phy);

	for (path = 0; path < 2; path++) {
		if (kpath & BIT(path)) {
			_ch_setting(rtwdev, path, central_ch, band, true);
			_ch_setting(rtwdev, path, central_ch, band, false);
		}
	}
}

static void _rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		     enum rtw89_bandwidth bw)
{
	u8 kpath;
	u8 path;
	u32 val;

	kpath = _kpath(rtwdev, phy);
	for (path = 0; path < 2; path++) {
		if (!(kpath & BIT(path)))
			continue;

		rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x1);
		rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M2, 0xa);
		switch (bw) {
		case RTW89_CHANNEL_WIDTH_20:
			val = 0x1b;
			break;
		case RTW89_CHANNEL_WIDTH_40:
			val = 0x13;
			break;
		case RTW89_CHANNEL_WIDTH_80:
			val = 0xb;
			break;
		case RTW89_CHANNEL_WIDTH_160:
		default:
			val = 0x3;
			break;
		}
		rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, val);
		rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x0);
	}
}

static void _lck_keep_thermal(struct rtw89_dev *rtwdev)
{
	struct rtw89_lck_info *lck = &rtwdev->lck;
	int path;

	for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
		lck->thermal[path] =
			ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
			    "[LCK] path=%d thermal=0x%x", path, lck->thermal[path]);
	}
}

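/* Kick an LO calibration by toggling the LCK trigger bit around a
 * rewrite of RF register 0x18, then record the thermal values the new
 * result is tied to.
 */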
static void _lck(struct rtw89_dev *rtwdev)
{
	u32 tmp18[2];
	int path = rtwdev->dbcc_en ? 2 : 1;
	int i;

	rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, "[LCK] DO LCK\n");

	tmp18[0] = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
	tmp18[1] = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK);

	for (i = 0; i < path; i++) {
		rtw89_write_rf(rtwdev, i, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1);
		rtw89_write_rf(rtwdev, i, RR_CFGCH, RFREG_MASK, tmp18[i]);
		rtw89_write_rf(rtwdev, i, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0);
	}

	_lck_keep_thermal(rtwdev);
}

#define RTW8852C_LCK_TH 8

void rtw8852c_lck_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_lck_info *lck = &rtwdev->lck;
	u8 cur_thermal;
	int delta;
	int path;

	for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
		cur_thermal =
			ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
		delta = abs((int)cur_thermal - lck->thermal[path]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
			    "[LCK] path=%d current thermal=0x%x delta=0x%x\n",
			    path, cur_thermal, delta);

		if (delta >= RTW8852C_LCK_TH) {
			_lck(rtwdev);
			return;
		}
	}
}

void rtw8852c_lck_init(struct rtw89_dev *rtwdev)
{
	_lck_keep_thermal(rtwdev);
}

static
void rtw8852c_ctrl_bw_ch(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			 u8 central_ch, enum rtw89_band band,
			 enum rtw89_bandwidth bw)
{
	_ctrl_ch(rtwdev, phy, central_ch, band);
	_ctrl_bw(rtwdev, phy, bw);
	_rxbb_bw(rtwdev, phy, bw);
}

void rtw8852c_set_channel_rf(struct rtw89_dev *rtwdev,
			     const struct rtw89_chan *chan,
			     enum rtw89_phy_idx phy_idx)
{
	rtw8852c_ctrl_bw_ch(rtwdev, phy_idx, chan->channel,
			    chan->band_type,
			    chan->band_width);
}

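/* Select an RFK table slot for MCC: reuse a slot whose channel and band
 * already match, otherwise claim the first free slot, falling back to
 * slot 0 when the table is full.
 */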
void rtw8852c_mcc_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
	struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
	DECLARE_BITMAP(map, RTW89_IQK_CHS_NR) = {};
	const struct rtw89_chan *chan;
	enum rtw89_entity_mode mode;
	u8 chan_idx;
	u8 idx;
	u8 i;

	mode = rtw89_get_entity_mode(rtwdev);
	switch (mode) {
	case RTW89_ENTITY_MODE_MCC_PREPARE:
		chan_idx = RTW89_SUB_ENTITY_1;
		break;
	default:
		chan_idx = RTW89_SUB_ENTITY_0;
		break;
	}

	for (i = 0; i <= chan_idx; i++) {
		chan = rtw89_chan_get(rtwdev, i);

		for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) {
			if (rfk_mcc->ch[idx] == chan->channel &&
			    rfk_mcc->band[idx] == chan->band_type) {
				if (i != chan_idx) {
					set_bit(idx, map);
					break;
				}

				goto bottom;
			}
		}
	}

	idx = find_first_zero_bit(map, RTW89_IQK_CHS_NR);
	if (idx == RTW89_IQK_CHS_NR) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "%s: no empty rfk table; force replace the first\n",
			    __func__);
		idx = 0;
	}

	rfk_mcc->ch[idx] = chan->channel;
	rfk_mcc->band[idx] = chan->band_type;

bottom:
	rfk_mcc->table_idx = idx;
}

void rtw8852c_rck(struct rtw89_dev *rtwdev)
{
	u8 path;

	for (path = 0; path < 2; path++)
		_rck(rtwdev, path);
}

void rtw8852c_dack(struct rtw89_dev *rtwdev)
{
	u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START);
	_dac_cal(rtwdev, false);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP);
}

void rtw8852c_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
	u32 tx_en;
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
	rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));

	_iqk_init(rtwdev);
	_iqk(rtwdev, phy_idx, false);

	rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
}

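/* RX DCK with bounded retries: each attempt is verified with
 * _rx_dck_rek_check(), except the last one, which is treated as failed
 * up front and routed through _rx_dck_recover() instead.
 */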
#define RXDCK_VER_8852C 0xe

static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		    bool is_afe, u8 retry_limit)
{
	struct rtw89_rx_dck_info *rx_dck = &rtwdev->rx_dck;
	u8 path, kpath;
	u32 rf_reg5;
	bool is_fail;
	u8 rek_cnt;

	kpath = _kpath(rtwdev, phy);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[RX_DCK] ****** RXDCK Start (Ver: 0x%x, Cv: %d) ******\n",
		    RXDCK_VER_8852C, rtwdev->hal.cv);

	for (path = 0; path < 2; path++) {
		rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);
		if (!(kpath & BIT(path)))
			continue;

		if (rtwdev->is_tssi_mode[path])
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13),
					       B_P0_TSSI_TRK_EN, 0x1);
		rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_LO_SEL, rtwdev->dbcc_en);

		for (rek_cnt = 0; rek_cnt < retry_limit; rek_cnt++) {
			_set_rx_dck(rtwdev, phy, path, is_afe);

			/* To reduce IO of dck_rek_check(), the last try is seen
			 * as failure always, and then do recovery procedure.
			 */
			if (rek_cnt == retry_limit - 1) {
				_rx_dck_recover(rtwdev, path);
				break;
			}

			is_fail = _rx_dck_rek_check(rtwdev, path);
			if (!is_fail)
				break;
		}

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RX_DCK] rek_cnt[%d]=%d",
			    path, rek_cnt);

		rx_dck->thermal[path] = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
		rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);

		if (rtwdev->is_tssi_mode[path])
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13),
					       B_P0_TSSI_TRK_EN, 0x0);
	}
}

void rtw8852c_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_afe)
{
	_rx_dck(rtwdev, phy, is_afe, 1);
}

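/* Thermal-triggered RX DCK: once any path drifts by RTW8852C_RX_DCK_TH
 * thermal units, redo the calibration on a substitute channel computed
 * by _rx_dck_channel_calc() and then restore the original channel.
 */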
#define RTW8852C_RX_DCK_TH 12

void rtw8852c_rx_dck_track(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	struct rtw89_rx_dck_info *rx_dck = &rtwdev->rx_dck;
	enum rtw89_phy_idx phy_idx = RTW89_PHY_0;
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
	u8 dck_channel;
	u8 cur_thermal;
	u32 tx_en;
	int delta;
	int path;

	if (chan->band_type == RTW89_BAND_2G)
		return;

	if (rtwdev->scanning)
		return;

	for (path = 0; path < RF_PATH_NUM_8852C; path++) {
		cur_thermal =
			ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
		delta = abs((int)cur_thermal - rx_dck->thermal[path]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
			    "[RX_DCK] path=%d current thermal=0x%x delta=0x%x\n",
			    path, cur_thermal, delta);

		if (delta >= RTW8852C_RX_DCK_TH)
			goto trigger_rx_dck;
	}

	return;

trigger_rx_dck:
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START);
	rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);

	for (path = 0; path < RF_PATH_NUM_8852C; path++) {
		dck_channel = _rx_dck_channel_calc(rtwdev, chan);
		_ctrl_ch(rtwdev, RTW89_PHY_0, dck_channel, chan->band_type);
	}

	_rx_dck(rtwdev, RTW89_PHY_0, false, 20);

	for (path = 0; path < RF_PATH_NUM_8852C; path++)
		_ctrl_ch(rtwdev, RTW89_PHY_0, chan->channel, chan->band_type);

	rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP);
}

  3652. void rtw8852c_dpk_init(struct rtw89_dev *rtwdev)
  3653. {
  3654. struct rtw89_dpk_info *dpk = &rtwdev->dpk;
  3655. dpk->is_dpk_enable = true;
  3656. dpk->is_dpk_reload_en = false;
  3657. }
  3658. void rtw8852c_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
  3659. {
  3660. u32 tx_en;
  3661. u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
  3662. rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
  3663. rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
  3664. _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
  3665. _dpk(rtwdev, phy_idx, false);
  3666. rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
  3667. rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
  3668. }
  3669. void rtw8852c_dpk_track(struct rtw89_dev *rtwdev)
  3670. {
  3671. _dpk_track(rtwdev);
  3672. }
void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
        u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C;

        rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n", __func__, phy);

        if (rtwdev->dbcc_en) {
                if (phy == RTW89_PHY_0) {
                        path = RF_PATH_A;
                        path_max = RF_PATH_B;
                } else if (phy == RTW89_PHY_1) {
                        path = RF_PATH_B;
                        path_max = RF_PATH_NUM_8852C;
                }
        }

        _tssi_disable(rtwdev, phy);

        for (i = path; i < path_max; i++) {
                _tssi_set_sys(rtwdev, phy, i);
                _tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i);
                _tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i);
                _tssi_set_dck(rtwdev, phy, i);
                _tssi_set_bbgain_split(rtwdev, phy, i);
                _tssi_set_tmeter_tbl(rtwdev, phy, i);
                _tssi_slope_cal_org(rtwdev, phy, i);
                _tssi_set_aligk_default(rtwdev, phy, i);
                _tssi_set_slope(rtwdev, phy, i);
                _tssi_run_slope(rtwdev, phy, i);
        }

        _tssi_enable(rtwdev, phy);
        _tssi_set_efuse_to_de(rtwdev, phy);
}
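/*
 * Lightweight TSSI re-init used around channel changes during scan: it only
 * reruns the channel-dependent steps of the full sequence above, and bails
 * out unless TSSI mode is already active on both paths.
 */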
void rtw8852c_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
        u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C;

        rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n",
                    __func__, phy);

        if (!rtwdev->is_tssi_mode[RF_PATH_A])
                return;

        if (!rtwdev->is_tssi_mode[RF_PATH_B])
                return;

        if (rtwdev->dbcc_en) {
                if (phy == RTW89_PHY_0) {
                        path = RF_PATH_A;
                        path_max = RF_PATH_B;
                } else if (phy == RTW89_PHY_1) {
                        path = RF_PATH_B;
                        path_max = RF_PATH_NUM_8852C;
                }
        }

        _tssi_disable(rtwdev, phy);

        for (i = path; i < path_max; i++) {
                _tssi_set_sys(rtwdev, phy, i);
                _tssi_set_dck(rtwdev, phy, i);
                _tssi_set_tmeter_tbl(rtwdev, phy, i);
                _tssi_slope_cal_org(rtwdev, phy, i);
                _tssi_set_aligk_default(rtwdev, phy, i);
        }

        _tssi_enable(rtwdev, phy);
        _tssi_set_efuse_to_de(rtwdev, phy);
}
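/*
 * Latch a usable default TX AGC offset per path when a scan starts (reading
 * the BB report up to six times, since it may transiently read back as
 * zero), then restore it when the scan ends, toggling the offset-enable bit
 * so the restored value takes effect.
 */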
static void rtw8852c_tssi_default_txagc(struct rtw89_dev *rtwdev,
                                        enum rtw89_phy_idx phy, bool enable)
{
        struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
        u8 i;

        if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B])
                return;

        if (enable) {
                /* SCAN_START */
                if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, B_TXAGC_BB_OFT) != 0xc000 &&
                    rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, B_TXAGC_BB_OFT) != 0x0) {
                        for (i = 0; i < 6; i++) {
                                tssi_info->default_txagc_offset[RF_PATH_A] =
                                        rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB,
                                                              B_TXAGC_BB);
                                if (tssi_info->default_txagc_offset[RF_PATH_A])
                                        break;
                        }
                }

                if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, B_TXAGC_BB_S1_OFT) != 0xc000 &&
                    rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, B_TXAGC_BB_S1_OFT) != 0x0) {
                        for (i = 0; i < 6; i++) {
                                tssi_info->default_txagc_offset[RF_PATH_B] =
                                        rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1,
                                                              B_TXAGC_BB_S1);
                                if (tssi_info->default_txagc_offset[RF_PATH_B])
                                        break;
                        }
                }
        } else {
                /* SCAN_END */
                rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT,
                                       tssi_info->default_txagc_offset[RF_PATH_A]);
                rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT,
                                       tssi_info->default_txagc_offset[RF_PATH_B]);

                rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0);
                rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x1);

                rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x0);
                rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x1);
        }
}
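/* Called from the common scan flow so the TSSI state survives the scan. */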
void rtw8852c_wifi_scan_notify(struct rtw89_dev *rtwdev,
                               bool scan_start, enum rtw89_phy_idx phy_idx)
{
        rtw8852c_tssi_default_txagc(rtwdev, phy_idx, scan_start);
}
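/*
 * Chanctx state callback: DPK output is only valid for the channel it was
 * trained on, so it is switched off for the duration of multi-channel
 * concurrent (MCC) operation and re-trained once MCC stops.
 */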
void rtw8852c_rfk_chanctx_cb(struct rtw89_dev *rtwdev,
                             enum rtw89_chanctx_state state)
{
        struct rtw89_dpk_info *dpk = &rtwdev->dpk;
        u8 path;

        switch (state) {
        case RTW89_CHANCTX_STATE_MCC_START:
                dpk->is_dpk_enable = false;
                for (path = 0; path < RTW8852C_DPK_RF_PATH; path++)
                        _dpk_onoff(rtwdev, path, false);
                break;
        case RTW89_CHANCTX_STATE_MCC_STOP:
                dpk->is_dpk_enable = true;
                for (path = 0; path < RTW8852C_DPK_RF_PATH; path++)
                        _dpk_onoff(rtwdev, path, false);
                rtw8852c_dpk(rtwdev, RTW89_PHY_0);
                break;
        default:
                break;
        }
}
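/*
 * For orientation: the public entry points in this file are not called
 * directly; the chip descriptor in rtw8852c.c wires them into the common
 * rtw89 core. A minimal sketch of that wiring follows. The ops-table layout
 * and wrapper names here are illustrative assumptions, not the exact
 * definitions from rtw8852c.c.
 */
struct example_rfk_ops {
        void (*rfk_track)(struct rtw89_dev *rtwdev);
        void (*scan_notify)(struct rtw89_dev *rtwdev, bool scan_start,
                            enum rtw89_phy_idx phy_idx);
};

static void __maybe_unused example_rfk_track(struct rtw89_dev *rtwdev)
{
        /* Periodic tracking work: thermal-driven RX DCK plus DPK tracking. */
        rtw8852c_rx_dck_track(rtwdev);
        rtw8852c_dpk_track(rtwdev);
}

static const struct example_rfk_ops example_ops __maybe_unused = {
        .rfk_track = example_rfk_track,
        .scan_notify = rtw8852c_wifi_scan_notify,
};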