// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2019-2022 Realtek Corporation
 */

#include "coex.h"
#include "debug.h"
#include "mac.h"
#include "phy.h"
#include "reg.h"
#include "rtw8852b.h"
#include "rtw8852b_rfk.h"
#include "rtw8852b_rfk_table.h"
#include "rtw8852b_table.h"

#define RTW8852B_RXDCK_VER 0x1
#define RTW8852B_IQK_VER 0x2a
#define RTW8852B_IQK_SS 2
#define RTW8852B_RXK_GROUP_NR 4
#define RTW8852B_TSSI_PATH_NR 2
#define RTW8852B_RF_REL_VERSION 34
#define RTW8852B_DPK_VER 0x0d
#define RTW8852B_DPK_RF_PATH 2
#define RTW8852B_DPK_KIP_REG_NUM 2

#define _TSSI_DE_MASK GENMASK(21, 12)
#define ADDC_T_AVG 100

#define DPK_TXAGC_LOWER 0x2e
#define DPK_TXAGC_UPPER 0x3f
#define DPK_TXAGC_INVAL 0xff

#define RFREG_MASKRXBB 0x003e0
#define RFREG_MASKMODE 0xf0000

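/* IDs of the one-shot calibration commands issued to the on-chip
 * calibration processor (KIP/NCTL); the D_* entries appear to be the
 * DPK-agent variants of the same steps.
 */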
enum rtw8852b_dpk_id {
	LBK_RXIQK = 0x06,
	SYNC = 0x10,
	MDPK_IDL = 0x11,
	MDPK_MPA = 0x12,
	GAIN_LOSS = 0x13,
	GAIN_CAL = 0x14,
	DPK_RXAGC = 0x15,
	KIP_PRESET = 0x16,
	KIP_RESTORE = 0x17,
	DPK_TXAGC = 0x19,
	D_KIP_PRESET = 0x28,
	D_TXAGC = 0x29,
	D_RXAGC = 0x2a,
	D_SYNC = 0x2b,
	D_GAIN_LOSS = 0x2c,
	D_MDPK_IDL = 0x2d,
	D_GAIN_NORM = 0x2f,
	D_KIP_THERMAL = 0x30,
	D_KIP_RESTORE = 0x31
};

enum dpk_agc_step {
	DPK_AGC_STEP_SYNC_DGAIN,
	DPK_AGC_STEP_GAIN_ADJ,
	DPK_AGC_STEP_GAIN_LOSS_IDX,
	DPK_AGC_STEP_GL_GT_CRITERION,
	DPK_AGC_STEP_GL_LT_CRITERION,
	DPK_AGC_STEP_SET_TX_GAIN,
};

enum rtw8852b_iqk_type {
	ID_TXAGC = 0x0,
	ID_FLOK_COARSE = 0x1,
	ID_FLOK_FINE = 0x2,
	ID_TXK = 0x3,
	ID_RXAGC = 0x4,
	ID_RXK = 0x5,
	ID_NBTXK = 0x6,
	ID_NBRXK = 0x7,
	ID_FLOK_VBUFFER = 0x8,
	ID_A_FLOK_COARSE = 0x9,
	ID_G_FLOK_COARSE = 0xa,
	ID_A_FLOK_FINE = 0xb,
	ID_G_FLOK_FINE = 0xc,
	ID_IQK_RESTORE = 0x10,
};

static const u32 _tssi_trigger[RTW8852B_TSSI_PATH_NR] = {0x5820, 0x7820};
static const u32 _tssi_cw_rpt_addr[RTW8852B_TSSI_PATH_NR] = {0x1c18, 0x3c18};
static const u32 _tssi_cw_default_addr[RTW8852B_TSSI_PATH_NR][4] = {
	{0x5634, 0x5630, 0x5630, 0x5630},
	{0x7634, 0x7630, 0x7630, 0x7630}};
static const u32 _tssi_cw_default_mask[4] = {
	0x000003ff, 0x3ff00000, 0x000ffc00, 0x000003ff};
static const u32 _tssi_de_cck_long[RF_PATH_NUM_8852B] = {0x5858, 0x7858};
static const u32 _tssi_de_cck_short[RF_PATH_NUM_8852B] = {0x5860, 0x7860};
static const u32 _tssi_de_mcs_20m[RF_PATH_NUM_8852B] = {0x5838, 0x7838};
static const u32 _tssi_de_mcs_40m[RF_PATH_NUM_8852B] = {0x5840, 0x7840};
static const u32 _tssi_de_mcs_80m[RF_PATH_NUM_8852B] = {0x5848, 0x7848};
static const u32 _tssi_de_mcs_80m_80m[RF_PATH_NUM_8852B] = {0x5850, 0x7850};
static const u32 _tssi_de_mcs_5m[RF_PATH_NUM_8852B] = {0x5828, 0x7828};
static const u32 _tssi_de_mcs_10m[RF_PATH_NUM_8852B] = {0x5830, 0x7830};
static const u32 _a_idxrxgain[RTW8852B_RXK_GROUP_NR] = {0x190, 0x198, 0x350, 0x352};
static const u32 _a_idxattc2[RTW8852B_RXK_GROUP_NR] = {0x0f, 0x0f, 0x3f, 0x7f};
static const u32 _a_idxattc1[RTW8852B_RXK_GROUP_NR] = {0x3, 0x1, 0x0, 0x0};
static const u32 _g_idxrxgain[RTW8852B_RXK_GROUP_NR] = {0x212, 0x21c, 0x350, 0x360};
static const u32 _g_idxattc2[RTW8852B_RXK_GROUP_NR] = {0x00, 0x00, 0x28, 0x5f};
static const u32 _g_idxattc1[RTW8852B_RXK_GROUP_NR] = {0x3, 0x3, 0x2, 0x1};
static const u32 _a_power_range[RTW8852B_RXK_GROUP_NR] = {0x0, 0x0, 0x0, 0x0};
static const u32 _a_track_range[RTW8852B_RXK_GROUP_NR] = {0x3, 0x3, 0x6, 0x6};
static const u32 _a_gain_bb[RTW8852B_RXK_GROUP_NR] = {0x08, 0x0e, 0x06, 0x0e};
static const u32 _a_itqt[RTW8852B_RXK_GROUP_NR] = {0x12, 0x12, 0x12, 0x1b};
static const u32 _g_power_range[RTW8852B_RXK_GROUP_NR] = {0x0, 0x0, 0x0, 0x0};
static const u32 _g_track_range[RTW8852B_RXK_GROUP_NR] = {0x4, 0x4, 0x6, 0x6};
static const u32 _g_gain_bb[RTW8852B_RXK_GROUP_NR] = {0x08, 0x0e, 0x06, 0x0e};
static const u32 _g_itqt[RTW8852B_RXK_GROUP_NR] = {0x09, 0x12, 0x1b, 0x24};

static const u32 rtw8852b_backup_bb_regs[] = {0x2344, 0x5800, 0x7800};
static const u32 rtw8852b_backup_rf_regs[] = {
	0xde, 0xdf, 0x8b, 0x90, 0x97, 0x85, 0x1e, 0x0, 0x2, 0x5, 0x10005
};

#define BACKUP_BB_REGS_NR ARRAY_SIZE(rtw8852b_backup_bb_regs)
#define BACKUP_RF_REGS_NR ARRAY_SIZE(rtw8852b_backup_rf_regs)

static const struct rtw89_reg3_def rtw8852b_set_nondbcc_path01[] = {
	{0x20fc, 0xffff0000, 0x0303},
	{0x5864, 0x18000000, 0x3},
	{0x7864, 0x18000000, 0x3},
	{0x12b8, 0x40000000, 0x1},
	{0x32b8, 0x40000000, 0x1},
	{0x030c, 0xff000000, 0x13},
	{0x032c, 0xffff0000, 0x0041},
	{0x12b8, 0x10000000, 0x1},
	{0x58c8, 0x01000000, 0x1},
	{0x78c8, 0x01000000, 0x1},
	{0x5864, 0xc0000000, 0x3},
	{0x7864, 0xc0000000, 0x3},
	{0x2008, 0x01ffffff, 0x1ffffff},
	{0x0c1c, 0x00000004, 0x1},
	{0x0700, 0x08000000, 0x1},
	{0x0c70, 0x000003ff, 0x3ff},
	{0x0c60, 0x00000003, 0x3},
	{0x0c6c, 0x00000001, 0x1},
	{0x58ac, 0x08000000, 0x1},
	{0x78ac, 0x08000000, 0x1},
	{0x0c3c, 0x00000200, 0x1},
	{0x2344, 0x80000000, 0x1},
	{0x4490, 0x80000000, 0x1},
	{0x12a0, 0x00007000, 0x7},
	{0x12a0, 0x00008000, 0x1},
	{0x12a0, 0x00070000, 0x3},
	{0x12a0, 0x00080000, 0x1},
	{0x32a0, 0x00070000, 0x3},
	{0x32a0, 0x00080000, 0x1},
	{0x0700, 0x01000000, 0x1},
	{0x0700, 0x06000000, 0x2},
	{0x20fc, 0xffff0000, 0x3333},
};

static const struct rtw89_reg3_def rtw8852b_restore_nondbcc_path01[] = {
	{0x20fc, 0xffff0000, 0x0303},
	{0x12b8, 0x40000000, 0x0},
	{0x32b8, 0x40000000, 0x0},
	{0x5864, 0xc0000000, 0x0},
	{0x7864, 0xc0000000, 0x0},
	{0x2008, 0x01ffffff, 0x0000000},
	{0x0c1c, 0x00000004, 0x0},
	{0x0700, 0x08000000, 0x0},
	{0x0c70, 0x0000001f, 0x03},
	{0x0c70, 0x000003e0, 0x03},
	{0x12a0, 0x000ff000, 0x00},
	{0x32a0, 0x000ff000, 0x00},
	{0x0700, 0x07000000, 0x0},
	{0x20fc, 0xffff0000, 0x0000},
	{0x58c8, 0x01000000, 0x0},
	{0x78c8, 0x01000000, 0x0},
	{0x0c3c, 0x00000200, 0x0},
	{0x2344, 0x80000000, 0x0},
};

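/* Save/restore helpers for the handful of BB and RF registers
 * (rtw8852b_backup_bb_regs/rtw8852b_backup_rf_regs) that the RFK flows
 * below overwrite.
 */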
static void _rfk_backup_bb_reg(struct rtw89_dev *rtwdev, u32 backup_bb_reg_val[])
{
	u32 i;

	for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
		backup_bb_reg_val[i] =
			rtw89_phy_read32_mask(rtwdev, rtw8852b_backup_bb_regs[i],
					      MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RFK]backup bb reg : %x, value =%x\n",
			    rtw8852b_backup_bb_regs[i], backup_bb_reg_val[i]);
	}
}

static void _rfk_backup_rf_reg(struct rtw89_dev *rtwdev, u32 backup_rf_reg_val[],
			       u8 rf_path)
{
	u32 i;

	for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
		backup_rf_reg_val[i] =
			rtw89_read_rf(rtwdev, rf_path,
				      rtw8852b_backup_rf_regs[i], RFREG_MASK);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RFK]backup rf S%d reg : %x, value =%x\n", rf_path,
			    rtw8852b_backup_rf_regs[i], backup_rf_reg_val[i]);
	}
}

static void _rfk_restore_bb_reg(struct rtw89_dev *rtwdev,
				const u32 backup_bb_reg_val[])
{
	u32 i;

	for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
		rtw89_phy_write32_mask(rtwdev, rtw8852b_backup_bb_regs[i],
				       MASKDWORD, backup_bb_reg_val[i]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RFK]restore bb reg : %x, value =%x\n",
			    rtw8852b_backup_bb_regs[i], backup_bb_reg_val[i]);
	}
}

static void _rfk_restore_rf_reg(struct rtw89_dev *rtwdev,
				const u32 backup_rf_reg_val[], u8 rf_path)
{
	u32 i;

	for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
		rtw89_write_rf(rtwdev, rf_path, rtw8852b_backup_rf_regs[i],
			       RFREG_MASK, backup_rf_reg_val[i]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RFK]restore rf S%d reg: %x, value =%x\n", rf_path,
			    rtw8852b_backup_rf_regs[i], backup_rf_reg_val[i]);
	}
}

static void _rfk_rf_direct_cntrl(struct rtw89_dev *rtwdev,
				 enum rtw89_rf_path path, bool is_bybb)
{
	if (is_bybb)
		rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1);
	else
		rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
}

static void _rfk_drf_direct_cntrl(struct rtw89_dev *rtwdev,
				  enum rtw89_rf_path path, bool is_bybb)
{
	if (is_bybb)
		rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x1);
	else
		rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0);
}

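/* Poll 0xbff8 for the 0x55 "done" pattern after a one-shot command and
 * read the fail flag from the NCTL report register. Returns true on
 * failure; a timeout also counts as failure since "fail" keeps its
 * initial value in that case.
 */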
static bool _iqk_check_cal(struct rtw89_dev *rtwdev, u8 path)
{
	bool fail = true;
	u32 val;
	int ret;

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
				       1, 8200, false, rtwdev, 0xbff8, MASKBYTE0);
	if (ret)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]NCTL1 IQK timeout!!!\n");

	udelay(200);

	if (!ret)
		fail = rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, MASKBYTE0, 0x0);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, ret=%d\n", path, ret);
	val = rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x8008 = 0x%x\n", path, val);

	return fail;
}

static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
	u8 val;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]dbcc_en: %x,PHY%d\n",
		    rtwdev->dbcc_en, phy_idx);

	if (!rtwdev->dbcc_en) {
		val = RF_AB;
	} else {
		if (phy_idx == RTW89_PHY_0)
			val = RF_A;
		else
			val = RF_B;
	}

	return val;
}

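/* RX DCK (receiver DC offset calibration): _set_rx_dck() pulses the
 * RR_DCK trigger on one path; _rx_dck() runs it per path with the RF
 * parked in RX mode, toggling the per-path TSSI tracking control around
 * the calibration.
 */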
static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			enum rtw89_rf_path path)
{
	rtw89_write_rf(rtwdev, path, RR_DCK1, RR_DCK1_CLR, 0x0);
	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0);
	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x1);
	mdelay(1);
}

static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	u8 path, dck_tune;
	u32 rf_reg5;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[RX_DCK] ****** RXDCK Start (Ver: 0x%x, CV : 0x%x) ******\n",
		    RTW8852B_RXDCK_VER, rtwdev->hal.cv);

	for (path = 0; path < RF_PATH_NUM_8852B; path++) {
		rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);
		dck_tune = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_FINE);

		if (rtwdev->is_tssi_mode[path])
			rtw89_phy_write32_mask(rtwdev,
					       R_P0_TSSI_TRK + (path << 13),
					       B_P0_TSSI_TRK_EN, 0x1);

		rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
		rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, 0x0);
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
		_set_rx_dck(rtwdev, phy, path);
		rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, dck_tune);
		rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);

		if (rtwdev->is_tssi_mode[path])
			rtw89_phy_write32_mask(rtwdev,
					       R_P0_TSSI_TRK + (path << 13),
					       B_P0_TSSI_TRK_EN, 0x0);
	}
}

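/* RCK (RF RC calibration): trigger via RR_RCKC, poll RR_RCKS for
 * completion, then write the resulting code back into RR_RCKC.
 */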
static void _rck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
	u32 rf_reg5;
	u32 rck_val;
	u32 val;
	int ret;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] ====== S%d RCK ======\n", path);

	rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);

	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF0x00 = 0x%05x\n",
		    rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK));

	/* RCK trigger */
	rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, 0x00240);

	ret = read_poll_timeout_atomic(rtw89_read_rf, val, val, 2, 30,
				       false, rtwdev, path, RR_RCKS, BIT(3));

	rck_val = rtw89_read_rf(rtwdev, path, RR_RCKC, RR_RCKC_CA);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] rck_val = 0x%x, ret = %d\n",
		    rck_val, ret);

	rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, rck_val);
	rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF 0x1b = 0x%x\n",
		    rtw89_read_rf(rtwdev, path, RR_RCKC, RFREG_MASK));
}

static void _afe_init(struct rtw89_dev *rtwdev)
{
	rtw89_write32(rtwdev, R_AX_PHYREG_SET, 0xf);

	rtw89_rfk_parser(rtwdev, &rtw8852b_afe_init_defs_tbl);
}

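/* DRCK (digital-die RC calibration): kick the engine, poll for done,
 * latch the hardware result and program it back as the manual value.
 */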
static void _drck(struct rtw89_dev *rtwdev)
{
	u32 rck_d;
	u32 val;
	int ret;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]Ddie RCK start!!!\n");
	rtw89_phy_write32_mask(rtwdev, R_DRCK_V1, B_DRCK_V1_KICK, 0x1);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
				       false, rtwdev, R_DRCK_RS, B_DRCK_RS_DONE);
	if (ret)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DRCK timeout\n");

	rtw89_phy_write32_mask(rtwdev, R_DRCK_V1, B_DRCK_V1_KICK, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_DRCK_FH, B_DRCK_LAT, 0x1);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_DRCK_FH, B_DRCK_LAT, 0x0);

	rck_d = rtw89_phy_read32_mask(rtwdev, R_DRCK_RS, B_DRCK_RS_LPS);
	rtw89_phy_write32_mask(rtwdev, R_DRCK_V1, B_DRCK_V1_SEL, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_DRCK_V1, B_DRCK_V1_CV, rck_d);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0xc0cc = 0x%x\n",
		    rtw89_phy_read32_mask(rtwdev, R_DRCK_V1, MASKDWORD));
}

static void _addck_backup(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;

	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x0);
	dack->addck_d[0][0] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_A0);
	dack->addck_d[0][1] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_A1);

	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x0);
	dack->addck_d[1][0] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR1, B_ADDCKR1_A0);
	dack->addck_d[1][1] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR1, B_ADDCKR1_A1);
}

static void _addck_reload(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;

	/* S0 */
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0D, B_ADDCK0D_VAL, dack->addck_d[0][0]);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_VAL, dack->addck_d[0][1] >> 6);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0D, B_ADDCK0D_VAL2, dack->addck_d[0][1] & 0x3f);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_MAN, 0x3);

	/* S1 */
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1D, B_ADDCK1D_VAL, dack->addck_d[1][0]);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_VAL, dack->addck_d[1][1] >> 6);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1D, B_ADDCK1D_VAL2, dack->addck_d[1][1] & 0x3f);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_MAN, 0x3);
}

static void _dack_backup_s0(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u8 i;

	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);

	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
		rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_V, i);
		dack->msbk_d[0][0][i] =
			rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0M0);
		rtw89_phy_write32_mask(rtwdev, R_DCOF8, B_DCOF8_V, i);
		dack->msbk_d[0][1][i] =
			rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0M1);
	}

	dack->biask_d[0][0] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS00, B_DACK_BIAS00);
	dack->biask_d[0][1] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS01, B_DACK_BIAS01);

	dack->dadck_d[0][0] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK00, B_DACK_DADCK00);
	dack->dadck_d[0][1] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK01, B_DACK_DADCK01);
}

static void _dack_backup_s1(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u8 i;

	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);

	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
		rtw89_phy_write32_mask(rtwdev, R_DACK10, B_DACK10, i);
		dack->msbk_d[1][0][i] =
			rtw89_phy_read32_mask(rtwdev, R_DACK10S, B_DACK10S);
		rtw89_phy_write32_mask(rtwdev, R_DACK11, B_DACK11, i);
		dack->msbk_d[1][1][i] =
			rtw89_phy_read32_mask(rtwdev, R_DACK11S, B_DACK11S);
	}

	dack->biask_d[1][0] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS10, B_DACK_BIAS10);
	dack->biask_d[1][1] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS11, B_DACK_BIAS11);

	dack->dadck_d[1][0] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK10, B_DACK_DADCK10);
	dack->dadck_d[1][1] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK11, B_DACK_DADCK11);
}

static void _check_addc(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
	s32 dc_re = 0, dc_im = 0;
	u32 tmp;
	u32 i;

	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852b_check_addc_defs_a_tbl,
				 &rtw8852b_check_addc_defs_b_tbl);

	for (i = 0; i < ADDC_T_AVG; i++) {
		tmp = rtw89_phy_read32_mask(rtwdev, R_DBG32_D, MASKDWORD);
		dc_re += sign_extend32(FIELD_GET(0xfff000, tmp), 11);
		dc_im += sign_extend32(FIELD_GET(0xfff, tmp), 11);
	}

	dc_re /= ADDC_T_AVG;
	dc_im /= ADDC_T_AVG;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DACK]S%d,dc_re = 0x%x,dc_im =0x%x\n", path, dc_re, dc_im);
}

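/* ADDCK (ADC DC offset calibration): per path, put the ADC into debug
 * mode, log the residual DC before and after the calibration via
 * _check_addc(), and flag a timeout if the done bit never rises.
 */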
static void _addck(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u32 val;
	int ret;

	/* S0 */
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_MAN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, 0x30, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xf);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, BIT(1), 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0x3);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]before S0 ADDCK\n");
	_check_addc(rtwdev, RF_PATH_A);

	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_TRG, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_TRG, 0x0);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x1);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
				       false, rtwdev, R_ADDCKR0, BIT(0));
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADDCK timeout\n");
		dack->addck_timeout[0] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]ADDCK ret = %d\n", ret);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S0 ADDCK\n");
	_check_addc(rtwdev, RF_PATH_A);

	rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, BIT(1), 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xc);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0);

	/* S1 */
	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xf);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, BIT(1), 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0x3);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]before S1 ADDCK\n");
	_check_addc(rtwdev, RF_PATH_B);

	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_TRG, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_TRG, 0x0);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x1);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
				       false, rtwdev, R_ADDCKR1, BIT(0));
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADDCK timeout\n");
		dack->addck_timeout[1] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]ADDCK ret = %d\n", ret);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S1 ADDCK\n");
	_check_addc(rtwdev, RF_PATH_B);

	rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, BIT(1), 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xc);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0);
}

static void _check_dadc(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852b_check_dadc_en_defs_a_tbl,
				 &rtw8852b_check_dadc_en_defs_b_tbl);

	_check_addc(rtwdev, path);

	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852b_check_dadc_dis_defs_a_tbl,
				 &rtw8852b_check_dadc_dis_defs_b_tbl);
}

static bool _dack_s0_check_done(struct rtw89_dev *rtwdev, bool part1)
{
	if (part1) {
		if (rtw89_phy_read32_mask(rtwdev, R_DACK_S0P0, B_DACK_S0P0_OK) == 0 ||
		    rtw89_phy_read32_mask(rtwdev, R_DACK_S0P1, B_DACK_S0P1_OK) == 0)
			return false;
	} else {
		if (rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0P2_OK) == 0 ||
		    rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0P3_OK) == 0)
			return false;
	}

	return true;
}

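/* DACK proper: per path, run the two-part DAC calibration from the
 * rtw8852b_dack_s*_defs tables, polling _dack_s*_check_done() after each
 * part and backing up the results for later reload.
 */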
static void _dack_s0(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	bool done;
	int ret;

	rtw89_rfk_parser(rtwdev, &rtw8852b_dack_s0_1_defs_tbl);

	ret = read_poll_timeout_atomic(_dack_s0_check_done, done, done, 1, 10000,
				       false, rtwdev, true);
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK timeout\n");
		dack->msbk_timeout[0] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);

	rtw89_rfk_parser(rtwdev, &rtw8852b_dack_s0_2_defs_tbl);

	ret = read_poll_timeout_atomic(_dack_s0_check_done, done, done, 1, 10000,
				       false, rtwdev, false);
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DADCK timeout\n");
		dack->dadck_timeout[0] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);

	rtw89_rfk_parser(rtwdev, &rtw8852b_dack_s0_3_defs_tbl);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S0 DADCK\n");

	_dack_backup_s0(rtwdev);
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0);
}

static bool _dack_s1_check_done(struct rtw89_dev *rtwdev, bool part1)
{
	if (part1) {
		if (rtw89_phy_read32_mask(rtwdev, R_DACK_S1P0, B_DACK_S1P0_OK) == 0 &&
		    rtw89_phy_read32_mask(rtwdev, R_DACK_S1P1, B_DACK_S1P1_OK) == 0)
			return false;
	} else {
		if (rtw89_phy_read32_mask(rtwdev, R_DACK10S, B_DACK_S1P2_OK) == 0 &&
		    rtw89_phy_read32_mask(rtwdev, R_DACK11S, B_DACK_S1P3_OK) == 0)
			return false;
	}

	return true;
}

static void _dack_s1(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	bool done;
	int ret;

	rtw89_rfk_parser(rtwdev, &rtw8852b_dack_s1_1_defs_tbl);

	ret = read_poll_timeout_atomic(_dack_s1_check_done, done, done, 1, 10000,
				       false, rtwdev, true);
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK timeout\n");
		dack->msbk_timeout[1] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);

	rtw89_rfk_parser(rtwdev, &rtw8852b_dack_s1_2_defs_tbl);

	ret = read_poll_timeout_atomic(_dack_s1_check_done, done, done, 1, 10000,
				       false, rtwdev, false);
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 DADCK timeout\n");
		dack->dadck_timeout[1] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);

	rtw89_rfk_parser(rtwdev, &rtw8852b_dack_s1_3_defs_tbl);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S1 DADCK\n");

	_check_dadc(rtwdev, RF_PATH_B);
	_dack_backup_s1(rtwdev);
	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0);
}

static void _dack(struct rtw89_dev *rtwdev)
{
	_dack_s0(rtwdev);
	_dack_s1(rtwdev);
}

static void _dack_dump(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u8 i;
	u8 t;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DACK]S0 ADC_DCK ic = 0x%x, qc = 0x%x\n",
		    dack->addck_d[0][0], dack->addck_d[0][1]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DACK]S1 ADC_DCK ic = 0x%x, qc = 0x%x\n",
		    dack->addck_d[1][0], dack->addck_d[1][1]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n",
		    dack->dadck_d[0][0], dack->dadck_d[0][1]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DACK]S1 DAC_DCK ic = 0x%x, qc = 0x%x\n",
		    dack->dadck_d[1][0], dack->dadck_d[1][1]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DACK]S0 biask ic = 0x%x, qc = 0x%x\n",
		    dack->biask_d[0][0], dack->biask_d[0][1]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DACK]S1 biask ic = 0x%x, qc = 0x%x\n",
		    dack->biask_d[1][0], dack->biask_d[1][1]);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK ic:\n");
	for (i = 0; i < 0x10; i++) {
		t = dack->msbk_d[0][0][i];
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK qc:\n");
	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
		t = dack->msbk_d[0][1][i];
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK ic:\n");
	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
		t = dack->msbk_d[1][0][i];
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK qc:\n");
	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
		t = dack->msbk_d[1][1][i];
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
	}
}

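/* Top-level DACK entry: AFE init, digital-die RCK, ADC DC calibration
 * (with backup and reload), then the DAC calibration itself, saving and
 * restoring the RF mode registers around the whole sequence.
 */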
static void _dac_cal(struct rtw89_dev *rtwdev, bool force)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u32 rf0_0, rf1_0;

	dack->dack_done = false;
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK 0x1\n");
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK start!!!\n");

	rf0_0 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK);
	rf1_0 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK);

	_afe_init(rtwdev);
	_drck(rtwdev);

	rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x337e1);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, 0x337e1);

	_addck(rtwdev);
	_addck_backup(rtwdev);
	_addck_reload(rtwdev);

	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MODOPT, RFREG_MASK, 0x0);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_MODOPT, RFREG_MASK, 0x0);

	_dack(rtwdev);
	_dack_dump(rtwdev);
	dack->dack_done = true;

	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, rf0_0);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, rf1_0);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x1);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x1);

	dack->dack_cnt++;
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK finish!!!\n");
}

static void _iqk_rxk_setting(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u32 tmp;

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
		rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x1);
		tmp = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
		rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, tmp);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
		rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x1);
		tmp = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
		rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, tmp);
		break;
	default:
		break;
	}
}

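/* Issue a single IQK/LOK command to the calibration engine. The command
 * word encodes the calibration type, the path bit and, for ID_TXK/ID_RXK,
 * the channel bandwidth; completion is checked via _iqk_check_cal().
 */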
static bool _iqk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
			  u8 path, u8 ktype)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u32 iqk_cmd;
	bool fail;

	switch (ktype) {
	case ID_FLOK_COARSE:
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
		iqk_cmd = 0x108 | (1 << (4 + path));
		break;
	case ID_FLOK_FINE:
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
		iqk_cmd = 0x208 | (1 << (4 + path));
		break;
	case ID_FLOK_VBUFFER:
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
		iqk_cmd = 0x308 | (1 << (4 + path));
		break;
	case ID_TXK:
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
		iqk_cmd = 0x008 | (1 << (path + 4)) |
			  (((0x8 + iqk_info->iqk_bw[path]) & 0xf) << 8);
		break;
	case ID_RXAGC:
		iqk_cmd = 0x508 | (1 << (4 + path)) | (path << 1);
		break;
	case ID_RXK:
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
		iqk_cmd = 0x008 | (1 << (path + 4)) |
			  (((0xb + iqk_info->iqk_bw[path]) & 0xf) << 8);
		break;
	case ID_NBTXK:
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x011);
		iqk_cmd = 0x408 | (1 << (4 + path));
		break;
	case ID_NBRXK:
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011);
		iqk_cmd = 0x608 | (1 << (4 + path));
		break;
	default:
		return false;
	}

	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, iqk_cmd + 1);
	udelay(1);
	fail = _iqk_check_cal(rtwdev, path);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);

	return fail;
}

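/* Wideband RX IQK: sweep the RTW8852B_RXK_GROUP_NR RX gain groups,
 * program the per-group gain/attenuation, fire ID_RXK for each group and
 * record the per-group fail bits in R_IQKINF; any failure falls back to
 * the default narrowband RX CFIR value.
 */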
static bool _rxk_group_sel(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
			   u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool kfail = false;
	bool fail;
	u8 gp;

	for (gp = 0; gp < RTW8852B_RXK_GROUP_NR; gp++) {
		switch (iqk_info->iqk_band[path]) {
		case RTW89_BAND_2G:
			rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM,
				       _g_idxrxgain[gp]);
			rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C2G,
				       _g_idxattc2[gp]);
			rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C1G,
				       _g_idxattc1[gp]);
			break;
		case RTW89_BAND_5G:
			rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM,
				       _a_idxrxgain[gp]);
			rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_HATT,
				       _a_idxattc2[gp]);
			rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_CC2,
				       _a_idxattc1[gp]);
			break;
		default:
			break;
		}

		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_SEL, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_SET, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_GP_V1, gp);

		fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK);
		rtw89_phy_write32_mask(rtwdev, R_IQKINF,
				       BIT(16 + gp + path * 4), fail);
		kfail |= fail;
	}

	rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x0);

	if (kfail) {
		iqk_info->nb_rxcfir[path] = 0x40000002;
		rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
				       B_IQK_RES_RXCFIR, 0x0);
		iqk_info->is_wb_rxiqk[path] = false;
	} else {
		iqk_info->nb_rxcfir[path] = 0x40000000;
		rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
				       B_IQK_RES_RXCFIR, 0x5);
		iqk_info->is_wb_rxiqk[path] = true;
	}

	return kfail;
}

static bool _iqk_nbrxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
		       u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	const u8 gp = 0x3;
	bool kfail = false;
	bool fail;

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM,
			       _g_idxrxgain[gp]);
		rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C2G,
			       _g_idxattc2[gp]);
		rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C1G,
			       _g_idxattc1[gp]);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM,
			       _a_idxrxgain[gp]);
		rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_HATT,
			       _a_idxattc2[gp]);
		rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_CC2,
			       _a_idxattc1[gp]);
		break;
	default:
		break;
	}

	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SET, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP_V1, gp);
	rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013);
	udelay(1);

	fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK);
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(16 + gp + path * 4), fail);
	kfail |= fail;

	rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x0);

	if (!kfail)
		iqk_info->nb_rxcfir[path] =
			rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD) | 0x2;
	else
		iqk_info->nb_rxcfir[path] = 0x40000002;

	return kfail;
}

static void _iqk_rxclk_setting(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

	if (iqk_info->iqk_bw[path] == RTW89_CHANNEL_WIDTH_80) {
		rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
		udelay(1);
		rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x0f);
		udelay(1);
		rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x03);
		rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa001);
		udelay(1);
		rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa041);
		rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_RXCK_VAL, 0x2);
		rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_RXCK_ON, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P1_RXCK, B_P1_RXCK_VAL, 0x2);
		rtw89_phy_write32_mask(rtwdev, R_P1_RXCK, B_P1_RXCK_ON, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_ON, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_VAL, 0x1);
	} else {
		rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
		udelay(1);
		rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x0f);
		udelay(1);
		rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x03);
		rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa001);
		udelay(1);
		rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa041);
		rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_RXCK_VAL, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_RXCK_ON, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P1_RXCK, B_P1_RXCK_VAL, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P1_RXCK, B_P1_RXCK_ON, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_ON, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_VAL, 0x0);
	}
}

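/* Wideband TX IQK: same group sweep as the RX variant, but programming
 * the TX power/track ranges and BB gain per group and firing ID_TXK.
 */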
  870. static bool _txk_group_sel(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
  871. {
  872. struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
  873. bool kfail = false;
  874. bool fail;
  875. u8 gp;
  876. for (gp = 0x0; gp < RTW8852B_RXK_GROUP_NR; gp++) {
  877. switch (iqk_info->iqk_band[path]) {
  878. case RTW89_BAND_2G:
  879. rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
  880. _g_power_range[gp]);
  881. rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
  882. _g_track_range[gp]);
  883. rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
  884. _g_gain_bb[gp]);
  885. rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
  886. MASKDWORD, _g_itqt[gp]);
  887. break;
  888. case RTW89_BAND_5G:
  889. rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
  890. _a_power_range[gp]);
  891. rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
  892. _a_track_range[gp]);
  893. rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
  894. _a_gain_bb[gp]);
  895. rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
  896. MASKDWORD, _a_itqt[gp]);
  897. break;
  898. default:
  899. break;
  900. }
  901. rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
  902. B_CFIR_LUT_SEL, 0x1);
  903. rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
  904. B_CFIR_LUT_SET, 0x1);
  905. rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
  906. B_CFIR_LUT_G2, 0x0);
  907. rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
  908. B_CFIR_LUT_GP, gp);
  909. rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
  910. fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK);
  911. rtw89_phy_write32_mask(rtwdev, R_IQKINF,
  912. BIT(8 + gp + path * 4), fail);
  913. kfail |= fail;
  914. }
  915. if (kfail) {
  916. iqk_info->nb_txcfir[path] = 0x40000002;
  917. rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
  918. B_IQK_RES_TXCFIR, 0x0);
  919. iqk_info->is_wb_txiqk[path] = false;
  920. } else {
  921. iqk_info->nb_txcfir[path] = 0x40000000;
  922. rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
  923. B_IQK_RES_TXCFIR, 0x5);
  924. iqk_info->is_wb_txiqk[path] = true;
  925. }
  926. return kfail;
  927. }
static bool _iqk_nbtxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool kfail;
	u8 gp = 0x2;

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
			       _g_power_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
			       _g_track_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
			       _g_gain_bb[gp]);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       MASKDWORD, _g_itqt[gp]);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
			       _a_power_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
			       _a_track_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
			       _a_gain_bb[gp]);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       MASKDWORD, _a_itqt[gp]);
		break;
	default:
		break;
	}

	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SET, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G2, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, gp);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
	kfail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);

	if (!kfail)
		iqk_info->nb_txcfir[path] =
			rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8),
					      MASKDWORD) | 0x2;
	else
		iqk_info->nb_txcfir[path] = 0x40000002;

	return kfail;
}

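/* Program the LO-leakage bias (ibias) into the RF LUT: the write window
 * is opened via RR_LUTWE, RR_LUTWA selects the 2G (0x0) or 5G (0x1)
 * entry, and the TX vbuffer DAC is re-enabled afterwards.
 */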
static void _lok_res_table(struct rtw89_dev *rtwdev, u8 path, u8 ibias)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, ibias = %x\n", path, ibias);

	rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x2);
	if (iqk_info->iqk_band[path] == RTW89_BAND_2G)
		rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, 0x0);
	else
		rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, 0x1);
	rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, ibias);
	rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x0);
	rtw89_write_rf(rtwdev, path, RR_TXVBUF, RR_TXVBUF_DACEN, 0x1);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x7c = %x\n", path,
		    rtw89_read_rf(rtwdev, path, RR_TXVBUF, RFREG_MASK));
}

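/* LOK is treated as converged only when both the core and vbuffer I/Q
 * DAC codes land away from the edges of their ranges (0x2..0x1d and
 * 0x2..0x3d respectively); a code pinned at either extreme means the
 * one-shot did not settle.
 */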
static bool _lok_finetune_check(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool is_fail1, is_fail2;
	u32 vbuff_i;
	u32 vbuff_q;
	u32 core_i;
	u32 core_q;
	u32 tmp;
	u8 ch;

	tmp = rtw89_read_rf(rtwdev, path, RR_TXMO, RFREG_MASK);
	core_i = FIELD_GET(RR_TXMO_COI, tmp);
	core_q = FIELD_GET(RR_TXMO_COQ, tmp);
	ch = (iqk_info->iqk_times / 2) % RTW89_IQK_CHS_NR;

	if (core_i < 0x2 || core_i > 0x1d || core_q < 0x2 || core_q > 0x1d)
		is_fail1 = true;
	else
		is_fail1 = false;

	iqk_info->lok_idac[ch][path] = tmp;

	tmp = rtw89_read_rf(rtwdev, path, RR_LOKVB, RFREG_MASK);
	vbuff_i = FIELD_GET(RR_LOKVB_COI, tmp);
	vbuff_q = FIELD_GET(RR_LOKVB_COQ, tmp);

	if (vbuff_i < 0x2 || vbuff_i > 0x3d || vbuff_q < 0x2 || vbuff_q > 0x3d)
		is_fail2 = true;
	else
		is_fail2 = false;

	iqk_info->lok_vbuf[ch][path] = tmp;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]S%x, lok_idac[%x][%x] = 0x%x\n", path, ch, path,
		    iqk_info->lok_idac[ch][path]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]S%x, lok_vbuf[%x][%x] = 0x%x\n", path, ch, path,
		    iqk_info->lok_vbuf[ch][path]);

	return is_fail1 | is_fail2;
}

static bool _iqk_lok(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool tmp;

	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x021);

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x6);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x4);
		break;
	default:
		break;
	}

	/* 2G and 5G use the same TXIG target-gain values from here on, so
	 * the per-band cases collapse into shared ones.
	 */
	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0);
		break;
	default:
		break;
	}

	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, 0x9);
	tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_COARSE);
	iqk_info->lok_cor_fail[0][path] = tmp;

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
		break;
	default:
		break;
	}

	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, 0x24);
	tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_VBUFFER);

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0);
		break;
	default:
		break;
	}

	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, 0x9);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x021);
	tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_FINE);
	iqk_info->lok_fin_fail[0][path] = tmp;

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
		break;
	default:
		break;
	}

	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, 0x24);
	_iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_VBUFFER);

	return _lok_finetune_check(rtwdev, path);
}

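/* Per-band RF front-end setup ahead of TX IQK/LOK: attenuators and the
 * external-LNA switch appear to be parked, the LOK LUT write-enable is
 * raised, and the path is forced into IQK mode via RR_MOD (0x403e).
 */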
static void _iqk_txk_setting(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_XALNA2, RR_XALNA2_SW2, 0x00);
		rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT2, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXG2, RR_TXG2_ATT0, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0);
		rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
		rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M1, 0x00);
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_IQK, 0x403e);
		udelay(1);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_XGLNA2, RR_XGLNA2_SW, 0x00);
		rtw89_write_rf(rtwdev, path, RR_BIASA, RR_BIASA_A, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0);
		rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
		rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M1, 0x80);
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_IQK, 0x403e);
		udelay(1);
		break;
	default:
		break;
	}
}

static void _iqk_txclk_setting(struct rtw89_dev *rtwdev, u8 path)
{
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x1f);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x13);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0001);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0041);
}

static void _iqk_info_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u32 tmp;
	bool flag;

	flag = iqk_info->lok_cor_fail[0][path];
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FCOR << (path * 4), flag);
	flag = iqk_info->lok_fin_fail[0][path];
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FFIN << (path * 4), flag);
	flag = iqk_info->iqk_tx_fail[0][path];
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FTX << (path * 4), flag);
	flag = iqk_info->iqk_rx_fail[0][path];
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_F_RX << (path * 4), flag);

	tmp = rtw89_phy_read32_mask(rtwdev, R_IQK_RES + (path << 8), MASKDWORD);
	iqk_info->bp_iqkenable[path] = tmp;
	tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
	iqk_info->bp_txkresult[path] = tmp;
	tmp = rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD);
	iqk_info->bp_rxkresult[path] = tmp;

	rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_KCNT, iqk_info->iqk_times);

	tmp = rtw89_phy_read32_mask(rtwdev, R_IQKINF, B_IQKINF_FAIL << (path * 4));
	if (tmp)
		iqk_info->iqk_fail_cnt++;

	rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_FCNT << (path * 4),
			       iqk_info->iqk_fail_cnt);
}

static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool lok_is_fail = false;
	const int try = 3;
	u8 ibias = 0x1;
	u8 i;

	_iqk_txclk_setting(rtwdev, path);

	/* LOK */
	for (i = 0; i < try; i++) {
		_lok_res_table(rtwdev, path, ibias++);
		_iqk_txk_setting(rtwdev, path);
		lok_is_fail = _iqk_lok(rtwdev, phy_idx, path);
		if (!lok_is_fail)
			break;
	}

	if (lok_is_fail)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] LOK (%d) fail\n", path);

	/* TXK */
	if (iqk_info->is_nbiqk)
		iqk_info->iqk_tx_fail[0][path] = _iqk_nbtxk(rtwdev, phy_idx, path);
	else
		iqk_info->iqk_tx_fail[0][path] = _txk_group_sel(rtwdev, phy_idx, path);

	/* RX */
	_iqk_rxclk_setting(rtwdev, path);
	_iqk_rxk_setting(rtwdev, path);
	if (iqk_info->is_nbiqk)
		iqk_info->iqk_rx_fail[0][path] = _iqk_nbrxk(rtwdev, phy_idx, path);
	else
		iqk_info->iqk_rx_fail[0][path] = _rxk_group_sel(rtwdev, phy_idx, path);

	_iqk_info_iqk(rtwdev, phy_idx, path);
}

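/* Pick a calibration-table slot for the current channel: reuse an empty
 * slot when one exists, otherwise toggle between the two entries so that
 * MCC channel pairs each keep their own IQK results.
 */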
static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 path)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u32 reg_rf18;
	u32 reg_35c;
	u8 idx;
	bool get_empty_table = false;

	for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) {
		if (iqk_info->iqk_mcc_ch[idx][path] == 0) {
			get_empty_table = true;
			break;
		}
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] (1)idx = %x\n", idx);

	if (!get_empty_table) {
		idx = iqk_info->iqk_table_idx[path] + 1;
		if (idx > 1)
			idx = 0;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] (2)idx = %x\n", idx);

	reg_rf18 = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
	reg_35c = rtw89_phy_read32_mask(rtwdev, R_CIRST, B_CIRST_SYN);

	iqk_info->iqk_band[path] = chan->band_type;
	iqk_info->iqk_bw[path] = chan->band_width;
	iqk_info->iqk_ch[path] = chan->channel;
	iqk_info->iqk_mcc_ch[idx][path] = chan->channel;
	iqk_info->iqk_table_idx[path] = idx;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x18 = 0x%x, idx = %x\n",
		    path, reg_rf18, idx);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x18 = 0x%x\n",
		    path, reg_rf18);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]times = 0x%x, idx = %x\n",
		    iqk_info->iqk_times, idx);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_mcc_ch[%x][%x] = 0x%x\n",
		    idx, path, iqk_info->iqk_mcc_ch[idx][path]);

	if (reg_35c == 0x01)
		iqk_info->syn1to2 = 0x1;
	else
		iqk_info->syn1to2 = 0x0;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]S%x, iqk_info->syn1to2 = 0x%x\n", path,
		    iqk_info->syn1to2);

	rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_VER, RTW8852B_IQK_VER);
	/* 2GHz/5GHz/6GHz = 0/1/2 */
	rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_BAND << (path * 16),
			       iqk_info->iqk_band[path]);
	/* 20/40/80 = 0/1/2 */
	rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_BW << (path * 16),
			       iqk_info->iqk_bw[path]);
	rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_CH << (path * 16),
			       iqk_info->iqk_ch[path]);
}

static void _iqk_start_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	_iqk_by_path(rtwdev, phy_idx, path);
}

static void _iqk_restore(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool fail;

	rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD,
			       iqk_info->nb_txcfir[path]);
	rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD,
			       iqk_info->nb_rxcfir[path]);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD,
			       0x00000e19 + (path << 4));

	fail = _iqk_check_cal(rtwdev, path);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "%s result = %x\n", __func__, fail);

	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000000);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS, B_IQK_RES_K, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_IQRSN, B_IQRSN_K1, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_IQRSN, B_IQRSN_K2, 0x0);
	rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0);
	rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0);
	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0x3);
	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1);
	rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x1);
}

static void _iqk_afebb_restore(struct rtw89_dev *rtwdev,
			       enum rtw89_phy_idx phy_idx, u8 path)
{
	const struct rtw89_reg3_def *def;
	int size;
	u8 kpath;
	int i;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "===> %s\n", __func__);

	kpath = _kpath(rtwdev, phy_idx);

	switch (kpath) {
	case RF_A:
	case RF_B:
		return;
	default:
		size = ARRAY_SIZE(rtw8852b_restore_nondbcc_path01);
		def = rtw8852b_restore_nondbcc_path01;
		break;
	}

	for (i = 0; i < size; i++, def++)
		rtw89_phy_write32_mask(rtwdev, def->addr, def->mask, def->data);
}

static void _iqk_preset(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u8 idx;

	idx = iqk_info->iqk_table_idx[path];
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] (3)idx = %x\n", idx);

	rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), B_COEF_SEL_IQC, idx);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3, idx);

	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x81ff010a);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK](1)S%x, 0x8%x54 = 0x%x\n", path, 1 << path,
		    rtw89_phy_read32_mask(rtwdev, R_CFIR_LUT + (path << 8), MASKDWORD));
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK](1)S%x, 0x8%x04 = 0x%x\n", path, 1 << path,
		    rtw89_phy_read32_mask(rtwdev, R_COEF_SEL + (path << 8), MASKDWORD));
}

static void _iqk_macbb_setting(struct rtw89_dev *rtwdev,
			       enum rtw89_phy_idx phy_idx, u8 path)
{
	const struct rtw89_reg3_def *def;
	int size;
	u8 kpath;
	int i;

	kpath = _kpath(rtwdev, phy_idx);

	switch (kpath) {
	case RF_A:
	case RF_B:
		return;
	default:
		size = ARRAY_SIZE(rtw8852b_set_nondbcc_path01);
		def = rtw8852b_set_nondbcc_path01;
		break;
	}

	for (i = 0; i < size; i++, def++)
		rtw89_phy_write32_mask(rtwdev, def->addr, def->mask, def->data);
}

static void _iqk_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u8 idx, path;

	rtw89_phy_write32_mask(rtwdev, R_IQKINF, MASKDWORD, 0x0);
	if (iqk_info->is_iqk_init)
		return;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
	iqk_info->is_iqk_init = true;
	iqk_info->is_nbiqk = false;
	iqk_info->iqk_fft_en = false;
	iqk_info->iqk_sram_en = false;
	iqk_info->iqk_cfir_en = false;
	iqk_info->iqk_xym_en = false;
	iqk_info->iqk_times = 0x0;

	for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) {
		iqk_info->iqk_channel[idx] = 0x0;
		for (path = 0; path < RTW8852B_IQK_SS; path++) {
			iqk_info->lok_cor_fail[idx][path] = false;
			iqk_info->lok_fin_fail[idx][path] = false;
			iqk_info->iqk_tx_fail[idx][path] = false;
			iqk_info->iqk_rx_fail[idx][path] = false;
			iqk_info->iqk_mcc_ch[idx][path] = 0x0;
			iqk_info->iqk_table_idx[path] = 0x0;
		}
	}
}

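/* Poll (atomically, up to 5 ms per path) until each path in kpath
 * reports RR_MOD != 2 (mode 2 presumably being active TX), so that
 * calibration never races an in-flight transmission.
 */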
static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath)
{
	u32 rf_mode;
	u8 path;
	int ret;

	for (path = 0; path < RF_PATH_MAX; path++) {
		if (!(kpath & BIT(path)))
			continue;

		ret = read_poll_timeout_atomic(rtw89_read_rf, rf_mode,
					       rf_mode != 2, 2, 5000, false,
					       rtwdev, path, RR_MOD, RR_MOD_MASK);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RFK] Wait S%d to Rx mode!! (ret = %d)\n", path, ret);
	}
}

static void _tmac_tx_pause(struct rtw89_dev *rtwdev, enum rtw89_phy_idx band_idx,
			   bool is_pause)
{
	if (!is_pause)
		return;

	_wait_rx_mode(rtwdev, _kpath(rtwdev, band_idx));
}

static void _doiqk(struct rtw89_dev *rtwdev, bool force,
		   enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u32 backup_bb_val[BACKUP_BB_REGS_NR];
	u32 backup_rf_val[RTW8852B_IQK_SS][BACKUP_RF_REGS_NR];
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]==========IQK start!!!!!==========\n");
	iqk_info->iqk_times++;
	iqk_info->version = RTW8852B_IQK_VER;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);
	_iqk_get_ch_info(rtwdev, phy_idx, path);

	_rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]);
	_rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
	_iqk_macbb_setting(rtwdev, phy_idx, path);
	_iqk_preset(rtwdev, path);
	_iqk_start_iqk(rtwdev, phy_idx, path);
	_iqk_restore(rtwdev, path);
	_iqk_afebb_restore(rtwdev, phy_idx, path);
	_rfk_restore_bb_reg(rtwdev, &backup_bb_val[0]);
	_rfk_restore_rf_reg(rtwdev, &backup_rf_val[path][0], path);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}

static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force)
{
	u8 kpath = _kpath(rtwdev, phy_idx);

	switch (kpath) {
	case RF_A:
		_doiqk(rtwdev, force, phy_idx, RF_PATH_A);
		break;
	case RF_B:
		_doiqk(rtwdev, force, phy_idx, RF_PATH_B);
		break;
	case RF_AB:
		_doiqk(rtwdev, force, phy_idx, RF_PATH_A);
		_doiqk(rtwdev, force, phy_idx, RF_PATH_B);
		break;
	default:
		break;
	}
}

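/* DPK (digital pre-distortion calibration) helpers start here. The KIP
 * registers touched during DPK are saved and restored around the
 * calibration by the two helpers below.
 */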
static void _dpk_bkup_kip(struct rtw89_dev *rtwdev, const u32 reg[],
			  u32 reg_bkup[][RTW8852B_DPK_KIP_REG_NUM], u8 path)
{
	u8 i;

	for (i = 0; i < RTW8852B_DPK_KIP_REG_NUM; i++) {
		reg_bkup[path][i] =
			rtw89_phy_read32_mask(rtwdev, reg[i] + (path << 8), MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Backup 0x%x = %x\n",
			    reg[i] + (path << 8), reg_bkup[path][i]);
	}
}

static void _dpk_reload_kip(struct rtw89_dev *rtwdev, const u32 reg[],
			    const u32 reg_bkup[][RTW8852B_DPK_KIP_REG_NUM], u8 path)
{
	u8 i;

	for (i = 0; i < RTW8852B_DPK_KIP_REG_NUM; i++) {
		rtw89_phy_write32_mask(rtwdev, reg[i] + (path << 8), MASKDWORD,
				       reg_bkup[path][i]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Reload 0x%x = %x\n",
			    reg[i] + (path << 8), reg_bkup[path][i]);
	}
}

static u8 _dpk_order_convert(struct rtw89_dev *rtwdev)
{
	u8 order;
	u8 val;

	order = rtw89_phy_read32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP);
	val = 0x3 >> order; /* MDPD order 0/1/2 -> 0x3/0x1/0x0 */

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] convert MDPD order to 0x%x\n", val);

	return val;
}

static void _dpk_onoff(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool off)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u8 val, kidx = dpk->cur_idx[path];

	val = dpk->is_dpk_enable && !off && dpk->bp[path][kidx].path_ok;

	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
			       MASKBYTE3, _dpk_order_convert(rtwdev) << 1 | val);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s !!!\n", path,
		    kidx, dpk->is_dpk_enable && !off ? "enable" : "disable");
}

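/* Kick one DPK sub-command into the NCTL engine and poll for the 0x55
 * "done" token (20 ms window), then arm the report readout and poll for
 * 0x8000 (2 ms window) before decoding the command ID for the log.
 */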
static void _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			  enum rtw89_rf_path path, enum rtw8852b_dpk_id id)
{
	u16 dpk_cmd;
	u32 val;
	int ret;

	dpk_cmd = (id << 8) | (0x19 + (path << 4));
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, dpk_cmd);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
				       1, 20000, false,
				       rtwdev, 0xbff8, MASKBYTE0);
	if (ret)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] one-shot over 20ms!!!!\n");

	udelay(1);

	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00030000);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x8000,
				       1, 2000, false,
				       rtwdev, 0x80fc, MASKLWORD);
	if (ret)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] one-shot over 2ms!!!!\n");

	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, MASKBYTE0, 0x0);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] one-shot for %s = 0x%x\n",
		    id == 0x06 ? "LBK_RXIQK" :
		    id == 0x10 ? "SYNC" :
		    id == 0x11 ? "MDPK_IDL" :
		    id == 0x12 ? "MDPK_MPA" :
		    id == 0x13 ? "GAIN_LOSS" :
		    id == 0x14 ? "PWR_CAL" :
		    id == 0x15 ? "DPK_RXAGC" :
		    id == 0x16 ? "KIP_PRESET" :
		    id == 0x17 ? "KIP_RESTORE" : "DPK_TXAGC",
		    dpk_cmd);
}

static void _dpk_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			enum rtw89_rf_path path)
{
	rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_EN_TIA_IDA, 0x3);
	_set_rx_dck(rtwdev, phy, path);
}

static void _dpk_information(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			     enum rtw89_rf_path path)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u8 kidx = dpk->cur_idx[path];

	dpk->bp[path][kidx].band = chan->band_type;
	dpk->bp[path][kidx].ch = chan->channel;
	dpk->bp[path][kidx].bw = chan->band_width;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] S%d[%d] (PHY%d): TSSI %s/ DBCC %s/ %s/ CH%d/ %s\n",
		    path, dpk->cur_idx[path], phy,
		    rtwdev->is_tssi_mode[path] ? "on" : "off",
		    rtwdev->dbcc_en ? "on" : "off",
		    dpk->bp[path][kidx].band == 0 ? "2G" :
		    dpk->bp[path][kidx].band == 1 ? "5G" : "6G",
		    dpk->bp[path][kidx].ch,
		    dpk->bp[path][kidx].bw == 0 ? "20M" :
		    dpk->bp[path][kidx].bw == 1 ? "40M" : "80M");
}

static void _dpk_bb_afe_setting(struct rtw89_dev *rtwdev,
				enum rtw89_phy_idx phy,
				enum rtw89_rf_path path, u8 kpath)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);

	rtw89_rfk_parser(rtwdev, &rtw8852b_dpk_afe_defs_tbl);

	if (chan->band_width == RTW89_CHANNEL_WIDTH_80) {
		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1, B_P0_CFCH_EX, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_PATH1_BW_SEL_V1, B_PATH1_BW_SEL_EX, 0x1);
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] Set BB/AFE for PHY%d (kpath=%d)\n", phy, kpath);
}

static void _dpk_bb_afe_restore(struct rtw89_dev *rtwdev,
				enum rtw89_phy_idx phy,
				enum rtw89_rf_path path, u8 kpath)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);

	rtw89_rfk_parser(rtwdev, &rtw8852b_dpk_afe_restore_defs_tbl);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] Restore BB/AFE for PHY%d (kpath=%d)\n", phy, kpath);

	if (chan->band_width == RTW89_CHANNEL_WIDTH_80) {
		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1, B_P0_CFCH_EX, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_PATH1_BW_SEL_V1, B_PATH1_BW_SEL_EX, 0x0);
	}
}

static void _dpk_tssi_pause(struct rtw89_dev *rtwdev,
			    enum rtw89_rf_path path, bool is_pause)
{
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13),
			       B_P0_TSSI_TRK_EN, is_pause);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d TSSI %s\n", path,
		    is_pause ? "pause" : "resume");
}

static void _dpk_kip_restore(struct rtw89_dev *rtwdev,
			     enum rtw89_rf_path path)
{
	rtw89_rfk_parser(rtwdev, &rtw8852b_dpk_kip_defs_tbl);

	if (rtwdev->hal.cv > CHIP_CAV)
		rtw89_phy_write32_mask(rtwdev, R_DPD_COM + (path << 8), B_DPD_COM_OF, 0x1);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d restore KIP\n", path);
}

static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			   enum rtw89_rf_path path)
{
	u8 cur_rxbb;
	u32 tmp;

	cur_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB);

	rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_RXCFIR, 0x0);

	tmp = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
	rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, tmp);
	rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKMODE, 0xd);
	rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x1);

	if (cur_rxbb >= 0x11)
		rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x13);
	else if (cur_rxbb <= 0xa)
		rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x00);
	else
		rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x05);

	rtw89_write_rf(rtwdev, path, RR_XGLNA2, RR_XGLNA2_SW, 0x0);
	rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
	rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80014);
	udelay(70);

	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x025);

	_dpk_one_shot(rtwdev, phy, path, LBK_RXIQK);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d LBK RXIQC = 0x%x\n", path,
		    rtw89_phy_read32_mask(rtwdev, R_RXIQC, MASKDWORD));

	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
	rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_KPATH_CFG, B_KPATH_CFG_ED, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_DI, 0x1);
	rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKMODE, 0x5);
}

static void _dpk_get_thermal(struct rtw89_dev *rtwdev, u8 kidx, enum rtw89_rf_path path)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	rtw89_write_rf(rtwdev, path, RR_TM, RR_TM_TRI, 0x1);
	rtw89_write_rf(rtwdev, path, RR_TM, RR_TM_TRI, 0x0);
	rtw89_write_rf(rtwdev, path, RR_TM, RR_TM_TRI, 0x1);

	udelay(200);

	dpk->bp[path][kidx].ther_dpk = rtw89_read_rf(rtwdev, path, RR_TM, RR_TM_VAL);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] thermal@DPK = 0x%x\n",
		    dpk->bp[path][kidx].ther_dpk);
}

static void _dpk_rf_setting(struct rtw89_dev *rtwdev, u8 gain,
			    enum rtw89_rf_path path, u8 kidx)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	if (dpk->bp[path][kidx].band == RTW89_BAND_2G) {
		rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, 0x50220);
		rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_FATT, 0xf2);
		rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1);
	} else {
		rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, 0x50220);
		rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RAA2_SWATT, 0x5);
		rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1);
		rtw89_write_rf(rtwdev, path, RR_RXA_LNA, RFREG_MASK, 0x920FC);
		rtw89_write_rf(rtwdev, path, RR_XALNA2, RFREG_MASK, 0x002C0);
		rtw89_write_rf(rtwdev, path, RR_IQGEN, RFREG_MASK, 0x38800);
	}

	rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_BW, 0x1);
	rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_TXBB, dpk->bp[path][kidx].bw + 1);
	rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_RXBB, 0x0);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] ARF 0x0/0x11/0x1a = 0x%x/ 0x%x/ 0x%x\n",
		    rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK),
		    rtw89_read_rf(rtwdev, path, RR_TXIG, RFREG_MASK),
		    rtw89_read_rf(rtwdev, path, RR_BTC, RFREG_MASK));
}

static void _dpk_bypass_rxcfir(struct rtw89_dev *rtwdev,
			       enum rtw89_rf_path path, bool is_bypass)
{
	if (is_bypass) {
		rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
				       B_RXIQC_BYPASS2, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
				       B_RXIQC_BYPASS, 0x1);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] Bypass RXIQC (0x8%d3c = 0x%x)\n", 1 + path,
			    rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8),
						  MASKDWORD));
	} else {
		rtw89_phy_write32_clr(rtwdev, R_RXIQC + (path << 8), B_RXIQC_BYPASS2);
		rtw89_phy_write32_clr(rtwdev, R_RXIQC + (path << 8), B_RXIQC_BYPASS);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] restore 0x8%d3c = 0x%x\n", 1 + path,
			    rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8),
						  MASKDWORD));
	}
}

static
void _dpk_tpg_sel(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80)
		rtw89_phy_write32_clr(rtwdev, R_TPG_MOD, B_TPG_MOD_F);
	else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40)
		rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x2);
	else
		rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x1);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] TPG_Select for %s\n",
		    dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80 ? "80M" :
		    dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40 ? "40M" : "20M");
}

static void _dpk_table_select(struct rtw89_dev *rtwdev,
			      enum rtw89_rf_path path, u8 kidx, u8 gain)
{
	u8 val;

	val = 0x80 + kidx * 0x20 + gain * 0x10;
	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0 + (path << 8), MASKBYTE3, val);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] table select for Kidx[%d], Gain[%d] (0x%x)\n", kidx,
		    gain, val);
}

static bool _dpk_sync_check(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
{
#define DPK_SYNC_TH_DC_I 200
#define DPK_SYNC_TH_DC_Q 200
#define DPK_SYNC_TH_CORR 170
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u16 dc_i, dc_q;
	u8 corr_val, corr_idx;

	rtw89_phy_write32_clr(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL);

	corr_idx = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORI);
	corr_val = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORV);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] S%d Corr_idx / Corr_val = %d / %d\n",
		    path, corr_idx, corr_val);

	dpk->corr_idx[path][kidx] = corr_idx;
	dpk->corr_val[path][kidx] = corr_val;

	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x9);

	dc_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);
	dc_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCQ);

	dc_i = abs(sign_extend32(dc_i, 11));
	dc_q = abs(sign_extend32(dc_q, 11));

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d DC I/Q = %d / %d\n",
		    path, dc_i, dc_q);

	dpk->dc_i[path][kidx] = dc_i;
	dpk->dc_q[path][kidx] = dc_q;

	return dc_i > DPK_SYNC_TH_DC_I || dc_q > DPK_SYNC_TH_DC_Q ||
	       corr_val < DPK_SYNC_TH_CORR;
}

static bool _dpk_sync(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		      enum rtw89_rf_path path, u8 kidx)
{
	_dpk_one_shot(rtwdev, phy, path, SYNC);

	return _dpk_sync_check(rtwdev, path, kidx);
}

static u16 _dpk_dgain_read(struct rtw89_dev *rtwdev)
{
	u16 dgain;

	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0);

	dgain = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain = 0x%x\n", dgain);

	return dgain;
}

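/* Map the measured digital gain onto a signed RXBB adjustment step via
 * the bnd[] boundary table: values above bnd[1] saturate at +6, values
 * below bnd[14] at -8 (0xf8 interpreted as s8).
 */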
static s8 _dpk_dgain_mapping(struct rtw89_dev *rtwdev, u16 dgain)
{
	static const u16 bnd[15] = {
		0xbf1, 0xaa5, 0x97d, 0x875, 0x789, 0x6b7, 0x5fc, 0x556,
		0x4c1, 0x43d, 0x3c7, 0x35e, 0x2ac, 0x262, 0x220
	};
	s8 offset;

	if (dgain >= bnd[1]) /* the top two brackets both map to +6 */
		offset = 0x6;
	else if (bnd[1] > dgain && dgain >= bnd[2])
		offset = 0x5;
	else if (bnd[2] > dgain && dgain >= bnd[3])
		offset = 0x4;
	else if (bnd[3] > dgain && dgain >= bnd[4])
		offset = 0x3;
	else if (bnd[4] > dgain && dgain >= bnd[5])
		offset = 0x2;
	else if (bnd[5] > dgain && dgain >= bnd[6])
		offset = 0x1;
	else if (bnd[6] > dgain && dgain >= bnd[7])
		offset = 0x0;
	else if (bnd[7] > dgain && dgain >= bnd[8])
		offset = 0xff;
	else if (bnd[8] > dgain && dgain >= bnd[9])
		offset = 0xfe;
	else if (bnd[9] > dgain && dgain >= bnd[10])
		offset = 0xfd;
	else if (bnd[10] > dgain && dgain >= bnd[11])
		offset = 0xfc;
	else if (bnd[11] > dgain && dgain >= bnd[12])
		offset = 0xfb;
	else if (bnd[12] > dgain && dgain >= bnd[13])
		offset = 0xfa;
	else if (bnd[13] > dgain && dgain >= bnd[14])
		offset = 0xf9;
	else if (bnd[14] > dgain)
		offset = 0xf8;
	else
		offset = 0x0;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain offset = %d\n", offset);

	return offset;
}

static u8 _dpk_gainloss_read(struct rtw89_dev *rtwdev)
{
	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x6);
	rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x1);

	return rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_GL);
}

static void _dpk_gainloss(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			  enum rtw89_rf_path path, u8 kidx)
{
	_dpk_table_select(rtwdev, path, kidx, 1);
	_dpk_one_shot(rtwdev, phy, path, GAIN_LOSS);
}

static void _dpk_kip_preset(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			    enum rtw89_rf_path path, u8 kidx)
{
	_dpk_tpg_sel(rtwdev, path, kidx);
	_dpk_one_shot(rtwdev, phy, path, KIP_PRESET);
}

static void _dpk_kip_pwr_clk_on(struct rtw89_dev *rtwdev,
				enum rtw89_rf_path path)
{
	rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x807f030a);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD, 0xce000a08);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] KIP Power/CLK on\n");
}

static void _dpk_kip_set_txagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			       enum rtw89_rf_path path, u8 txagc)
{
	rtw89_write_rf(rtwdev, path, RR_TXAGC, RFREG_MASK, txagc);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	_dpk_one_shot(rtwdev, phy, path, DPK_TXAGC);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] set TXAGC = 0x%x\n", txagc);
}

static void _dpk_kip_set_rxagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			       enum rtw89_rf_path path)
{
	u32 tmp;

	tmp = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
	rtw89_phy_write32_mask(rtwdev, R_KIP_MOD, B_KIP_MOD, tmp);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	_dpk_one_shot(rtwdev, phy, path, DPK_RXAGC);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL_V1, 0x8);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] set RXBB = 0x%x (RF0x0[9:5] = 0x%x)\n",
		    rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_RXBB_V1),
		    rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB));
}

static u8 _dpk_set_offset(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			  enum rtw89_rf_path path, s8 gain_offset)
{
	u8 txagc;

	txagc = rtw89_read_rf(rtwdev, path, RR_TXAGC, RFREG_MASK);

	if (txagc - gain_offset < DPK_TXAGC_LOWER)
		txagc = DPK_TXAGC_LOWER;
	else if (txagc - gain_offset > DPK_TXAGC_UPPER)
		txagc = DPK_TXAGC_UPPER;
	else
		txagc = txagc - gain_offset;

	_dpk_kip_set_txagc(rtwdev, phy, path, txagc);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] tmp_txagc (GL=%d) = 0x%x\n",
		    gain_offset, txagc);

	return txagc;
}

static bool _dpk_pas_read(struct rtw89_dev *rtwdev, bool is_check)
{
	u32 val1_i = 0, val1_q = 0, val2_i = 0, val2_q = 0;
	u8 i;

	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKBYTE2, 0x06);
	rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE2, 0x08);

	if (is_check) {
		rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x00);
		val1_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
		val1_i = abs(sign_extend32(val1_i, 11));
		val1_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
		val1_q = abs(sign_extend32(val1_q, 11));

		rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x1f);
		val2_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
		val2_i = abs(sign_extend32(val2_i, 11));
		val2_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
		val2_q = abs(sign_extend32(val2_q, 11));

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_delta = 0x%x\n",
			    phy_div(val1_i * val1_i + val1_q * val1_q,
				    val2_i * val2_i + val2_q * val2_q));
	} else {
		for (i = 0; i < 32; i++) {
			rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, i);
			rtw89_debug(rtwdev, RTW89_DBG_RFK,
				    "[DPK] PAS_Read[%02d]= 0x%08x\n", i,
				    rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD));
		}
	}

	/* Fail when the first PA sample's power exceeds the last one's by
	 * more than a factor of 8/5 (only meaningful when is_check is set).
	 */
	if (val1_i * val1_i + val1_q * val1_q >=
	    (val2_i * val2_i + val2_q * val2_q) * 8 / 5)
		return true;

	return false;
}

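/* DPK AGC state machine: starting from init_txagc, iterate SYNC/DGAIN
 * measurement, RXBB gain adjustment, and gain-loss readback, nudging
 * TXAGC up or down until the gain-loss index meets the criterion or a
 * bound (0x2e lower, 0x3f upper) is hit. Returns 0xff on sync failure.
 */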
static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		   enum rtw89_rf_path path, u8 kidx, u8 init_txagc,
		   bool loss_only)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	u8 step = DPK_AGC_STEP_SYNC_DGAIN;
	u8 tmp_txagc, tmp_rxbb = 0, tmp_gl_idx = 0;
	u8 goout = 0, agc_cnt = 0, limited_rxbb = 0;
	u16 dgain = 0;
	s8 offset;
	int limit = 200;

	tmp_txagc = init_txagc;

	do {
		switch (step) {
		case DPK_AGC_STEP_SYNC_DGAIN:
			if (_dpk_sync(rtwdev, phy, path, kidx)) {
				tmp_txagc = 0xff;
				goout = 1;
				break;
			}

			dgain = _dpk_dgain_read(rtwdev);

			if (loss_only || limited_rxbb)
				step = DPK_AGC_STEP_GAIN_LOSS_IDX;
			else
				step = DPK_AGC_STEP_GAIN_ADJ;
			break;

		case DPK_AGC_STEP_GAIN_ADJ:
			tmp_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD,
						 RFREG_MASKRXBB);
			offset = _dpk_dgain_mapping(rtwdev, dgain);

			if (tmp_rxbb + offset > 0x1f) {
				tmp_rxbb = 0x1f;
				limited_rxbb = 1;
			} else if (tmp_rxbb + offset < 0) {
				tmp_rxbb = 0;
				limited_rxbb = 1;
			} else {
				tmp_rxbb = tmp_rxbb + offset;
			}

			rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB,
				       tmp_rxbb);
			rtw89_debug(rtwdev, RTW89_DBG_RFK,
				    "[DPK] Adjust RXBB (%d) = 0x%x\n", offset, tmp_rxbb);

			if (offset || agc_cnt == 0) {
				if (chan->band_width < RTW89_CHANNEL_WIDTH_80)
					_dpk_bypass_rxcfir(rtwdev, path, true);
				else
					_dpk_lbk_rxiqk(rtwdev, phy, path);
			}

			if (dgain > 1922 || dgain < 342)
				step = DPK_AGC_STEP_SYNC_DGAIN;
			else
				step = DPK_AGC_STEP_GAIN_LOSS_IDX;

			agc_cnt++;
			break;

		case DPK_AGC_STEP_GAIN_LOSS_IDX:
			_dpk_gainloss(rtwdev, phy, path, kidx);
			tmp_gl_idx = _dpk_gainloss_read(rtwdev);

			if ((tmp_gl_idx == 0 && _dpk_pas_read(rtwdev, true)) ||
			    tmp_gl_idx >= 7)
				step = DPK_AGC_STEP_GL_GT_CRITERION;
			else if (tmp_gl_idx == 0)
				step = DPK_AGC_STEP_GL_LT_CRITERION;
			else
				step = DPK_AGC_STEP_SET_TX_GAIN;
			break;

		case DPK_AGC_STEP_GL_GT_CRITERION:
			if (tmp_txagc == 0x2e) {
				goout = 1;
				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[DPK] Txagc@lower bound!!\n");
			} else {
				tmp_txagc = _dpk_set_offset(rtwdev, phy, path, 0x3);
			}
			step = DPK_AGC_STEP_GAIN_LOSS_IDX;
			agc_cnt++;
			break;

		case DPK_AGC_STEP_GL_LT_CRITERION:
			if (tmp_txagc == 0x3f) {
				goout = 1;
				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[DPK] Txagc@upper bound!!\n");
			} else {
				tmp_txagc = _dpk_set_offset(rtwdev, phy, path, 0xfe);
			}
			step = DPK_AGC_STEP_GAIN_LOSS_IDX;
			agc_cnt++;
			break;

		case DPK_AGC_STEP_SET_TX_GAIN:
			tmp_txagc = _dpk_set_offset(rtwdev, phy, path, tmp_gl_idx);
			goout = 1;
			agc_cnt++;
			break;

		default:
			goout = 1;
			break;
		}
	} while (!goout && agc_cnt < 6 && limit-- > 0);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] Txagc / RXBB for DPK = 0x%x / 0x%x\n", tmp_txagc,
		    tmp_rxbb);

	return tmp_txagc;
}

static void _dpk_set_mdpd_para(struct rtw89_dev *rtwdev, u8 order)
{
	switch (order) {
	case 0:
		rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
		rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x3);
		rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN, 0x1);
		break;
	case 1:
	case 2:
		rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
		rtw89_phy_write32_clr(rtwdev, R_LDL_NORM, B_LDL_NORM_PN);
		rtw89_phy_write32_clr(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN);
		break;
	default:
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] Wrong MDPD order!!(0x%x)\n", order);
		break;
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] Set MDPD order to 0x%x for IDL\n", order);
}

static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			 enum rtw89_rf_path path, u8 kidx, u8 gain)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	if (dpk->bp[path][kidx].bw < RTW89_CHANNEL_WIDTH_80 &&
	    dpk->bp[path][kidx].band == RTW89_BAND_5G)
		_dpk_set_mdpd_para(rtwdev, 0x2);
	else
		_dpk_set_mdpd_para(rtwdev, 0x0);

	_dpk_one_shot(rtwdev, phy, path, MDPK_IDL);
}

static void _dpk_fill_result(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			     enum rtw89_rf_path path, u8 kidx, u8 gain, u8 txagc)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	const u16 pwsf = 0x78;
	u8 gs = dpk->dpk_gs[phy];

	rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8),
			       B_COEF_SEL_MDPD, kidx);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] Fill txagc/ pwsf/ gs = 0x%x/ 0x%x/ 0x%x\n", txagc,
		    pwsf, gs);

	dpk->bp[path][kidx].txagc_dpk = txagc;
	rtw89_phy_write32_mask(rtwdev, R_TXAGC_RFK + (path << 8),
			       0x3F << ((gain << 3) + (kidx << 4)), txagc);

	dpk->bp[path][kidx].pwsf = pwsf;
	rtw89_phy_write32_mask(rtwdev, R_DPD_BND + (path << 8) + (kidx << 2),
			       0x1FF << (gain << 4), pwsf);

	rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x0);

	dpk->bp[path][kidx].gs = gs;
	if (dpk->dpk_gs[phy] == 0x7f)
		rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
				       MASKDWORD, 0x007f7f7f);
	else
		rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
				       MASKDWORD, 0x005b5b5b);

	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
			       B_DPD_ORDER_V1, _dpk_order_convert(rtwdev));
	rtw89_phy_write32_mask(rtwdev, R_DPD_V1 + (path << 8), MASKDWORD, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_SEL, 0x0);
}

static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			      enum rtw89_rf_path path)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	bool is_reload = false;
	u8 idx, cur_band, cur_ch;

	cur_band = chan->band_type;
	cur_ch = chan->channel;

	for (idx = 0; idx < RTW89_DPK_BKUP_NUM; idx++) {
		if (cur_band != dpk->bp[path][idx].band ||
		    cur_ch != dpk->bp[path][idx].ch)
			continue;

		rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8),
				       B_COEF_SEL_MDPD, idx);
		dpk->cur_idx[path] = idx;
		is_reload = true;

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] reload S%d[%d] success\n", path, idx);
	}

	return is_reload;
}

static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		      enum rtw89_rf_path path, u8 gain)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u8 txagc = 0x38, kidx = dpk->cur_idx[path];
	bool is_fail = false;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] ========= S%d[%d] DPK Start =========\n", path, kidx);

	_rfk_rf_direct_cntrl(rtwdev, path, false);
	_rfk_drf_direct_cntrl(rtwdev, path, false);

	_dpk_kip_pwr_clk_on(rtwdev, path);
	_dpk_kip_set_txagc(rtwdev, phy, path, txagc);
	_dpk_rf_setting(rtwdev, gain, path, kidx);
	_dpk_rx_dck(rtwdev, phy, path);

	_dpk_kip_preset(rtwdev, phy, path, kidx);
	_dpk_kip_set_rxagc(rtwdev, phy, path);
	_dpk_table_select(rtwdev, path, kidx, gain);

	txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Adjust txagc = 0x%x\n", txagc);

	if (txagc == 0xff) {
		is_fail = true;
	} else {
		_dpk_get_thermal(rtwdev, kidx, path);

		_dpk_idl_mpa(rtwdev, phy, path, kidx, gain);

		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);

		_dpk_fill_result(rtwdev, phy, path, kidx, gain, txagc);
	}

	dpk->bp[path][kidx].path_ok = !is_fail;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s\n", path, kidx,
		    is_fail ? "Check" : "Success");

	return is_fail;
}

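/* Top-level DPK sequence: optionally reload cached per-channel results,
 * back up BB/RF/KIP state and pause TSSI tracking, run _dpk_main() on
 * each path, then restore everything in reverse order.
 */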
static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
			    enum rtw89_phy_idx phy, u8 kpath)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	static const u32 kip_reg[] = {0x813c, 0x8124, 0x8120};
	u32 kip_bkup[RTW8852B_DPK_RF_PATH][RTW8852B_DPK_KIP_REG_NUM] = {};
	u32 backup_rf_val[RTW8852B_DPK_RF_PATH][BACKUP_RF_REGS_NR];
	u32 backup_bb_val[BACKUP_BB_REGS_NR];
	bool is_fail = true, reloaded[RTW8852B_DPK_RF_PATH] = {};
	u8 path;

	if (dpk->is_dpk_reload_en) {
		for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
			reloaded[path] = _dpk_reload_check(rtwdev, phy, path);
			if (!reloaded[path] && dpk->bp[path][0].ch)
				dpk->cur_idx[path] = !dpk->cur_idx[path];
			else
				_dpk_onoff(rtwdev, path, false);
		}
	} else {
		for (path = 0; path < RTW8852B_DPK_RF_PATH; path++)
			dpk->cur_idx[path] = 0;
	}

	_rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]);

	for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
		_dpk_bkup_kip(rtwdev, kip_reg, kip_bkup, path);
		_rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
		_dpk_information(rtwdev, phy, path);
		if (rtwdev->is_tssi_mode[path])
			_dpk_tssi_pause(rtwdev, path, true);
	}

	_dpk_bb_afe_setting(rtwdev, phy, path, kpath);

	for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
		is_fail = _dpk_main(rtwdev, phy, path, 1);
		_dpk_onoff(rtwdev, path, is_fail);
	}

	_dpk_bb_afe_restore(rtwdev, phy, path, kpath);
	_rfk_restore_bb_reg(rtwdev, &backup_bb_val[0]);

	for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
		_dpk_kip_restore(rtwdev, path);
		_dpk_reload_kip(rtwdev, kip_reg, kip_bkup, path);
		_rfk_restore_rf_reg(rtwdev, &backup_rf_val[path][0], path);
		if (rtwdev->is_tssi_mode[path])
			_dpk_tssi_pause(rtwdev, path, false);
	}
}

static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	struct rtw89_fem_info *fem = &rtwdev->fem;

	if (fem->epa_2g && chan->band_type == RTW89_BAND_2G) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] Skip DPK due to 2G_ext_PA exist!!\n");
		return true;
	} else if (fem->epa_5g && chan->band_type == RTW89_BAND_5G) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] Skip DPK due to 5G_ext_PA exist!!\n");
		return true;
	} else if (fem->epa_6g && chan->band_type == RTW89_BAND_6G) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] Skip DPK due to 6G_ext_PA exist!!\n");
		return true;
	}

	return false;
}

static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	u8 path, kpath;

	kpath = _kpath(rtwdev, phy);

	for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
		if (kpath & BIT(path))
			_dpk_onoff(rtwdev, path, true);
	}
}

static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force)
{
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] ****** DPK Start (Ver: 0x%x, Cv: %d, RF_para: %d) ******\n",
		    RTW8852B_DPK_VER, rtwdev->hal.cv,
		    RTW8852B_RF_REL_VERSION);

	if (_dpk_bypass_check(rtwdev, phy))
		_dpk_force_bypass(rtwdev, phy);
	else
		_dpk_cal_select(rtwdev, force, phy, RF_AB);
}

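/* Runtime DPK tracking: compare the current averaged thermal reading
 * against the value captured at calibration time and scale the stored
 * PWSF entries to compensate, unless tracking is disabled via R_DPK_TRK.
 */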
static void _dpk_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	s8 txagc_bb, txagc_bb_tp, ini_diff = 0, txagc_ofst;
	s8 delta_ther[2] = {};
	u8 trk_idx, txagc_rf;
	u8 path, kidx;
	u16 pwsf[2];
	u8 cur_ther;
	u32 tmp;

	for (path = 0; path < RF_PATH_NUM_8852B; path++) {
		kidx = dpk->cur_idx[path];

		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
			    "[DPK_TRK] ================[S%d[%d] (CH %d)]================\n",
			    path, kidx, dpk->bp[path][kidx].ch);

		cur_ther = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
			    "[DPK_TRK] thermal now = %d\n", cur_ther);

		if (dpk->bp[path][kidx].ch && cur_ther)
			delta_ther[path] = dpk->bp[path][kidx].ther_dpk - cur_ther;

		if (dpk->bp[path][kidx].band == RTW89_BAND_2G)
			delta_ther[path] = delta_ther[path] * 3 / 2;
		else
			delta_ther[path] = delta_ther[path] * 5 / 2;

		txagc_rf = rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13),
						 0x0000003f);

		if (rtwdev->is_tssi_mode[path]) {
			trk_idx = rtw89_read_rf(rtwdev, path, RR_TXA, RR_TXA_TRK);

			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] txagc_RF / track_idx = 0x%x / %d\n",
				    txagc_rf, trk_idx);

			txagc_bb =
				rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13),
						      MASKBYTE2);
			txagc_bb_tp =
				rtw89_phy_read32_mask(rtwdev, R_TXAGC_TP + (path << 13),
						      B_TXAGC_TP);

			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] txagc_bb_tp / txagc_bb = 0x%x / 0x%x\n",
				    txagc_bb_tp, txagc_bb);

			txagc_ofst =
				rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13),
						      MASKBYTE3);

			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] txagc_offset / delta_ther = %d / %d\n",
				    txagc_ofst, delta_ther[path]);

			tmp = rtw89_phy_read32_mask(rtwdev, R_DPD_COM + (path << 8),
						    B_DPD_COM_OF);
			if (tmp == 0x1) {
				txagc_ofst = 0;
				rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
					    "[DPK_TRK] HW txagc offset mode\n");
			}

			if (txagc_rf && cur_ther)
				ini_diff = txagc_ofst + delta_ther[path];

			tmp = rtw89_phy_read32_mask(rtwdev,
						    R_P0_TXDPD + (path << 13),
						    B_P0_TXDPD);
			if (tmp == 0x0) {
				pwsf[0] = dpk->bp[path][kidx].pwsf +
					  txagc_bb_tp - txagc_bb + ini_diff;
				pwsf[1] = dpk->bp[path][kidx].pwsf +
					  txagc_bb_tp - txagc_bb + ini_diff;
			} else {
				pwsf[0] = dpk->bp[path][kidx].pwsf + ini_diff;
				pwsf[1] = dpk->bp[path][kidx].pwsf + ini_diff;
			}
		} else {
			pwsf[0] = (dpk->bp[path][kidx].pwsf + delta_ther[path]) & 0x1ff;
			pwsf[1] = (dpk->bp[path][kidx].pwsf + delta_ther[path]) & 0x1ff;
		}

		tmp = rtw89_phy_read32_mask(rtwdev, R_DPK_TRK, B_DPK_TRK_DIS);
		if (!tmp && txagc_rf) {
			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] New pwsf[0] / pwsf[1] = 0x%x / 0x%x\n",
				    pwsf[0], pwsf[1]);

			rtw89_phy_write32_mask(rtwdev,
					       R_DPD_BND + (path << 8) + (kidx << 2),
					       B_DPD_BND_0, pwsf[0]);
			rtw89_phy_write32_mask(rtwdev,
					       R_DPD_BND + (path << 8) + (kidx << 2),
					       B_DPD_BND_1, pwsf[1]);
		}
	}
}

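/* Decide the DPD gain scale: when ofdm_bkof + tx_scale >= 44 the DPD
 * backoff can be moved into the BB (dpk_gs = 0x7f, i.e. 0 dB backoff in
 * the DPD block); otherwise the reduced gain scale 0x5b is kept.
 */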
static void _set_dpd_backoff(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u8 tx_scale, ofdm_bkof, path, kpath;

	kpath = _kpath(rtwdev, phy);

	ofdm_bkof = rtw89_phy_read32_mask(rtwdev, R_DPD_BF + (phy << 13), B_DPD_BF_OFDM);
	tx_scale = rtw89_phy_read32_mask(rtwdev, R_DPD_BF + (phy << 13), B_DPD_BF_SCA);

	if (ofdm_bkof + tx_scale >= 44) {
		/* move dpd backoff to bb, and set dpd backoff to 0 */
		dpk->dpk_gs[phy] = 0x7f;
		for (path = 0; path < RF_PATH_NUM_8852B; path++) {
			if (!(kpath & BIT(path)))
				continue;

			rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8),
					       B_DPD_CFG, 0x7f7f7f);
			rtw89_debug(rtwdev, RTW89_DBG_RFK,
				    "[RFK] Set S%d DPD backoff to 0dB\n", path);
		}
	} else {
		dpk->dpk_gs[phy] = 0x5b;
	}
}


static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			     enum rtw89_rf_path path)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	enum rtw89_band band = chan->band_type;

	if (band == RTW89_BAND_2G)
		rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXG, 0x1);
	else
		rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXA, 0x1);
}

static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			  enum rtw89_rf_path path)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	enum rtw89_band band = chan->band_type;

	rtw89_rfk_parser(rtwdev, &rtw8852b_tssi_sys_defs_tbl);

	if (path == RF_PATH_A)
		rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
					 &rtw8852b_tssi_sys_a_defs_2g_tbl,
					 &rtw8852b_tssi_sys_a_defs_5g_tbl);
	else
		rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
					 &rtw8852b_tssi_sys_b_defs_2g_tbl,
					 &rtw8852b_tssi_sys_b_defs_5g_tbl);
}

static void _tssi_ini_txpwr_ctrl_bb(struct rtw89_dev *rtwdev,
				    enum rtw89_phy_idx phy,
				    enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852b_tssi_init_txpwr_defs_a_tbl,
				 &rtw8852b_tssi_init_txpwr_defs_b_tbl);
}

static void _tssi_ini_txpwr_ctrl_bb_he_tb(struct rtw89_dev *rtwdev,
					  enum rtw89_phy_idx phy,
					  enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852b_tssi_init_txpwr_he_tb_defs_a_tbl,
				 &rtw8852b_tssi_init_txpwr_he_tb_defs_b_tbl);
}

static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			  enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852b_tssi_dck_defs_a_tbl,
				 &rtw8852b_tssi_dck_defs_b_tbl);
}
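
/* Build the 64-entry thermal-offset table for one path. Entries 0..31
 * hold negated "down" delta-swing values and entries 63..32 hold "up"
 * delta-swing values, each clamped to the last table entry once
 * DELTA_SWINGIDX_SIZE is exhausted, then packed four s8 per 32-bit
 * register write. If no thermal value has been latched yet (0xff), the
 * table is zeroed and a mid-scale thermal code of 32 is programmed
 * instead.
 */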
static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				 enum rtw89_rf_path path)
{
#define RTW8852B_TSSI_GET_VAL(ptr, idx)			\
({							\
	s8 *__ptr = (ptr);				\
	u8 __idx = (idx), __i, __v;			\
	u32 __val = 0;					\
	for (__i = 0; __i < 4; __i++) {			\
		__v = (__ptr[__idx + __i]);		\
		__val |= (__v << (8 * __i));		\
	}						\
	__val;						\
})
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	u8 ch = chan->channel;
	u8 subband = chan->subband_type;
	const s8 *thm_up_a = NULL;
	const s8 *thm_down_a = NULL;
	const s8 *thm_up_b = NULL;
	const s8 *thm_down_b = NULL;
	u8 thermal = 0xff;
	s8 thm_ofst[64] = {0};
	u32 tmp = 0;
	u8 i, j;

	switch (subband) {
	default:
	case RTW89_CH_2G:
		thm_up_a = rtw89_8852b_trk_cfg.delta_swingidx_2ga_p;
		thm_down_a = rtw89_8852b_trk_cfg.delta_swingidx_2ga_n;
		thm_up_b = rtw89_8852b_trk_cfg.delta_swingidx_2gb_p;
		thm_down_b = rtw89_8852b_trk_cfg.delta_swingidx_2gb_n;
		break;
	case RTW89_CH_5G_BAND_1:
		thm_up_a = rtw89_8852b_trk_cfg.delta_swingidx_5ga_p[0];
		thm_down_a = rtw89_8852b_trk_cfg.delta_swingidx_5ga_n[0];
		thm_up_b = rtw89_8852b_trk_cfg.delta_swingidx_5gb_p[0];
		thm_down_b = rtw89_8852b_trk_cfg.delta_swingidx_5gb_n[0];
		break;
	case RTW89_CH_5G_BAND_3:
		thm_up_a = rtw89_8852b_trk_cfg.delta_swingidx_5ga_p[1];
		thm_down_a = rtw89_8852b_trk_cfg.delta_swingidx_5ga_n[1];
		thm_up_b = rtw89_8852b_trk_cfg.delta_swingidx_5gb_p[1];
		thm_down_b = rtw89_8852b_trk_cfg.delta_swingidx_5gb_n[1];
		break;
	case RTW89_CH_5G_BAND_4:
		thm_up_a = rtw89_8852b_trk_cfg.delta_swingidx_5ga_p[2];
		thm_down_a = rtw89_8852b_trk_cfg.delta_swingidx_5ga_n[2];
		thm_up_b = rtw89_8852b_trk_cfg.delta_swingidx_5gb_p[2];
		thm_down_b = rtw89_8852b_trk_cfg.delta_swingidx_5gb_n[2];
		break;
	}

	if (path == RF_PATH_A) {
		thermal = tssi_info->thermal[RF_PATH_A];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] ch=%d thermal_pathA=0x%x\n", ch, thermal);

		rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_DIS, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_TRK, 0x1);

		if (thermal == 0xff) {
			rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, 32);
			rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, 32);

			for (i = 0; i < 64; i += 4) {
				rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, 0x0);
				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    R_P0_TSSI_BASE + i, 0x0);
			}
		} else {
			rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, thermal);
			rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL,
					       thermal);

			i = 0;
			for (j = 0; j < 32; j++)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      -thm_down_a[i++] :
					      -thm_down_a[DELTA_SWINGIDX_SIZE - 1];

			i = 1;
			for (j = 63; j >= 32; j--)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      thm_up_a[i++] :
					      thm_up_a[DELTA_SWINGIDX_SIZE - 1];

			for (i = 0; i < 64; i += 4) {
				tmp = RTW8852B_TSSI_GET_VAL(thm_ofst, i);
				rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, tmp);

				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    0x5c00 + i, tmp);
			}
		}
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x0);
	} else {
		thermal = tssi_info->thermal[RF_PATH_B];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] ch=%d thermal_pathB=0x%x\n", ch, thermal);

		rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_DIS, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_TRK, 0x1);

		if (thermal == 0xff) {
			rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, 32);
			rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL, 32);

			for (i = 0; i < 64; i += 4) {
				rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, 0x0);
				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    0x7c00 + i, 0x0);
			}
		} else {
			rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, thermal);
			rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL,
					       thermal);

			i = 0;
			for (j = 0; j < 32; j++)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      -thm_down_b[i++] :
					      -thm_down_b[DELTA_SWINGIDX_SIZE - 1];

			i = 1;
			for (j = 63; j >= 32; j--)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      thm_up_b[i++] :
					      thm_up_b[DELTA_SWINGIDX_SIZE - 1];

			for (i = 0; i < 64; i += 4) {
				tmp = RTW8852B_TSSI_GET_VAL(thm_ofst, i);
				rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, tmp);

				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    0x7c00 + i, tmp);
			}
		}
		rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x0);
	}
#undef RTW8852B_TSSI_GET_VAL
}

static void _tssi_set_dac_gain_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				   enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852b_tssi_dac_gain_defs_a_tbl,
				 &rtw8852b_tssi_dac_gain_defs_b_tbl);
}

static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				enum rtw89_rf_path path)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	enum rtw89_band band = chan->band_type;

	if (path == RF_PATH_A)
		rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
					 &rtw8852b_tssi_slope_a_defs_2g_tbl,
					 &rtw8852b_tssi_slope_a_defs_5g_tbl);
	else
		rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
					 &rtw8852b_tssi_slope_b_defs_2g_tbl,
					 &rtw8852b_tssi_slope_b_defs_5g_tbl);
}
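
/* Pick the per-path TSSI alignment table by band segment: 2G, or 5G
 * channels 36-64, 100-144 and 149-177. "all" selects the full table,
 * otherwise the partial variant. Channels outside these ranges leave
 * tbl NULL and nothing is written.
 */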
static void _tssi_alignment_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				    enum rtw89_rf_path path, bool all)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	enum rtw89_band band = chan->band_type;
	const struct rtw89_rfk_tbl *tbl = NULL;
	u8 ch = chan->channel;

	if (path == RF_PATH_A) {
		if (band == RTW89_BAND_2G) {
			if (all)
				tbl = &rtw8852b_tssi_align_a_2g_all_defs_tbl;
			else
				tbl = &rtw8852b_tssi_align_a_2g_part_defs_tbl;
		} else if (ch >= 36 && ch <= 64) {
			if (all)
				tbl = &rtw8852b_tssi_align_a_5g1_all_defs_tbl;
			else
				tbl = &rtw8852b_tssi_align_a_5g1_part_defs_tbl;
		} else if (ch >= 100 && ch <= 144) {
			if (all)
				tbl = &rtw8852b_tssi_align_a_5g2_all_defs_tbl;
			else
				tbl = &rtw8852b_tssi_align_a_5g2_part_defs_tbl;
		} else if (ch >= 149 && ch <= 177) {
			if (all)
				tbl = &rtw8852b_tssi_align_a_5g3_all_defs_tbl;
			else
				tbl = &rtw8852b_tssi_align_a_5g3_part_defs_tbl;
		}
	} else {
		if (ch >= 1 && ch <= 14) {
			if (all)
				tbl = &rtw8852b_tssi_align_b_2g_all_defs_tbl;
			else
				tbl = &rtw8852b_tssi_align_b_2g_part_defs_tbl;
		} else if (ch >= 36 && ch <= 64) {
			if (all)
				tbl = &rtw8852b_tssi_align_b_5g1_all_defs_tbl;
			else
				tbl = &rtw8852b_tssi_align_b_5g1_part_defs_tbl;
		} else if (ch >= 100 && ch <= 144) {
			if (all)
				tbl = &rtw8852b_tssi_align_b_5g2_all_defs_tbl;
			else
				tbl = &rtw8852b_tssi_align_b_5g2_part_defs_tbl;
		} else if (ch >= 149 && ch <= 177) {
			if (all)
				tbl = &rtw8852b_tssi_align_b_5g3_all_defs_tbl;
			else
				tbl = &rtw8852b_tssi_align_b_5g3_part_defs_tbl;
		}
	}

	if (tbl)
		rtw89_rfk_parser(rtwdev, tbl);
}

static void _tssi_set_tssi_slope(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				 enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852b_tssi_slope_defs_a_tbl,
				 &rtw8852b_tssi_slope_defs_b_tbl);
}

static void _tssi_set_tssi_track(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				 enum rtw89_rf_path path)
{
	if (path == RF_PATH_A)
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSIC, B_P0_TSSIC_BYPASS, 0x0);
	else
		rtw89_phy_write32_mask(rtwdev, R_P1_TSSIC, B_P1_TSSIC_BYPASS, 0x0);
}

static void _tssi_set_txagc_offset_mv_avg(struct rtw89_dev *rtwdev,
					  enum rtw89_phy_idx phy,
					  enum rtw89_rf_path path)
{
	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "======>%s path=%d\n", __func__,
		    path);

	if (path == RF_PATH_A)
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_MIX, 0x010);
	else
		rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_RFCTM_DEL, 0x010);
}
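
/* Enable TSSI tracking on both paths: clear and re-arm the TSSI
 * averaging block, enable the RF tracking gain, and pulse the TXAGC
 * offset enable with a default offset of 0xc0 before flagging the path
 * as running in TSSI mode.
 */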
static void _tssi_enable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	u8 i;

	for (i = 0; i < RF_PATH_NUM_8852B; i++) {
		_tssi_set_tssi_track(rtwdev, phy, i);
		_tssi_set_txagc_offset_mv_avg(rtwdev, phy, i);

		if (i == RF_PATH_A) {
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG,
					       B_P0_TSSI_MV_CLR, 0x0);
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG,
					       B_P0_TSSI_EN, 0x0);
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG,
					       B_P0_TSSI_EN, 0x1);
			rtw89_write_rf(rtwdev, i, RR_TXGA_V1,
				       RR_TXGA_V1_TRK_EN, 0x1);
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
					       B_P0_TSSI_RFC, 0x3);
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
					       B_P0_TSSI_OFT, 0xc0);
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
					       B_P0_TSSI_OFT_EN, 0x0);
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
					       B_P0_TSSI_OFT_EN, 0x1);

			rtwdev->is_tssi_mode[RF_PATH_A] = true;
		} else {
			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG,
					       B_P1_TSSI_MV_CLR, 0x0);
			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG,
					       B_P1_TSSI_EN, 0x0);
			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG,
					       B_P1_TSSI_EN, 0x1);
			rtw89_write_rf(rtwdev, i, RR_TXGA_V1,
				       RR_TXGA_V1_TRK_EN, 0x1);
			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
					       B_P1_TSSI_RFC, 0x3);
			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
					       B_P1_TSSI_OFT, 0xc0);
			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
					       B_P1_TSSI_OFT_EN, 0x0);
			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
					       B_P1_TSSI_OFT_EN, 0x1);

			rtwdev->is_tssi_mode[RF_PATH_B] = true;
		}
	}
}

static void _tssi_disable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_RFC, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_CLR, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, B_P1_TSSI_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_RFC, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_CLR, 0x1);

	rtwdev->is_tssi_mode[RF_PATH_A] = false;
	rtwdev->is_tssi_mode[RF_PATH_B] = false;
}
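
/* Map a 2G channel (1-14) to one of six CCK TSSI DE groups; channels
 * outside that range fall back to group 0.
 */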
static u32 _tssi_get_cck_group(struct rtw89_dev *rtwdev, u8 ch)
{
	switch (ch) {
	case 1 ... 2:
		return 0;
	case 3 ... 5:
		return 1;
	case 6 ... 8:
		return 2;
	case 9 ... 11:
		return 3;
	case 12 ... 13:
		return 4;
	case 14:
		return 5;
	}

	return 0;
}
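
/* OFDM channels sitting between two calibration groups are tagged as
 * "extra" groups: BIT(31) is set on the lower group index, and the DE
 * is later taken as the average of that group and the next one. For
 * example, channel 41 maps to TSSI_EXTRA_GROUP(5), so its DE becomes
 * (group 5 + group 6) / 2.
 */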
#define TSSI_EXTRA_GROUP_BIT (BIT(31))
#define TSSI_EXTRA_GROUP(idx) (TSSI_EXTRA_GROUP_BIT | (idx))
#define IS_TSSI_EXTRA_GROUP(group) ((group) & TSSI_EXTRA_GROUP_BIT)
#define TSSI_EXTRA_GET_GROUP_IDX1(group) ((group) & ~TSSI_EXTRA_GROUP_BIT)
#define TSSI_EXTRA_GET_GROUP_IDX2(group) (TSSI_EXTRA_GET_GROUP_IDX1(group) + 1)

static u32 _tssi_get_ofdm_group(struct rtw89_dev *rtwdev, u8 ch)
{
	switch (ch) {
	case 1 ... 2:
		return 0;
	case 3 ... 5:
		return 1;
	case 6 ... 8:
		return 2;
	case 9 ... 11:
		return 3;
	case 12 ... 14:
		return 4;
	case 36 ... 40:
		return 5;
	case 41 ... 43:
		return TSSI_EXTRA_GROUP(5);
	case 44 ... 48:
		return 6;
	case 49 ... 51:
		return TSSI_EXTRA_GROUP(6);
	case 52 ... 56:
		return 7;
	case 57 ... 59:
		return TSSI_EXTRA_GROUP(7);
	case 60 ... 64:
		return 8;
	case 100 ... 104:
		return 9;
	case 105 ... 107:
		return TSSI_EXTRA_GROUP(9);
	case 108 ... 112:
		return 10;
	case 113 ... 115:
		return TSSI_EXTRA_GROUP(10);
	case 116 ... 120:
		return 11;
	case 121 ... 123:
		return TSSI_EXTRA_GROUP(11);
	case 124 ... 128:
		return 12;
	case 129 ... 131:
		return TSSI_EXTRA_GROUP(12);
	case 132 ... 136:
		return 13;
	case 137 ... 139:
		return TSSI_EXTRA_GROUP(13);
	case 140 ... 144:
		return 14;
	case 149 ... 153:
		return 15;
	case 154 ... 156:
		return TSSI_EXTRA_GROUP(15);
	case 157 ... 161:
		return 16;
	case 162 ... 164:
		return TSSI_EXTRA_GROUP(16);
	case 165 ... 169:
		return 17;
	case 170 ... 172:
		return TSSI_EXTRA_GROUP(17);
	case 173 ... 177:
		return 18;
	}

	return 0;
}

static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch)
{
	switch (ch) {
	case 1 ... 8:
		return 0;
	case 9 ... 14:
		return 1;
	case 36 ... 48:
		return 2;
	case 52 ... 64:
		return 3;
	case 100 ... 112:
		return 4;
	case 116 ... 128:
		return 5;
	case 132 ... 144:
		return 6;
	case 149 ... 177:
		return 7;
	}

	return 0;
}
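
/* Look up the OFDM DE for the current channel. For an extra group the
 * value is interpolated as the mean of the two neighbouring groups'
 * stored DEs; otherwise it is read directly from tssi_mcs[]. The trim
 * variant below does the same against tssi_trim[].
 */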
static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			    enum rtw89_rf_path path)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	u8 ch = chan->channel;
	u32 gidx, gidx_1st, gidx_2nd;
	s8 de_1st;
	s8 de_2nd;
	s8 val;

	gidx = _tssi_get_ofdm_group(rtwdev, ch);

	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
		    "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n", path, gidx);

	if (IS_TSSI_EXTRA_GROUP(gidx)) {
		gidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(gidx);
		gidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(gidx);
		de_1st = tssi_info->tssi_mcs[path][gidx_1st];
		de_2nd = tssi_info->tssi_mcs[path][gidx_2nd];
		val = (de_1st + de_2nd) / 2;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
			    path, val, de_1st, de_2nd);
	} else {
		val = tssi_info->tssi_mcs[path][gidx];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
	}

	return val;
}

static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				 enum rtw89_rf_path path)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	u8 ch = chan->channel;
	u32 tgidx, tgidx_1st, tgidx_2nd;
	s8 tde_1st;
	s8 tde_2nd;
	s8 val;

	tgidx = _tssi_get_trim_group(rtwdev, ch);

	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
		    "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
		    path, tgidx);

	if (IS_TSSI_EXTRA_GROUP(tgidx)) {
		tgidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
		tgidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
		tde_1st = tssi_info->tssi_trim[path][tgidx_1st];
		tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd];
		val = (tde_1st + tde_2nd) / 2;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
			    path, val, tde_1st, tde_2nd);
	} else {
		val = tssi_info->tssi_trim[path][tgidx];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
			    path, val);
	}

	return val;
}
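
/* Program the efuse-derived DE values into BB for every path: the CCK
 * DE (long and short preamble) and the OFDM/MCS DE for all bandwidth
 * variants, each offset by the per-channel trim value.
 */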
static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	u8 ch = chan->channel;
	u8 gidx;
	s8 ofdm_de;
	s8 trim_de;
	s32 val;
	u32 i;

	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n",
		    phy, ch);

	for (i = RF_PATH_A; i < RF_PATH_NUM_8852B; i++) {
		gidx = _tssi_get_cck_group(rtwdev, ch);
		trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
		val = tssi_info->tssi_cck[i][gidx] + trim_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d cck[%d]=0x%x trim=0x%x\n",
			    i, gidx, tssi_info->tssi_cck[i][gidx], trim_de);

		rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_long[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_short[i], _TSSI_DE_MASK, val);

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] Set TSSI CCK DE 0x%x[21:12]=0x%x\n",
			    _tssi_de_cck_long[i],
			    rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i],
						  _TSSI_DE_MASK));

		ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i);
		trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
		val = ofdm_de + trim_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs=0x%x trim=0x%x\n",
			    i, ofdm_de, trim_de);

		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_20m[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_40m[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m_80m[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_5m[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_10m[i], _TSSI_DE_MASK, val);

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] Set TSSI MCS DE 0x%x[21:12]=0x%x\n",
			    _tssi_de_mcs_20m[i],
			    rtw89_phy_read32_mask(rtwdev, _tssi_de_mcs_20m[i],
						  _TSSI_DE_MASK));
	}
}

static void _tssi_alimentk_dump_result(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[TSSI PA K]\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n"
		    "0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n",
		    R_TSSI_PA_K1 + (path << 13),
		    rtw89_phy_read32_mask(rtwdev, R_TSSI_PA_K1 + (path << 13), MASKDWORD),
		    R_TSSI_PA_K2 + (path << 13),
		    rtw89_phy_read32_mask(rtwdev, R_TSSI_PA_K2 + (path << 13), MASKDWORD),
		    R_P0_TSSI_ALIM1 + (path << 13),
		    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD),
		    R_P0_TSSI_ALIM3 + (path << 13),
		    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD),
		    R_TSSI_PA_K5 + (path << 13),
		    rtw89_phy_read32_mask(rtwdev, R_TSSI_PA_K5 + (path << 13), MASKDWORD),
		    R_P0_TSSI_ALIM2 + (path << 13),
		    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD),
		    R_P0_TSSI_ALIM4 + (path << 13),
		    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD),
		    R_TSSI_PA_K8 + (path << 13),
		    rtw89_phy_read32_mask(rtwdev, R_TSSI_PA_K8 + (path << 13), MASKDWORD));
}

static void _tssi_alimentk_done(struct rtw89_dev *rtwdev,
				enum rtw89_phy_idx phy, enum rtw89_rf_path path)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	u8 channel = chan->channel;
	u8 band;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "======>%s phy=%d path=%d\n", __func__, phy, path);

	if (channel >= 1 && channel <= 14)
		band = TSSI_ALIMK_2G;
	else if (channel >= 36 && channel <= 64)
		band = TSSI_ALIMK_5GL;
	else if (channel >= 100 && channel <= 144)
		band = TSSI_ALIMK_5GM;
	else if (channel >= 149 && channel <= 177)
		band = TSSI_ALIMK_5GH;
	else
		band = TSSI_ALIMK_2G;

	if (tssi_info->alignment_done[path][band]) {
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD,
				       tssi_info->alignment_value[path][band][0]);
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD,
				       tssi_info->alignment_value[path][band][1]);
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD,
				       tssi_info->alignment_value[path][band][2]);
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD,
				       tssi_info->alignment_value[path][band][3]);
	}

	_tssi_alimentk_dump_result(rtwdev, path);
}
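
/* Start or stop the PMAC packet TX used as the TSSI measurement
 * stimulus. On enable, the PLCP, TX path, RX path and TX power are
 * configured first; RF_ABCD is used as a sentinel meaning "leave the
 * RX path selection untouched" per the inline comment below.
 */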
static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			enum rtw89_rf_path path, u16 cnt, u16 period, s16 pwr_dbm,
			u8 enable)
{
	enum rtw89_rf_path_bit rx_path;

	if (path == RF_PATH_A)
		rx_path = RF_A;
	else if (path == RF_PATH_B)
		rx_path = RF_B;
	else if (path == RF_PATH_AB)
		rx_path = RF_AB;
	else
		rx_path = RF_ABCD; /* don't change path, but still set others */

	if (enable) {
		rtw8852b_bb_set_plcp_tx(rtwdev);
		rtw8852b_bb_cfg_tx_path(rtwdev, path);
		rtw8852b_bb_ctrl_rx_path(rtwdev, rx_path);
		rtw8852b_bb_set_power(rtwdev, pwr_dbm, phy);
	}

	rtw8852b_bb_set_pmac_pkt_tx(rtwdev, enable, cnt, period, 20, phy);
}

static void _tssi_backup_bb_registers(struct rtw89_dev *rtwdev,
				      enum rtw89_phy_idx phy, const u32 reg[],
				      u32 reg_backup[], u32 reg_num)
{
	u32 i;

	for (i = 0; i < reg_num; i++) {
		reg_backup[i] = rtw89_phy_read32_mask(rtwdev, reg[i], MASKDWORD);

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TSSI] Backup BB 0x%x = 0x%x\n", reg[i],
			    reg_backup[i]);
	}
}

static void _tssi_reload_bb_registers(struct rtw89_dev *rtwdev,
				      enum rtw89_phy_idx phy, const u32 reg[],
				      u32 reg_backup[], u32 reg_num)
{
	u32 i;

	for (i = 0; i < reg_num; i++) {
		rtw89_phy_write32_mask(rtwdev, reg[i], MASKDWORD, reg_backup[i]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TSSI] Reload BB 0x%x = 0x%x\n", reg[i],
			    reg_backup[i]);
	}
}

static u8 _tssi_ch_to_idx(struct rtw89_dev *rtwdev, u8 channel)
{
	u8 channel_index;

	if (channel >= 1 && channel <= 14)
		channel_index = channel - 1;
	else if (channel >= 36 && channel <= 64)
		channel_index = (channel - 36) / 2 + 14;
	else if (channel >= 100 && channel <= 144)
		channel_index = ((channel - 100) / 2) + 15 + 14;
	else if (channel >= 149 && channel <= 177)
		channel_index = ((channel - 149) / 2) + 38 + 14;
	else
		channel_index = 0;

	return channel_index;
}
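
/* Trigger HW TX at each probe power and poll (up to 100 iterations of
 * 30us) for the TSSI codeword-report ready bit. Returns false if any
 * report times out; the TX counter deltas are logged only for
 * debugging.
 */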
static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				enum rtw89_rf_path path, const s16 *power,
				u32 *tssi_cw_rpt)
{
	u32 tx_counter, tx_counter_tmp;
	const int retry = 100;
	u32 tmp;
	int j, k;

	for (j = 0; j < RTW8852B_TSSI_PATH_NR; j++) {
		rtw89_phy_write32_mask(rtwdev, _tssi_trigger[path], B_P0_TSSI_EN, 0x0);
		rtw89_phy_write32_mask(rtwdev, _tssi_trigger[path], B_P0_TSSI_EN, 0x1);

		tx_counter = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);

		tmp = rtw89_phy_read32_mask(rtwdev, _tssi_trigger[path], MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TSSI PA K] 0x%x = 0x%08x path=%d\n",
			    _tssi_trigger[path], tmp, path);

		if (j == 0)
			_tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], true);
		else
			_tssi_hw_tx(rtwdev, phy, RF_PATH_ABCD, 100, 5000, power[j], true);

		tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
		tx_counter_tmp -= tx_counter;

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TSSI PA K] First HWTXcounter=%d path=%d\n",
			    tx_counter_tmp, path);

		for (k = 0; k < retry; k++) {
			tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_rpt_addr[path],
						    B_TSSI_CWRPT_RDY);
			if (tmp)
				break;

			udelay(30);

			tx_counter_tmp =
				rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
			tx_counter_tmp -= tx_counter;

			rtw89_debug(rtwdev, RTW89_DBG_RFK,
				    "[TSSI PA K] Flow k = %d HWTXcounter=%d path=%d\n",
				    k, tx_counter_tmp, path);
		}

		if (k >= retry) {
			rtw89_debug(rtwdev, RTW89_DBG_RFK,
				    "[TSSI PA K] TSSI finish bit k > %d mp:100ms normal:30us path=%d\n",
				    k, path);

			_tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false);
			return false;
		}

		tssi_cw_rpt[j] =
			rtw89_phy_read32_mask(rtwdev, _tssi_cw_rpt_addr[path], B_TSSI_CWRPT);

		_tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false);

		tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
		tx_counter_tmp -= tx_counter;

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TSSI PA K] Final HWTXcounter=%d path=%d\n",
			    tx_counter_tmp, path);
	}

	return true;
}
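
/* TSSI alignment calibration for one path. If results for this channel
 * were captured before, just reload them. Otherwise back up the BB
 * state, measure TSSI codewords at the probe TX powers, derive the
 * alignment offsets relative to the default codewords, program the
 * ALIM1/ALIM2 fields, and cache the four ALIM register values per band
 * and per channel for later reload.
 */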
static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			   enum rtw89_rf_path path)
{
	static const u32 bb_reg[8] = {0x5820, 0x7820, 0x4978, 0x58e4,
				      0x78e4, 0x49c0, 0x0d18, 0x0d80};
	static const s16 power_2g[4] = {48, 20, 4, 4};
	static const s16 power_5g[4] = {48, 20, 4, 4};
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	s32 tssi_alim_offset_1, tssi_alim_offset_2, tssi_alim_offset_3;
	u32 tssi_cw_rpt[RTW8852B_TSSI_PATH_NR] = {0};
	u8 channel = chan->channel;
	u8 ch_idx = _tssi_ch_to_idx(rtwdev, channel);
	struct rtw8852b_bb_tssi_bak tssi_bak;
	s32 aliment_diff, tssi_cw_default;
	u32 start_time, finish_time;
	u32 bb_reg_backup[8] = {0};
	const s16 *power;
	u8 band;
	bool ok;
	u32 tmp;
	u8 j;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "======> %s channel=%d path=%d\n", __func__, channel,
		    path);

	if (tssi_info->check_backup_aligmk[path][ch_idx]) {
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD,
				       tssi_info->alignment_backup_by_ch[path][ch_idx][0]);
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD,
				       tssi_info->alignment_backup_by_ch[path][ch_idx][1]);
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD,
				       tssi_info->alignment_backup_by_ch[path][ch_idx][2]);
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD,
				       tssi_info->alignment_backup_by_ch[path][ch_idx][3]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "======> %s Reload TSSI Alignment !!!\n", __func__);
		_tssi_alimentk_dump_result(rtwdev, path);
		return;
	}

	start_time = ktime_get_ns();

	if (chan->band_type == RTW89_BAND_2G)
		power = power_2g;
	else
		power = power_5g;

	if (channel >= 1 && channel <= 14)
		band = TSSI_ALIMK_2G;
	else if (channel >= 36 && channel <= 64)
		band = TSSI_ALIMK_5GL;
	else if (channel >= 100 && channel <= 144)
		band = TSSI_ALIMK_5GM;
	else if (channel >= 149 && channel <= 177)
		band = TSSI_ALIMK_5GH;
	else
		band = TSSI_ALIMK_2G;

	rtw8852b_bb_backup_tssi(rtwdev, phy, &tssi_bak);
	_tssi_backup_bb_registers(rtwdev, phy, bb_reg, bb_reg_backup, ARRAY_SIZE(bb_reg_backup));

	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_AVG, 0x8);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, B_P1_TSSI_AVG, 0x8);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_AVG, 0x2);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_AVG, 0x2);

	ok = _tssi_get_cw_report(rtwdev, phy, path, power, tssi_cw_rpt);
	if (!ok)
		goto out;

	for (j = 0; j < RTW8852B_TSSI_PATH_NR; j++) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TSSI PA K] power[%d]=%d tssi_cw_rpt[%d]=%d\n", j,
			    power[j], j, tssi_cw_rpt[j]);
	}

	tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_default_addr[path][1],
				    _tssi_cw_default_mask[1]);
	tssi_cw_default = sign_extend32(tmp, 8);
	tssi_alim_offset_1 = tssi_cw_rpt[0] - ((power[0] - power[1]) * 2) -
			     tssi_cw_rpt[1] + tssi_cw_default;
	aliment_diff = tssi_alim_offset_1 - tssi_cw_default;

	tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_default_addr[path][2],
				    _tssi_cw_default_mask[2]);
	tssi_cw_default = sign_extend32(tmp, 8);
	tssi_alim_offset_2 = tssi_cw_default + aliment_diff;

	tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_default_addr[path][3],
				    _tssi_cw_default_mask[3]);
	tssi_cw_default = sign_extend32(tmp, 8);
	tssi_alim_offset_3 = tssi_cw_default + aliment_diff;

	if (path == RF_PATH_A) {
		tmp = FIELD_PREP(B_P1_TSSI_ALIM11, tssi_alim_offset_1) |
		      FIELD_PREP(B_P1_TSSI_ALIM12, tssi_alim_offset_2) |
		      FIELD_PREP(B_P1_TSSI_ALIM13, tssi_alim_offset_3);

		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM1, tmp);
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM2, B_P0_TSSI_ALIM2, tmp);

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TSSI PA K] tssi_alim_offset = 0x%x 0x%x 0x%x 0x%x\n",
			    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3, B_P0_TSSI_ALIM31),
			    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM11),
			    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM12),
			    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM13));
	} else {
		tmp = FIELD_PREP(B_P1_TSSI_ALIM11, tssi_alim_offset_1) |
		      FIELD_PREP(B_P1_TSSI_ALIM12, tssi_alim_offset_2) |
		      FIELD_PREP(B_P1_TSSI_ALIM13, tssi_alim_offset_3);

		rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM1, tmp);
		rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_ALIM2, B_P1_TSSI_ALIM2, tmp);

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TSSI PA K] tssi_alim_offset = 0x%x 0x%x 0x%x 0x%x\n",
			    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM3, B_P1_TSSI_ALIM31),
			    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM11),
			    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM12),
			    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM13));
	}

	tssi_info->alignment_done[path][band] = true;
	tssi_info->alignment_value[path][band][0] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD);
	tssi_info->alignment_value[path][band][1] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD);
	tssi_info->alignment_value[path][band][2] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD);
	tssi_info->alignment_value[path][band][3] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD);

	tssi_info->check_backup_aligmk[path][ch_idx] = true;
	tssi_info->alignment_backup_by_ch[path][ch_idx][0] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD);
	tssi_info->alignment_backup_by_ch[path][ch_idx][1] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD);
	tssi_info->alignment_backup_by_ch[path][ch_idx][2] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD);
	tssi_info->alignment_backup_by_ch[path][ch_idx][3] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][0], 0x%x = 0x%08x\n",
		    path, band, R_P0_TSSI_ALIM1 + (path << 13),
		    tssi_info->alignment_value[path][band][0]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][1], 0x%x = 0x%08x\n",
		    path, band, R_P0_TSSI_ALIM3 + (path << 13),
		    tssi_info->alignment_value[path][band][1]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][2], 0x%x = 0x%08x\n",
		    path, band, R_P0_TSSI_ALIM2 + (path << 13),
		    tssi_info->alignment_value[path][band][2]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][3], 0x%x = 0x%08x\n",
		    path, band, R_P0_TSSI_ALIM4 + (path << 13),
		    tssi_info->alignment_value[path][band][3]);

out:
	_tssi_reload_bb_registers(rtwdev, phy, bb_reg, bb_reg_backup, ARRAY_SIZE(bb_reg_backup));
	rtw8852b_bb_restore_tssi(rtwdev, phy, &tssi_bak);
	rtw8852b_bb_tx_mode_switch(rtwdev, phy, 0);

	finish_time = ktime_get_ns();
	tssi_info->tssi_alimk_time += finish_time - start_time;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[TSSI PA K] %s processing time = %d ms\n", __func__,
		    tssi_info->tssi_alimk_time);
}
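
/* Chip-level entry points below: thin wrappers that notify BTC, pause
 * scheduled TX where required, and run the corresponding calibration.
 */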
void rtw8852b_dpk_init(struct rtw89_dev *rtwdev)
{
	_set_dpd_backoff(rtwdev, RTW89_PHY_0);
}

void rtw8852b_rck(struct rtw89_dev *rtwdev)
{
	u8 path;

	for (path = 0; path < RF_PATH_NUM_8852B; path++)
		_rck(rtwdev, path);
}

void rtw8852b_dack(struct rtw89_dev *rtwdev)
{
	u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START);
	_dac_cal(rtwdev, false);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP);
}

void rtw8852b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
	u32 tx_en;

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
	rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));

	_iqk_init(rtwdev);
	_iqk(rtwdev, phy_idx, false);

	rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
}

void rtw8852b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
	u32 tx_en;

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START);
	rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));

	_rx_dck(rtwdev, phy_idx);

	rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP);
}

void rtw8852b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
	u32 tx_en;

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
	rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));

	rtwdev->dpk.is_dpk_enable = true;
	rtwdev->dpk.is_dpk_reload_en = false;
	_dpk(rtwdev, phy_idx, false);

	rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
}

void rtw8852b_dpk_track(struct rtw89_dev *rtwdev)
{
	_dpk_track(rtwdev);
}
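
/* Full TSSI bring-up: disable TSSI, run the per-path init sequence
 * (RF/system settings, thermal table, DAC gain, slope, alignment), then
 * re-enable TSSI and apply the efuse DE values. TX is paused around the
 * optional alignment-K HW TX step.
 */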
void rtw8852b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en)
{
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_AB);
	u32 tx_en;
	u8 i;

	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n", __func__, phy);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);

	_tssi_disable(rtwdev, phy);

	for (i = RF_PATH_A; i < RF_PATH_NUM_8852B; i++) {
		_tssi_rf_setting(rtwdev, phy, i);
		_tssi_set_sys(rtwdev, phy, i);
		_tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i);
		_tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i);
		_tssi_set_dck(rtwdev, phy, i);
		_tssi_set_tmeter_tbl(rtwdev, phy, i);
		_tssi_set_dac_gain_tbl(rtwdev, phy, i);
		_tssi_slope_cal_org(rtwdev, phy, i);
		_tssi_alignment_default(rtwdev, phy, i, true);
		_tssi_set_tssi_slope(rtwdev, phy, i);

		rtw89_chip_stop_sch_tx(rtwdev, phy, &tx_en, RTW89_SCH_TX_SEL_ALL);
		_tmac_tx_pause(rtwdev, phy, true);
		if (hwtx_en)
			_tssi_alimentk(rtwdev, phy, i);
		_tmac_tx_pause(rtwdev, phy, false);
		rtw89_chip_resume_sch_tx(rtwdev, phy, tx_en);
	}

	_tssi_enable(rtwdev, phy);
	_tssi_set_efuse_to_de(rtwdev, phy);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}

void rtw8852b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	u8 channel = chan->channel;
	u8 band;
	u32 i;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "======>%s phy=%d channel=%d\n", __func__, phy, channel);

	if (channel >= 1 && channel <= 14)
		band = TSSI_ALIMK_2G;
	else if (channel >= 36 && channel <= 64)
		band = TSSI_ALIMK_5GL;
	else if (channel >= 100 && channel <= 144)
		band = TSSI_ALIMK_5GM;
	else if (channel >= 149 && channel <= 177)
		band = TSSI_ALIMK_5GH;
	else
		band = TSSI_ALIMK_2G;

	_tssi_disable(rtwdev, phy);

	for (i = RF_PATH_A; i < RTW8852B_TSSI_PATH_NR; i++) {
		_tssi_rf_setting(rtwdev, phy, i);
		_tssi_set_sys(rtwdev, phy, i);
		_tssi_set_tmeter_tbl(rtwdev, phy, i);

		if (tssi_info->alignment_done[i][band])
			_tssi_alimentk_done(rtwdev, phy, i);
		else
			_tssi_alignment_default(rtwdev, phy, i, true);
	}

	_tssi_enable(rtwdev, phy);
	_tssi_set_efuse_to_de(rtwdev, phy);
}

static void rtw8852b_tssi_default_txagc(struct rtw89_dev *rtwdev,
					enum rtw89_phy_idx phy, bool enable)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	u8 channel = chan->channel;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "======> %s ch=%d\n",
		    __func__, channel);

	if (enable) {
		if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B])
			rtw8852b_tssi(rtwdev, phy, true);
		return;
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "======>%s 1 SCAN_END Set 0x5818[7:0]=0x%x 0x7818[7:0]=0x%x\n",
		    __func__,
		    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT),
		    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT));

	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT, 0xc0);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT, 0xc0);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x1);

	_tssi_alimentk_done(rtwdev, phy, RF_PATH_A);
	_tssi_alimentk_done(rtwdev, phy, RF_PATH_B);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "======>%s 2 SCAN_END Set 0x5818[7:0]=0x%x 0x7818[7:0]=0x%x\n",
		    __func__,
		    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT),
		    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT));

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "======> %s SCAN_END\n", __func__);
}

void rtw8852b_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
			       enum rtw89_phy_idx phy_idx)
{
	if (scan_start)
		rtw8852b_tssi_default_txagc(rtwdev, phy_idx, true);
	else
		rtw8852b_tssi_default_txagc(rtwdev, phy_idx, false);
}
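
/* Program the bandwidth field of RF register 0x18 for one path. The
 * dav flag selects between RR_CFGCH and its RR_CFGCH_V1 shadow; both
 * are written in turn by _ctrl_bw() below, apparently so the two
 * register copies stay consistent.
 */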
static void _bw_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
			enum rtw89_bandwidth bw, bool dav)
{
	u32 rf_reg18;
	u32 reg18_addr = dav ? RR_CFGCH : RR_CFGCH_V1;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===> %s\n", __func__);

	rf_reg18 = rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK);
	if (rf_reg18 == INV_RF_DATA) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RFK]Invalid RF_0x18 for Path-%d\n", path);
		return;
	}
	rf_reg18 &= ~RR_CFGCH_BW;

	switch (bw) {
	case RTW89_CHANNEL_WIDTH_5:
	case RTW89_CHANNEL_WIDTH_10:
	case RTW89_CHANNEL_WIDTH_20:
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_20M);
		break;
	case RTW89_CHANNEL_WIDTH_40:
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_40M);
		break;
	case RTW89_CHANNEL_WIDTH_80:
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_80M);
		break;
	default:
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]Fail to set CH\n");
	}

	rf_reg18 &= ~(RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH | RR_CFGCH_BCN |
		      RR_CFGCH_BW2) & RFREG_MASK;
	rf_reg18 |= RR_CFGCH_BW2;
	rtw89_write_rf(rtwdev, path, reg18_addr, RFREG_MASK, rf_reg18);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK] set %x at path%d, %x =0x%x\n",
		    bw, path, reg18_addr,
		    rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK));
}

static void _ctrl_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		     enum rtw89_bandwidth bw)
{
	_bw_setting(rtwdev, RF_PATH_A, bw, true);
	_bw_setting(rtwdev, RF_PATH_B, bw, true);
	_bw_setting(rtwdev, RF_PATH_A, bw, false);
	_bw_setting(rtwdev, RF_PATH_B, bw, false);
}
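
/* Write the channel word to S0 RF 0x18 with the LDO selected, then poll
 * RR_LPF_BUSY (1us step, 1000us budget) for the synthesizer to settle.
 * Returns true on timeout.
 */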
static bool _set_s0_arfc18(struct rtw89_dev *rtwdev, u32 val)
{
	u32 bak;
	u32 tmp;
	int ret;

	bak = rtw89_read_rf(rtwdev, RF_PATH_A, RR_LDO, RFREG_MASK);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_LDO, RR_LDO_SEL, 0x1);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK, val);

	ret = read_poll_timeout_atomic(rtw89_read_rf, tmp, tmp == 0, 1, 1000,
				       false, rtwdev, RF_PATH_A, RR_LPF, RR_LPF_BUSY);
	if (ret)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]LCK timeout\n");

	rtw89_write_rf(rtwdev, RF_PATH_A, RR_LDO, RFREG_MASK, bak);

	return !!ret;
}
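
/* Escalating LO lock recovery: if SYN lock is not reported, first reset
 * the MMD, then re-trigger the RF 0x18 channel write, and finally
 * power-cycle the synthesizer before one more channel write, dumping
 * VCO/SYNFB state for debug.
 */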
static void _lck_check(struct rtw89_dev *rtwdev)
{
	u32 tmp;

	if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]SYN MMD reset\n");

		rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_EN, 0x1);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_SYN, 0x0);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_SYN, 0x1);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_EN, 0x0);
	}

	udelay(10);

	if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]re-set RF 0x18\n");

		rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1);
		tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
		_set_s0_arfc18(rtwdev, tmp);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0);
	}

	if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]SYN off/on\n");

		tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_POW, RFREG_MASK);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RFREG_MASK, tmp);
		tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_SX, RFREG_MASK);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_SX, RFREG_MASK, tmp);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_SYNLUT, RR_SYNLUT_MOD, 0x1);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x0);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x3);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_SYNLUT, RR_SYNLUT_MOD, 0x0);

		rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1);
		tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
		_set_s0_arfc18(rtwdev, tmp);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0);

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]0xb2=%x, 0xc5=%x\n",
			    rtw89_read_rf(rtwdev, RF_PATH_A, RR_VCO, RFREG_MASK),
			    rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RFREG_MASK));
	}
}

static void _set_ch(struct rtw89_dev *rtwdev, u32 val)
{
	bool timeout;

	timeout = _set_s0_arfc18(rtwdev, val);
	if (!timeout)
		_lck_check(rtwdev);
}
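
/* Program the channel and band fields of RF 0x18 for one path. For S0
 * with dav set, the write goes through _set_ch() so the lock result is
 * verified; RR_LCKST_BIN is then pulsed 0 -> 1, apparently to restart
 * lock detection.
 */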
static void _ch_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
			u8 central_ch, bool dav)
{
	u32 reg18_addr = dav ? RR_CFGCH : RR_CFGCH_V1;
	bool is_2g_ch = central_ch <= 14;
	u32 rf_reg18;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===> %s\n", __func__);

	rf_reg18 = rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK);
	rf_reg18 &= ~(RR_CFGCH_BAND1 | RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH |
		      RR_CFGCH_BCN | RR_CFGCH_BAND0 | RR_CFGCH_CH);
	rf_reg18 |= FIELD_PREP(RR_CFGCH_CH, central_ch);

	if (!is_2g_ch)
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_5G) |
			    FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_5G);

	rf_reg18 &= ~(RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH | RR_CFGCH_BCN |
		      RR_CFGCH_BW2) & RFREG_MASK;
	rf_reg18 |= RR_CFGCH_BW2;

	if (path == RF_PATH_A && dav)
		_set_ch(rtwdev, rf_reg18);
	else
		rtw89_write_rf(rtwdev, path, reg18_addr, RFREG_MASK, rf_reg18);

	rtw89_write_rf(rtwdev, path, RR_LCKST, RR_LCKST_BIN, 0);
	rtw89_write_rf(rtwdev, path, RR_LCKST, RR_LCKST_BIN, 1);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[RFK]CH: %d for Path-%d, reg0x%x = 0x%x\n",
		    central_ch, path, reg18_addr,
		    rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK));
}

static void _ctrl_ch(struct rtw89_dev *rtwdev, u8 central_ch)
{
	_ch_setting(rtwdev, RF_PATH_A, central_ch, true);
	_ch_setting(rtwdev, RF_PATH_B, central_ch, true);
	_ch_setting(rtwdev, RF_PATH_A, central_ch, false);
	_ch_setting(rtwdev, RF_PATH_B, central_ch, false);
}
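
/* Set the RX baseband filter bandwidth through the RF LUT: open the LUT
 * write enable, select entry 0x12, write the bandwidth code (0x1b, 0x13
 * and 0xb for 20/40/80 MHz, 0x3 otherwise), then close the LUT again.
 */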
static void _set_rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_bandwidth bw,
			 enum rtw89_rf_path path)
{
	rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x1);
	rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M2, 0x12);

	if (bw == RTW89_CHANNEL_WIDTH_20)
		rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x1b);
	else if (bw == RTW89_CHANNEL_WIDTH_40)
		rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x13);
	else if (bw == RTW89_CHANNEL_WIDTH_80)
		rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0xb);
	else
		rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x3);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK] set S%d RXBB BW 0x3F = 0x%x\n", path,
		    rtw89_read_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB));

	rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x0);
}

static void _rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		     enum rtw89_bandwidth bw)
{
	u8 kpath, path;

	kpath = _kpath(rtwdev, phy);

	for (path = 0; path < RF_PATH_NUM_8852B; path++) {
		if (!(kpath & BIT(path)))
			continue;

		_set_rxbb_bw(rtwdev, bw, path);
	}
}

static void rtw8852b_ctrl_bw_ch(struct rtw89_dev *rtwdev,
				enum rtw89_phy_idx phy, u8 central_ch,
				enum rtw89_band band, enum rtw89_bandwidth bw)
{
	_ctrl_ch(rtwdev, central_ch);
	_ctrl_bw(rtwdev, phy, bw);
	_rxbb_bw(rtwdev, phy, bw);
}

void rtw8852b_set_channel_rf(struct rtw89_dev *rtwdev,
			     const struct rtw89_chan *chan,
			     enum rtw89_phy_idx phy_idx)
{
	rtw8852b_ctrl_bw_ch(rtwdev, phy_idx, chan->channel, chan->band_type,
			    chan->band_width);
}